[kernel] r18158 - in dists/sid/linux-2.6/debian: . patches/features/all/rt patches/series
Uwe Kleine-König
ukleinek-guest at alioth.debian.org
Fri Oct 7 13:59:18 UTC 2011
Author: ukleinek-guest
Date: Fri Oct 7 13:59:16 2011
New Revision: 18158
Log:
[amd64] Update rt featureset to 3.0.6-rt17
Added:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt17.patch
- copied, changed from r18152, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt16.patch
dists/sid/linux-2.6/debian/patches/series/6-extra
Deleted:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt16.patch
dists/sid/linux-2.6/debian/patches/series/5-extra
Modified:
dists/sid/linux-2.6/debian/changelog
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Fri Oct 7 04:52:36 2011 (r18157)
+++ dists/sid/linux-2.6/debian/changelog Fri Oct 7 13:59:16 2011 (r18158)
@@ -1,3 +1,9 @@
+linux-2.6 (3.0.0-6) UNRELEASED; urgency=low
+
+ * [amd64] Update rt featureset to 3.0.6-rt17
+
+ -- Uwe Kleine-König <u.kleine-koenig at pengutronix.de> Fri, 07 Oct 2011 15:48:22 +0200
+
linux-2.6 (3.0.0-5) unstable; urgency=low
[ Ben Hutchings ]
Copied and modified: dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt17.patch (from r18152, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt16.patch)
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt16.patch Thu Oct 6 02:26:56 2011 (r18152, copy source)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt17.patch Fri Oct 7 13:59:16 2011 (r18158)
@@ -1681,43 +1681,6 @@
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
-@@ -2825,7 +2822,7 @@ static void __sched_fork(struct task_str
- void sched_fork(struct task_struct *p)
- {
- unsigned long flags;
-- int cpu = get_cpu();
-+ int cpu;
-
- __sched_fork(p);
- /*
-@@ -2865,6 +2862,7 @@ void sched_fork(struct task_struct *p)
- if (!rt_prio(p->prio))
- p->sched_class = &fair_sched_class;
-
-+ cpu = get_cpu();
- if (p->sched_class->task_fork)
- p->sched_class->task_fork(p);
-
-@@ -2876,8 +2874,9 @@ void sched_fork(struct task_struct *p)
- * Silence PROVE_RCU.
- */
- raw_spin_lock_irqsave(&p->pi_lock, flags);
-- set_task_cpu(p, cpu);
-+ set_task_cpu(p, smp_processor_id());
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ put_cpu();
-
- #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
- if (likely(sched_info_on()))
-@@ -2893,8 +2892,6 @@ void sched_fork(struct task_struct *p)
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
- #endif
--
-- put_cpu();
- }
-
- /*
@@ -3060,8 +3057,12 @@ static void finish_task_switch(struct rq
finish_lock_switch(rq, prev);
@@ -1736,7 +1699,7 @@
schedstat_inc(this_rq(), sched_count);
}
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
@@ -2508,22 +2471,22 @@
/* sanity check nr_running */
WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
-@@ -1279,8 +1277,14 @@ __acquires(&gcwq->lock)
- * it races with cpu hotunplug operation. Verify
- * against GCWQ_DISASSOCIATED.
- */
-- if (!(gcwq->flags & GCWQ_DISASSOCIATED))
-+ if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
+@@ -1287,8 +1285,14 @@ __acquires(&gcwq->lock)
+ return false;
+ if (task_cpu(task) == gcwq->cpu &&
+ cpumask_equal(&current->cpus_allowed,
+- get_cpu_mask(gcwq->cpu)))
++ get_cpu_mask(gcwq->cpu))) {
+ /*
+ * Since we're binding to a particular cpu and need to
+ * stay there for correctness, mark us PF_THREAD_BOUND.
+ */
+ task->flags |= PF_THREAD_BOUND;
- set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+ return true;
+ }
+ spin_unlock_irq(&gcwq->lock);
- spin_lock_irq(&gcwq->lock);
- if (gcwq->flags & GCWQ_DISASSOCIATED)
+ /*
@@ -1302,20 +1306,15 @@ __acquires(&gcwq->lock)
}
}
@@ -3578,7018 +3541,7129 @@
}
set_cpu_online(cpu, false);
-Index: linux-2.6/kernel/watchdog.c
+Index: linux-2.6/include/linux/jbd.h
===================================================================
---- linux-2.6.orig/kernel/watchdog.c
-+++ linux-2.6/kernel/watchdog.c
-@@ -208,6 +208,8 @@ static struct perf_event_attr wd_hw_attr
- .disabled = 1,
- };
+--- linux-2.6.orig/include/linux/jbd.h
++++ linux-2.6/include/linux/jbd.h
+@@ -244,6 +244,7 @@ typedef struct journal_superblock_s
-+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-+
- /* Callback function for perf event subsystem */
- static void watchdog_overflow_callback(struct perf_event *event, int nmi,
- struct perf_sample_data *data,
-@@ -234,10 +236,19 @@ static void watchdog_overflow_callback(s
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
+ #include <linux/fs.h>
+ #include <linux/sched.h>
++#include <linux/jbd_common.h>
-- if (hardlockup_panic)
-+ /*
-+ * If early-printk is enabled then make sure we do not
-+ * lock up in printk() and kill console logging:
-+ */
-+ printk_kill();
-+
-+ if (hardlockup_panic) {
- panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-- else
-+ } else {
-+ raw_spin_lock(&watchdog_output_lock);
- WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-+ raw_spin_unlock(&watchdog_output_lock);
-+ }
+ #define J_ASSERT(assert) BUG_ON(!(assert))
- __this_cpu_write(hard_watchdog_warn, true);
- return;
-@@ -320,7 +331,7 @@ static enum hrtimer_restart watchdog_tim
- */
- static int watchdog(void *unused)
- {
-- static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+@@ -270,69 +271,6 @@ typedef struct journal_superblock_s
+ #define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
+ #endif
- sched_setscheduler(current, SCHED_FIFO, &param);
-@@ -349,7 +360,8 @@ static int watchdog(void *unused)
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
+-enum jbd_state_bits {
+- BH_JBD /* Has an attached ext3 journal_head */
+- = BH_PrivateStart,
+- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
+- BH_Freed, /* Has been freed (truncated) */
+- BH_Revoked, /* Has been revoked from the log */
+- BH_RevokeValid, /* Revoked flag is valid */
+- BH_JBDDirty, /* Is dirty but journaled */
+- BH_State, /* Pins most journal_head state */
+- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
+-};
-
-+ param.sched_priority = 0;
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
- return 0;
- }
-
-@@ -422,6 +434,7 @@ static void watchdog_prepare_cpu(int cpu
- WARN_ON(per_cpu(softlockup_watchdog, cpu));
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-+ hrtimer->irqsafe = 1;
- }
+-BUFFER_FNS(JBD, jbd)
+-BUFFER_FNS(JWrite, jwrite)
+-BUFFER_FNS(JBDDirty, jbddirty)
+-TAS_BUFFER_FNS(JBDDirty, jbddirty)
+-BUFFER_FNS(Revoked, revoked)
+-TAS_BUFFER_FNS(Revoked, revoked)
+-BUFFER_FNS(RevokeValid, revokevalid)
+-TAS_BUFFER_FNS(RevokeValid, revokevalid)
+-BUFFER_FNS(Freed, freed)
+-
+-static inline struct buffer_head *jh2bh(struct journal_head *jh)
+-{
+- return jh->b_bh;
+-}
+-
+-static inline struct journal_head *bh2jh(struct buffer_head *bh)
+-{
+- return bh->b_private;
+-}
+-
+-static inline void jbd_lock_bh_state(struct buffer_head *bh)
+-{
+- bit_spin_lock(BH_State, &bh->b_state);
+-}
+-
+-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+-{
+- return bit_spin_trylock(BH_State, &bh->b_state);
+-}
+-
+-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+-{
+- return bit_spin_is_locked(BH_State, &bh->b_state);
+-}
+-
+-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+-{
+- bit_spin_unlock(BH_State, &bh->b_state);
+-}
+-
+-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+-{
+- bit_spin_lock(BH_JournalHead, &bh->b_state);
+-}
+-
+-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+-{
+- bit_spin_unlock(BH_JournalHead, &bh->b_state);
+-}
+-
+ struct jbd_revoke_table_s;
- static int watchdog_enable(int cpu)
-Index: linux-2.6/kernel/time/clocksource.c
+ /**
+Index: linux-2.6/include/linux/jbd2.h
===================================================================
---- linux-2.6.orig/kernel/time/clocksource.c
-+++ linux-2.6/kernel/time/clocksource.c
-@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
- static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
- static DEFINE_SPINLOCK(watchdog_lock);
- static int watchdog_running;
-+static atomic_t watchdog_reset_pending;
-
- static int clocksource_watchdog_kthread(void *data);
- static void __clocksource_change_rating(struct clocksource *cs, int rating);
-@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigne
- struct clocksource *cs;
- cycle_t csnow, wdnow;
- int64_t wd_nsec, cs_nsec;
-- int next_cpu;
-+ int next_cpu, reset_pending;
-
- spin_lock(&watchdog_lock);
- if (!watchdog_running)
- goto out;
-
-+ reset_pending = atomic_read(&watchdog_reset_pending);
-+
- list_for_each_entry(cs, &watchdog_list, wd_list) {
-
- /* Clocksource already marked unstable? */
-@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigne
- local_irq_enable();
+--- linux-2.6.orig/include/linux/jbd2.h
++++ linux-2.6/include/linux/jbd2.h
+@@ -275,6 +275,7 @@ typedef struct journal_superblock_s
- /* Clocksource initialized ? */
-- if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
-+ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
-+ atomic_read(&watchdog_reset_pending)) {
- cs->flags |= CLOCK_SOURCE_WATCHDOG;
- cs->wd_last = wdnow;
- cs->cs_last = csnow;
-@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigne
- cs->cs_last = csnow;
- cs->wd_last = wdnow;
+ #include <linux/fs.h>
+ #include <linux/sched.h>
++#include <linux/jbd_common.h>
-+ if (atomic_read(&watchdog_reset_pending))
-+ continue;
-+
- /* Check the deviation from the watchdog clocksource. */
-- if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
-+ if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
- clocksource_unstable(cs, cs_nsec - wd_nsec);
- continue;
- }
-@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigne
- }
+ #define J_ASSERT(assert) BUG_ON(!(assert))
- /*
-+ * We only clear the watchdog_reset_pending, when we did a
-+ * full cycle through all clocksources.
-+ */
-+ if (reset_pending)
-+ atomic_dec(&watchdog_reset_pending);
-+
-+ /*
- * Cycle through CPUs to check if the CPUs stay synchronized
- * to each other.
- */
-@@ -344,23 +358,7 @@ static inline void clocksource_reset_wat
+@@ -302,70 +303,6 @@ typedef struct journal_superblock_s
+ #define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
+ #endif
- static void clocksource_resume_watchdog(void)
- {
-- unsigned long flags;
+-enum jbd_state_bits {
+- BH_JBD /* Has an attached ext3 journal_head */
+- = BH_PrivateStart,
+- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
+- BH_Freed, /* Has been freed (truncated) */
+- BH_Revoked, /* Has been revoked from the log */
+- BH_RevokeValid, /* Revoked flag is valid */
+- BH_JBDDirty, /* Is dirty but journaled */
+- BH_State, /* Pins most journal_head state */
+- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
+- BH_JBDPrivateStart, /* First bit available for private use by FS */
+-};
-
-- /*
-- * We use trylock here to avoid a potential dead lock when
-- * kgdb calls this code after the kernel has been stopped with
-- * watchdog_lock held. When watchdog_lock is held we just
-- * return and accept, that the watchdog might trigger and mark
-- * the monitored clock source (usually TSC) unstable.
-- *
-- * This does not affect the other caller clocksource_resume()
-- * because at this point the kernel is UP, interrupts are
-- * disabled and nothing can hold watchdog_lock.
-- */
-- if (!spin_trylock_irqsave(&watchdog_lock, flags))
-- return;
-- clocksource_reset_watchdog();
-- spin_unlock_irqrestore(&watchdog_lock, flags);
-+ atomic_inc(&watchdog_reset_pending);
- }
-
- static void clocksource_enqueue_watchdog(struct clocksource *cs)
-Index: linux-2.6/kernel/rtmutex-debug.c
-===================================================================
---- linux-2.6.orig/kernel/rtmutex-debug.c
-+++ linux-2.6/kernel/rtmutex-debug.c
-@@ -29,61 +29,6 @@
-
- #include "rtmutex_common.h"
-
--# define TRACE_WARN_ON(x) WARN_ON(x)
--# define TRACE_BUG_ON(x) BUG_ON(x)
+-BUFFER_FNS(JBD, jbd)
+-BUFFER_FNS(JWrite, jwrite)
+-BUFFER_FNS(JBDDirty, jbddirty)
+-TAS_BUFFER_FNS(JBDDirty, jbddirty)
+-BUFFER_FNS(Revoked, revoked)
+-TAS_BUFFER_FNS(Revoked, revoked)
+-BUFFER_FNS(RevokeValid, revokevalid)
+-TAS_BUFFER_FNS(RevokeValid, revokevalid)
+-BUFFER_FNS(Freed, freed)
-
--# define TRACE_OFF() \
--do { \
-- if (rt_trace_on) { \
-- rt_trace_on = 0; \
-- console_verbose(); \
-- if (raw_spin_is_locked(&current->pi_lock)) \
-- raw_spin_unlock(&current->pi_lock); \
-- } \
--} while (0)
+-static inline struct buffer_head *jh2bh(struct journal_head *jh)
+-{
+- return jh->b_bh;
+-}
-
--# define TRACE_OFF_NOLOCK() \
--do { \
-- if (rt_trace_on) { \
-- rt_trace_on = 0; \
-- console_verbose(); \
-- } \
--} while (0)
+-static inline struct journal_head *bh2jh(struct buffer_head *bh)
+-{
+- return bh->b_private;
+-}
-
--# define TRACE_BUG_LOCKED() \
--do { \
-- TRACE_OFF(); \
-- BUG(); \
--} while (0)
+-static inline void jbd_lock_bh_state(struct buffer_head *bh)
+-{
+- bit_spin_lock(BH_State, &bh->b_state);
+-}
-
--# define TRACE_WARN_ON_LOCKED(c) \
--do { \
-- if (unlikely(c)) { \
-- TRACE_OFF(); \
-- WARN_ON(1); \
-- } \
--} while (0)
+-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+-{
+- return bit_spin_trylock(BH_State, &bh->b_state);
+-}
-
--# define TRACE_BUG_ON_LOCKED(c) \
--do { \
-- if (unlikely(c)) \
-- TRACE_BUG_LOCKED(); \
--} while (0)
+-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+-{
+- return bit_spin_is_locked(BH_State, &bh->b_state);
+-}
-
--#ifdef CONFIG_SMP
--# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
--#else
--# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
--#endif
+-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+-{
+- bit_spin_unlock(BH_State, &bh->b_state);
+-}
-
--/*
-- * deadlock detection flag. We turn it off when we detect
-- * the first problem because we dont want to recurse back
-- * into the tracing code when doing error printk or
-- * executing a BUG():
-- */
--static int rt_trace_on = 1;
+-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+-{
+- bit_spin_lock(BH_JournalHead, &bh->b_state);
+-}
-
- static void printk_task(struct task_struct *p)
+-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+-{
+- bit_spin_unlock(BH_JournalHead, &bh->b_state);
+-}
+-
+ /* Flags in jbd_inode->i_flags */
+ #define __JI_COMMIT_RUNNING 0
+ /* Commit of the inode data in progress. We use this flag to protect us from
+Index: linux-2.6/include/linux/jbd_common.h
+===================================================================
+--- /dev/null
++++ linux-2.6/include/linux/jbd_common.h
+@@ -0,0 +1,92 @@
++#ifndef _LINUX_JBD_STATE_H
++#define _LINUX_JBD_STATE_H
++
++enum jbd_state_bits {
++ BH_JBD /* Has an attached ext3 journal_head */
++ = BH_PrivateStart,
++ BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
++ BH_Freed, /* Has been freed (truncated) */
++ BH_Revoked, /* Has been revoked from the log */
++ BH_RevokeValid, /* Revoked flag is valid */
++ BH_JBDDirty, /* Is dirty but journaled */
++ BH_State, /* Pins most journal_head state */
++ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
++ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
++ BH_JBDPrivateStart, /* First bit available for private use by FS */
++};
++
++BUFFER_FNS(JBD, jbd)
++BUFFER_FNS(JWrite, jwrite)
++BUFFER_FNS(JBDDirty, jbddirty)
++TAS_BUFFER_FNS(JBDDirty, jbddirty)
++BUFFER_FNS(Revoked, revoked)
++TAS_BUFFER_FNS(Revoked, revoked)
++BUFFER_FNS(RevokeValid, revokevalid)
++TAS_BUFFER_FNS(RevokeValid, revokevalid)
++BUFFER_FNS(Freed, freed)
++
++static inline struct buffer_head *jh2bh(struct journal_head *jh)
++{
++ return jh->b_bh;
++}
++
++static inline struct journal_head *bh2jh(struct buffer_head *bh)
++{
++ return bh->b_private;
++}
++
++static inline void jbd_lock_bh_state(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_lock(BH_State, &bh->b_state);
++#else
++ spin_lock(&bh->b_state_lock);
++#endif
++}
++
++static inline int jbd_trylock_bh_state(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ return bit_spin_trylock(BH_State, &bh->b_state);
++#else
++ return spin_trylock(&bh->b_state_lock);
++#endif
++}
++
++static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ return bit_spin_is_locked(BH_State, &bh->b_state);
++#else
++ return spin_is_locked(&bh->b_state_lock);
++#endif
++}
++
++static inline void jbd_unlock_bh_state(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_unlock(BH_State, &bh->b_state);
++#else
++ spin_unlock(&bh->b_state_lock);
++#endif
++}
++
++static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_lock(BH_JournalHead, &bh->b_state);
++#else
++ spin_lock(&bh->b_journal_head_lock);
++#endif
++}
++
++static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_unlock(BH_JournalHead, &bh->b_state);
++#else
++ spin_unlock(&bh->b_journal_head_lock);
++#endif
++}
++
++#endif
+Index: linux-2.6/kernel/sched_rt.c
+===================================================================
+--- linux-2.6.orig/kernel/sched_rt.c
++++ linux-2.6/kernel/sched_rt.c
+@@ -536,6 +536,9 @@ static int balance_runtime(struct rt_rq
{
- if (p)
-@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex
+ int more = 0;
- void rt_mutex_debug_task_free(struct task_struct *task)
- {
-- WARN_ON(!plist_head_empty(&task->pi_waiters));
-- WARN_ON(task->pi_blocked_on);
-+ DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
-+ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
- }
++ if (!sched_feat(RT_RUNTIME_SHARE))
++ return more;
++
+ if (rt_rq->rt_time > rt_rq->rt_runtime) {
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ more = do_balance_runtime(rt_rq);
+@@ -631,6 +634,7 @@ static int sched_rt_runtime_exceeded(str
- /*
-@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect,
+ if (rt_rq->rt_time > runtime) {
+ rt_rq->rt_throttled = 1;
++ printk_once(KERN_WARNING "sched: RT throttling activated\n");
+ if (rt_rq_throttled(rt_rq)) {
+ sched_rt_rq_dequeue(rt_rq);
+ return 1;
+@@ -1038,7 +1042,7 @@ select_task_rq_rt(struct task_struct *p,
+ */
+ if (curr && unlikely(rt_task(curr)) &&
+ (curr->rt.nr_cpus_allowed < 2 ||
+- curr->prio < p->prio) &&
++ curr->prio <= p->prio) &&
+ (p->rt.nr_cpus_allowed > 1)) {
+ int target = find_lowest_rq(p);
+
+@@ -1186,7 +1190,7 @@ static void deactivate_task(struct rq *r
+ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
- struct task_struct *task;
+ if (!task_running(rq, p) &&
+- (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
++ (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
+ (p->rt.nr_cpus_allowed > 1))
+ return 1;
+ return 0;
+@@ -1331,7 +1335,7 @@ static struct rq *find_lock_lowest_rq(st
+ */
+ if (unlikely(task_rq(task) != rq ||
+ !cpumask_test_cpu(lowest_rq->cpu,
+- &task->cpus_allowed) ||
++ tsk_cpus_allowed(task)) ||
+ task_running(rq, task) ||
+ !task->on_rq)) {
-- if (!rt_trace_on || detect || !act_waiter)
-+ if (!debug_locks || detect || !act_waiter)
- return;
+@@ -1569,7 +1573,7 @@ static void task_woken_rt(struct rq *rq,
+ p->rt.nr_cpus_allowed > 1 &&
+ rt_task(rq->curr) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+- rq->curr->prio < p->prio))
++ rq->curr->prio <= p->prio))
+ push_rt_tasks(rq);
+ }
- task = rt_mutex_owner(act_waiter->lock);
-@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struc
- {
- struct task_struct *task;
+@@ -1614,9 +1618,6 @@ static void set_cpus_allowed_rt(struct t
-- if (!waiter->deadlock_lock || !rt_trace_on)
-+ if (!waiter->deadlock_lock || !debug_locks)
- return;
-
- rcu_read_lock();
-@@ -149,7 +94,10 @@ void debug_rt_mutex_print_deadlock(struc
- return;
+ update_rt_migration(&rq->rt);
}
+-
+- cpumask_copy(&p->cpus_allowed, new_mask);
+- p->rt.nr_cpus_allowed = weight;
+ }
-- TRACE_OFF_NOLOCK();
-+ if (!debug_locks_off()) {
-+ rcu_read_unlock();
-+ return;
-+ }
+ /* Assumes rq->lock is held */
+Index: linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c
++++ linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+@@ -178,7 +178,7 @@ static irqreturn_t mpc85xx_8259_cascade_
- printk("\n============================================\n");
- printk( "[ BUG: circular locking deadlock detected! ]\n");
-@@ -180,7 +128,6 @@ void debug_rt_mutex_print_deadlock(struc
+ static struct irqaction mpc85xxcds_8259_irqaction = {
+ .handler = mpc85xx_8259_cascade_action,
+- .flags = IRQF_SHARED,
++ .flags = IRQF_SHARED | IRQF_NO_THREAD,
+ .name = "8259 cascade",
+ };
+ #endif /* PPC_I8259 */
+Index: linux-2.6/arch/powerpc/platforms/wsp/opb_pic.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/platforms/wsp/opb_pic.c
++++ linux-2.6/arch/powerpc/platforms/wsp/opb_pic.c
+@@ -320,7 +320,8 @@ void __init opb_pic_init(void)
+ }
- printk("[ turning off deadlock detection."
- "Please report this trace. ]\n\n");
-- local_irq_disable();
- }
+ /* Attach opb interrupt handler to new virtual IRQ */
+- rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
++ rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
++ "OPB LS Cascade", opb);
+ if (rc) {
+ printk("opb: request_irq failed: %d\n", rc);
+ continue;
+Index: linux-2.6/arch/powerpc/kernel/smp.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/kernel/smp.c
++++ linux-2.6/arch/powerpc/kernel/smp.c
+@@ -170,7 +170,7 @@ int smp_request_message_ipi(int virq, in
+ return 1;
+ }
+ #endif
+- err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
++ err = request_irq(virq, smp_ipi_action[msg], IRQF_NO_THREAD|IRQF_PERCPU,
+ smp_ipi_name[msg], 0);
+ WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
+ virq, smp_ipi_name[msg], err);
+Index: linux-2.6/arch/powerpc/platforms/powermac/smp.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/platforms/powermac/smp.c
++++ linux-2.6/arch/powerpc/platforms/powermac/smp.c
+@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(voi
+
+ if (psurge_secondary_virq)
+ rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+- IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
++ IRQF_NO_THREAD|IRQF_PERCPU, "IPI", NULL);
+
+ if (rc)
+ pr_err("Failed to setup secondary cpu IPI\n");
+@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(in
+
+ static struct irqaction psurge_irqaction = {
+ .handler = psurge_ipi_intr,
+- .flags = IRQF_DISABLED|IRQF_PERCPU,
++ .flags = IRQF_NO_THREAD|IRQF_PERCPU,
+ .name = "primary IPI",
+ };
- void debug_rt_mutex_lock(struct rt_mutex *lock)
-@@ -189,7 +136,7 @@ void debug_rt_mutex_lock(struct rt_mutex
+Index: linux-2.6/arch/powerpc/sysdev/xics/xics-common.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/sysdev/xics/xics-common.c
++++ linux-2.6/arch/powerpc/sysdev/xics/xics-common.c
+@@ -134,11 +134,11 @@ static void xics_request_ipi(void)
+ BUG_ON(ipi == NO_IRQ);
- void debug_rt_mutex_unlock(struct rt_mutex *lock)
- {
-- TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
-+ DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
+ /*
+- * IPIs are marked IRQF_DISABLED as they must run with irqs
+- * disabled, and PERCPU. The handler was set in map.
++ * IPIs are marked PERCPU and also IRQF_NO_THREAD as they must
++ * run in hard interrupt context. The handler was set in map.
+ */
+ BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+- IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
++ IRQF_NO_THREAD|IRQF_PERCPU, "IPI", NULL));
}
- void
-@@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute
+ int __init xics_smp_probe(void)
+Index: linux-2.6/arch/powerpc/Kconfig
+===================================================================
+--- linux-2.6.orig/arch/powerpc/Kconfig
++++ linux-2.6/arch/powerpc/Kconfig
+@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
- void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
- {
-- TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
-+ DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
- }
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
- void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
-@@ -213,8 +160,8 @@ void debug_rt_mutex_init_waiter(struct r
- void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
- {
- put_pid(waiter->deadlock_task_pid);
-- TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
-- TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
-+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- memset(waiter, 0x22, sizeof(*waiter));
- }
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
-Index: linux-2.6/include/linux/kprobes.h
-===================================================================
---- linux-2.6.orig/include/linux/kprobes.h
-+++ linux-2.6/include/linux/kprobes.h
-@@ -181,7 +181,7 @@ struct kretprobe {
- int nmissed;
- size_t data_size;
- struct hlist_head free_instances;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- };
+ config GENERIC_LOCKBREAK
+ bool
+@@ -134,6 +135,7 @@ config PPC
+ select GENERIC_IRQ_SHOW_LEVEL
+ select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_SYSCALL_TRACEPOINTS
++ select IRQ_FORCED_THREADING
- struct kretprobe_instance {
-Index: linux-2.6/kernel/kprobes.c
+ config EARLY_PRINTK
+ bool
+@@ -271,7 +273,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/time/Kconfig
+ source kernel/Kconfig.hz
+Index: linux-2.6/net/netfilter/ipvs/ip_vs_ctl.c
===================================================================
---- linux-2.6.orig/kernel/kprobes.c
-+++ linux-2.6/kernel/kprobes.c
-@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
- static DEFINE_MUTEX(kprobe_mutex);
- static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
- static struct {
-- spinlock_t lock ____cacheline_aligned_in_smp;
-+ raw_spinlock_t lock ____cacheline_aligned_in_smp;
- } kretprobe_table_locks[KPROBE_TABLE_SIZE];
+--- linux-2.6.orig/net/netfilter/ipvs/ip_vs_ctl.c
++++ linux-2.6/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -3679,7 +3679,7 @@ int __net_init __ip_vs_control_init(stru
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
--static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
-+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
- {
- return &(kretprobe_table_locks[hash].lock);
- }
-@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kr
- hlist_del(&ri->hlist);
- INIT_HLIST_NODE(&ri->hlist);
- if (likely(rp)) {
-- spin_lock(&rp->lock);
-+ raw_spin_lock(&rp->lock);
- hlist_add_head(&ri->hlist, &rp->free_instances);
-- spin_unlock(&rp->lock);
-+ raw_spin_unlock(&rp->lock);
- } else
- /* Unregistering */
- hlist_add_head(&ri->hlist, head);
-@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struc
- __acquires(hlist_lock)
- {
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-- spinlock_t *hlist_lock;
-+ raw_spinlock_t *hlist_lock;
+- ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
++ rwlock_init(&ipvs->rs_lock);
- *head = &kretprobe_inst_table[hash];
- hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_lock_irqsave(hlist_lock, *flags);
-+ raw_spin_lock_irqsave(hlist_lock, *flags);
- }
+ /* Initialize rs_table */
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+Index: linux-2.6/kernel/watchdog.c
+===================================================================
+--- linux-2.6.orig/kernel/watchdog.c
++++ linux-2.6/kernel/watchdog.c
+@@ -208,6 +208,8 @@ static struct perf_event_attr wd_hw_attr
+ .disabled = 1,
+ };
- static void __kprobes kretprobe_table_lock(unsigned long hash,
- unsigned long *flags)
- __acquires(hlist_lock)
- {
-- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_lock_irqsave(hlist_lock, *flags);
-+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-+ raw_spin_lock_irqsave(hlist_lock, *flags);
- }
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ /* Callback function for perf event subsystem */
+ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
+ struct perf_sample_data *data,
+@@ -234,10 +236,19 @@ static void watchdog_overflow_callback(s
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
- void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
-@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(str
- __releases(hlist_lock)
+- if (hardlockup_panic)
++ /*
++ * If early-printk is enabled then make sure we do not
++ * lock up in printk() and kill console logging:
++ */
++ printk_kill();
++
++ if (hardlockup_panic) {
+ panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+- else
++ } else {
++ raw_spin_lock(&watchdog_output_lock);
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
++ raw_spin_unlock(&watchdog_output_lock);
++ }
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
+@@ -320,7 +331,7 @@ static enum hrtimer_restart watchdog_tim
+ */
+ static int watchdog(void *unused)
{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-- spinlock_t *hlist_lock;
-+ raw_spinlock_t *hlist_lock;
+- static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
- hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_unlock_irqrestore(hlist_lock, *flags);
-+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
 sched_setscheduler(current, SCHED_FIFO, &param);
+@@ -349,7 +360,8 @@ static int watchdog(void *unused)
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+-
++ param.sched_priority = 0;
++ sched_setscheduler(current, SCHED_NORMAL, &param);
+ return 0;
}
- static void __kprobes kretprobe_table_unlock(unsigned long hash,
- unsigned long *flags)
- __releases(hlist_lock)
- {
-- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_unlock_irqrestore(hlist_lock, *flags);
-+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
+@@ -422,6 +434,7 @@ static void watchdog_prepare_cpu(int cpu
+ WARN_ON(per_cpu(softlockup_watchdog, cpu));
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
}
- /*
-@@ -1650,12 +1650,12 @@ static int __kprobes pre_handler_kretpro
+ static int watchdog_enable(int cpu)
+Index: linux-2.6/kernel/time/clocksource.c
+===================================================================
+--- linux-2.6.orig/kernel/time/clocksource.c
++++ linux-2.6/kernel/time/clocksource.c
+@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
+ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
++static atomic_t watchdog_reset_pending;
- /*TODO: consider to only swap the RA after the last pre_handler fired */
- hash = hash_ptr(current, KPROBE_HASH_BITS);
-- spin_lock_irqsave(&rp->lock, flags);
-+ raw_spin_lock_irqsave(&rp->lock, flags);
- if (!hlist_empty(&rp->free_instances)) {
- ri = hlist_entry(rp->free_instances.first,
- struct kretprobe_instance, hlist);
- hlist_del(&ri->hlist);
-- spin_unlock_irqrestore(&rp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rp->lock, flags);
+ static int clocksource_watchdog_kthread(void *data);
+ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigne
+ struct clocksource *cs;
+ cycle_t csnow, wdnow;
+ int64_t wd_nsec, cs_nsec;
+- int next_cpu;
++ int next_cpu, reset_pending;
- ri->rp = rp;
- ri->task = current;
-@@ -1672,7 +1672,7 @@ static int __kprobes pre_handler_kretpro
- kretprobe_table_unlock(hash, &flags);
- } else {
- rp->nmissed++;
-- spin_unlock_irqrestore(&rp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rp->lock, flags);
- }
- return 0;
- }
-@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct
- rp->maxactive = num_possible_cpus();
- #endif
- }
-- spin_lock_init(&rp->lock);
-+ raw_spin_lock_init(&rp->lock);
- INIT_HLIST_HEAD(&rp->free_instances);
- for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance) +
-@@ -1946,7 +1946,7 @@ static int __init init_kprobes(void)
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
- INIT_HLIST_HEAD(&kprobe_table[i]);
- INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-- spin_lock_init(&(kretprobe_table_locks[i].lock));
-+ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
+ spin_lock(&watchdog_lock);
+ if (!watchdog_running)
+ goto out;
+
++ reset_pending = atomic_read(&watchdog_reset_pending);
++
+ list_for_each_entry(cs, &watchdog_list, wd_list) {
+
+ /* Clocksource already marked unstable? */
+@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigne
+ local_irq_enable();
+
+ /* Clocksource initialized ? */
+- if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
++ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
++ atomic_read(&watchdog_reset_pending)) {
+ cs->flags |= CLOCK_SOURCE_WATCHDOG;
+ cs->wd_last = wdnow;
+ cs->cs_last = csnow;
+@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigne
+ cs->cs_last = csnow;
+ cs->wd_last = wdnow;
+
++ if (atomic_read(&watchdog_reset_pending))
++ continue;
++
+ /* Check the deviation from the watchdog clocksource. */
+- if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
++ if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+ clocksource_unstable(cs, cs_nsec - wd_nsec);
+ continue;
+ }
+@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigne
}
/*
-Index: linux-2.6/include/linux/percpu_counter.h
-===================================================================
---- linux-2.6.orig/include/linux/percpu_counter.h
-+++ linux-2.6/include/linux/percpu_counter.h
-@@ -16,7 +16,7 @@
- #ifdef CONFIG_SMP
++ * We only clear the watchdog_reset_pending, when we did a
++ * full cycle through all clocksources.
++ */
++ if (reset_pending)
++ atomic_dec(&watchdog_reset_pending);
++
++ /*
+ * Cycle through CPUs to check if the CPUs stay synchronized
+ * to each other.
+ */
+@@ -344,23 +358,7 @@ static inline void clocksource_reset_wat
- struct percpu_counter {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- s64 count;
- #ifdef CONFIG_HOTPLUG_CPU
- struct list_head list; /* All percpu_counters are on a list */
-Index: linux-2.6/lib/percpu_counter.c
+ static void clocksource_resume_watchdog(void)
+ {
+- unsigned long flags;
+-
+- /*
+- * We use trylock here to avoid a potential dead lock when
+- * kgdb calls this code after the kernel has been stopped with
+- * watchdog_lock held. When watchdog_lock is held we just
+- * return and accept, that the watchdog might trigger and mark
+- * the monitored clock source (usually TSC) unstable.
+- *
+- * This does not affect the other caller clocksource_resume()
+- * because at this point the kernel is UP, interrupts are
+- * disabled and nothing can hold watchdog_lock.
+- */
+- if (!spin_trylock_irqsave(&watchdog_lock, flags))
+- return;
+- clocksource_reset_watchdog();
+- spin_unlock_irqrestore(&watchdog_lock, flags);
++ atomic_inc(&watchdog_reset_pending);
+ }
+
+ static void clocksource_enqueue_watchdog(struct clocksource *cs)
+Index: linux-2.6/kernel/rtmutex-debug.c
===================================================================
---- linux-2.6.orig/lib/percpu_counter.c
-+++ linux-2.6/lib/percpu_counter.c
-@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_co
+--- linux-2.6.orig/kernel/rtmutex-debug.c
++++ linux-2.6/kernel/rtmutex-debug.c
+@@ -29,61 +29,6 @@
+
+ #include "rtmutex_common.h"
+
+-# define TRACE_WARN_ON(x) WARN_ON(x)
+-# define TRACE_BUG_ON(x) BUG_ON(x)
+-
+-# define TRACE_OFF() \
+-do { \
+- if (rt_trace_on) { \
+- rt_trace_on = 0; \
+- console_verbose(); \
+- if (raw_spin_is_locked(&current->pi_lock)) \
+- raw_spin_unlock(&current->pi_lock); \
+- } \
+-} while (0)
+-
+-# define TRACE_OFF_NOLOCK() \
+-do { \
+- if (rt_trace_on) { \
+- rt_trace_on = 0; \
+- console_verbose(); \
+- } \
+-} while (0)
+-
+-# define TRACE_BUG_LOCKED() \
+-do { \
+- TRACE_OFF(); \
+- BUG(); \
+-} while (0)
+-
+-# define TRACE_WARN_ON_LOCKED(c) \
+-do { \
+- if (unlikely(c)) { \
+- TRACE_OFF(); \
+- WARN_ON(1); \
+- } \
+-} while (0)
+-
+-# define TRACE_BUG_ON_LOCKED(c) \
+-do { \
+- if (unlikely(c)) \
+- TRACE_BUG_LOCKED(); \
+-} while (0)
+-
+-#ifdef CONFIG_SMP
+-# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
+-#else
+-# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
+-#endif
+-
+-/*
+- * deadlock detection flag. We turn it off when we detect
+- * the first problem because we dont want to recurse back
+- * into the tracing code when doing error printk or
+- * executing a BUG():
+- */
+-static int rt_trace_on = 1;
+-
+ static void printk_task(struct task_struct *p)
{
- int cpu;
+ if (p)
+@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- for_each_possible_cpu(cpu) {
- s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
- *pcount = 0;
- }
- fbc->count = amount;
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
+ void rt_mutex_debug_task_free(struct task_struct *task)
+ {
+- WARN_ON(!plist_head_empty(&task->pi_waiters));
+- WARN_ON(task->pi_blocked_on);
++ DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
++ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
- EXPORT_SYMBOL(percpu_counter_set);
-@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_
- preempt_disable();
- count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- fbc->count += count;
- __this_cpu_write(*fbc->counters, 0);
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
- } else {
- __this_cpu_write(*fbc->counters, count);
- }
-@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_c
- s64 ret;
- int cpu;
+ /*
+@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect,
+ {
+ struct task_struct *task;
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- ret = fbc->count;
- for_each_online_cpu(cpu) {
- s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
- ret += *pcount;
- }
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
- return ret;
- }
- EXPORT_SYMBOL(__percpu_counter_sum);
-@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
- int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
- struct lock_class_key *key)
+- if (!rt_trace_on || detect || !act_waiter)
++ if (!debug_locks || detect || !act_waiter)
+ return;
+
+ task = rt_mutex_owner(act_waiter->lock);
+@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struc
{
-- spin_lock_init(&fbc->lock);
-+ raw_spin_lock_init(&fbc->lock);
- lockdep_set_class(&fbc->lock, key);
- fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
-@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotc
- s32 *pcount;
- unsigned long flags;
+ struct task_struct *task;
-- spin_lock_irqsave(&fbc->lock, flags);
-+ raw_spin_lock_irqsave(&fbc->lock, flags);
- pcount = per_cpu_ptr(fbc->counters, cpu);
- fbc->count += *pcount;
- *pcount = 0;
-- spin_unlock_irqrestore(&fbc->lock, flags);
-+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
- }
- mutex_unlock(&percpu_counters_lock);
- #endif
-Index: linux-2.6/kernel/cgroup.c
-===================================================================
---- linux-2.6.orig/kernel/cgroup.c
-+++ linux-2.6/kernel/cgroup.c
-@@ -263,7 +263,7 @@ list_for_each_entry(_root, &roots, root_
- /* the list of cgroups eligible for automatic release. Protected by
- * release_list_lock */
- static LIST_HEAD(release_list);
--static DEFINE_SPINLOCK(release_list_lock);
-+static DEFINE_RAW_SPINLOCK(release_list_lock);
- static void cgroup_release_agent(struct work_struct *work);
- static DECLARE_WORK(release_agent_work, cgroup_release_agent);
- static void check_for_release(struct cgroup *cgrp);
-@@ -4010,11 +4010,11 @@ again:
- finish_wait(&cgroup_rmdir_waitq, &wait);
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
-
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- set_bit(CGRP_REMOVED, &cgrp->flags);
- if (!list_empty(&cgrp->release_list))
- list_del_init(&cgrp->release_list);
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
+- if (!waiter->deadlock_lock || !rt_trace_on)
++ if (!waiter->deadlock_lock || !debug_locks)
+ return;
- cgroup_lock_hierarchy(cgrp->root);
- /* delete this cgroup from parent->children */
-@@ -4667,13 +4667,13 @@ static void check_for_release(struct cgr
- * already queued for a userspace notification, queue
- * it now */
- int need_schedule_work = 0;
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- if (!cgroup_is_removed(cgrp) &&
- list_empty(&cgrp->release_list)) {
- list_add(&cgrp->release_list, &release_list);
- need_schedule_work = 1;
- }
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- if (need_schedule_work)
- schedule_work(&release_agent_work);
- }
-@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct
- {
- BUG_ON(work != &release_agent_work);
- mutex_lock(&cgroup_mutex);
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- while (!list_empty(&release_list)) {
- char *argv[3], *envp[3];
- int i;
-@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct
- struct cgroup,
- release_list);
- list_del_init(&cgrp->release_list);
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pathbuf)
- goto continue_free;
-@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct
- continue_free:
- kfree(pathbuf);
- kfree(agentbuf);
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
+ rcu_read_lock();
+@@ -149,7 +94,10 @@ void debug_rt_mutex_print_deadlock(struc
+ return;
}
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- mutex_unlock(&cgroup_mutex);
- }
-Index: linux-2.6/include/linux/proportions.h
-===================================================================
---- linux-2.6.orig/include/linux/proportions.h
-+++ linux-2.6/include/linux/proportions.h
-@@ -58,7 +58,7 @@ struct prop_local_percpu {
- */
- int shift;
- unsigned long period;
-- spinlock_t lock; /* protect the snapshot state */
-+ raw_spinlock_t lock; /* protect the snapshot state */
- };
+- TRACE_OFF_NOLOCK();
++ if (!debug_locks_off()) {
++ rcu_read_unlock();
++ return;
++ }
- int prop_local_init_percpu(struct prop_local_percpu *pl);
-@@ -106,11 +106,11 @@ struct prop_local_single {
- */
- unsigned long period;
- int shift;
-- spinlock_t lock; /* protect the snapshot state */
-+ raw_spinlock_t lock; /* protect the snapshot state */
- };
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+@@ -180,7 +128,6 @@ void debug_rt_mutex_print_deadlock(struc
- #define INIT_PROP_LOCAL_SINGLE(name) \
--{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
-+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ printk("[ turning off deadlock detection."
+ "Please report this trace. ]\n\n");
+- local_irq_disable();
}
- int prop_local_init_single(struct prop_local_single *pl);
-Index: linux-2.6/lib/proportions.c
-===================================================================
---- linux-2.6.orig/lib/proportions.c
-+++ linux-2.6/lib/proportions.c
-@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigne
+ void debug_rt_mutex_lock(struct rt_mutex *lock)
+@@ -189,7 +136,7 @@ void debug_rt_mutex_lock(struct rt_mutex
- int prop_local_init_percpu(struct prop_local_percpu *pl)
+ void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
-- spin_lock_init(&pl->lock);
-+ raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
-@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global
- if (pl->period == global_period)
- return;
+- TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
++ DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
+ }
-- spin_lock_irqsave(&pl->lock, flags);
-+ raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
+ void
+@@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute
- /*
-@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global
- percpu_counter_set(&pl->events, 0);
+ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+- TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
++ DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
+ }
- pl->period = global_period;
-- spin_unlock_irqrestore(&pl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+@@ -213,8 +160,8 @@ void debug_rt_mutex_init_waiter(struct r
+ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+ {
+ put_pid(waiter->deadlock_task_pid);
+- TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
+- TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
++ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
++ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ memset(waiter, 0x22, sizeof(*waiter));
}
- /*
-@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_de
+Index: linux-2.6/include/linux/kprobes.h
+===================================================================
+--- linux-2.6.orig/include/linux/kprobes.h
++++ linux-2.6/include/linux/kprobes.h
+@@ -181,7 +181,7 @@ struct kretprobe {
+ int nmissed;
+ size_t data_size;
+ struct hlist_head free_instances;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ };
- int prop_local_init_single(struct prop_local_single *pl)
+ struct kretprobe_instance {
+Index: linux-2.6/kernel/kprobes.c
+===================================================================
+--- linux-2.6.orig/kernel/kprobes.c
++++ linux-2.6/kernel/kprobes.c
+@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
+ static DEFINE_MUTEX(kprobe_mutex);
+ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+ static struct {
+- spinlock_t lock ____cacheline_aligned_in_smp;
++ raw_spinlock_t lock ____cacheline_aligned_in_smp;
+ } kretprobe_table_locks[KPROBE_TABLE_SIZE];
+
+-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
++static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
-- spin_lock_init(&pl->lock);
-+ raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- pl->events = 0;
-@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global
- if (pl->period == global_period)
- return;
+ return &(kretprobe_table_locks[hash].lock);
+ }
+@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kr
+ hlist_del(&ri->hlist);
+ INIT_HLIST_NODE(&ri->hlist);
+ if (likely(rp)) {
+- spin_lock(&rp->lock);
++ raw_spin_lock(&rp->lock);
+ hlist_add_head(&ri->hlist, &rp->free_instances);
+- spin_unlock(&rp->lock);
++ raw_spin_unlock(&rp->lock);
+ } else
+ /* Unregistering */
+ hlist_add_head(&ri->hlist, head);
+@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struc
+ __acquires(hlist_lock)
+ {
+ unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
+- spinlock_t *hlist_lock;
++ raw_spinlock_t *hlist_lock;
-- spin_lock_irqsave(&pl->lock, flags);
-+ raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
- /*
- * For each missed period, we half the local counter.
-@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global
- else
- pl->events = 0;
- pl->period = global_period;
-- spin_unlock_irqrestore(&pl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ *head = &kretprobe_inst_table[hash];
+ hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_lock_irqsave(hlist_lock, *flags);
++ raw_spin_lock_irqsave(hlist_lock, *flags);
}
- /*
-Index: linux-2.6/kernel/trace/ring_buffer.c
-===================================================================
---- linux-2.6.orig/kernel/trace/ring_buffer.c
-+++ linux-2.6/kernel/trace/ring_buffer.c
-@@ -1040,6 +1040,44 @@ static int rb_allocate_pages(struct ring
- return -ENOMEM;
+ static void __kprobes kretprobe_table_lock(unsigned long hash,
+ unsigned long *flags)
+ __acquires(hlist_lock)
+ {
+- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_lock_irqsave(hlist_lock, *flags);
++ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
++ raw_spin_lock_irqsave(hlist_lock, *flags);
}
-+static inline int ok_to_lock(void)
-+{
-+ if (in_nmi())
-+ return 0;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (in_atomic())
-+ return 0;
-+#endif
-+ return 1;
-+}
-+
-+static int
-+read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
-+ unsigned long *flags)
-+{
-+ /*
-+ * If an NMI die dumps out the content of the ring buffer
-+ * do not grab locks. We also permanently disable the ring
-+ * buffer too. A one time deal is all you get from reading
-+ * the ring buffer from an NMI.
-+ */
-+ if (!ok_to_lock()) {
-+ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
-+ return 1;
-+ tracing_off_permanent();
-+ return 0;
-+ }
-+ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
-+ return 1;
-+}
-+
-+static void
-+read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
-+ unsigned long flags, int locked)
-+{
-+ if (locked)
-+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+}
- static struct ring_buffer_per_cpu *
- rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
- {
-@@ -1250,9 +1288,11 @@ rb_remove_pages(struct ring_buffer_per_c
+ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
+@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(str
+ __releases(hlist_lock)
{
- struct buffer_page *bpage;
- struct list_head *p;
-+ unsigned long flags;
- unsigned i;
-+ int locked;
-
-- spin_lock_irq(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- rb_head_page_deactivate(cpu_buffer);
-
- for (i = 0; i < nr_pages; i++) {
-@@ -1270,7 +1310,7 @@ rb_remove_pages(struct ring_buffer_per_c
- rb_check_pages(cpu_buffer);
+ unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
+- spinlock_t *hlist_lock;
++ raw_spinlock_t *hlist_lock;
- out:
-- spin_unlock_irq(&cpu_buffer->reader_lock);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_unlock_irqrestore(hlist_lock, *flags);
++ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
- static void
-@@ -1279,9 +1319,11 @@ rb_insert_pages(struct ring_buffer_per_c
+ static void __kprobes kretprobe_table_unlock(unsigned long hash,
+ unsigned long *flags)
+ __releases(hlist_lock)
{
- struct buffer_page *bpage;
- struct list_head *p;
-+ unsigned long flags;
- unsigned i;
-+ int locked;
+- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_unlock_irqrestore(hlist_lock, *flags);
++ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
++ raw_spin_unlock_irqrestore(hlist_lock, *flags);
+ }
-- spin_lock_irq(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- rb_head_page_deactivate(cpu_buffer);
+ /*
+@@ -1650,12 +1650,12 @@ static int __kprobes pre_handler_kretpro
- for (i = 0; i < nr_pages; i++) {
-@@ -1296,7 +1338,7 @@ rb_insert_pages(struct ring_buffer_per_c
- rb_check_pages(cpu_buffer);
+ /*TODO: consider to only swap the RA after the last pre_handler fired */
+ hash = hash_ptr(current, KPROBE_HASH_BITS);
+- spin_lock_irqsave(&rp->lock, flags);
++ raw_spin_lock_irqsave(&rp->lock, flags);
+ if (!hlist_empty(&rp->free_instances)) {
+ ri = hlist_entry(rp->free_instances.first,
+ struct kretprobe_instance, hlist);
+ hlist_del(&ri->hlist);
+- spin_unlock_irqrestore(&rp->lock, flags);
++ raw_spin_unlock_irqrestore(&rp->lock, flags);
- out:
-- spin_unlock_irq(&cpu_buffer->reader_lock);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ ri->rp = rp;
+ ri->task = current;
+@@ -1672,7 +1672,7 @@ static int __kprobes pre_handler_kretpro
+ kretprobe_table_unlock(hash, &flags);
+ } else {
+ rp->nmissed++;
+- spin_unlock_irqrestore(&rp->lock, flags);
++ raw_spin_unlock_irqrestore(&rp->lock, flags);
+ }
+ return 0;
}
+@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct
+ rp->maxactive = num_possible_cpus();
+ #endif
+ }
+- spin_lock_init(&rp->lock);
++ raw_spin_lock_init(&rp->lock);
+ INIT_HLIST_HEAD(&rp->free_instances);
+ for (i = 0; i < rp->maxactive; i++) {
+ inst = kmalloc(sizeof(struct kretprobe_instance) +
+@@ -1946,7 +1946,7 @@ static int __init init_kprobes(void)
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ INIT_HLIST_HEAD(&kprobe_table[i]);
+ INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+- spin_lock_init(&(kretprobe_table_locks[i].lock));
++ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
+ }
- /**
-@@ -2784,15 +2826,16 @@ void ring_buffer_iter_reset(struct ring_
+ /*
+Index: linux-2.6/include/linux/percpu_counter.h
+===================================================================
+--- linux-2.6.orig/include/linux/percpu_counter.h
++++ linux-2.6/include/linux/percpu_counter.h
+@@ -16,7 +16,7 @@
+ #ifdef CONFIG_SMP
+
+ struct percpu_counter {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ s64 count;
+ #ifdef CONFIG_HOTPLUG_CPU
+ struct list_head list; /* All percpu_counters are on a list */
+Index: linux-2.6/lib/percpu_counter.c
+===================================================================
+--- linux-2.6.orig/lib/percpu_counter.c
++++ linux-2.6/lib/percpu_counter.c
+@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_co
{
- struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long flags;
-+ int locked;
+ int cpu;
- if (!iter)
- return;
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ *pcount = 0;
+ }
+ fbc->count = amount;
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ }
+ EXPORT_SYMBOL(percpu_counter_set);
- cpu_buffer = iter->cpu_buffer;
+@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_
+ preempt_disable();
+ count = __this_cpu_read(*fbc->counters) + amount;
+ if (count >= batch || count <= -batch) {
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ fbc->count += count;
+ __this_cpu_write(*fbc->counters, 0);
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ } else {
+ __this_cpu_write(*fbc->counters, count);
+ }
+@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_c
+ s64 ret;
+ int cpu;
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- rb_iter_reset(iter);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_online_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ return ret;
}
- EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+ EXPORT_SYMBOL(__percpu_counter_sum);
+@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
+ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ struct lock_class_key *key)
+ {
+- spin_lock_init(&fbc->lock);
++ raw_spin_lock_init(&fbc->lock);
+ lockdep_set_class(&fbc->lock, key);
+ fbc->count = amount;
+ fbc->counters = alloc_percpu(s32);
+@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotc
+ s32 *pcount;
+ unsigned long flags;
-@@ -3210,21 +3253,6 @@ rb_iter_peek(struct ring_buffer_iter *it
- }
- EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+- spin_lock_irqsave(&fbc->lock, flags);
++ raw_spin_lock_irqsave(&fbc->lock, flags);
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ fbc->count += *pcount;
+ *pcount = 0;
+- spin_unlock_irqrestore(&fbc->lock, flags);
++ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ }
+ mutex_unlock(&percpu_counters_lock);
+ #endif
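The percpu_counter conversion is safe for the same reason the counter scales: fbc->lock only protects short folds of a per-CPU delta into the shared s64, so a raw (non-sleeping) lock is held for a bounded time. A runnable userspace model of the batching idea (thread count, BATCH and all names are mine, not the kernel's):

    #include <pthread.h>
    #include <stdio.h>

    #define BATCH    32
    #define NTHREADS 4

    static long global_count;
    static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;

    struct local_counter { long delta; };

    static void counter_add(struct local_counter *lc, long amount)
    {
            lc->delta += amount;
            if (lc->delta >= BATCH || lc->delta <= -BATCH) {
                    /* the only contended step, kept deliberately tiny */
                    pthread_mutex_lock(&count_lock);
                    global_count += lc->delta;
                    pthread_mutex_unlock(&count_lock);
                    lc->delta = 0;
            }
    }

    static void *worker(void *arg)
    {
            struct local_counter lc = { 0 };
            int i;

            (void)arg;
            for (i = 0; i < 100000; i++)
                    counter_add(&lc, 1);
            pthread_mutex_lock(&count_lock); /* fold the remainder */
            global_count += lc.delta;
            pthread_mutex_unlock(&count_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NTHREADS];
            int i;

            for (i = 0; i < NTHREADS; i++)
                    pthread_create(&t[i], NULL, worker, NULL);
            for (i = 0; i < NTHREADS; i++)
                    pthread_join(t[i], NULL);
            printf("count = %ld\n", global_count); /* 400000 */
            return 0;
    }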
+Index: linux-2.6/kernel/cgroup.c
+===================================================================
+--- linux-2.6.orig/kernel/cgroup.c
++++ linux-2.6/kernel/cgroup.c
+@@ -263,7 +263,7 @@ list_for_each_entry(_root, &roots, root_
+ /* the list of cgroups eligible for automatic release. Protected by
+ * release_list_lock */
+ static LIST_HEAD(release_list);
+-static DEFINE_SPINLOCK(release_list_lock);
++static DEFINE_RAW_SPINLOCK(release_list_lock);
+ static void cgroup_release_agent(struct work_struct *work);
+ static DECLARE_WORK(release_agent_work, cgroup_release_agent);
+ static void check_for_release(struct cgroup *cgrp);
+@@ -4010,11 +4010,11 @@ again:
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
--static inline int rb_ok_to_lock(void)
--{
-- /*
-- * If an NMI die dumps out the content of the ring buffer
-- * do not grab locks. We also permanently disable the ring
-- * buffer too. A one time deal is all you get from reading
-- * the ring buffer from an NMI.
-- */
-- if (likely(!in_nmi()))
-- return 1;
--
-- tracing_off_permanent();
-- return 0;
--}
--
- /**
- * ring_buffer_peek - peek at the next event to be read
- * @buffer: The ring buffer to read
-@@ -3242,22 +3270,17 @@ ring_buffer_peek(struct ring_buffer *buf
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct ring_buffer_event *event;
- unsigned long flags;
-- int dolock;
-+ int locked;
-
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
-
-- dolock = rb_ok_to_lock();
- again:
-- local_irq_save(flags);
-- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- event = rb_buffer_peek(cpu_buffer, ts, lost_events);
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
- rb_advance_reader(cpu_buffer);
-- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-- local_irq_restore(flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
-
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
- goto again;
-@@ -3279,11 +3302,12 @@ ring_buffer_iter_peek(struct ring_buffer
- struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
- struct ring_buffer_event *event;
- unsigned long flags;
-+ int locked;
-
- again:
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- event = rb_iter_peek(iter, ts);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
-
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
- goto again;
-@@ -3309,9 +3333,7 @@ ring_buffer_consume(struct ring_buffer *
- struct ring_buffer_per_cpu *cpu_buffer;
- struct ring_buffer_event *event = NULL;
- unsigned long flags;
-- int dolock;
--
-- dolock = rb_ok_to_lock();
-+ int locked;
-
- again:
- /* might be called in atomic */
-@@ -3321,9 +3343,7 @@ ring_buffer_consume(struct ring_buffer *
- goto out;
-
- cpu_buffer = buffer->buffers[cpu];
-- local_irq_save(flags);
-- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ set_bit(CGRP_REMOVED, &cgrp->flags);
+ if (!list_empty(&cgrp->release_list))
+ list_del_init(&cgrp->release_list);
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
- event = rb_buffer_peek(cpu_buffer, ts, lost_events);
- if (event) {
-@@ -3331,9 +3351,8 @@ ring_buffer_consume(struct ring_buffer *
- rb_advance_reader(cpu_buffer);
+ cgroup_lock_hierarchy(cgrp->root);
+ /* delete this cgroup from parent->children */
+@@ -4667,13 +4667,13 @@ static void check_for_release(struct cgr
+ * already queued for a userspace notification, queue
+ * it now */
+ int need_schedule_work = 0;
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ if (!cgroup_is_removed(cgrp) &&
+ list_empty(&cgrp->release_list)) {
+ list_add(&cgrp->release_list, &release_list);
+ need_schedule_work = 1;
+ }
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ if (need_schedule_work)
+ schedule_work(&release_agent_work);
}
-
-- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-- local_irq_restore(flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
-+
-
- out:
- preempt_enable();
-@@ -3418,17 +3437,18 @@ ring_buffer_read_start(struct ring_buffe
+@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct
{
- struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long flags;
-+ int locked;
+ BUG_ON(work != &release_agent_work);
+ mutex_lock(&cgroup_mutex);
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ while (!list_empty(&release_list)) {
+ char *argv[3], *envp[3];
+ int i;
+@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct
+ struct cgroup,
+ release_list);
+ list_del_init(&cgrp->release_list);
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pathbuf)
+ goto continue_free;
+@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct
+ continue_free:
+ kfree(pathbuf);
+ kfree(agentbuf);
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ }
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ mutex_unlock(&cgroup_mutex);
+ }
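The cgroup conversion works because release_list_lock guards nothing that sleeps: check_for_release() may run from atomic context, so it only queues the cgroup and kicks a work item, and the sleeping part (calling the usermode release agent) happens later in cgroup_release_agent() in process context. A kernel-style sketch of that split, with hypothetical names:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    static LIST_HEAD(pending);
    static DEFINE_RAW_SPINLOCK(pending_lock);
    static void agent_fn(struct work_struct *work);
    static DECLARE_WORK(agent_work, agent_fn);

    /* atomic side: raw lock plus queueing only, nothing that sleeps */
    static void note_event(struct list_head *item)
    {
            raw_spin_lock(&pending_lock);
            list_add(item, &pending);
            raw_spin_unlock(&pending_lock);
            schedule_work(&agent_work);
    }

    /* process context: free to allocate memory, sleep, run helpers */
    static void agent_fn(struct work_struct *work)
    {
            /* drain 'pending' under pending_lock, dropping the lock
             * around each sleeping operation, as the code above does */
    }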
- if (!iter)
- return;
+Index: linux-2.6/include/linux/proportions.h
+===================================================================
+--- linux-2.6.orig/include/linux/proportions.h
++++ linux-2.6/include/linux/proportions.h
+@@ -58,7 +58,7 @@ struct prop_local_percpu {
+ */
+ int shift;
+ unsigned long period;
+- spinlock_t lock; /* protect the snapshot state */
++ raw_spinlock_t lock; /* protect the snapshot state */
+ };
- cpu_buffer = iter->cpu_buffer;
+ int prop_local_init_percpu(struct prop_local_percpu *pl);
+@@ -106,11 +106,11 @@ struct prop_local_single {
+ */
+ unsigned long period;
+ int shift;
+- spinlock_t lock; /* protect the snapshot state */
++ raw_spinlock_t lock; /* protect the snapshot state */
+ };
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- arch_spin_lock(&cpu_buffer->lock);
- rb_iter_reset(iter);
- arch_spin_unlock(&cpu_buffer->lock);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ #define INIT_PROP_LOCAL_SINGLE(name) \
+-{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
}
- EXPORT_SYMBOL_GPL(ring_buffer_read_start);
-
-@@ -3462,8 +3482,9 @@ ring_buffer_read(struct ring_buffer_iter
- struct ring_buffer_event *event;
- struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
- unsigned long flags;
-+ int locked;
-
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- again:
- event = rb_iter_peek(iter, ts);
- if (!event)
-@@ -3474,7 +3495,7 @@ ring_buffer_read(struct ring_buffer_iter
- rb_advance_iter(iter);
- out:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ int prop_local_init_single(struct prop_local_single *pl);
+Index: linux-2.6/lib/proportions.c
+===================================================================
+--- linux-2.6.orig/lib/proportions.c
++++ linux-2.6/lib/proportions.c
+@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigne
- return event;
- }
-@@ -3537,13 +3558,14 @@ void ring_buffer_reset_cpu(struct ring_b
+ int prop_local_init_percpu(struct prop_local_percpu *pl)
{
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- unsigned long flags;
-+ int locked;
-
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+- spin_lock_init(&pl->lock);
++ raw_spin_lock_init(&pl->lock);
+ pl->shift = 0;
+ pl->period = 0;
+ return percpu_counter_init(&pl->events, 0);
+@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global
+ if (pl->period == global_period)
return;
- atomic_inc(&cpu_buffer->record_disabled);
-
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
-
- if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
- goto out;
-@@ -3555,7 +3577,7 @@ void ring_buffer_reset_cpu(struct ring_b
- arch_spin_unlock(&cpu_buffer->lock);
+- spin_lock_irqsave(&pl->lock, flags);
++ raw_spin_lock_irqsave(&pl->lock, flags);
+ prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
- out:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ /*
+@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global
+ percpu_counter_set(&pl->events, 0);
- atomic_dec(&cpu_buffer->record_disabled);
+ pl->period = global_period;
+- spin_unlock_irqrestore(&pl->lock, flags);
++ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
-@@ -3582,22 +3604,16 @@ int ring_buffer_empty(struct ring_buffer
- {
- struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long flags;
-- int dolock;
-+ int locked;
- int cpu;
- int ret;
-- dolock = rb_ok_to_lock();
--
- /* yes this is racy, but if you don't like the race, lock the buffer */
- for_each_buffer_cpu(buffer, cpu) {
- cpu_buffer = buffer->buffers[cpu];
-- local_irq_save(flags);
-- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- ret = rb_per_cpu_empty(cpu_buffer);
-- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-- local_irq_restore(flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+ /*
+@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_de
- if (!ret)
- return 0;
-@@ -3616,22 +3632,16 @@ int ring_buffer_empty_cpu(struct ring_bu
+ int prop_local_init_single(struct prop_local_single *pl)
{
- struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long flags;
-- int dolock;
-+ int locked;
- int ret;
-
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 1;
+- spin_lock_init(&pl->lock);
++ raw_spin_lock_init(&pl->lock);
+ pl->shift = 0;
+ pl->period = 0;
+ pl->events = 0;
+@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global
+ if (pl->period == global_period)
+ return;
-- dolock = rb_ok_to_lock();
--
- cpu_buffer = buffer->buffers[cpu];
-- local_irq_save(flags);
-- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ locked = read_buffer_lock(cpu_buffer, &flags);
- ret = rb_per_cpu_empty(cpu_buffer);
-- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-- local_irq_restore(flags);
-+ read_buffer_unlock(cpu_buffer, flags, locked);
+- spin_lock_irqsave(&pl->lock, flags);
++ raw_spin_lock_irqsave(&pl->lock, flags);
+ prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
+ /*
+ * For each missed period, we half the local counter.
+@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global
+ else
+ pl->events = 0;
+ pl->period = global_period;
+- spin_unlock_irqrestore(&pl->lock, flags);
++ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ }
- return ret;
+ /*
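prop_norm_percpu()/prop_norm_single() decay counters that slept through global periods, per the comment above: each missed period halves the local count, so after k missed periods 'events' becomes events >> k. A trivial runnable check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long events = 1024;
            unsigned int missed_periods = 3;

            /* "for each missed period, we half the local counter" */
            events >>= missed_periods;
            printf("%lu\n", events); /* 1024 / 2^3 = 128 */
            return 0;
    }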
+Index: linux-2.6/kernel/trace/ring_buffer.c
+===================================================================
+--- linux-2.6.orig/kernel/trace/ring_buffer.c
++++ linux-2.6/kernel/trace/ring_buffer.c
+@@ -1040,6 +1040,44 @@ static int rb_allocate_pages(struct ring
+ return -ENOMEM;
}
-@@ -3805,6 +3815,7 @@ int ring_buffer_read_page(struct ring_bu
- unsigned int commit;
- unsigned int read;
- u64 save_timestamp;
-+ int locked;
- int ret = -1;
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
-@@ -3826,7 +3837,7 @@ int ring_buffer_read_page(struct ring_bu
- if (!bpage)
- goto out;
++static inline int ok_to_lock(void)
++{
++ if (in_nmi())
++ return 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (in_atomic())
++ return 0;
++#endif
++ return 1;
++}
++
++static int
++read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
++ unsigned long *flags)
++{
++ /*
++ * If an NMI die dumps out the content of the ring buffer
++ * do not grab locks. We also permanently disable the ring
++ * buffer too. A one time deal is all you get from reading
++ * the ring buffer from an NMI.
++ */
++ if (!ok_to_lock()) {
++ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
++ return 1;
++ tracing_off_permanent();
++ return 0;
++ }
++ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
++ return 1;
++}
++
++static void
++read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
++ unsigned long flags, int locked)
++{
++ if (locked)
++ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++}
+ static struct ring_buffer_per_cpu *
+ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+ {
+@@ -1250,9 +1288,11 @@ rb_remove_pages(struct ring_buffer_per_c
+ {
+ struct buffer_page *bpage;
+ struct list_head *p;
++ unsigned long flags;
+ unsigned i;
++ int locked;
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+- spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_head_page_deactivate(cpu_buffer);
- reader = rb_get_reader_page(cpu_buffer);
- if (!reader)
-@@ -3949,7 +3960,7 @@ int ring_buffer_read_page(struct ring_bu
- memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+ for (i = 0; i < nr_pages; i++) {
+@@ -1270,7 +1310,7 @@ rb_remove_pages(struct ring_buffer_per_c
+ rb_check_pages(cpu_buffer);
- out_unlock:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+- spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
- out:
- return ret;
-Index: linux-2.6/kernel/trace/trace.c
-===================================================================
---- linux-2.6.orig/kernel/trace/trace.c
-+++ linux-2.6/kernel/trace/trace.c
-@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_P
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
-
- static int trace_stop_count;
--static DEFINE_SPINLOCK(tracing_start_lock);
-+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-
- /**
- * trace_wake_up - wake up tasks waiting for trace input
-@@ -351,6 +351,7 @@ static DEFINE_SPINLOCK(tracing_start_loc
- */
- void trace_wake_up(void)
+ static void
+@@ -1279,9 +1319,11 @@ rb_insert_pages(struct ring_buffer_per_c
{
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int cpu;
+ struct buffer_page *bpage;
+ struct list_head *p;
++ unsigned long flags;
+ unsigned i;
++ int locked;
- if (trace_flags & TRACE_ITER_BLOCK)
-@@ -363,6 +364,7 @@ void trace_wake_up(void)
- if (!runqueue_is_locked(cpu))
- wake_up(&trace_wait);
- put_cpu();
-+#endif
- }
+- spin_lock_irq(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_head_page_deactivate(cpu_buffer);
- static int __init set_buf_size(char *str)
-@@ -716,6 +718,12 @@ update_max_tr_single(struct trace_array
+ for (i = 0; i < nr_pages; i++) {
+@@ -1296,7 +1338,7 @@ rb_insert_pages(struct ring_buffer_per_c
+ rb_check_pages(cpu_buffer);
+
+ out:
+- spin_unlock_irq(&cpu_buffer->reader_lock);
++ read_buffer_unlock(cpu_buffer, flags, locked);
}
- #endif /* CONFIG_TRACER_MAX_TRACE */
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static void default_wait_pipe(struct trace_iterator *iter);
-+#else
-+#define default_wait_pipe poll_wait_pipe
-+#endif
-+
/**
- * register_tracer - register a tracer with the ftrace system.
- * @type - the plugin for the tracer
-@@ -958,7 +966,7 @@ void tracing_start(void)
- if (tracing_disabled)
+@@ -2784,15 +2826,16 @@ void ring_buffer_iter_reset(struct ring_
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
++ int locked;
+
+ if (!iter)
return;
-- spin_lock_irqsave(&tracing_start_lock, flags);
-+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (--trace_stop_count) {
- if (trace_stop_count < 0) {
- /* Someone screwed up their debugging */
-@@ -983,7 +991,7 @@ void tracing_start(void)
+ cpu_buffer = iter->cpu_buffer;
- ftrace_start();
- out:
-- spin_unlock_irqrestore(&tracing_start_lock, flags);
-+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_iter_reset(iter);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+
+@@ -3210,21 +3253,6 @@ rb_iter_peek(struct ring_buffer_iter *it
}
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+-static inline int rb_ok_to_lock(void)
+-{
+- /*
+- * If an NMI die dumps out the content of the ring buffer
+- * do not grab locks. We also permanently disable the ring
+- * buffer too. A one time deal is all you get from reading
+- * the ring buffer from an NMI.
+- */
+- if (likely(!in_nmi()))
+- return 1;
+-
+- tracing_off_permanent();
+- return 0;
+-}
+-
/**
-@@ -998,7 +1006,7 @@ void tracing_stop(void)
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+@@ -3242,22 +3270,17 @@ ring_buffer_peek(struct ring_buffer *buf
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_event *event;
unsigned long flags;
+- int dolock;
++ int locked;
- ftrace_stop();
-- spin_lock_irqsave(&tracing_start_lock, flags);
-+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (trace_stop_count++)
- goto out;
-
-@@ -1016,7 +1024,7 @@ void tracing_stop(void)
- arch_spin_unlock(&ftrace_max_lock);
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return NULL;
- out:
-- spin_unlock_irqrestore(&tracing_start_lock, flags);
-+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
- }
+- dolock = rb_ok_to_lock();
+ again:
+- local_irq_save(flags);
+- if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ rb_advance_reader(cpu_buffer);
+- if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
- void trace_stop_cmdline_recording(void);
-@@ -1120,6 +1128,8 @@ tracing_generic_entry_update(struct trac
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
-+
-+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
- }
- EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+@@ -3279,11 +3302,12 @@ ring_buffer_iter_peek(struct ring_buffer
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ struct ring_buffer_event *event;
+ unsigned long flags;
++ int locked;
-@@ -1757,9 +1767,10 @@ static void print_lat_help_header(struct
- seq_puts(m, "# | / _----=> need-resched \n");
- seq_puts(m, "# || / _---=> hardirq/softirq \n");
- seq_puts(m, "# ||| / _--=> preempt-depth \n");
-- seq_puts(m, "# |||| / delay \n");
-- seq_puts(m, "# cmd pid ||||| time | caller \n");
-- seq_puts(m, "# \\ / ||||| \\ | / \n");
-+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
-+ seq_puts(m, "# ||||| / delay \n");
-+ seq_puts(m, "# cmd pid |||||| time | caller \n");
-+ seq_puts(m, "# \\ / ||||| \\ | / \n");
- }
+ again:
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ event = rb_iter_peek(iter, ts);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
- static void print_func_help_header(struct seq_file *m)
-@@ -3067,6 +3078,7 @@ static int tracing_release_pipe(struct i
- return 0;
- }
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+@@ -3309,9 +3333,7 @@ ring_buffer_consume(struct ring_buffer *
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_event *event = NULL;
+ unsigned long flags;
+- int dolock;
+-
+- dolock = rb_ok_to_lock();
++ int locked;
-+#ifndef CONFIG_PREEMPT_RT_FULL
- static unsigned int
- tracing_poll_pipe(struct file *filp, poll_table *poll_table)
- {
-@@ -3088,8 +3100,7 @@ tracing_poll_pipe(struct file *filp, pol
+ again:
+ /* might be called in atomic */
+@@ -3321,9 +3343,7 @@ ring_buffer_consume(struct ring_buffer *
+ goto out;
+
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event) {
+@@ -3331,9 +3351,8 @@ ring_buffer_consume(struct ring_buffer *
+ rb_advance_reader(cpu_buffer);
}
- }
--
--void default_wait_pipe(struct trace_iterator *iter)
-+static void default_wait_pipe(struct trace_iterator *iter)
+- if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
++
+
+ out:
+ preempt_enable();
+@@ -3418,17 +3437,18 @@ ring_buffer_read_start(struct ring_buffe
{
- DEFINE_WAIT(wait);
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
++ int locked;
-@@ -3100,6 +3111,20 @@ void default_wait_pipe(struct trace_iter
+ if (!iter)
+ return;
- finish_wait(&trace_wait, &wait);
- }
-+#else
-+static unsigned int
-+tracing_poll_pipe(struct file *filp, poll_table *poll_table)
-+{
-+ struct trace_iterator *iter = filp->private_data;
-+
-+ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
-+ return POLLIN | POLLRDNORM;
-+ poll_wait_pipe(iter);
-+ if (!trace_empty(iter))
-+ return POLLIN | POLLRDNORM;
-+ return 0;
-+}
-+#endif
+ cpu_buffer = iter->cpu_buffer;
- /*
- * This is a make-shift waitqueue.
-Index: linux-2.6/kernel/trace/trace_irqsoff.c
-===================================================================
---- linux-2.6.orig/kernel/trace/trace_irqsoff.c
-+++ linux-2.6/kernel/trace/trace_irqsoff.c
-@@ -17,13 +17,14 @@
- #include <linux/fs.h>
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ arch_spin_lock(&cpu_buffer->lock);
+ rb_iter_reset(iter);
+ arch_spin_unlock(&cpu_buffer->lock);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_read_start);
- #include "trace.h"
-+#include <trace/events/hist.h>
+@@ -3462,8 +3482,9 @@ ring_buffer_read(struct ring_buffer_iter
+ struct ring_buffer_event *event;
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
++ int locked;
- static struct trace_array *irqsoff_trace __read_mostly;
- static int tracer_enabled __read_mostly;
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ again:
+ event = rb_iter_peek(iter, ts);
+ if (!event)
+@@ -3474,7 +3495,7 @@ ring_buffer_read(struct ring_buffer_iter
- static DEFINE_PER_CPU(int, tracing_cpu);
+ rb_advance_iter(iter);
+ out:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
--static DEFINE_SPINLOCK(max_trace_lock);
-+static DEFINE_RAW_SPINLOCK(max_trace_lock);
+ return event;
+ }
+@@ -3537,13 +3558,14 @@ void ring_buffer_reset_cpu(struct ring_b
+ {
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ unsigned long flags;
++ int locked;
- enum {
- TRACER_IRQS_OFF = (1 << 1),
-@@ -319,7 +320,7 @@ check_critical_timing(struct trace_array
- if (!report_latency(delta))
- goto out;
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return;
-- spin_lock_irqsave(&max_trace_lock, flags);
-+ raw_spin_lock_irqsave(&max_trace_lock, flags);
+ atomic_inc(&cpu_buffer->record_disabled);
- /* check if we are still the max latency */
- if (!report_latency(delta))
-@@ -342,7 +343,7 @@ check_critical_timing(struct trace_array
- max_sequence++;
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
- out_unlock:
-- spin_unlock_irqrestore(&max_trace_lock, flags);
-+ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
+ if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ goto out;
+@@ -3555,7 +3577,7 @@ void ring_buffer_reset_cpu(struct ring_b
+ arch_spin_unlock(&cpu_buffer->lock);
- out:
- data->critical_sequence = max_sequence;
-@@ -424,11 +425,13 @@ void start_critical_timings(void)
- {
- if (preempt_trace() || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(TRACE_START, 1);
- }
- EXPORT_SYMBOL_GPL(start_critical_timings);
+ out:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
- void stop_critical_timings(void)
- {
-+ trace_preemptirqsoff_hist(TRACE_STOP, 0);
- if (preempt_trace() || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(a0, a1);
+ atomic_dec(&cpu_buffer->record_disabled);
}
-@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -3582,22 +3604,16 @@ int ring_buffer_empty(struct ring_buffer
{
- if (!preempt_trace() && irq_trace())
- start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+- int dolock;
++ int locked;
+ int cpu;
+ int ret;
- #else /* !CONFIG_PROVE_LOCKING */
-@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
- */
- void trace_hardirqs_on(void)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off);
+- dolock = rb_ok_to_lock();
+-
+ /* yes this is racy, but if you don't like the race, lock the buffer */
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ ret = rb_per_cpu_empty(cpu_buffer);
+- if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
- void trace_hardirqs_on_caller(unsigned long caller_addr)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, caller_addr);
- }
-@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned
+ if (!ret)
+ return 0;
+@@ -3616,22 +3632,16 @@ int ring_buffer_empty_cpu(struct ring_bu
{
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, caller_addr);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-@@ -503,13 +512,15 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
- #ifdef CONFIG_PREEMPT_TRACER
- void trace_preempt_on(unsigned long a0, unsigned long a1)
- {
-- if (preempt_trace())
-+ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
-+ if (preempt_trace() && !irq_trace())
- stop_critical_timing(a0, a1);
- }
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+- int dolock;
++ int locked;
+ int ret;
- void trace_preempt_off(unsigned long a0, unsigned long a1)
- {
-- if (preempt_trace())
-+ trace_preemptirqsoff_hist(PREEMPT_ON, 1);
-+ if (preempt_trace() && !irq_trace())
- start_critical_timing(a0, a1);
- }
- #endif /* CONFIG_PREEMPT_TRACER */
-Index: linux-2.6/include/linux/ratelimit.h
-===================================================================
---- linux-2.6.orig/include/linux/ratelimit.h
-+++ linux-2.6/include/linux/ratelimit.h
-@@ -8,7 +8,7 @@
- #define DEFAULT_RATELIMIT_BURST 10
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 1;
- struct ratelimit_state {
-- spinlock_t lock; /* protect the state */
-+ raw_spinlock_t lock; /* protect the state */
+- dolock = rb_ok_to_lock();
+-
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ ret = rb_per_cpu_empty(cpu_buffer);
+- if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
- int interval;
- int burst;
-@@ -20,7 +20,7 @@ struct ratelimit_state {
- #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = { \
-- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
- }
-@@ -28,7 +28,7 @@ struct ratelimit_state {
- static inline void ratelimit_state_init(struct ratelimit_state *rs,
- int interval, int burst)
- {
-- spin_lock_init(&rs->lock);
-+ raw_spin_lock_init(&rs->lock);
- rs->interval = interval;
- rs->burst = burst;
- rs->printed = 0;
-Index: linux-2.6/kernel/printk.c
-===================================================================
---- linux-2.6.orig/kernel/printk.c
-+++ linux-2.6/kernel/printk.c
-@@ -21,6 +21,7 @@
- #include <linux/tty.h>
- #include <linux/tty_driver.h>
- #include <linux/console.h>
-+#include <linux/sysrq.h>
- #include <linux/init.h>
- #include <linux/jiffies.h>
- #include <linux/nmi.h>
-@@ -44,13 +45,6 @@
+ return ret;
+ }
+@@ -3805,6 +3815,7 @@ int ring_buffer_read_page(struct ring_bu
+ unsigned int commit;
+ unsigned int read;
+ u64 save_timestamp;
++ int locked;
+ int ret = -1;
- #include <asm/uaccess.h>
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+@@ -3826,7 +3837,7 @@ int ring_buffer_read_page(struct ring_bu
+ if (!bpage)
+ goto out;
--/*
-- * Architectures can override it:
-- */
--void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
--{
--}
--
- #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
- /* printk's without a loglevel use this.. */
-@@ -100,7 +94,7 @@ static int console_locked, console_suspe
- * It is also used in interesting ways to provide interlocking in
- * console_unlock();.
- */
--static DEFINE_SPINLOCK(logbuf_lock);
-+static DEFINE_RAW_SPINLOCK(logbuf_lock);
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
+@@ -3949,7 +3960,7 @@ int ring_buffer_read_page(struct ring_bu
+ memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
- #define LOG_BUF_MASK (log_buf_len-1)
- #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
-@@ -212,7 +206,7 @@ void __init setup_log_buf(int early)
- return;
- }
+ out_unlock:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- log_buf_len = new_log_buf_len;
- log_buf = new_log_buf;
- new_log_buf_len = 0;
-@@ -230,7 +224,7 @@ void __init setup_log_buf(int early)
- log_start -= offset;
- con_start -= offset;
- log_end -= offset;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ out:
+ return ret;
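Every reader-side site above now funnels through read_buffer_lock()/read_buffer_unlock(): contexts that must not block (NMI always, plus any atomic context on RT, where reader_lock is a sleeping lock) only trylock, and on failure turn tracing off permanently instead of deadlocking; the returned token tells the unlock side whether there is anything to release. A runnable userspace model of the lock-or-bail token (the *_model names are mine):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int tracing_dead; /* stands in for tracing_off_permanent() */

    static int read_buffer_lock_model(int atomic_ctx)
    {
            if (atomic_ctx) { /* e.g. NMI: blocking is not an option */
                    if (pthread_mutex_trylock(&reader_lock) == 0)
                            return 1; /* got it; caller must unlock */
                    atomic_store(&tracing_dead, 1); /* bail, don't deadlock */
                    return 0;
            }
            pthread_mutex_lock(&reader_lock); /* normal context may wait */
            return 1;
    }

    static void read_buffer_unlock_model(int locked)
    {
            if (locked)
                    pthread_mutex_unlock(&reader_lock);
    }

    int main(void)
    {
            int locked = read_buffer_lock_model(0);
            /* ... read the buffer ... */
            read_buffer_unlock_model(locked);
            printf("tracing_dead = %d\n", atomic_load(&tracing_dead));
            return 0;
    }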
+Index: linux-2.6/kernel/trace/trace.c
+===================================================================
+--- linux-2.6.orig/kernel/trace/trace.c
++++ linux-2.6/kernel/trace/trace.c
+@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_P
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
- pr_info("log_buf_len: %d\n", log_buf_len);
- pr_info("early log buf free: %d(%d%%)\n",
-@@ -363,18 +357,18 @@ int do_syslog(int type, char __user *buf
- if (error)
- goto out;
- i = 0;
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- while (!error && (log_start != log_end) && i < len) {
- c = LOG_BUF(log_start);
- log_start++;
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- error = __put_user(c,buf);
- buf++;
- i++;
- cond_resched();
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- }
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- if (!error)
- error = i;
- break;
-@@ -397,7 +391,7 @@ int do_syslog(int type, char __user *buf
- count = len;
- if (count > log_buf_len)
- count = log_buf_len;
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- if (count > logged_chars)
- count = logged_chars;
- if (do_clear)
-@@ -414,12 +408,12 @@ int do_syslog(int type, char __user *buf
- if (j + log_buf_len < log_end)
- break;
- c = LOG_BUF(j);
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- error = __put_user(c,&buf[count-1-i]);
- cond_resched();
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- }
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- if (error)
- break;
- error = i;
-@@ -509,6 +503,7 @@ static void __call_console_drivers(unsig
+ static int trace_stop_count;
+-static DEFINE_SPINLOCK(tracing_start_lock);
++static DEFINE_RAW_SPINLOCK(tracing_start_lock);
+
+ /**
+ * trace_wake_up - wake up tasks waiting for trace input
+@@ -351,6 +351,7 @@ static DEFINE_SPINLOCK(tracing_start_loc
+ */
+ void trace_wake_up(void)
{
- struct console *con;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int cpu;
-+ migrate_disable();
- for_each_console(con) {
- if (exclusive_console && con != exclusive_console)
- continue;
-@@ -517,8 +512,62 @@ static void __call_console_drivers(unsig
- (con->flags & CON_ANYTIME)))
- con->write(con, &LOG_BUF(start), end - start);
- }
-+ migrate_enable();
-+}
-+
-+#ifdef CONFIG_EARLY_PRINTK
-+struct console *early_console;
-+
-+static void early_vprintk(const char *fmt, va_list ap)
-+{
-+ char buf[512];
-+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-+ if (early_console)
-+ early_console->write(early_console, buf, n);
-+}
-+
-+asmlinkage void early_printk(const char *fmt, ...)
-+{
-+ va_list ap;
-+ va_start(ap, fmt);
-+ early_vprintk(fmt, ap);
-+ va_end(ap);
-+}
-+
-+/*
-+ * This is independent of any log levels - a global
-+ * kill switch that turns off all of printk.
-+ *
-+ * Used by the NMI watchdog if early-printk is enabled.
-+ */
-+static int __read_mostly printk_killswitch;
-+
-+static int __init force_early_printk_setup(char *str)
-+{
-+ printk_killswitch = 1;
-+ return 0;
-+}
-+early_param("force_early_printk", force_early_printk_setup);
-+
-+void printk_kill(void)
-+{
-+ printk_killswitch = 1;
+ if (trace_flags & TRACE_ITER_BLOCK)
+@@ -363,6 +364,7 @@ void trace_wake_up(void)
+ if (!runqueue_is_locked(cpu))
+ wake_up(&trace_wait);
+ put_cpu();
++#endif
}
-+static int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ if (!printk_killswitch)
-+ return 0;
-+ early_vprintk(fmt, ap);
-+ return 1;
-+}
+ static int __init set_buf_size(char *str)
+@@ -716,6 +718,12 @@ update_max_tr_single(struct trace_array
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
+#else
-+static inline int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ return 0;
-+}
++#define default_wait_pipe poll_wait_pipe
+#endif
+
- static int __read_mostly ignore_loglevel;
+ /**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+@@ -958,7 +966,7 @@ void tracing_start(void)
+ if (tracing_disabled)
+ return;
- static int __init ignore_loglevel_setup(char *str)
-@@ -687,7 +736,7 @@ static void zap_locks(void)
- oops_timestamp = jiffies;
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (--trace_stop_count) {
+ if (trace_stop_count < 0) {
+ /* Someone screwed up their debugging */
+@@ -983,7 +991,7 @@ void tracing_start(void)
- /* If a crash is occurring, make sure we can't deadlock */
-- spin_lock_init(&logbuf_lock);
-+ raw_spin_lock_init(&logbuf_lock);
- /* And make sure that we print immediately */
- sema_init(&console_sem, 1);
+ ftrace_start();
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
-@@ -779,12 +828,18 @@ static inline int can_use_console(unsign
- * interrupts disabled. It should return with 'lockbuf_lock'
- * released but interrupts still disabled.
- */
--static int console_trylock_for_printk(unsigned int cpu)
-+static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
- __releases(&logbuf_lock)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
-+ !preempt_count()) || sysrq_in_progress;
-+#else
-+ int lock = 1;
-+#endif
- int retval = 0;
-- if (console_trylock()) {
-+ if (lock && console_trylock()) {
- retval = 1;
+ /**
+@@ -998,7 +1006,7 @@ void tracing_stop(void)
+ unsigned long flags;
- /*
-@@ -800,7 +855,7 @@ static int console_trylock_for_printk(un
- }
- }
- printk_cpu = UINT_MAX;
-- spin_unlock(&logbuf_lock);
-+ raw_spin_unlock(&logbuf_lock);
- return retval;
+ ftrace_stop();
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (trace_stop_count++)
+ goto out;
+
+@@ -1016,7 +1024,7 @@ void tracing_stop(void)
+ arch_spin_unlock(&ftrace_max_lock);
+
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
- static const char recursion_bug_msg [] =
-@@ -833,6 +888,13 @@ asmlinkage int vprintk(const char *fmt,
- size_t plen;
- char special;
-+ /*
-+ * Fall back to early_printk if a debugging subsystem has
-+ * killed printk output
-+ */
-+ if (unlikely(forced_early_printk(fmt, args)))
-+ return 1;
+ void trace_stop_cmdline_recording(void);
+@@ -1120,6 +1128,8 @@ tracing_generic_entry_update(struct trac
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+
- boot_delay_msec();
- printk_delay();
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -860,7 +922,7 @@ asmlinkage int vprintk(const char *fmt,
- }
+@@ -1757,9 +1767,10 @@ static void print_lat_help_header(struct
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / delay \n");
+- seq_puts(m, "# cmd pid ||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# |||| / _--=> migrate-disable\n");
++ seq_puts(m, "# ||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||| time | caller \n");
++ seq_puts(m, "# \\ / ||||| \\ | / \n");
+ }
- lockdep_off();
-- spin_lock(&logbuf_lock);
-+ raw_spin_lock(&logbuf_lock);
- printk_cpu = this_cpu;
+ static void print_func_help_header(struct seq_file *m)
+@@ -3067,6 +3078,7 @@ static int tracing_release_pipe(struct i
+ return 0;
+ }
- if (recursion_bug) {
-@@ -953,8 +1015,15 @@ asmlinkage int vprintk(const char *fmt,
- * will release 'logbuf_lock' regardless of whether it
- * actually gets the semaphore or not.
- */
-- if (console_trylock_for_printk(this_cpu))
-+ if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
-+ console_unlock();
-+#else
-+ raw_local_irq_restore(flags);
- console_unlock();
-+ raw_local_irq_save(flags);
-+#endif
-+ }
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3088,8 +3100,7 @@ tracing_poll_pipe(struct file *filp, pol
+ }
+ }
- lockdep_on();
- out_restore_irqs:
-@@ -1252,18 +1321,23 @@ void console_unlock(void)
- console_may_schedule = 0;
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ DEFINE_WAIT(wait);
- for ( ; ; ) {
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- wake_klogd |= log_start - log_end;
- if (con_start == log_end)
- break; /* Nothing to print */
- _con_start = con_start;
- _log_end = log_end;
- con_start = log_end; /* Flush */
-- spin_unlock(&logbuf_lock);
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_unlock(&logbuf_lock);
- stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(_con_start, _log_end);
- start_critical_timings();
- local_irq_restore(flags);
+@@ -3100,6 +3111,20 @@ void default_wait_pipe(struct trace_iter
+
+ finish_wait(&trace_wait, &wait);
+ }
+#else
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(_con_start, _log_end);
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++ struct trace_iterator *iter = filp->private_data;
++
++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ poll_wait_pipe(iter);
++ if (!trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
+#endif
- }
- console_locked = 0;
-@@ -1272,7 +1346,7 @@ void console_unlock(void)
- exclusive_console = NULL;
+ /*
+ * This is a make-shift waitqueue.
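On RT the body of trace_wake_up() is compiled out, because issuing a wakeup from the tracing hot path would take sleeping waitqueue locks; in exchange the RT variant of tracing_poll_pipe() above stops waiting to be woken and goes through poll_wait_pipe(), i.e. it sleeps and rechecks. A runnable sketch of that producer/consumer shape (intervals are arbitrary):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int data_ready;

    static void *producer(void *arg)
    {
            (void)arg;
            usleep(200 * 1000);
            atomic_store(&data_ready, 1); /* note: no wakeup issued */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, producer, NULL);
            while (!atomic_load(&data_ready))
                    usleep(10 * 1000); /* models poll_wait_pipe()'s sleep */
            puts("reader saw the data without ever being woken");
            pthread_join(t, NULL);
            return 0;
    }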
+Index: linux-2.6/kernel/trace/trace_irqsoff.c
+===================================================================
+--- linux-2.6.orig/kernel/trace/trace_irqsoff.c
++++ linux-2.6/kernel/trace/trace_irqsoff.c
+@@ -17,13 +17,14 @@
+ #include <linux/fs.h>
- up(&console_sem);
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- if (wake_klogd)
- wake_up_klogd();
- }
-@@ -1502,9 +1576,9 @@ void register_console(struct console *ne
- * console_unlock(); will print out the buffered messages
- * for us.
- */
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- con_start = log_start;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- /*
- * We're about to replay the log buffer. Only do this to the
- * just-registered console to avoid excessive message spam to
-@@ -1711,10 +1785,10 @@ void kmsg_dump(enum kmsg_dump_reason rea
- /* Theoretically, the log could move on after we do this, but
- there's not a lot we can do about that. The new messages
- will overwrite the start of what we dump. */
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- end = log_end & LOG_BUF_MASK;
- chars = logged_chars;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ #include "trace.h"
++#include <trace/events/hist.h>
- if (chars > end) {
- s1 = log_buf + log_buf_len - chars + end;
-Index: linux-2.6/lib/ratelimit.c
-===================================================================
---- linux-2.6.orig/lib/ratelimit.c
-+++ linux-2.6/lib/ratelimit.c
-@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state
- * in addition to the one that will be printed by
- * the entity that is holding the lock already:
- */
-- if (!spin_trylock_irqsave(&rs->lock, flags))
-+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
- return 0;
+ static struct trace_array *irqsoff_trace __read_mostly;
+ static int tracer_enabled __read_mostly;
- if (!rs->begin)
-@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state
- rs->missed++;
- ret = 0;
- }
-- spin_unlock_irqrestore(&rs->lock, flags);
-+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+ static DEFINE_PER_CPU(int, tracing_cpu);
- return ret;
- }
-Index: linux-2.6/include/linux/init_task.h
-===================================================================
---- linux-2.6.orig/include/linux/init_task.h
-+++ linux-2.6/include/linux/init_task.h
-@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
- .cputimer = { \
- .cputime = INIT_CPUTIME, \
- .running = 0, \
-- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
- }, \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-@@ -126,6 +126,12 @@ extern struct cred init_cred;
- # define INIT_PERF_EVENTS(tsk)
- #endif
+-static DEFINE_SPINLOCK(max_trace_lock);
++static DEFINE_RAW_SPINLOCK(max_trace_lock);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define INIT_TIMER_LIST .posix_timer_list = NULL,
-+#else
-+# define INIT_TIMER_LIST
-+#endif
-+
- /*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
-@@ -179,6 +185,7 @@ extern struct cred init_cred;
- .fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
-+ INIT_TIMER_LIST \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-Index: linux-2.6/include/linux/sched.h
-===================================================================
---- linux-2.6.orig/include/linux/sched.h
-+++ linux-2.6/include/linux/sched.h
-@@ -63,6 +63,7 @@ struct sched_param {
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
+ enum {
+ TRACER_IRQS_OFF = (1 << 1),
+@@ -319,7 +320,7 @@ check_critical_timing(struct trace_array
+ if (!report_latency(delta))
+ goto out;
-+#include <asm/kmap_types.h>
- #include <asm/system.h>
- #include <asm/page.h>
- #include <asm/ptrace.h>
-@@ -90,6 +91,7 @@ struct sched_param {
- #include <linux/task_io_accounting.h>
- #include <linux/latencytop.h>
- #include <linux/cred.h>
-+#include <linux/hardirq.h>
+- spin_lock_irqsave(&max_trace_lock, flags);
++ raw_spin_lock_irqsave(&max_trace_lock, flags);
- #include <asm/processor.h>
+ /* check if we are still the max latency */
+ if (!report_latency(delta))
+@@ -342,7 +343,7 @@ check_critical_timing(struct trace_array
+ max_sequence++;
-@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte
- extern signed long schedule_timeout_killable(signed long timeout);
- extern signed long schedule_timeout_uninterruptible(signed long timeout);
- asmlinkage void schedule(void);
-+extern void schedule_preempt_disabled(void);
- extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
+ out_unlock:
+- spin_unlock_irqrestore(&max_trace_lock, flags);
++ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
- struct nsproxy;
-@@ -510,7 +513,7 @@ struct task_cputime {
- struct thread_group_cputimer {
- struct task_cputime cputime;
- int running;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- };
+ out:
+ data->critical_sequence = max_sequence;
+@@ -424,11 +425,13 @@ void start_critical_timings(void)
+ {
+ if (preempt_trace() || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
- #include <linux/rwsem.h>
-@@ -1070,6 +1073,7 @@ struct sched_domain;
- #define WF_SYNC 0x01 /* waker goes to sleep after wakup */
- #define WF_FORK 0x02 /* child wakeup after fork */
- #define WF_MIGRATED 0x04 /* internal use, task got migrated */
-+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+ void stop_critical_timings(void)
+ {
++ trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ if (preempt_trace() || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(a0, a1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
- #define ENQUEUE_WAKEUP 1
- #define ENQUEUE_HEAD 2
-@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
+ */
+ void trace_hardirqs_on(void)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
- struct task_struct {
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
-+ volatile long saved_state; /* saved state for "spinlock sleepers" */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
-@@ -1255,14 +1260,17 @@ struct task_struct {
- #endif
+ void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, caller_addr);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
- unsigned int policy;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int migrate_disable;
-+#ifdef CONFIG_SCHED_DEBUG
-+ int migrate_disable_atomic;
-+#endif
-+#endif
- cpumask_t cpus_allowed;
+@@ -503,13 +512,15 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
+- if (preempt_trace())
++ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
++ if (preempt_trace() && !irq_trace())
+ stop_critical_timing(a0, a1);
+ }
- #ifdef CONFIG_PREEMPT_RCU
- int rcu_read_lock_nesting;
- char rcu_read_unlock_special;
--#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
-- int rcu_boosted;
--#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
- struct list_head rcu_node_entry;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
- #ifdef CONFIG_TREE_PREEMPT_RCU
-@@ -1356,6 +1364,9 @@ struct task_struct {
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
+- if (preempt_trace())
++ trace_preemptirqsoff_hist(PREEMPT_ON, 1);
++ if (preempt_trace() && !irq_trace())
+ start_critical_timing(a0, a1);
+ }
+ #endif /* CONFIG_PREEMPT_TRACER */
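The trace_preemptirqsoff_hist() hooks added above feed the RT latency histograms; judging by the call sites, the second argument marks whether an off section is starting (1) or ending (0), so the histogram code can timestamp the start and bucket the elapsed time at the end. A runnable userspace model of that bucketing (granularity and bucket count are my choice):

    #include <stdio.h>
    #include <time.h>

    #define BUCKETS 64 /* one bucket per microsecond of latency */

    static unsigned long hist[BUCKETS];
    static struct timespec start;

    static void hist_start(void) /* the "(..., 1)" call sites */
    {
            clock_gettime(CLOCK_MONOTONIC, &start);
    }

    static void hist_stop(void) /* the "(..., 0)" call sites */
    {
            struct timespec now;
            long us;

            clock_gettime(CLOCK_MONOTONIC, &now);
            us = (now.tv_sec - start.tv_sec) * 1000000L +
                 (now.tv_nsec - start.tv_nsec) / 1000L;
            if (us < 0)
                    us = 0;
            if (us >= BUCKETS)
                    us = BUCKETS - 1;
            hist[us]++;
    }

    int main(void)
    {
            int i;

            hist_start();
            /* ... section with irqs/preemption notionally off ... */
            hist_stop();
            for (i = 0; i < BUCKETS; i++)
                    if (hist[i])
                            printf("%d us: %lu\n", i, hist[i]);
            return 0;
    }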
+Index: linux-2.6/include/linux/ratelimit.h
+===================================================================
+--- linux-2.6.orig/include/linux/ratelimit.h
++++ linux-2.6/include/linux/ratelimit.h
+@@ -8,7 +8,7 @@
+ #define DEFAULT_RATELIMIT_BURST 10
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *posix_timer_list;
-+#endif
+ struct ratelimit_state {
+- spinlock_t lock; /* protect the state */
++ raw_spinlock_t lock; /* protect the state */
- /* process credentials */
- const struct cred __rcu *real_cred; /* objective and real subjective task
-@@ -1389,6 +1400,7 @@ struct task_struct {
- /* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-+ struct sigqueue *sigqueue_cache;
+ int interval;
+ int burst;
+@@ -20,7 +20,7 @@ struct ratelimit_state {
+ #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = { \
+- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ }
+@@ -28,7 +28,7 @@ struct ratelimit_state {
+ static inline void ratelimit_state_init(struct ratelimit_state *rs,
+ int interval, int burst)
+ {
+- spin_lock_init(&rs->lock);
++ raw_spin_lock_init(&rs->lock);
+ rs->interval = interval;
+ rs->burst = burst;
+ rs->printed = 0;
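With the embedded lock made raw, ___ratelimit() stays callable from any context, including from inside printk() itself, and because it only trylocks, a contended caller counts the message as missed rather than spinning or sleeping. Callers are unaffected; a usage sketch (my_rs is a hypothetical state, the macros are the stock ones):

    static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
                                  DEFAULT_RATELIMIT_BURST);

    if (__ratelimit(&my_rs)) /* nonzero when not being throttled */
            printk(KERN_WARNING "something noisy happened\n");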
+Index: linux-2.6/kernel/printk.c
+===================================================================
+--- linux-2.6.orig/kernel/printk.c
++++ linux-2.6/kernel/printk.c
+@@ -21,6 +21,7 @@
+ #include <linux/tty.h>
+ #include <linux/tty_driver.h>
+ #include <linux/console.h>
++#include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
+ #include <linux/nmi.h>
+@@ -44,13 +45,6 @@
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
-@@ -1432,6 +1444,9 @@ struct task_struct {
- /* mutex deadlock detection */
- struct mutex_waiter *blocked_on;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int pagefault_disabled;
-+#endif
- #ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
-@@ -1558,6 +1573,12 @@ struct task_struct {
- unsigned long trace;
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ u64 preempt_timestamp_hist;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ unsigned long timer_offset;
-+#endif
-+#endif
- #endif /* CONFIG_TRACING */
- #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
- struct memcg_batch_info {
-@@ -1570,10 +1591,26 @@ struct task_struct {
- #ifdef CONFIG_HAVE_HW_BREAKPOINT
- atomic_t ptrace_bp_refcnt;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head put_rcu;
-+ int softirq_nestcnt;
-+#endif
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+ int kmap_idx;
-+ pte_t kmap_pte[KM_TYPE_NR];
-+#endif
- };
+ #include <asm/uaccess.h>
--/* Future-safe accessor for struct task_struct's cpus_allowed. */
--#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
-+#else
-+static inline bool cur_pf_disabled(void) { return false; }
-+#endif
-+
-+static inline bool pagefault_disabled(void)
-+{
-+ return in_atomic() || cur_pf_disabled();
-+}
+-/*
+- * Architectures can override it:
+- */
+-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
+-{
+-}
+-
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
- /*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
-@@ -1743,6 +1780,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+ /* printk's without a loglevel use this.. */
+@@ -100,7 +94,7 @@ static int console_locked, console_suspe
+ * It is also used in interesting ways to provide interlocking in
+ * console_unlock();.
+ */
+-static DEFINE_SPINLOCK(logbuf_lock);
++static DEFINE_RAW_SPINLOCK(logbuf_lock);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __put_task_struct_cb(struct rcu_head *rhp);
-+
-+static inline void put_task_struct(struct task_struct *t)
-+{
-+ if (atomic_dec_and_test(&t->usage))
-+ call_rcu(&t->put_rcu, __put_task_struct_cb);
-+}
-+#else
- extern void __put_task_struct(struct task_struct *t);
+ #define LOG_BUF_MASK (log_buf_len-1)
+ #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
+@@ -212,7 +206,7 @@ void __init setup_log_buf(int early)
+ return;
+ }
- static inline void put_task_struct(struct task_struct *t)
-@@ -1750,6 +1796,7 @@ static inline void put_task_struct(struc
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
-+#endif
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ log_buf_len = new_log_buf_len;
+ log_buf = new_log_buf;
+ new_log_buf_len = 0;
+@@ -230,7 +224,7 @@ void __init setup_log_buf(int early)
+ log_start -= offset;
+ con_start -= offset;
+ log_end -= offset;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
- extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
-@@ -1774,6 +1821,7 @@ extern void thread_group_times(struct ta
- #define PF_FROZEN 0x00010000 /* frozen for system suspend */
- #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
- #define PF_KSWAPD 0x00040000 /* I am kswapd */
-+#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
- #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
- #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
- #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
-@@ -2022,15 +2070,27 @@ static inline void sched_autogroup_exit(
- #endif
+ pr_info("log_buf_len: %d\n", log_buf_len);
+ pr_info("early log buf free: %d(%d%%)\n",
+@@ -363,18 +357,18 @@ int do_syslog(int type, char __user *buf
+ if (error)
+ goto out;
+ i = 0;
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ while (!error && (log_start != log_end) && i < len) {
+ c = LOG_BUF(log_start);
+ log_start++;
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ error = __put_user(c,buf);
+ buf++;
+ i++;
+ cond_resched();
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ }
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ if (!error)
+ error = i;
+ break;
+@@ -397,7 +391,7 @@ int do_syslog(int type, char __user *buf
+ count = len;
+ if (count > log_buf_len)
+ count = log_buf_len;
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ if (count > logged_chars)
+ count = logged_chars;
+ if (do_clear)
+@@ -414,12 +408,12 @@ int do_syslog(int type, char __user *buf
+ if (j + log_buf_len < log_end)
+ break;
+ c = LOG_BUF(j);
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ error = __put_user(c,&buf[count-1-i]);
+ cond_resched();
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ }
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ if (error)
+ break;
+ error = i;
+@@ -509,6 +503,7 @@ static void __call_console_drivers(unsig
+ {
+ struct console *con;
- #ifdef CONFIG_RT_MUTEXES
-+extern void task_setprio(struct task_struct *p, int prio);
- extern int rt_mutex_getprio(struct task_struct *p);
--extern void rt_mutex_setprio(struct task_struct *p, int prio);
-+static inline void rt_mutex_setprio(struct task_struct *p, int prio)
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -517,8 +512,62 @@ static void __call_console_drivers(unsig
+ (con->flags & CON_ANYTIME)))
+ con->write(con, &LOG_BUF(start), end - start);
+ }
++ migrate_enable();
++}
++
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
+{
-+ task_setprio(p, prio);
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++ if (early_console)
++ early_console->write(early_console, buf, n);
+}
- extern void rt_mutex_adjust_pi(struct task_struct *p);
-+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++
++asmlinkage void early_printk(const char *fmt, ...)
+{
-+ return tsk->pi_blocked_on != NULL;
++ va_list ap;
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
+}
- #else
- static inline int rt_mutex_getprio(struct task_struct *p)
- {
- return p->normal_prio;
- }
- # define rt_mutex_adjust_pi(p) do { } while (0)
-+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static int __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
+{
-+ return false;
++ printk_killswitch = 1;
++ return 0;
+}
- #endif
-
- extern bool yield_to(struct task_struct *p, bool preempt);
-@@ -2110,6 +2170,7 @@ extern void xtime_update(unsigned long t
-
- extern int wake_up_state(struct task_struct *tsk, unsigned int state);
- extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct *tsk);
- extern void wake_up_new_task(struct task_struct *tsk);
- #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
-@@ -2199,12 +2260,24 @@ extern struct mm_struct * mm_alloc(void)
-
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
++early_param("force_early_printk", force_early_printk_setup);
+
- static inline void mmdrop(struct mm_struct * mm)
- {
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
++void printk_kill(void)
++{
++ printk_killswitch = 1;
}
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
++static int forced_early_printk(const char *fmt, va_list ap)
+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
+}
+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
+#endif
+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
-@@ -2510,7 +2583,7 @@ extern int _cond_resched(void);
+ static int __read_mostly ignore_loglevel;
- extern int __cond_resched_lock(spinlock_t *lock);
+ static int __init ignore_loglevel_setup(char *str)
+@@ -687,7 +736,7 @@ static void zap_locks(void)
+ oops_timestamp = jiffies;
--#ifdef CONFIG_PREEMPT
-+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
- #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
- #else
- #define PREEMPT_LOCK_OFFSET 0
-@@ -2521,12 +2594,16 @@ extern int __cond_resched_lock(spinlock_
- __cond_resched_lock(lock); \
- })
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- extern int __cond_resched_softirq(void);
-
- #define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
- })
+ /* If a crash is occurring, make sure we can't deadlock */
+- spin_lock_init(&logbuf_lock);
++ raw_spin_lock_init(&logbuf_lock);
+ /* And make sure that we print immediately */
+ sema_init(&console_sem, 1);
+ }
+@@ -779,12 +828,18 @@ static inline int can_use_console(unsign
+ * interrupts disabled. It should return with 'lockbuf_lock'
+ * released but interrupts still disabled.
+ */
+-static int console_trylock_for_printk(unsigned int cpu)
++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ __releases(&logbuf_lock)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ !preempt_count()) || sysrq_in_progress;
+#else
-+# define cond_resched_softirq() cond_resched()
++ int lock = 1;
+#endif
+ int retval = 0;
- /*
- * Does a critical section need to be broken due to another
-@@ -2550,7 +2627,7 @@ void thread_group_cputimer(struct task_s
+- if (console_trylock()) {
++ if (lock && console_trylock()) {
+ retval = 1;
- static inline void thread_group_cputime_init(struct signal_struct *sig)
- {
-- spin_lock_init(&sig->cputimer.lock);
-+ raw_spin_lock_init(&sig->cputimer.lock);
+ /*
+@@ -800,7 +855,7 @@ static int console_trylock_for_printk(un
+ }
+ }
+ printk_cpu = UINT_MAX;
+- spin_unlock(&logbuf_lock);
++ raw_spin_unlock(&logbuf_lock);
+ return retval;
}
+ static const char recursion_bug_msg [] =
+@@ -833,6 +888,13 @@ asmlinkage int vprintk(const char *fmt,
+ size_t plen;
+ char special;
- /*
-@@ -2589,6 +2666,26 @@ static inline void set_task_cpu(struct t
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ boot_delay_msec();
+ printk_delay();
- #endif /* CONFIG_SMP */
+@@ -860,7 +922,7 @@ asmlinkage int vprintk(const char *fmt,
+ }
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return p->migrate_disable;
+ lockdep_off();
+- spin_lock(&logbuf_lock);
++ raw_spin_lock(&logbuf_lock);
+ printk_cpu = this_cpu;
+
+ if (recursion_bug) {
+@@ -953,8 +1015,15 @@ asmlinkage int vprintk(const char *fmt,
+ * will release 'logbuf_lock' regardless of whether it
+ * actually gets the semaphore or not.
+ */
+- if (console_trylock_for_printk(this_cpu))
++ if (console_trylock_for_printk(this_cpu, flags)) {
++#ifndef CONFIG_PREEMPT_RT_FULL
++ console_unlock();
+#else
-+ return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (p->migrate_disable)
-+ return cpumask_of(task_cpu(p));
++ raw_local_irq_restore(flags);
+ console_unlock();
++ raw_local_irq_save(flags);
+#endif
-+
-+ return &p->cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
++ }
-Index: linux-2.6/kernel/posix-cpu-timers.c
-===================================================================
---- linux-2.6.orig/kernel/posix-cpu-timers.c
-+++ linux-2.6/kernel/posix-cpu-timers.c
-@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_s
- struct task_cputime sum;
- unsigned long flags;
+ lockdep_on();
+ out_restore_irqs:
+@@ -1252,18 +1321,23 @@ void console_unlock(void)
+ console_may_schedule = 0;
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- if (!cputimer->running) {
- cputimer->running = 1;
- /*
-@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_s
- update_gt_cputime(&cputimer->cputime, &sum);
+ for ( ; ; ) {
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ wake_klogd |= log_start - log_end;
+ if (con_start == log_end)
+ break; /* Nothing to print */
+ _con_start = con_start;
+ _log_end = log_end;
+ con_start = log_end; /* Flush */
+- spin_unlock(&logbuf_lock);
++#ifndef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(_con_start, _log_end);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(_con_start, _log_end);
++#endif
}
- *times = cputimer->cputime;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
- }
-
- /*
-@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
- /*
- * Disarm any old timer after extracting its expiry time.
- */
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
+ console_locked = 0;
- ret = 0;
- old_incr = timer->it.cpu.incr;
-@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
- struct thread_group_cputimer *cputimer = &sig->cputimer;
- unsigned long flags;
+@@ -1272,7 +1346,7 @@ void console_unlock(void)
+ exclusive_console = NULL;
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- cputimer->running = 0;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+ up(&console_sem);
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ if (wake_klogd)
+ wake_up_klogd();
}
+@@ -1502,9 +1576,9 @@ void register_console(struct console *ne
+ * console_unlock(); will print out the buffered messages
+ * for us.
+ */
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ con_start = log_start;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ /*
+ * We're about to replay the log buffer. Only do this to the
+ * just-registered console to avoid excessive message spam to
+@@ -1711,10 +1785,10 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ /* Theoretically, the log could move on after we do this, but
+ there's not a lot we can do about that. The new messages
+ will overwrite the start of what we dump. */
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ end = log_end & LOG_BUF_MASK;
+ chars = logged_chars;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- static u32 onecputick;
-@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
- /*
- * Now re-arm for the new expiry time.
+ if (chars > end) {
+ s1 = log_buf + log_buf_len - chars + end;
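
The hunks above repeatedly swap spin_lock*(&logbuf_lock, ...) for the raw_spin_lock*() variants. On PREEMPT_RT a plain spinlock_t becomes a sleeping lock, while printk() has to work from hard-IRQ and other atomic contexts, so the log buffer lock must stay a truly spinning lock. A minimal sketch of the idiom, with illustrative names that are not part of the patch:

	#include <linux/spinlock.h>

	/* A lock that must keep spinning on PREEMPT_RT is declared raw. */
	static DEFINE_RAW_SPINLOCK(my_log_lock);

	static void my_log_emit(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_log_lock, flags);
		/* touch the shared buffer; this section must not sleep */
		raw_spin_unlock_irqrestore(&my_log_lock, flags);
	}
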
+Index: linux-2.6/lib/ratelimit.c
+===================================================================
+--- linux-2.6.orig/lib/ratelimit.c
++++ linux-2.6/lib/ratelimit.c
+@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state
+ * in addition to the one that will be printed by
+ * the entity that is holding the lock already:
*/
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
- arm_timer(timer);
- spin_unlock(&p->sighand->siglock);
+- if (!spin_trylock_irqsave(&rs->lock, flags))
++ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
+ return 0;
-@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
- sig = tsk->signal;
- if (sig->cputimer.running) {
- struct task_cputime group_sample;
-+ unsigned long flags;
+ if (!rs->begin)
+@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state
+ rs->missed++;
+ ret = 0;
+ }
+- spin_unlock_irqrestore(&rs->lock, flags);
++ raw_spin_unlock_irqrestore(&rs->lock, flags);
-- spin_lock(&sig->cputimer.lock);
-+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
- group_sample = sig->cputimer.cputime;
-- spin_unlock(&sig->cputimer.lock);
-+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+ return ret;
+ }
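
The same conversion is applied to the ratelimit state lock. Note that ___ratelimit() only trylocks: if the lock is contended it suppresses the message instead of waiting, which avoids recursion when the rate-limited path is itself printk(). A sketch of that pattern under a raw lock (names are illustrative, not from the patch):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(my_rs_lock);

	static int my_event_allowed(void)
	{
		unsigned long flags;
		int ret;

		if (!raw_spin_trylock_irqsave(&my_rs_lock, flags))
			return 0;	/* contended: suppress rather than wait */
		/* ... update the window counters, decide ret ... */
		ret = 1;
		raw_spin_unlock_irqrestore(&my_rs_lock, flags);
		return ret;
	}
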
+Index: linux-2.6/include/linux/init_task.h
+===================================================================
+--- linux-2.6.orig/include/linux/init_task.h
++++ linux-2.6/include/linux/init_task.h
+@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+@@ -126,6 +126,12 @@ extern struct cred init_cred;
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
- if (task_cputime_expired(&group_sample, &sig->cputime_expires))
- return 1;
-@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
--void run_posix_cpu_timers(struct task_struct *tsk)
-+static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ /*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -179,6 +185,7 @@ extern struct cred init_cred;
+ .fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
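
The INIT_TIMER_LIST macro added above is the usual idiom for initializing a field that only exists under a config option: the initializer expands to nothing when the member is compiled out, so the static INIT_TASK table stays valid either way. Schematically, with illustrative names:

	#ifdef CONFIG_MY_FEATURE
	# define INIT_MY_FIELD	.my_field = NULL,
	#else
	# define INIT_MY_FIELD
	#endif

	#define INIT_MY_STRUCT(name)	\
	{				\
		.id = 0,		\
		INIT_MY_FIELD		\
	}
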
+Index: linux-2.6/include/linux/sched.h
+===================================================================
+--- linux-2.6.orig/include/linux/sched.h
++++ linux-2.6/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
++#include <asm/kmap_types.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -90,6 +91,7 @@ struct sched_param {
+ #include <linux/task_io_accounting.h>
+ #include <linux/latencytop.h>
+ #include <linux/cred.h>
++#include <linux/hardirq.h>
- /*
- * The fast path checks that there are no expired thread or thread
-@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
- }
- }
+ #include <asm/processor.h>
+
+@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ asmlinkage void schedule(void);
++extern void schedule_preempt_disabled(void);
+ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
+
+ struct nsproxy;
+@@ -510,7 +513,7 @@ struct task_cputime {
+ struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ };
+
+ #include <linux/rwsem.h>
+@@ -1070,6 +1073,7 @@ struct sched_domain;
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x04 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+
+ #define ENQUEUE_WAKEUP 1
+ #define ENQUEUE_HEAD 2
+@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -1255,14 +1260,17 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int migrate_disable;
++#ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++#endif
++#endif
+ cpumask_t cpus_allowed;
+
+ #ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+ char rcu_read_unlock_special;
+-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+- int rcu_boosted;
+-#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
+ struct list_head rcu_node_entry;
+ #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ #ifdef CONFIG_TREE_PREEMPT_RCU
+@@ -1356,6 +1364,9 @@ struct task_struct {
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
-+#include <linux/kthread.h>
-+#include <linux/cpu.h>
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++ struct task_struct *posix_timer_list;
++#endif
+
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+@@ -1389,6 +1400,7 @@ struct task_struct {
+ /* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+@@ -1432,6 +1444,9 @@ struct task_struct {
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int pagefault_disabled;
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+@@ -1558,6 +1573,12 @@ struct task_struct {
+ unsigned long trace;
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ unsigned long timer_offset;
++#endif
++#endif
+ #endif /* CONFIG_TRACING */
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+@@ -1570,10 +1591,26 @@ struct task_struct {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_t ptrace_bp_refcnt;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++#endif
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++#endif
+ };
+
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
++#else
++static inline bool cur_pf_disabled(void) { return false; }
++#endif
+
-+static int posix_cpu_timers_thread(void *data)
++static inline bool pagefault_disabled(void)
+{
-+ int cpu = (long)data;
-+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
-+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
-+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
-+
-+ /* its possible the list is empty, just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
-+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
-+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
-+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
-+ }
-+ return 0;
-+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
++ return in_atomic() || cur_pf_disabled();
+}
+
+ /*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+@@ -1743,6 +1780,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
+
-+static inline int __fastpath_timer_check(struct task_struct *tsk)
++static inline void put_task_struct(struct task_struct *t)
+{
-+ /* tsk == current, ensure it is safe to use ->signal/sighand */
-+ if (unlikely(tsk->exit_state))
-+ return 0;
-+
-+ if (!task_cputime_zero(&tsk->cputime_expires))
-+ return 1;
-+
-+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
-+ return 1;
-+
-+ return 0;
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
-+
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ unsigned long cpu = smp_processor_id();
-+ struct task_struct *tasklist;
-+
-+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
-+ return;
-+ /* get per-cpu references */
-+ tasklist = per_cpu(posix_timer_tasklist, cpu);
-+
-+ /* check to see if we're already queued */
-+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-+ get_task_struct(tsk);
-+ if (tasklist) {
-+ tsk->posix_timer_list = tasklist;
-+ } else {
-+ /*
-+ * The list is terminated by a self-pointing
-+ * task_struct
-+ */
-+ tsk->posix_timer_list = tsk;
-+ }
-+ per_cpu(posix_timer_tasklist, cpu) = tsk;
-+
-+ wake_up_process(per_cpu(posix_timer_task, cpu));
-+ }
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1750,6 +1796,7 @@ static inline void put_task_struct(struc
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
++#endif
+
+ extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+@@ -1774,6 +1821,7 @@ extern void thread_group_times(struct ta
+ #define PF_FROZEN 0x00010000 /* frozen for system suspend */
+ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+ #define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+@@ -2022,15 +2070,27 @@ static inline void sched_autogroup_exit(
+ #endif
+
+ #ifdef CONFIG_RT_MUTEXES
++extern void task_setprio(struct task_struct *p, int prio);
+ extern int rt_mutex_getprio(struct task_struct *p);
+-extern void rt_mutex_setprio(struct task_struct *p, int prio);
++static inline void rt_mutex_setprio(struct task_struct *p, int prio)
++{
++ task_setprio(p, prio);
+}
-+
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
-+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, &param);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+ /* Strictly unnecessary, as first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task,cpu),
-+ any_online_cpu(cpu_online_map));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+#endif
-+ }
-+ return NOTIFY_OK;
++ return tsk->pi_blocked_on != NULL;
+}
-+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
-+};
-+
-+static int __init posix_cpu_thread_init(void)
+ #else
+ static inline int rt_mutex_getprio(struct task_struct *p)
+ {
+ return p->normal_prio;
+ }
+ # define rt_mutex_adjust_pi(p) do { } while (0)
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
-+ void *hcpu = (void *)(long)smp_processor_id();
-+ /* Start one for boot CPU. */
-+ unsigned long cpu;
-+
-+ /* init the per-cpu posix_timer_tasklets */
-+ for_each_cpu_mask(cpu, cpu_possible_map)
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
-+ return 0;
++ return false;
+}
-+early_initcall(posix_cpu_thread_init);
-+#else /* CONFIG_PREEMPT_RT_BASE */
-+void run_posix_cpu_timers(struct task_struct *tsk)
+ #endif
+
+ extern bool yield_to(struct task_struct *p, bool preempt);
+@@ -2110,6 +2170,7 @@ extern void xtime_update(unsigned long t
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct *tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+@@ -2199,12 +2260,24 @@ extern struct mm_struct * mm_alloc(void)
+
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
+{
-+ __run_posix_cpu_timers(tsk);
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
-+#endif /* CONFIG_PREEMPT_RT_BASE */
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
+
- /*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
-Index: linux-2.6/kernel/sched_stats.h
-===================================================================
---- linux-2.6.orig/kernel/sched_stats.h
-+++ linux-2.6/kernel/sched_stats.h
-@@ -282,10 +282,10 @@ static inline void account_group_user_ti
- if (!cputimer->running)
- return;
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+@@ -2510,7 +2583,7 @@ extern int _cond_resched(void);
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.utime =
- cputime_add(cputimer->cputime.utime, cputime);
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
- }
+ extern int __cond_resched_lock(spinlock_t *lock);
- /**
-@@ -306,10 +306,10 @@ static inline void account_group_system_
- if (!cputimer->running)
- return;
+-#ifdef CONFIG_PREEMPT
++#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
+@@ -2521,12 +2594,16 @@ extern int __cond_resched_lock(spinlock_
+ __cond_resched_lock(lock); \
+ })
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.stime =
- cputime_add(cputimer->cputime.stime, cputime);
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
- }
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
- /**
-@@ -330,7 +330,7 @@ static inline void account_group_exec_ru
- if (!cputimer->running)
- return;
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.sum_exec_runtime += ns;
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
+ /*
+ * Does a critical section need to be broken due to another
+@@ -2550,7 +2627,7 @@ void thread_group_cputimer(struct task_s
+
+ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ {
+- spin_lock_init(&sig->cputimer.lock);
++ raw_spin_lock_init(&sig->cputimer.lock);
}
-Index: linux-2.6/include/linux/semaphore.h
-===================================================================
---- linux-2.6.orig/include/linux/semaphore.h
-+++ linux-2.6/include/linux/semaphore.h
-@@ -14,14 +14,14 @@
- /* Please don't access any members of this structure directly */
- struct semaphore {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- unsigned int count;
- struct list_head wait_list;
- };
+ /*
+@@ -2589,6 +2666,26 @@ static inline void set_task_cpu(struct t
- #define __SEMAPHORE_INITIALIZER(name, n) \
- { \
-- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
- .count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- }
-Index: linux-2.6/kernel/semaphore.c
-===================================================================
---- linux-2.6.orig/kernel/semaphore.c
-+++ linux-2.6/kernel/semaphore.c
-@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
- {
- unsigned long flags;
+ #endif /* CONFIG_SMP */
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- __down(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
- }
- EXPORT_SYMBOL(down);
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++#endif
++
++ return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
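
tsk_cpus_allowed() is turned from a trivial macro into a helper so that a task inside a migrate_disable() section reports only its current CPU as allowed; existing callers need no change. A simplified sketch of how a placement decision would see it (not from the patch):

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	static int my_pick_cpu(struct task_struct *p)
	{
		const struct cpumask *mask = tsk_cpus_allowed(p);

		/* with migrate_disable set this is just task_cpu(p) */
		return cpumask_first(mask);
	}
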
-@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore
+Index: linux-2.6/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-2.6.orig/kernel/posix-cpu-timers.c
++++ linux-2.6/kernel/posix-cpu-timers.c
+@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_s
+ struct task_cputime sum;
unsigned long flags;
- int result = 0;
-
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_interruptible(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
- return result;
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ if (!cputimer->running) {
+ cputimer->running = 1;
+ /*
+@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_s
+ update_gt_cputime(&cputimer->cputime, &sum);
+ }
+ *times = cputimer->cputime;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
-@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
- unsigned long flags;
- int result = 0;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_killable(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ /*
+@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
- return result;
- }
-@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
+ struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
- int count;
-
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- count = sem->count - 1;
- if (likely(count >= 0))
- sem->count = count;
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
- return (count < 0);
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 0;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
-@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem,
- unsigned long flags;
- int result = 0;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_timeout(sem, jiffies);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ static u32 onecputick;
+@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
- return result;
- }
-@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
- {
- unsigned long flags;
+@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(list_empty(&sem->wait_list)))
- sem->count++;
- else
- __up(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
- }
- EXPORT_SYMBOL(up);
+- spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
-@@ -217,9 +217,9 @@ static inline int __sched __down_common(
- if (timeout <= 0)
- goto timed_out;
- __set_task_state(task, state);
-- spin_unlock_irq(&sem->lock);
-+ raw_spin_unlock_irq(&sem->lock);
- timeout = schedule_timeout(timeout);
-- spin_lock_irq(&sem->lock);
-+ raw_spin_lock_irq(&sem->lock);
- if (waiter.up)
- return 0;
- }
-Index: linux-2.6/include/linux/rwsem-spinlock.h
-===================================================================
---- linux-2.6.orig/include/linux/rwsem-spinlock.h
-+++ linux-2.6/include/linux/rwsem-spinlock.h
-@@ -20,26 +20,42 @@
- * - if activity is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
*/
-+struct rw_anon_semaphore {
-+ __s32 activity;
-+ raw_spinlock_t wait_lock;
-+ struct list_head wait_list;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Non preempt-rt implementation of rw_semaphore. Same as above, but
-+ * restricted vs. ownership. i.e. ownerless locked state and non owner
-+ * release not allowed.
-+ */
- struct rw_semaphore {
- __s32 activity;
-- spinlock_t wait_lock;
-+ raw_spinlock_t wait_lock;
- struct list_head wait_list;
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
- #endif
- };
-+#endif /* PREEMPT_RT_FULL */
-
- #define RWSEM_UNLOCKED_VALUE 0x00000000
-
--extern void __down_read(struct rw_semaphore *sem);
--extern int __down_read_trylock(struct rw_semaphore *sem);
--extern void __down_write(struct rw_semaphore *sem);
--extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
--extern int __down_write_trylock(struct rw_semaphore *sem);
--extern void __up_read(struct rw_semaphore *sem);
--extern void __up_write(struct rw_semaphore *sem);
--extern void __downgrade_write(struct rw_semaphore *sem);
--extern int rwsem_is_locked(struct rw_semaphore *sem);
-+extern void __down_read(struct rw_anon_semaphore *sem);
-+extern int __down_read_trylock(struct rw_anon_semaphore *sem);
-+extern void __down_write(struct rw_anon_semaphore *sem);
-+extern void __down_write_nested(struct rw_anon_semaphore *sem, int subclass);
-+extern int __down_write_trylock(struct rw_anon_semaphore *sem);
-+extern void __up_read(struct rw_anon_semaphore *sem);
-+extern void __up_write(struct rw_anon_semaphore *sem);
-+extern void __downgrade_write(struct rw_anon_semaphore *sem);
-+extern int anon_rwsem_is_locked(struct rw_anon_semaphore *sem);
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
- #endif /* __KERNEL__ */
- #endif /* _LINUX_RWSEM_SPINLOCK_H */
-Index: linux-2.6/include/linux/rwsem.h
-===================================================================
---- linux-2.6.orig/include/linux/rwsem.h
-+++ linux-2.6/include/linux/rwsem.h
-@@ -17,37 +17,50 @@
- #include <asm/system.h>
- #include <asm/atomic.h>
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
-+struct rw_anon_semaphore;
- struct rw_semaphore;
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
+ }
+ }
- #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
- #include <linux/rwsem-spinlock.h> /* use a generic implementation */
--#else
-+#else /* RWSEM_GENERIC_SPINLOCK */
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
- /* All arch specific implementations share the same struct */
--struct rw_semaphore {
-+struct rw_anon_semaphore {
- long count;
-- spinlock_t wait_lock;
-+ raw_spinlock_t wait_lock;
- struct list_head wait_list;
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
- #endif
- };
-
--extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
--extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
--extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
--extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_down_read_failed(struct rw_anon_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_down_write_failed(struct rw_anon_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *);
-+extern struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
-
- /* Include the arch specific part */
- #include <asm/rwsem.h>
-
- /* In all implementations count != 0 means locked */
--static inline int rwsem_is_locked(struct rw_semaphore *sem)
-+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
- {
- return sem->count != 0;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+struct rw_semaphore {
-+ long count;
-+ raw_spinlock_t wait_lock;
-+ struct list_head wait_list;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
- #endif
-
-+#endif /* !RWSEM_GENERIC_SPINLOCK */
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
+
- /* Common initializer macros and functions */
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
-@@ -56,57 +69,59 @@ static inline int rwsem_is_locked(struct
- # define __RWSEM_DEP_MAP_INIT(lockname)
- #endif
-
--#define __RWSEM_INITIALIZER(name) \
-- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
-- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-+#define __RWSEM_ANON_INITIALIZER(name) \
-+ { RWSEM_UNLOCKED_VALUE, \
-+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ LIST_HEAD_INIT((name).wait_list) \
-+ __RWSEM_DEP_MAP_INIT(name) }
-
--#define DECLARE_RWSEM(name) \
-- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-+#define DECLARE_ANON_RWSEM(name) \
-+ struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name)
-
--extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key);
-+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key);
-
--#define init_rwsem(sem) \
-+#define init_anon_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
-- __init_rwsem((sem), #sem, &__key); \
-+ __init_anon_rwsem((sem), #sem, &__key); \
- } while (0)
-
- /*
- * lock for reading
- */
--extern void down_read(struct rw_semaphore *sem);
-+extern void anon_down_read(struct rw_anon_semaphore *sem);
-
- /*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
--extern int down_read_trylock(struct rw_semaphore *sem);
-+extern int anon_down_read_trylock(struct rw_anon_semaphore *sem);
-
- /*
- * lock for writing
- */
--extern void down_write(struct rw_semaphore *sem);
-+extern void anon_down_write(struct rw_anon_semaphore *sem);
-
- /*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
--extern int down_write_trylock(struct rw_semaphore *sem);
-+extern int anon_down_write_trylock(struct rw_anon_semaphore *sem);
-
- /*
- * release a read lock
- */
--extern void up_read(struct rw_semaphore *sem);
-+extern void anon_up_read(struct rw_anon_semaphore *sem);
-
- /*
- * release a write lock
- */
--extern void up_write(struct rw_semaphore *sem);
-+extern void anon_up_write(struct rw_anon_semaphore *sem);
-
- /*
- * downgrade write lock to read lock
- */
--extern void downgrade_write(struct rw_semaphore *sem);
-+extern void anon_downgrade_write(struct rw_anon_semaphore *sem);
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -122,21 +137,101 @@ extern void downgrade_write(struct rw_se
- * lockdep_set_class() at lock initialization time.
- * See Documentation/lockdep-design.txt for more details.)
- */
--extern void down_read_nested(struct rw_semaphore *sem, int subclass);
--extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-+extern void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass);
-+extern void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass);
- /*
- * Take/release a lock when not the owner will release it.
- *
- * [ This API should be avoided as much as possible - the
- * proper abstraction for this case is completions. ]
- */
--extern void down_read_non_owner(struct rw_semaphore *sem);
--extern void up_read_non_owner(struct rw_semaphore *sem);
-+extern void anon_down_read_non_owner(struct rw_anon_semaphore *sem);
-+extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
- #else
--# define down_read_nested(sem, subclass) down_read(sem)
--# define down_write_nested(sem, subclass) down_write(sem)
--# define down_read_non_owner(sem) down_read(sem)
--# define up_read_non_owner(sem) up_read(sem)
-+# define anon_down_read_nested(sem, subclass) anon_down_read(sem)
-+# define anon_down_write_nested(sem, subclass) anon_down_write(sem)
-+# define anon_down_read_non_owner(sem) anon_down_read(sem)
-+# define anon_up_read_non_owner(sem) anon_up_read(sem)
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+#include <linux/rwsem_rt.h>
-+#else /* PREEMPT_RT_FULL */
-+/*
-+ * Non preempt-rt implementations
-+ */
-+#define __RWSEM_INITIALIZER(name) \
-+ { RWSEM_UNLOCKED_VALUE, \
-+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ LIST_HEAD_INIT((name).wait_list) \
-+ __RWSEM_DEP_MAP_INIT(name) }
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
+
-+#define DECLARE_RWSEM(name) \
-+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
+
-+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
-+{
-+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
-+}
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
+
-+#define init_rwsem(sem) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ __init_rwsem((sem), #sem, &__key); \
-+} while (0)
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
+
-+static inline void down_read(struct rw_semaphore *sem)
-+{
-+ anon_down_read((struct rw_anon_semaphore *)sem);
-+}
++ /* its possible the list is empty, just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
+
-+static inline int down_read_trylock(struct rw_semaphore *sem)
-+{
-+ return anon_down_read_trylock((struct rw_anon_semaphore *)sem);
-+}
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
+
-+static inline void down_write(struct rw_semaphore *sem)
-+{
-+ anon_down_write((struct rw_anon_semaphore *)sem);
-+}
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
+
-+static inline int down_write_trylock(struct rw_semaphore *sem)
-+{
-+ return anon_down_write_trylock((struct rw_anon_semaphore *)sem);
-+}
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
+
-+static inline void up_read(struct rw_semaphore *sem)
-+{
-+ anon_up_read((struct rw_anon_semaphore *)sem);
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
+}
+
-+static inline void up_write(struct rw_semaphore *sem)
++static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
-+ anon_up_write((struct rw_anon_semaphore *)sem);
-+}
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
+
-+static inline void downgrade_write(struct rw_semaphore *sem)
-+{
-+ anon_downgrade_write((struct rw_anon_semaphore *)sem);
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
+}
+
-+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++void run_posix_cpu_timers(struct task_struct *tsk)
+{
-+ return anon_down_read_nested((struct rw_anon_semaphore *)sem, subclass);
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
+}
+
-+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
+{
-+ anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task,cpu),
++ any_online_cpu(cpu_online_map));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
+}
+
-+static inline int rwsem_is_locked(struct rw_semaphore *sem)
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
+{
-+ return anon_rwsem_is_locked((struct rw_anon_semaphore *)sem);
-+}
-+#endif /* !PREEMPT_RT_FULL */
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
+
- #endif /* _LINUX_RWSEM_H */
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_cpu_mask(cpu, cpu_possible_map)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
-Index: linux-2.6/lib/rwsem-spinlock.c
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
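
The section above moves posix CPU timer expiry off the timer interrupt on PREEMPT_RT_BASE: run_posix_cpu_timers() now only chains the task onto a per-CPU list and wakes a SCHED_FIFO kthread, which does the real timer processing in a context where it may take sleeping locks. The wake/sleep handshake reduces to this pattern (illustrative names, not the patch code):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/atomic.h>

	static atomic_t my_pending = ATOMIC_INIT(0);

	static int my_worker(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!atomic_xchg(&my_pending, 0))
				schedule();
			__set_current_state(TASK_RUNNING);
			/* process the queued work here */
		}
		return 0;
	}

	/* hard-IRQ side: just flag the work and wake the thread */
	static void my_raise(struct task_struct *worker)
	{
		atomic_set(&my_pending, 1);
		wake_up_process(worker);
	}
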
+Index: linux-2.6/kernel/sched_stats.h
===================================================================
---- linux-2.6.orig/lib/rwsem-spinlock.c
-+++ linux-2.6/lib/rwsem-spinlock.c
-@@ -17,24 +17,24 @@ struct rwsem_waiter {
- #define RWSEM_WAITING_FOR_WRITE 0x00000002
- };
+--- linux-2.6.orig/kernel/sched_stats.h
++++ linux-2.6/kernel/sched_stats.h
+@@ -282,10 +282,10 @@ static inline void account_group_user_ti
+ if (!cputimer->running)
+ return;
--int rwsem_is_locked(struct rw_semaphore *sem)
-+int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
- {
- int ret = 1;
- unsigned long flags;
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.utime =
+ cputime_add(cputimer->cputime.utime, cputime);
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
+ }
-- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
-+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
- ret = (sem->activity != 0);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- return ret;
+ /**
+@@ -306,10 +306,10 @@ static inline void account_group_system_
+ if (!cputimer->running)
+ return;
+
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.stime =
+ cputime_add(cputimer->cputime.stime, cputime);
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
}
--EXPORT_SYMBOL(rwsem_is_locked);
-+EXPORT_SYMBOL(anon_rwsem_is_locked);
- /*
- * initialise the semaphore
- */
--void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key)
-+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
- {
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -44,10 +44,10 @@ void __init_rwsem(struct rw_semaphore *s
- lockdep_init_map(&sem->dep_map, name, key, 0);
- #endif
- sem->activity = 0;
-- spin_lock_init(&sem->wait_lock);
-+ raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
+ /**
+@@ -330,7 +330,7 @@ static inline void account_group_exec_ru
+ if (!cputimer->running)
+ return;
+
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.sum_exec_runtime += ns;
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
}
--EXPORT_SYMBOL(__init_rwsem);
-+EXPORT_SYMBOL(__init_anon_rwsem);
+Index: linux-2.6/include/linux/semaphore.h
+===================================================================
+--- linux-2.6.orig/include/linux/semaphore.h
++++ linux-2.6/include/linux/semaphore.h
+@@ -14,14 +14,14 @@
- /*
- * handle the lock release when processes blocked on it that can now run
-@@ -58,8 +58,8 @@ EXPORT_SYMBOL(__init_rwsem);
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if wakewrite is non-zero
- */
--static inline struct rw_semaphore *
--__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
-+static inline struct rw_anon_semaphore *
-+__rwsem_do_wake(struct rw_anon_semaphore *sem, int wakewrite)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -117,8 +117,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
- /*
- * wake a single writer
- */
--static inline struct rw_semaphore *
--__rwsem_wake_one_writer(struct rw_semaphore *sem)
-+static inline struct rw_anon_semaphore *
-+__rwsem_wake_one_writer(struct rw_anon_semaphore *sem)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -139,18 +139,18 @@ __rwsem_wake_one_writer(struct rw_semaph
- /*
- * get a read lock on the semaphore
- */
--void __sched __down_read(struct rw_semaphore *sem)
-+void __sched __down_read(struct rw_anon_semaphore *sem)
+ /* Please don't access any members of this structure directly */
+ struct semaphore {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ unsigned int count;
+ struct list_head wait_list;
+ };
+
+ #define __SEMAPHORE_INITIALIZER(name, n) \
+ { \
+- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
+ .count = n, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ }
+Index: linux-2.6/kernel/semaphore.c
+===================================================================
+--- linux-2.6.orig/kernel/semaphore.c
++++ linux-2.6/kernel/semaphore.c
+@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
{
- struct rwsem_waiter waiter;
- struct task_struct *tsk;
unsigned long flags;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ __down(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ }
+ EXPORT_SYMBOL(down);
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->activity++;
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- goto out;
- }
-
-@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semap
- list_add_tail(&waiter.list, &sem->wait_list);
-
- /* we don't need to touch the semaphore struct anymore */
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- /* wait to be given the lock */
- for (;;) {
-@@ -183,13 +183,13 @@ void __sched __down_read(struct rw_semap
- /*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
--int __down_read_trylock(struct rw_semaphore *sem)
-+int __down_read_trylock(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
- int ret = 0;
-
-
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
-@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaph
- ret = 1;
- }
-
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- return ret;
- }
-@@ -206,18 +206,18 @@ int __down_read_trylock(struct rw_semaph
- * get a write lock on the semaphore
- * - we increment the waiting count anyway to indicate an exclusive lock
- */
--void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
-+void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
- {
- struct rwsem_waiter waiter;
- struct task_struct *tsk;
+@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore
unsigned long flags;
+ int result = 0;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->activity == 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->activity = -1;
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- goto out;
- }
-
-@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct
- list_add_tail(&waiter.list, &sem->wait_list);
-
- /* we don't need to touch the semaphore struct anymore */
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- /* wait to be given the lock */
- for (;;) {
-@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct
- ;
- }
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_interruptible(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
--void __sched __down_write(struct rw_semaphore *sem)
-+void __sched __down_write(struct rw_anon_semaphore *sem)
- {
- __down_write_nested(sem, 0);
+ return result;
}
-@@ -255,12 +255,12 @@ void __sched __down_write(struct rw_sema
- /*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
--int __down_write_trylock(struct rw_semaphore *sem)
-+int __down_write_trylock(struct rw_anon_semaphore *sem)
- {
+@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
unsigned long flags;
- int ret = 0;
-
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->activity == 0 && list_empty(&sem->wait_list)) {
- /* granted */
-@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semap
- ret = 1;
- }
+ int result = 0;
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_killable(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
- return ret;
+ return result;
}
-@@ -276,48 +276,48 @@ int __down_write_trylock(struct rw_semap
- /*
- * release a read lock on the semaphore
- */
--void __up_read(struct rw_semaphore *sem)
-+void __up_read(struct rw_anon_semaphore *sem)
- {
+@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
unsigned long flags;
+ int count;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (--sem->activity == 0 && !list_empty(&sem->wait_list))
- sem = __rwsem_wake_one_writer(sem);
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ count = sem->count - 1;
+ if (likely(count >= 0))
+ sem->count = count;
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ return (count < 0);
}
-
- /*
- * release a write lock on the semaphore
- */
--void __up_write(struct rw_semaphore *sem)
-+void __up_write(struct rw_anon_semaphore *sem)
- {
+@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem,
unsigned long flags;
+ int result = 0;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- sem->activity = 0;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 1);
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_timeout(sem, jiffies);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ return result;
}
-
- /*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
--void __downgrade_write(struct rw_semaphore *sem)
-+void __downgrade_write(struct rw_anon_semaphore *sem)
+@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
{
unsigned long flags;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- sem->activity = 1;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 0);
-
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count++;
+ else
+ __up(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
}
+ EXPORT_SYMBOL(up);
-Index: linux-2.6/lib/rwsem.c
+@@ -217,9 +217,9 @@ static inline int __sched __down_common(
+ if (timeout <= 0)
+ goto timed_out;
+ __set_task_state(task, state);
+- spin_unlock_irq(&sem->lock);
++ raw_spin_unlock_irq(&sem->lock);
+ timeout = schedule_timeout(timeout);
+- spin_lock_irq(&sem->lock);
++ raw_spin_lock_irq(&sem->lock);
+ if (waiter.up)
+ return 0;
+ }
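
Note: the __down_common() hunk above preserves the invariant that makes the
raw lock conversion safe: the task never sleeps while holding the raw
spinlock. A condensed sketch of the pattern (illustrative only, not the
exact kernel code; condition_is_met() stands in for the waiter.up test):

	raw_spin_lock_irq(&sem->lock);
	for (;;) {
		if (condition_is_met(sem))	/* e.g. waiter.up set by up() */
			break;
		__set_task_state(task, TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&sem->lock);	/* drop the raw lock ... */
		timeout = schedule_timeout(timeout);	/* ... before sleeping */
		raw_spin_lock_irq(&sem->lock);		/* retake, then recheck */
	}
	raw_spin_unlock_irq(&sem->lock);
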
+Index: linux-2.6/include/linux/rwsem-spinlock.h
===================================================================
---- linux-2.6.orig/lib/rwsem.c
-+++ linux-2.6/lib/rwsem.c
-@@ -11,8 +11,8 @@
- /*
- * Initialize an rwsem:
+--- linux-2.6.orig/include/linux/rwsem-spinlock.h
++++ linux-2.6/include/linux/rwsem-spinlock.h
+@@ -20,26 +20,42 @@
+ * - if activity is -1 then there is one active writer
+ * - if wait_list is not empty, then there are processes waiting for the semaphore
*/
--void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key)
-+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
- {
++struct rw_anon_semaphore {
++ __s32 activity;
++ raw_spinlock_t wait_lock;
++ struct list_head wait_list;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++/*
++ * Non preempt-rt implementation of rw_semaphore. Same as above, but
++ * restricted vs. ownership. i.e. ownerless locked state and non owner
++ * release not allowed.
++ */
+ struct rw_semaphore {
+ __s32 activity;
+- spinlock_t wait_lock;
++ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -22,11 +22,11 @@ void __init_rwsem(struct rw_semaphore *s
- lockdep_init_map(&sem->dep_map, name, key, 0);
+ struct lockdep_map dep_map;
#endif
- sem->count = RWSEM_UNLOCKED_VALUE;
-- spin_lock_init(&sem->wait_lock);
-+ raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
- }
+ };
++#endif /* PREEMPT_RT_FULL */
--EXPORT_SYMBOL(__init_rwsem);
-+EXPORT_SYMBOL(__init_anon_rwsem);
+ #define RWSEM_UNLOCKED_VALUE 0x00000000
- struct rwsem_waiter {
- struct list_head list;
-@@ -54,8 +54,8 @@ struct rwsem_waiter {
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if downgrading is false
- */
--static struct rw_semaphore *
--__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
-+static struct rw_anon_semaphore *
-+__rwsem_do_wake(struct rw_anon_semaphore *sem, int wake_type)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -169,8 +169,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
- /*
- * wait for a lock to be granted
- */
--static struct rw_semaphore __sched *
--rwsem_down_failed_common(struct rw_semaphore *sem,
-+static struct rw_anon_semaphore __sched *
-+rwsem_down_failed_common(struct rw_anon_semaphore *sem,
- unsigned int flags, signed long adjustment)
- {
- struct rwsem_waiter waiter;
-@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semap
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+-extern void __down_read(struct rw_semaphore *sem);
+-extern int __down_read_trylock(struct rw_semaphore *sem);
+-extern void __down_write(struct rw_semaphore *sem);
+-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+-extern int __down_write_trylock(struct rw_semaphore *sem);
+-extern void __up_read(struct rw_semaphore *sem);
+-extern void __up_write(struct rw_semaphore *sem);
+-extern void __downgrade_write(struct rw_semaphore *sem);
+-extern int rwsem_is_locked(struct rw_semaphore *sem);
++extern void __down_read(struct rw_anon_semaphore *sem);
++extern int __down_read_trylock(struct rw_anon_semaphore *sem);
++extern void __down_write(struct rw_anon_semaphore *sem);
++extern void __down_write_nested(struct rw_anon_semaphore *sem, int subclass);
++extern int __down_write_trylock(struct rw_anon_semaphore *sem);
++extern void __up_read(struct rw_anon_semaphore *sem);
++extern void __up_write(struct rw_anon_semaphore *sem);
++extern void __downgrade_write(struct rw_anon_semaphore *sem);
++extern int anon_rwsem_is_locked(struct rw_anon_semaphore *sem);
- /* set up my own style of waitqueue */
-- spin_lock_irq(&sem->wait_lock);
-+ raw_spin_lock_irq(&sem->wait_lock);
- waiter.task = tsk;
- waiter.flags = flags;
- get_task_struct(tsk);
-@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semap
- adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_RWSEM_SPINLOCK_H */
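
Note: after this hunk the header carries two structures with identical
layout: rw_anon_semaphore, which keeps the spinlock based implementation on
all configurations, and rw_semaphore, which only exists in this form when
PREEMPT_RT_FULL is off. The wrappers added to rwsem.h below simply cast
rw_semaphore to rw_anon_semaphore, which is valid only while the two stay
field-for-field identical. A hypothetical compile-time guard for that
assumption (not part of the patch) could read:

	BUILD_BUG_ON(sizeof(struct rw_semaphore) !=
		     sizeof(struct rw_anon_semaphore));
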
+Index: linux-2.6/include/linux/rwsem.h
+===================================================================
+--- linux-2.6.orig/include/linux/rwsem.h
++++ linux-2.6/include/linux/rwsem.h
+@@ -17,37 +17,50 @@
+ #include <asm/system.h>
+ #include <asm/atomic.h>
-- spin_unlock_irq(&sem->wait_lock);
-+ raw_spin_unlock_irq(&sem->wait_lock);
++struct rw_anon_semaphore;
+ struct rw_semaphore;
- /* wait to be given the lock */
- for (;;) {
-@@ -222,7 +222,8 @@ rwsem_down_failed_common(struct rw_semap
- /*
- * wait for the read lock to be granted
- */
--struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-+struct rw_anon_semaphore __sched *
-+rwsem_down_read_failed(struct rw_anon_semaphore *sem)
- {
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
- -RWSEM_ACTIVE_READ_BIAS);
-@@ -231,7 +232,8 @@ struct rw_semaphore __sched *rwsem_down_
- /*
- * wait for the write lock to be granted
- */
--struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
-+struct rw_anon_semaphore __sched *
-+rwsem_down_write_failed(struct rw_anon_semaphore *sem)
- {
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
- -RWSEM_ACTIVE_WRITE_BIAS);
-@@ -241,17 +243,17 @@ struct rw_semaphore __sched *rwsem_down_
- * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of count if we come here
- */
--struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
-+struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+ #include <linux/rwsem-spinlock.h> /* use a generic implementation */
+-#else
++#else /* RWSEM_GENERIC_SPINLOCK */
++
+ /* All arch specific implementations share the same struct */
+-struct rw_semaphore {
++struct rw_anon_semaphore {
+ long count;
+- spinlock_t wait_lock;
++ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+ };
+
+-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_down_read_failed(struct rw_anon_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_down_write_failed(struct rw_anon_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *);
++extern struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
+
+ /* Include the arch specific part */
+ #include <asm/rwsem.h>
+
+ /* In all implementations count != 0 means locked */
+-static inline int rwsem_is_locked(struct rw_semaphore *sem)
++static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
{
- unsigned long flags;
+ return sem->count != 0;
+ }
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
++#ifndef CONFIG_PREEMPT_RT_FULL
++struct rw_semaphore {
++ long count;
++ raw_spinlock_t wait_lock;
++ struct list_head wait_list;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
+ #endif
- /* do nothing if list empty */
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
++#endif /* !RWSEM_GENERIC_SPINLOCK */
++
+ /* Common initializer macros and functions */
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -56,57 +69,59 @@ static inline int rwsem_is_locked(struct
+ # define __RWSEM_DEP_MAP_INIT(lockname)
+ #endif
- return sem;
- }
-@@ -261,17 +263,17 @@ struct rw_semaphore *rwsem_wake(struct r
- * - caller incremented waiting part of count and discovered it still negative
- * - just wake up any readers at the front of the queue
- */
--struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
-+struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
+-#define __RWSEM_INITIALIZER(name) \
+- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
+- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
++#define __RWSEM_ANON_INITIALIZER(name) \
++ { RWSEM_UNLOCKED_VALUE, \
++ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ LIST_HEAD_INIT((name).wait_list) \
++ __RWSEM_DEP_MAP_INIT(name) }
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+-#define DECLARE_RWSEM(name) \
+- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
++#define DECLARE_ANON_RWSEM(name) \
++ struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name)
- /* do nothing if list empty */
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key);
++extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+-#define init_rwsem(sem) \
++#define init_anon_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+- __init_rwsem((sem), #sem, &__key); \
++ __init_anon_rwsem((sem), #sem, &__key); \
+ } while (0)
- return sem;
- }
-Index: linux-2.6/kernel/time/timer_stats.c
-===================================================================
---- linux-2.6.orig/kernel/time/timer_stats.c
-+++ linux-2.6/kernel/time/timer_stats.c
-@@ -81,7 +81,7 @@ struct entry {
/*
- * Spinlock protecting the tables - not taken during lookup:
+ * lock for reading
*/
--static DEFINE_SPINLOCK(table_lock);
-+static DEFINE_RAW_SPINLOCK(table_lock);
+-extern void down_read(struct rw_semaphore *sem);
++extern void anon_down_read(struct rw_anon_semaphore *sem);
/*
- * Per-CPU lookup locks for fast hash lookup:
-@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct
- prev = NULL;
- curr = *head;
-
-- spin_lock(&table_lock);
-+ raw_spin_lock(&table_lock);
- /*
- * Make sure we have not raced with another CPU:
- */
-@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct
- *head = curr;
- }
- out_unlock:
-- spin_unlock(&table_lock);
-+ raw_spin_unlock(&table_lock);
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+-extern int down_read_trylock(struct rw_semaphore *sem);
++extern int anon_down_read_trylock(struct rw_anon_semaphore *sem);
- return curr;
- }
-Index: linux-2.6/kernel/latencytop.c
-===================================================================
---- linux-2.6.orig/kernel/latencytop.c
-+++ linux-2.6/kernel/latencytop.c
-@@ -58,7 +58,7 @@
- #include <linux/list.h>
- #include <linux/stacktrace.h>
+ /*
+ * lock for writing
+ */
+-extern void down_write(struct rw_semaphore *sem);
++extern void anon_down_write(struct rw_anon_semaphore *sem);
--static DEFINE_SPINLOCK(latency_lock);
-+static DEFINE_RAW_SPINLOCK(latency_lock);
+ /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+-extern int down_write_trylock(struct rw_semaphore *sem);
++extern int anon_down_write_trylock(struct rw_anon_semaphore *sem);
- #define MAXLR 128
- static struct latency_record latency_record[MAXLR];
-@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct ta
- if (!latencytop_enabled)
- return;
+ /*
+ * release a read lock
+ */
+-extern void up_read(struct rw_semaphore *sem);
++extern void anon_up_read(struct rw_anon_semaphore *sem);
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
- memset(&p->latency_record, 0, sizeof(p->latency_record));
- p->latency_record_count = 0;
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
- }
+ /*
+ * release a write lock
+ */
+-extern void up_write(struct rw_semaphore *sem);
++extern void anon_up_write(struct rw_anon_semaphore *sem);
- static void clear_global_latency_tracing(void)
+ /*
+ * downgrade write lock to read lock
+ */
+-extern void downgrade_write(struct rw_semaphore *sem);
++extern void anon_downgrade_write(struct rw_anon_semaphore *sem);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -122,21 +137,101 @@ extern void downgrade_write(struct rw_se
+ * lockdep_set_class() at lock initialization time.
+ * See Documentation/lockdep-design.txt for more details.)
+ */
+-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
++extern void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass);
++extern void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass);
+ /*
+ * Take/release a lock when not the owner will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ * proper abstraction for this case is completions. ]
+ */
+-extern void down_read_non_owner(struct rw_semaphore *sem);
+-extern void up_read_non_owner(struct rw_semaphore *sem);
++extern void anon_down_read_non_owner(struct rw_anon_semaphore *sem);
++extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
+ #else
+-# define down_read_nested(sem, subclass) down_read(sem)
+-# define down_write_nested(sem, subclass) down_write(sem)
+-# define down_read_non_owner(sem) down_read(sem)
+-# define up_read_non_owner(sem) up_read(sem)
++# define anon_down_read_nested(sem, subclass) anon_down_read(sem)
++# define anon_down_write_nested(sem, subclass) anon_down_write(sem)
++# define anon_down_read_non_owner(sem) anon_down_read(sem)
++# define anon_up_read_non_owner(sem) anon_up_read(sem)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++/*
++ * Non preempt-rt implementations
++ */
++#define __RWSEM_INITIALIZER(name) \
++ { RWSEM_UNLOCKED_VALUE, \
++ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ LIST_HEAD_INIT((name).wait_list) \
++ __RWSEM_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(name) \
++ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
++
++static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
++ struct lock_class_key *key)
++{
++ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
++}
++
++#define init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __init_rwsem((sem), #sem, &__key); \
++} while (0)
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ anon_down_read((struct rw_anon_semaphore *)sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return anon_down_read_trylock((struct rw_anon_semaphore *)sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ anon_down_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return anon_down_write_trylock((struct rw_anon_semaphore *)sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ anon_up_read((struct rw_anon_semaphore *)sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ anon_up_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ anon_downgrade_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return anon_down_read_nested((struct rw_anon_semaphore *)sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
++}
++
++static inline int rwsem_is_locked(struct rw_semaphore *sem)
++{
++ return anon_rwsem_is_locked((struct rw_anon_semaphore *)sem);
++}
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
++
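
Note: the rwsem.h section above is the heart of the rename: the
down_read()/up_write() family moves to anon_* names operating on
rw_anon_semaphore, and on !PREEMPT_RT_FULL the familiar rw_semaphore API is
re-created as inline wrappers that cast to the anon type. On RT,
linux/rwsem_rt.h instead maps rw_semaphore onto an rt_mutex based lock with
strict owner semantics, so code that legitimately releases a lock from a
task other than the acquirer must use the anon variant. A hypothetical
example of such a user (all names invented for illustration; the semaphore
would be set up with init_anon_rwsem()):

	static struct rw_anon_semaphore io_sem;

	static void submit_io(void)
	{
		/* taken here, released by the completion path below */
		anon_down_read_non_owner(&io_sem);
		queue_completion_work();	/* invented helper */
	}

	static void complete_io(void)		/* may run in another task */
	{
		anon_up_read_non_owner(&io_sem);
	}
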
+Index: linux-2.6/lib/rwsem-spinlock.c
+===================================================================
+--- linux-2.6.orig/lib/rwsem-spinlock.c
++++ linux-2.6/lib/rwsem-spinlock.c
+@@ -17,24 +17,24 @@ struct rwsem_waiter {
+ #define RWSEM_WAITING_FOR_WRITE 0x00000002
+ };
+
+-int rwsem_is_locked(struct rw_semaphore *sem)
++int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
{
+ int ret = 1;
unsigned long flags;
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
- memset(&latency_record, 0, sizeof(latency_record));
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
+- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
++ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ ret = (sem->activity != 0);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+ return ret;
}
+-EXPORT_SYMBOL(rwsem_is_locked);
++EXPORT_SYMBOL(anon_rwsem_is_locked);
- static void __sched
-@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_
- lat.max = usecs;
- store_stacktrace(tsk, &lat);
-
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
+ /*
+ * initialise the semaphore
+ */
+-void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key)
++void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -44,10 +44,10 @@ void __init_rwsem(struct rw_semaphore *s
+ lockdep_init_map(&sem->dep_map, name, key, 0);
+ #endif
+ sem->activity = 0;
+- spin_lock_init(&sem->wait_lock);
++ raw_spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+ }
+-EXPORT_SYMBOL(__init_rwsem);
++EXPORT_SYMBOL(__init_anon_rwsem);
- account_global_scheduler_latency(tsk, &lat);
+ /*
+ * handle the lock release when processes blocked on it that can now run
+@@ -58,8 +58,8 @@ EXPORT_SYMBOL(__init_rwsem);
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if wakewrite is non-zero
+ */
+-static inline struct rw_semaphore *
+-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
++static inline struct rw_anon_semaphore *
++__rwsem_do_wake(struct rw_anon_semaphore *sem, int wakewrite)
+ {
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -117,8 +117,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
+ /*
+ * wake a single writer
+ */
+-static inline struct rw_semaphore *
+-__rwsem_wake_one_writer(struct rw_semaphore *sem)
++static inline struct rw_anon_semaphore *
++__rwsem_wake_one_writer(struct rw_anon_semaphore *sem)
+ {
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -139,18 +139,18 @@ __rwsem_wake_one_writer(struct rw_semaph
+ /*
+ * get a read lock on the semaphore
+ */
+-void __sched __down_read(struct rw_semaphore *sem)
++void __sched __down_read(struct rw_anon_semaphore *sem)
+ {
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+ unsigned long flags;
-@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_
- memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- out_unlock:
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
- }
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity++;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ goto out;
+ }
- static int lstats_show(struct seq_file *m, void *v)
-Index: linux-2.6/drivers/video/console/vgacon.c
-===================================================================
---- linux-2.6.orig/drivers/video/console/vgacon.c
-+++ linux-2.6/drivers/video/console/vgacon.c
-@@ -50,7 +50,7 @@
- #include <video/vga.h>
- #include <asm/io.h>
+@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semap
+ list_add_tail(&waiter.list, &sem->wait_list);
--static DEFINE_SPINLOCK(vga_lock);
-+static DEFINE_RAW_SPINLOCK(vga_lock);
- static int cursor_size_lastfrom;
- static int cursor_size_lastto;
- static u32 vgacon_xres;
-@@ -157,7 +157,7 @@ static inline void write_vga(unsigned ch
- * ddprintk might set the console position from interrupt
- * handlers, thus the write has to be IRQ-atomic.
- */
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
+ /* we don't need to touch the semaphore struct anymore */
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- #ifndef SLOW_VGA
- v1 = reg + (val & 0xff00);
-@@ -170,7 +170,7 @@ static inline void write_vga(unsigned ch
- outb_p(reg + 1, vga_video_port_reg);
- outb_p(val & 0xff, vga_video_port_val);
- #endif
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
- }
+ /* wait to be given the lock */
+ for (;;) {
+@@ -183,13 +183,13 @@ void __sched __down_read(struct rw_semap
+ /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+-int __down_read_trylock(struct rw_semaphore *sem)
++int __down_read_trylock(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
+ int ret = 0;
- static inline void vga_set_mem_top(struct vc_data *c)
-@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int x
- cursor_size_lastfrom = from;
- cursor_size_lastto = to;
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
- if (vga_video_type >= VIDEO_TYPE_VGAC) {
- outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
- curs = inb_p(vga_video_port_val);
-@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int x
- outb_p(curs, vga_video_port_val);
- outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
- outb_p(cure, vga_video_port_val);
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
- }
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- static void vgacon_cursor(struct vc_data *c, int mode)
-@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_dat
- unsigned int scanlines = height * c->vc_font.height;
- u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaph
+ ret = 1;
+ }
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- vgacon_xres = width * VGA_FONTWIDTH;
- vgacon_yres = height * c->vc_font.height;
-@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_dat
- outb_p(vsync_end, vga_video_port_val);
+ return ret;
+ }
+@@ -206,18 +206,18 @@ int __down_read_trylock(struct rw_semaph
+ * get a write lock on the semaphore
+ * - we increment the waiting count anyway to indicate an exclusive lock
+ */
+-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
++void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
+ {
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+ unsigned long flags;
+
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity = -1;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ goto out;
}
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
- return 0;
+@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct
+ list_add_tail(&waiter.list, &sem->wait_list);
+
+ /* we don't need to touch the semaphore struct anymore */
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ /* wait to be given the lock */
+ for (;;) {
+@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct
+ ;
}
-@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgasta
+-void __sched __down_write(struct rw_semaphore *sem)
++void __sched __down_write(struct rw_anon_semaphore *sem)
{
- /* save original values of VGA controller registers */
- if (!vga_vesa_blanked) {
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
- vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
- vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+ __down_write_nested(sem, 0);
+ }
+@@ -255,12 +255,12 @@ void __sched __down_write(struct rw_sema
+ /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+-int __down_write_trylock(struct rw_semaphore *sem)
++int __down_write_trylock(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
+ int ret = 0;
- outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
- vga_state.HorizontalTotal = inb_p(vga_video_port_val);
-@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgasta
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- /* assure that video is enabled */
- /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semap
+ ret = 1;
+ }
- /* test for vertical retrace in process.... */
-@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgasta
- /* restore both index registers */
- vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
- outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
- }
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- static void vga_vesa_unblank(struct vgastate *state)
+ return ret;
+ }
+@@ -276,48 +276,48 @@ int __down_write_trylock(struct rw_semap
+ /*
+ * release a read lock on the semaphore
+ */
+-void __up_read(struct rw_semaphore *sem)
++void __up_read(struct rw_anon_semaphore *sem)
{
- /* restore original values of VGA controller registers */
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
+ unsigned long flags;
- outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
-@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgas
- /* restore index/control registers */
- vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
- outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+
+ if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+ sem = __rwsem_wake_one_writer(sem);
+
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
- static void vga_pal_blank(struct vgastate *state)
-@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgas
- charmap += 4 * cmapsz;
- #endif
+ /*
+ * release a write lock on the semaphore
+ */
+-void __up_write(struct rw_semaphore *sem)
++void __up_write(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- /* First, the Sequencer */
- vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
- /* CPU writes only to map 2 */
-@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgas
- vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
- /* map start at A000:0000 */
- vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (arg) {
- if (set)
-@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgas
- }
- }
+ sem->activity = 0;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, 1);
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- /* First, the sequencer, Synchronous reset */
- vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
- /* CPU writes to maps 0 and 1 */
-@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgas
- inb_p(video_port_status);
- vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
- }
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
- return 0;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
-@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct v
- registers; they are write-only on EGA, but it appears that they
- are all don't care bits on EGA, so I guess it doesn't matter. */
+ /*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+-void __downgrade_write(struct rw_semaphore *sem)
++void __downgrade_write(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
- ovr = inb_p(vga_video_port_val);
- outb_p(0x09, vga_video_port_reg); /* Font size register */
- fsr = inb_p(vga_video_port_val);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- vde = maxscan & 0xff; /* Vertical display end reg */
- ovr = (ovr & 0xbd) + /* Overflow register */
- ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
- fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
+ sem->activity = 1;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, 0);
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
- outb_p(ovr, vga_video_port_val);
- outb_p(0x09, vga_video_port_reg); /* Font size */
- outb_p(fsr, vga_video_port_val);
- outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
- outb_p(vde, vga_video_port_val);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
- vga_video_font_height = fontheight;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
-Index: linux-2.6/arch/x86/oprofile/nmi_int.c
+Index: linux-2.6/lib/rwsem.c
===================================================================
---- linux-2.6.orig/arch/x86/oprofile/nmi_int.c
-+++ linux-2.6/arch/x86/oprofile/nmi_int.c
-@@ -355,10 +355,10 @@ static void nmi_cpu_setup(void *dummy)
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
- nmi_cpu_save_registers(msrs);
-- spin_lock(&oprofilefs_lock);
-+ raw_spin_lock(&oprofilefs_lock);
- model->setup_ctrs(model, msrs);
- nmi_cpu_setup_mux(cpu, msrs);
-- spin_unlock(&oprofilefs_lock);
-+ raw_spin_unlock(&oprofilefs_lock);
- per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, APIC_DM_NMI);
+--- linux-2.6.orig/lib/rwsem.c
++++ linux-2.6/lib/rwsem.c
+@@ -11,8 +11,8 @@
+ /*
+ * Initialize an rwsem:
+ */
+-void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key)
++void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -22,11 +22,11 @@ void __init_rwsem(struct rw_semaphore *s
+ lockdep_init_map(&sem->dep_map, name, key, 0);
+ #endif
+ sem->count = RWSEM_UNLOCKED_VALUE;
+- spin_lock_init(&sem->wait_lock);
++ raw_spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
}
-Index: linux-2.6/drivers/oprofile/event_buffer.c
-===================================================================
---- linux-2.6.orig/drivers/oprofile/event_buffer.c
-+++ linux-2.6/drivers/oprofile/event_buffer.c
-@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
+
+-EXPORT_SYMBOL(__init_rwsem);
++EXPORT_SYMBOL(__init_anon_rwsem);
+
+ struct rwsem_waiter {
+ struct list_head list;
+@@ -54,8 +54,8 @@ struct rwsem_waiter {
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if downgrading is false
+ */
+-static struct rw_semaphore *
+-__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
++static struct rw_anon_semaphore *
++__rwsem_do_wake(struct rw_anon_semaphore *sem, int wake_type)
{
- unsigned long flags;
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -169,8 +169,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
+ /*
+ * wait for a lock to be granted
+ */
+-static struct rw_semaphore __sched *
+-rwsem_down_failed_common(struct rw_semaphore *sem,
++static struct rw_anon_semaphore __sched *
++rwsem_down_failed_common(struct rw_anon_semaphore *sem,
+ unsigned int flags, signed long adjustment)
+ {
+ struct rwsem_waiter waiter;
+@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semap
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-- spin_lock_irqsave(&oprofilefs_lock, flags);
-+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- buffer_size = oprofile_buffer_size;
- buffer_watershed = oprofile_buffer_watershed;
-- spin_unlock_irqrestore(&oprofilefs_lock, flags);
-+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ /* set up my own style of waitqueue */
+- spin_lock_irq(&sem->wait_lock);
++ raw_spin_lock_irq(&sem->wait_lock);
+ waiter.task = tsk;
+ waiter.flags = flags;
+ get_task_struct(tsk);
+@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semap
+ adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- if (buffer_watershed >= buffer_size)
- return -EINVAL;
-Index: linux-2.6/drivers/oprofile/oprofile_perf.c
-===================================================================
---- linux-2.6.orig/drivers/oprofile/oprofile_perf.c
-+++ linux-2.6/drivers/oprofile/oprofile_perf.c
-@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(st
+- spin_unlock_irq(&sem->wait_lock);
++ raw_spin_unlock_irq(&sem->wait_lock);
- static int oprofile_perf_setup(void)
+ /* wait to be given the lock */
+ for (;;) {
+@@ -222,7 +222,8 @@ rwsem_down_failed_common(struct rw_semap
+ /*
+ * wait for the read lock to be granted
+ */
+-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
++struct rw_anon_semaphore __sched *
++rwsem_down_read_failed(struct rw_anon_semaphore *sem)
{
-- spin_lock(&oprofilefs_lock);
-+ raw_spin_lock(&oprofilefs_lock);
- op_perf_setup();
-- spin_unlock(&oprofilefs_lock);
-+ raw_spin_unlock(&oprofilefs_lock);
- return 0;
- }
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+ -RWSEM_ACTIVE_READ_BIAS);
+@@ -231,7 +232,8 @@ struct rw_semaphore __sched *rwsem_down_
+ /*
+ * wait for the write lock to be granted
+ */
+-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
++struct rw_anon_semaphore __sched *
++rwsem_down_write_failed(struct rw_anon_semaphore *sem)
+ {
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
+ -RWSEM_ACTIVE_WRITE_BIAS);
+@@ -241,17 +243,17 @@ struct rw_semaphore __sched *rwsem_down_
+ * handle waking up a waiter on the semaphore
+ * - up_read/up_write has decremented the active part of count if we come here
+ */
+-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
++struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
-Index: linux-2.6/drivers/oprofile/oprofilefs.c
-===================================================================
---- linux-2.6.orig/drivers/oprofile/oprofilefs.c
-+++ linux-2.6/drivers/oprofile/oprofilefs.c
-@@ -21,7 +21,7 @@
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- #define OPROFILEFS_MAGIC 0x6f70726f
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
--DEFINE_SPINLOCK(oprofilefs_lock);
-+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
+ return sem;
+ }
+@@ -261,17 +263,17 @@ struct rw_semaphore *rwsem_wake(struct r
+ * - caller incremented waiting part of count and discovered it still negative
+ * - just wake up any readers at the front of the queue
+ */
+-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
++struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
{
-@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned
- if (copy_from_user(tmpbuf, buf, count))
- return -EFAULT;
+ unsigned long flags;
-- spin_lock_irqsave(&oprofilefs_lock, flags);
-+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- *val = simple_strtoul(tmpbuf, NULL, 0);
-- spin_unlock_irqrestore(&oprofilefs_lock, flags);
-+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
- }
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-Index: linux-2.6/include/linux/oprofile.h
-===================================================================
---- linux-2.6.orig/include/linux/oprofile.h
-+++ linux-2.6/include/linux/oprofile.h
-@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigne
- int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- /** lock for read/write safety */
--extern spinlock_t oprofilefs_lock;
-+extern raw_spinlock_t oprofilefs_lock;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- /**
- * Add the contents of a circular buffer to the event buffer.
-Index: linux-2.6/drivers/acpi/processor_idle.c
-===================================================================
---- linux-2.6.orig/drivers/acpi/processor_idle.c
-+++ linux-2.6/drivers/acpi/processor_idle.c
-@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct
+ return sem;
}
+Index: linux-2.6/kernel/time/timer_stats.c
+===================================================================
+--- linux-2.6.orig/kernel/time/timer_stats.c
++++ linux-2.6/kernel/time/timer_stats.c
+@@ -81,7 +81,7 @@ struct entry {
+ /*
+ * Spinlock protecting the tables - not taken during lookup:
+ */
+-static DEFINE_SPINLOCK(table_lock);
++static DEFINE_RAW_SPINLOCK(table_lock);
- static int c3_cpu_count;
--static DEFINE_SPINLOCK(c3_lock);
-+static DEFINE_RAW_SPINLOCK(c3_lock);
+ /*
+ * Per-CPU lookup locks for fast hash lookup:
+@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct
+ prev = NULL;
+ curr = *head;
- /**
- * acpi_idle_enter_bm - enters C3 with proper BM handling
-@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpu
- * without doing anything.
+- spin_lock(&table_lock);
++ raw_spin_lock(&table_lock);
+ /*
+ * Make sure we have not raced with another CPU:
*/
- if (pr->flags.bm_check && pr->flags.bm_control) {
-- spin_lock(&c3_lock);
-+ raw_spin_lock(&c3_lock);
- c3_cpu_count++;
- /* Disable bus master arbitration when all CPUs are in C3 */
- if (c3_cpu_count == num_online_cpus())
- acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-- spin_unlock(&c3_lock);
-+ raw_spin_unlock(&c3_lock);
- } else if (!pr->flags.bm_check) {
- ACPI_FLUSH_CPU_CACHE();
+@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct
+ *head = curr;
}
-@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpu
+ out_unlock:
+- spin_unlock(&table_lock);
++ raw_spin_unlock(&table_lock);
- /* Re-enable bus master arbitration */
- if (pr->flags.bm_check && pr->flags.bm_control) {
-- spin_lock(&c3_lock);
-+ raw_spin_lock(&c3_lock);
- acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
- c3_cpu_count--;
-- spin_unlock(&c3_lock);
-+ raw_spin_unlock(&c3_lock);
- }
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-Index: linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
+ return curr;
+ }
+Index: linux-2.6/kernel/latencytop.c
===================================================================
---- linux-2.6.orig/arch/x86/kernel/cpu/mcheck/mce_intel.c
-+++ linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
-@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_b
- * cmci_discover_lock protects against parallel discovery attempts
- * which could race against each other.
- */
--static DEFINE_SPINLOCK(cmci_discover_lock);
-+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
-
- #define CMCI_THRESHOLD 1
+--- linux-2.6.orig/kernel/latencytop.c
++++ linux-2.6/kernel/latencytop.c
+@@ -58,7 +58,7 @@
+ #include <linux/list.h>
+ #include <linux/stacktrace.h>
-@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int
- int hdr = 0;
- int i;
+-static DEFINE_SPINLOCK(latency_lock);
++static DEFINE_RAW_SPINLOCK(latency_lock);
-- spin_lock_irqsave(&cmci_discover_lock, flags);
-+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- u64 val;
+ #define MAXLR 128
+ static struct latency_record latency_record[MAXLR];
+@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct ta
+ if (!latencytop_enabled)
+ return;
-@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
- }
- }
-- spin_unlock_irqrestore(&cmci_discover_lock, flags);
-+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
- if (hdr)
- printk(KERN_CONT "\n");
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
+ memset(&p->latency_record, 0, sizeof(p->latency_record));
+ p->latency_record_count = 0;
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
-@@ -150,7 +150,7 @@ void cmci_clear(void)
- if (!cmci_supported(&banks))
- return;
-- spin_lock_irqsave(&cmci_discover_lock, flags);
-+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
- continue;
-@@ -160,7 +160,7 @@ void cmci_clear(void)
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- __clear_bit(i, __get_cpu_var(mce_banks_owned));
- }
-- spin_unlock_irqrestore(&cmci_discover_lock, flags);
-+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
- }
+ static void clear_global_latency_tracing(void)
+ {
+ unsigned long flags;
- /*
-Index: linux-2.6/arch/powerpc/sysdev/uic.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/sysdev/uic.c
-+++ linux-2.6/arch/powerpc/sysdev/uic.c
-@@ -47,7 +47,7 @@ struct uic {
- int index;
- int dcrbase;
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
+ memset(&latency_record, 0, sizeof(latency_record));
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
+ }
-- spinlock_t lock;
-+ raw_spinlock_t lock;
+ static void __sched
+@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_
+ lat.max = usecs;
+ store_stacktrace(tsk, &lat);
- /* The remapper for this UIC */
- struct irq_host *irqhost;
-@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_da
- u32 er, sr;
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
- sr = 1 << (31-src);
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- /* ack level-triggered interrupts here */
- if (irqd_is_level_type(d))
- mtdcr(uic->dcrbase + UIC_SR, sr);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er |= sr;
- mtdcr(uic->dcrbase + UIC_ER, er);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
- }
+ account_global_scheduler_latency(tsk, &lat);
- static void uic_mask_irq(struct irq_data *d)
-@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data
- unsigned long flags;
- u32 er;
+@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_
+ memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er &= ~(1 << (31 - src));
- mtdcr(uic->dcrbase + UIC_ER, er);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ out_unlock:
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
- static void uic_ack_irq(struct irq_data *d)
-@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data
- unsigned int src = irqd_to_hwirq(d);
- unsigned long flags;
+ static int lstats_show(struct seq_file *m, void *v)
+Index: linux-2.6/drivers/video/console/vgacon.c
+===================================================================
+--- linux-2.6.orig/drivers/video/console/vgacon.c
++++ linux-2.6/drivers/video/console/vgacon.c
+@@ -50,7 +50,7 @@
+ #include <video/vga.h>
+ #include <asm/io.h>
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+-static DEFINE_SPINLOCK(vga_lock);
++static DEFINE_RAW_SPINLOCK(vga_lock);
+ static int cursor_size_lastfrom;
+ static int cursor_size_lastto;
+ static u32 vgacon_xres;
+@@ -157,7 +157,7 @@ static inline void write_vga(unsigned ch
+ * ddprintk might set the console position from interrupt
+ * handlers, thus the write has to be IRQ-atomic.
+ */
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
+
+ #ifndef SLOW_VGA
+ v1 = reg + (val & 0xff00);
+@@ -170,7 +170,7 @@ static inline void write_vga(unsigned ch
+ outb_p(reg + 1, vga_video_port_reg);
+ outb_p(val & 0xff, vga_video_port_val);
+ #endif
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
}
- static void uic_mask_ack_irq(struct irq_data *d)
-@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_
- u32 er, sr;
+ static inline void vga_set_mem_top(struct vc_data *c)
+@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int x
+ cursor_size_lastfrom = from;
+ cursor_size_lastto = to;
- sr = 1 << (31-src);
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er &= ~sr;
- mtdcr(uic->dcrbase + UIC_ER, er);
-@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_
- */
- if (!irqd_is_level_type(d))
- mtdcr(uic->dcrbase + UIC_SR, sr);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
+ if (vga_video_type >= VIDEO_TYPE_VGAC) {
+ outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
+ curs = inb_p(vga_video_port_val);
+@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int x
+ outb_p(curs, vga_video_port_val);
+ outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
+ outb_p(cure, vga_video_port_val);
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
}
- static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
-@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_d
-
- mask = ~(1 << (31 - src));
+ static void vgacon_cursor(struct vc_data *c, int mode)
+@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_dat
+ unsigned int scanlines = height * c->vc_font.height;
+ u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- tr = mfdcr(uic->dcrbase + UIC_TR);
- pr = mfdcr(uic->dcrbase + UIC_PR);
- tr = (tr & mask) | (trigger << (31-src));
-@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_d
- mtdcr(uic->dcrbase + UIC_PR, pr);
- mtdcr(uic->dcrbase + UIC_TR, tr);
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ vgacon_xres = width * VGA_FONTWIDTH;
+ vgacon_yres = height * c->vc_font.height;
+@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_dat
+ outb_p(vsync_end, vga_video_port_val);
+ }
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
return 0;
}
-@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(
- if (! uic)
- return NULL; /* FIXME: panic? */
-
-- spin_lock_init(&uic->lock);
-+ raw_spin_lock_init(&uic->lock);
- indexp = of_get_property(node, "cell-index", &len);
- if (!indexp || (len != sizeof(u32))) {
- printk(KERN_ERR "uic: Device node %s has missing or invalid "
-Index: linux-2.6/drivers/dca/dca-core.c
-===================================================================
---- linux-2.6.orig/drivers/dca/dca-core.c
-+++ linux-2.6/drivers/dca/dca-core.c
-@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
- MODULE_LICENSE("GPL");
- MODULE_AUTHOR("Intel Corporation");
--static DEFINE_SPINLOCK(dca_lock);
-+static DEFINE_RAW_SPINLOCK(dca_lock);
-
- static LIST_HEAD(dca_domains);
+@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgasta
+ {
+ /* save original values of VGA controller registers */
+ if (!vga_vesa_blanked) {
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
+ vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
+ vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
-@@ -101,10 +101,10 @@ static void unregister_dca_providers(voi
+ outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
+ vga_state.HorizontalTotal = inb_p(vga_video_port_val);
+@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgasta
- INIT_LIST_HEAD(&unregistered_providers);
+ /* assure that video is enabled */
+ /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
+ /* test for vertical retrace in process.... */
+@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgasta
+ /* restore both index registers */
+ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
+ outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ }
- if (list_empty(&dca_domains)) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return;
- }
-
-@@ -116,7 +116,7 @@ static void unregister_dca_providers(voi
+ static void vga_vesa_unblank(struct vgastate *state)
+ {
+ /* restore original values of VGA controller registers */
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
- dca_free_domain(domain);
+ outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
+@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgas
+ /* restore index/control registers */
+ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
+ outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ }
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ static void vga_pal_blank(struct vgastate *state)
+@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgas
+ charmap += 4 * cmapsz;
+ #endif
- list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
- dca_sysfs_remove_provider(dca);
-@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain
- domain = dca_find_domain(rc);
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ /* First, the Sequencer */
+ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
+ /* CPU writes only to map 2 */
+@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgas
+ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
+ /* map start at A000:0000 */
+ vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
- if (!domain) {
-- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
-+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
- dca_providers_blocked = 1;
-- } else {
-- domain = dca_allocate_domain(rc);
-- if (domain)
-- list_add(&domain->node, &dca_domains);
-- }
+ if (arg) {
+ if (set)
+@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgas
+ }
}
- return domain;
-@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev
- if (!dev)
- return -EFAULT;
-
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
-
- /* check if the requester has not been added already */
- dca = dca_find_provider_by_dev(dev);
- if (dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -EEXIST;
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ /* First, the sequencer, Synchronous reset */
+ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
+ /* CPU writes to maps 0 and 1 */
+@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgas
+ inb_p(video_port_status);
+ vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
}
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ return 0;
+ }
- pci_rc = dca_pci_rc_from_dev(dev);
- domain = dca_find_domain(pci_rc);
- if (!domain) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
+@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct v
+ registers; they are write-only on EGA, but it appears that they
+ are all don't care bits on EGA, so I guess it doesn't matter. */
-@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev
- break;
- }
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
+ ovr = inb_p(vga_video_port_val);
+ outb_p(0x09, vga_video_port_reg); /* Font size register */
+ fsr = inb_p(vga_video_port_val);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ vde = maxscan & 0xff; /* Vertical display end reg */
+ ovr = (ovr & 0xbd) + /* Overflow register */
+ ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
+ fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
- if (slot < 0)
- return slot;
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
+ outb_p(ovr, vga_video_port_val);
+ outb_p(0x09, vga_video_port_reg); /* Font size */
+ outb_p(fsr, vga_video_port_val);
+ outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
+ outb_p(vde, vga_video_port_val);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ vga_video_font_height = fontheight;
- err = dca_sysfs_add_req(dca, dev, slot);
- if (err) {
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- if (dca == dca_find_provider_by_dev(dev))
- dca->ops->remove_requester(dca, dev);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return err;
- }
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
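
The vgacon hunks above show the basic shape of every conversion in this range: on PREEMPT_RT a spinlock_t becomes a sleeping lock, so a lock taken in atomic context (here, around VGA register I/O reachable from the console path) must become a raw_spinlock_t, which still spins. A minimal sketch of the pattern, with hypothetical names (demo_lock and demo_hw_poke are not from the patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_hw_poke(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT,
	 * so the critical section stays legal in atomic context. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... program hardware registers here ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}
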
+Index: linux-2.6/arch/x86/oprofile/nmi_int.c
+===================================================================
+--- linux-2.6.orig/arch/x86/oprofile/nmi_int.c
++++ linux-2.6/arch/x86/oprofile/nmi_int.c
+@@ -355,10 +355,10 @@ static void nmi_cpu_setup(void *dummy)
+ int cpu = smp_processor_id();
+ struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+ nmi_cpu_save_registers(msrs);
+- spin_lock(&oprofilefs_lock);
++ raw_spin_lock(&oprofilefs_lock);
+ model->setup_ctrs(model, msrs);
+ nmi_cpu_setup_mux(cpu, msrs);
+- spin_unlock(&oprofilefs_lock);
++ raw_spin_unlock(&oprofilefs_lock);
+ per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ }
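
nmi_cpu_setup() above runs on every CPU from a cross call with interrupts already disabled, so it can never block; any lock it takes must therefore be raw on RT, and no irqsave variant is needed. A sketch of that calling pattern, with hypothetical names:

#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_setup_lock);

static void demo_cpu_setup(void *unused)
{
	/* IRQs are off inside an on_each_cpu() callback,
	 * so a sleeping lock would be a bug here. */
	raw_spin_lock(&demo_setup_lock);
	/* ... program this CPU's counters ... */
	raw_spin_unlock(&demo_setup_lock);
}

/* usage sketch: on_each_cpu(demo_cpu_setup, NULL, 1); */
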
+Index: linux-2.6/drivers/oprofile/event_buffer.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/event_buffer.c
++++ linux-2.6/drivers/oprofile/event_buffer.c
+@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
+ {
+ unsigned long flags;
-@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *
- if (!dev)
- return -EFAULT;
+- spin_lock_irqsave(&oprofilefs_lock, flags);
++ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+ buffer_size = oprofile_buffer_size;
+ buffer_watershed = oprofile_buffer_watershed;
+- spin_unlock_irqrestore(&oprofilefs_lock, flags);
++ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- dca = dca_find_provider_by_dev(dev);
- if (!dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
- slot = dca->ops->remove_requester(dca, dev);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ if (buffer_watershed >= buffer_size)
+ return -EINVAL;
+Index: linux-2.6/drivers/oprofile/oprofile_perf.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/oprofile_perf.c
++++ linux-2.6/drivers/oprofile/oprofile_perf.c
+@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(st
- if (slot < 0)
- return slot;
-@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev
- u8 tag;
- unsigned long flags;
+ static int oprofile_perf_setup(void)
+ {
+- spin_lock(&oprofilefs_lock);
++ raw_spin_lock(&oprofilefs_lock);
+ op_perf_setup();
+- spin_unlock(&oprofilefs_lock);
++ raw_spin_unlock(&oprofilefs_lock);
+ return 0;
+ }
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
+Index: linux-2.6/drivers/oprofile/oprofilefs.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/oprofilefs.c
++++ linux-2.6/drivers/oprofile/oprofilefs.c
+@@ -21,7 +21,7 @@
- dca = dca_find_provider_by_dev(dev);
- if (!dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
- tag = dca->ops->get_tag(dca, dev, cpu);
+ #define OPROFILEFS_MAGIC 0x6f70726f
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return tag;
- }
+-DEFINE_SPINLOCK(oprofilefs_lock);
++DEFINE_RAW_SPINLOCK(oprofilefs_lock);
-@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_pro
+ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
{
- int err;
- unsigned long flags;
-- struct dca_domain *domain;
-+ struct dca_domain *domain, *newdomain = NULL;
+@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- if (dca_providers_blocked) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_lock_irqsave(&oprofilefs_lock, flags);
++ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+ *val = simple_strtoul(tmpbuf, NULL, 0);
+- spin_unlock_irqrestore(&oprofilefs_lock, flags);
++ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ return 0;
+ }
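
oprofilefs_lock is taken both from blocking contexts (the oprofilefs write path above) and from the IRQs-off per-CPU setup, so once one user is atomic the lock has to be raw for all of them, including its extern declaration in the header. A hedged sketch of that split, with hypothetical names:

/* demo.h (hypothetical) */
extern raw_spinlock_t demo_shared_lock;

/* demo.c (hypothetical) */
#include <linux/spinlock.h>

DEFINE_RAW_SPINLOCK(demo_shared_lock);
static unsigned long demo_value;

static void demo_store(unsigned long v)
{
	unsigned long flags;

	/* process context here, but the same lock is also taken
	 * with IRQs off elsewhere, hence the irqsave variant */
	raw_spin_lock_irqsave(&demo_shared_lock, flags);
	demo_value = v;
	raw_spin_unlock_irqrestore(&demo_shared_lock, flags);
}
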
- err = dca_sysfs_add_provider(dca, dev);
- if (err)
- return err;
+Index: linux-2.6/include/linux/oprofile.h
+===================================================================
+--- linux-2.6.orig/include/linux/oprofile.h
++++ linux-2.6/include/linux/oprofile.h
+@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigne
+ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- domain = dca_get_domain(dev);
- if (!domain) {
-+ struct pci_bus *rc;
-+
- if (dca_providers_blocked) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- dca_sysfs_remove_provider(dca);
- unregister_dca_providers();
-- } else {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ return -ENODEV;
-+ }
-+
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
-+ rc = dca_pci_rc_from_dev(dev);
-+ newdomain = dca_allocate_domain(rc);
-+ if (!newdomain)
-+ return -ENODEV;
-+ raw_spin_lock_irqsave(&dca_lock, flags);
-+ /* Recheck, we might have raced after dropping the lock */
-+ domain = dca_get_domain(dev);
-+ if (!domain) {
-+ domain = newdomain;
-+ newdomain = NULL;
-+ list_add(&domain->node, &dca_domains);
- }
-- return -ENODEV;
- }
- list_add(&dca->node, &domain->dca_providers);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ /** lock for read/write safety */
+-extern spinlock_t oprofilefs_lock;
++extern raw_spinlock_t oprofilefs_lock;
- blocking_notifier_call_chain(&dca_provider_chain,
- DCA_PROVIDER_ADD, NULL);
-+ kfree(newdomain);
- return 0;
+ /**
+ * Add the contents of a circular buffer to the event buffer.
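
The register_dca_provider() rework visible above is the one behavioural change in this range: dca_allocate_domain() allocates memory, which may sleep and so is illegal under the now-raw dca_lock. The patch therefore drops the lock, allocates, retakes the lock and rechecks whether another CPU raced in first, disposing of the unused spare with kfree() (a no-op on NULL). A stripped-down sketch of the same idiom, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_domain {
	struct list_head node;
};

static DEFINE_RAW_SPINLOCK(demo_lock);
static LIST_HEAD(demo_domains);

static struct demo_domain *demo_find(void)
{
	return list_empty(&demo_domains) ? NULL :
		list_first_entry(&demo_domains, struct demo_domain, node);
}

static int demo_register(void)
{
	struct demo_domain *domain, *newdomain = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	domain = demo_find();
	if (!domain) {
		/* cannot allocate under a raw lock: drop it first */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
		newdomain = kzalloc(sizeof(*newdomain), GFP_KERNEL);
		if (!newdomain)
			return -ENOMEM;
		raw_spin_lock_irqsave(&demo_lock, flags);
		/* recheck: someone may have added a domain meanwhile */
		domain = demo_find();
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &demo_domains);
		}
	}
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	kfree(newdomain);	/* NULL if our allocation was used */
	return 0;
}
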
+Index: linux-2.6/drivers/acpi/processor_idle.c
+===================================================================
+--- linux-2.6.orig/drivers/acpi/processor_idle.c
++++ linux-2.6/drivers/acpi/processor_idle.c
+@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct
}
- EXPORT_SYMBOL_GPL(register_dca_provider);
-@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_
- blocking_notifier_call_chain(&dca_provider_chain,
- DCA_PROVIDER_REMOVE, NULL);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
-
- list_del(&dca->node);
-
-@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_
- if (list_empty(&domain->dca_providers))
- dca_free_domain(domain);
+ static int c3_cpu_count;
+-static DEFINE_SPINLOCK(c3_lock);
++static DEFINE_RAW_SPINLOCK(c3_lock);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ /**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpu
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+- spin_lock(&c3_lock);
++ raw_spin_lock(&c3_lock);
+ c3_cpu_count++;
+ /* Disable bus master arbitration when all CPUs are in C3 */
+ if (c3_cpu_count == num_online_cpus())
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+- spin_unlock(&c3_lock);
++ raw_spin_unlock(&c3_lock);
+ } else if (!pr->flags.bm_check) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
+@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpu
- dca_sysfs_remove_provider(dca);
- }
-Index: linux-2.6/arch/arm/common/gic.c
+ /* Re-enable bus master arbitration */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+- spin_lock(&c3_lock);
++ raw_spin_lock(&c3_lock);
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+ c3_cpu_count--;
+- spin_unlock(&c3_lock);
++ raw_spin_unlock(&c3_lock);
+ }
+ kt2 = ktime_get_real();
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
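
c3_lock only guards a bare CPU counter, but it is taken in the idle path, where scheduling is impossible by definition, so the lock is converted rather than the code restructured. The counting idiom it protects, sketched with hypothetical names:

#include <linux/cpumask.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_c3_lock);
static int demo_c3_count;

static void demo_c3_enter(void)
{
	raw_spin_lock(&demo_c3_lock);	/* idle path, IRQs off */
	if (++demo_c3_count == num_online_cpus()) {
		/* last CPU entering: disable bus-master arbitration */
	}
	raw_spin_unlock(&demo_c3_lock);
}

static void demo_c3_exit(void)
{
	raw_spin_lock(&demo_c3_lock);
	/* re-enable bus-master arbitration here */
	demo_c3_count--;
	raw_spin_unlock(&demo_c3_lock);
}
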
+Index: linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
===================================================================
---- linux-2.6.orig/arch/arm/common/gic.c
-+++ linux-2.6/arch/arm/common/gic.c
-@@ -33,7 +33,7 @@
- #include <asm/mach/irq.h>
- #include <asm/hardware/gic.h>
-
--static DEFINE_SPINLOCK(irq_controller_lock);
-+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+--- linux-2.6.orig/arch/x86/kernel/cpu/mcheck/mce_intel.c
++++ linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
+@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_b
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+-static DEFINE_SPINLOCK(cmci_discover_lock);
++static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
- /* Address of GIC 0 CPU interface */
- void __iomem *gic_cpu_base_addr __read_mostly;
-@@ -88,30 +88,30 @@ static void gic_mask_irq(struct irq_data
- {
- u32 mask = 1 << (d->irq % 32);
+ #define CMCI_THRESHOLD 1
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
- if (gic_arch_extn.irq_mask)
- gic_arch_extn.irq_mask(d);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
- }
+@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int
+ int hdr = 0;
+ int i;
- static void gic_unmask_irq(struct irq_data *d)
- {
- u32 mask = 1 << (d->irq % 32);
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ u64 val;
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- if (gic_arch_extn.irq_unmask)
- gic_arch_extn.irq_unmask(d);
- writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int
+ WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ }
+ }
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ if (hdr)
+ printk(KERN_CONT "\n");
}
+@@ -150,7 +150,7 @@ void cmci_clear(void)
- static void gic_eoi_irq(struct irq_data *d)
- {
- if (gic_arch_extn.irq_eoi) {
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- gic_arch_extn.irq_eoi(d);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ if (!cmci_supported(&banks))
+ return;
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+ continue;
+@@ -160,7 +160,7 @@ void cmci_clear(void)
+ wrmsrl(MSR_IA32_MCx_CTL2(i), val);
+ __clear_bit(i, __get_cpu_var(mce_banks_owned));
}
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ }
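
cmci_discover_lock and the GIC's irq_controller_lock sit even deeper in the interrupt plumbing: CMCI handling runs from interrupt context, and the GIC lock nests inside irq_chip callbacks that are invoked with the IRQ descriptor's raw lock held. A sketch of the chip-callback case, assuming a hypothetical controller:

#include <linux/irq.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_chip_lock);

static void demo_irq_mask(struct irq_data *d)
{
	/* called with IRQs off and the descriptor lock held;
	 * nesting a sleeping spinlock_t here would be a
	 * sleeping-in-atomic bug on RT */
	raw_spin_lock(&demo_chip_lock);
	/* ... set the mask bit in the controller register ... */
	raw_spin_unlock(&demo_chip_lock);
}

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_mask	= demo_irq_mask,
};
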
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
-@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data
- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
- return -EINVAL;
-
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
+ /*
+Index: linux-2.6/arch/powerpc/sysdev/uic.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/sysdev/uic.c
++++ linux-2.6/arch/powerpc/sysdev/uic.c
+@@ -47,7 +47,7 @@ struct uic {
+ int index;
+ int dcrbase;
- if (gic_arch_extn.irq_set_type)
- gic_arch_extn.irq_set_type(d, type);
-@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data
- if (enabled)
- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+- spinlock_t lock;
++ raw_spinlock_t lock;
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ /* The remapper for this UIC */
+ struct irq_host *irqhost;
+@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_da
+ u32 er, sr;
- return 0;
+ sr = 1 << (31-src);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ /* ack level-triggered interrupts here */
+ if (irqd_is_level_type(d))
+ mtdcr(uic->dcrbase + UIC_SR, sr);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er |= sr;
+ mtdcr(uic->dcrbase + UIC_ER, er);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
-@@ -188,11 +188,11 @@ static int gic_set_affinity(struct irq_d
- mask = 0xff << shift;
- bit = 1 << (cpu + shift);
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- d->node = cpu;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ static void uic_mask_irq(struct irq_data *d)
+@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data
+ unsigned long flags;
+ u32 er;
- return 0;
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er &= ~(1 << (31 - src));
+ mtdcr(uic->dcrbase + UIC_ER, er);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
-@@ -222,9 +222,9 @@ static void gic_handle_cascade_irq(unsig
- chained_irq_enter(chip, desc);
+ static void uic_ack_irq(struct irq_data *d)
+@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ }
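
The UIC hunks also show the per-instance form of the conversion: the lock is a structure member, so the field type, the init call and every lock/unlock site change together. A sketch, with hypothetical names:

#include <linux/spinlock.h>

struct demo_ctrl {
	int		dcrbase;
	raw_spinlock_t	lock;		/* was spinlock_t */
};

static void demo_ctrl_init(struct demo_ctrl *ctrl)
{
	raw_spin_lock_init(&ctrl->lock);	/* was spin_lock_init() */
}

static void demo_ctrl_ack(struct demo_ctrl *ctrl, unsigned int src)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctrl->lock, flags);
	/* ... clear the status bit for (31 - src) ... */
	raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
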
- gic_irq = (status & 0x3ff);
- if (gic_irq == 1023)
-Index: linux-2.6/arch/arm/include/asm/dma.h
-===================================================================
---- linux-2.6.orig/arch/arm/include/asm/dma.h
-+++ linux-2.6/arch/arm/include/asm/dma.h
-@@ -33,18 +33,18 @@
- #define DMA_MODE_CASCADE 0xc0
- #define DMA_AUTOINIT 0x10
-
--extern spinlock_t dma_spin_lock;
-+extern raw_spinlock_t dma_spin_lock;
+ static void uic_mask_ack_irq(struct irq_data *d)
+@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_
+ u32 er, sr;
- static inline unsigned long claim_dma_lock(void)
- {
- unsigned long flags;
-- spin_lock_irqsave(&dma_spin_lock, flags);
-+ raw_spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
+ sr = 1 << (31-src);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er &= ~sr;
+ mtdcr(uic->dcrbase + UIC_ER, er);
+@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_
+ */
+ if (!irqd_is_level_type(d))
+ mtdcr(uic->dcrbase + UIC_SR, sr);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
- static inline void release_dma_lock(unsigned long flags)
- {
-- spin_unlock_irqrestore(&dma_spin_lock, flags);
-+ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
- }
+ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_d
- /* Clear the 'DMA Pointer Flip Flop'.
-Index: linux-2.6/arch/arm/include/asm/mmu.h
-===================================================================
---- linux-2.6.orig/arch/arm/include/asm/mmu.h
-+++ linux-2.6/arch/arm/include/asm/mmu.h
-@@ -6,7 +6,7 @@
- typedef struct {
- #ifdef CONFIG_CPU_HAS_ASID
- unsigned int id;
-- spinlock_t id_lock;
-+ raw_spinlock_t id_lock;
- #endif
- unsigned int kvm_seq;
- } mm_context_t;
-@@ -16,7 +16,7 @@ typedef struct {
+ mask = ~(1 << (31 - src));
- /* init_mm.context.id_lock should be initialized. */
- #define INIT_MM_CONTEXT(name) \
-- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
-+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
- #else
- #define ASID(mm) (0)
- #endif
-Index: linux-2.6/arch/arm/kernel/dma.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/dma.c
-+++ linux-2.6/arch/arm/kernel/dma.c
-@@ -23,7 +23,7 @@
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ tr = mfdcr(uic->dcrbase + UIC_TR);
+ pr = mfdcr(uic->dcrbase + UIC_PR);
+ tr = (tr & mask) | (trigger << (31-src));
+@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_d
+ mtdcr(uic->dcrbase + UIC_PR, pr);
+ mtdcr(uic->dcrbase + UIC_TR, tr);
- #include <asm/mach/dma.h>
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
--DEFINE_SPINLOCK(dma_spin_lock);
-+DEFINE_RAW_SPINLOCK(dma_spin_lock);
- EXPORT_SYMBOL(dma_spin_lock);
+ return 0;
+ }
+@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(
+ if (! uic)
+ return NULL; /* FIXME: panic? */
- static dma_t *dma_chan[MAX_DMA_CHANNELS];
-Index: linux-2.6/arch/arm/kernel/traps.c
+- spin_lock_init(&uic->lock);
++ raw_spin_lock_init(&uic->lock);
+ indexp = of_get_property(node, "cell-index", &len);
+ if (!indexp || (len != sizeof(u32))) {
+ printk(KERN_ERR "uic: Device node %s has missing or invalid "
+Index: linux-2.6/drivers/dca/dca-core.c
===================================================================
---- linux-2.6.orig/arch/arm/kernel/traps.c
-+++ linux-2.6/arch/arm/kernel/traps.c
-@@ -255,7 +255,7 @@ static int __die(const char *str, int er
- return ret;
- }
+--- linux-2.6.orig/drivers/dca/dca-core.c
++++ linux-2.6/drivers/dca/dca-core.c
+@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Intel Corporation");
--static DEFINE_SPINLOCK(die_lock);
-+static DEFINE_RAW_SPINLOCK(die_lock);
+-static DEFINE_SPINLOCK(dca_lock);
++static DEFINE_RAW_SPINLOCK(dca_lock);
- /*
- * This function is protected against re-entrancy.
-@@ -267,7 +267,7 @@ void die(const char *str, struct pt_regs
+ static LIST_HEAD(dca_domains);
- oops_enter();
+@@ -101,10 +101,10 @@ static void unregister_dca_providers(voi
-- spin_lock_irq(&die_lock);
-+ raw_spin_lock_irq(&die_lock);
- console_verbose();
- bust_spinlocks(1);
- ret = __die(str, err, thread, regs);
-@@ -277,7 +277,7 @@ void die(const char *str, struct pt_regs
+ INIT_LIST_HEAD(&unregistered_providers);
- bust_spinlocks(0);
- add_taint(TAINT_DIE);
-- spin_unlock_irq(&die_lock);
-+ raw_spin_unlock_irq(&die_lock);
- oops_exit();
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
- if (in_interrupt())
-@@ -302,24 +302,24 @@ void arm_notify_die(const char *str, str
- }
+ if (list_empty(&dca_domains)) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
- static LIST_HEAD(undef_hook);
--static DEFINE_SPINLOCK(undef_lock);
-+static DEFINE_RAW_SPINLOCK(undef_lock);
+@@ -116,7 +116,7 @@ static void unregister_dca_providers(voi
- void register_undef_hook(struct undef_hook *hook)
- {
- unsigned long flags;
+ dca_free_domain(domain);
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_add(&hook->node, &undef_hook);
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
- }
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
- void unregister_undef_hook(struct undef_hook *hook)
- {
- unsigned long flags;
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain
+ domain = dca_find_domain(rc);
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_del(&hook->node);
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
- }
+ if (!domain) {
+- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
++ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
+ dca_providers_blocked = 1;
+- } else {
+- domain = dca_allocate_domain(rc);
+- if (domain)
+- list_add(&domain->node, &dca_domains);
+- }
+ }
- static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
-@@ -328,12 +328,12 @@ static int call_undef_hook(struct pt_reg
- unsigned long flags;
- int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+ return domain;
+@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev
+ if (!dev)
+ return -EFAULT;
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_for_each_entry(hook, &undef_hook, node)
- if ((instr & hook->instr_mask) == hook->instr_val &&
- (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
- fn = hook->fn;
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
- return fn ? fn(regs, instr) : 1;
- }
-Index: linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
-===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/include/mach/hardware.h
-+++ linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
-@@ -93,7 +93,7 @@
- #define CPLD_FLASH_WR_ENABLE 1
+ /* check if the requester has not been added already */
+ dca = dca_find_provider_by_dev(dev);
+ if (dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -EEXIST;
+ }
- #ifndef __ASSEMBLY__
--extern spinlock_t nw_gpio_lock;
-+extern raw_spinlock_t nw_gpio_lock;
- extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
- extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
- extern unsigned int nw_gpio_read(void);
-Index: linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-hw.c
-+++ linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
-@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int
- /*
- * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
- */
--DEFINE_SPINLOCK(nw_gpio_lock);
-+DEFINE_RAW_SPINLOCK(nw_gpio_lock);
- EXPORT_SYMBOL(nw_gpio_lock);
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (!domain) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
- static unsigned int current_gpio_op;
-@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
- /*
- * Set Group1/Group2 outputs
- */
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
- }
+@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev
+ break;
+ }
- /*
-@@ -390,9 +390,9 @@ static void __init cpld_init(void)
- {
- unsigned long flags;
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
- }
+ if (slot < 0)
+ return slot;
- static unsigned char rwa_unlock[] __initdata =
-@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
- cpld_init();
- rwa010_init();
+ err = dca_sysfs_add_req(dca, dev, slot);
+ if (err) {
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca == dca_find_provider_by_dev(dev))
+ dca->ops->remove_requester(dca, dev);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return err;
+ }
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *
+ if (!dev)
+ return -EFAULT;
+
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
}
- return 0;
- }
-Index: linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-leds.c
-+++ linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
-@@ -31,13 +31,13 @@
- static char led_state;
- static char hw_led_state;
+ slot = dca->ops->remove_requester(dca, dev);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
--static DEFINE_SPINLOCK(leds_lock);
-+static DEFINE_RAW_SPINLOCK(leds_lock);
+ if (slot < 0)
+ return slot;
+@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev
+ u8 tag;
+ unsigned long flags;
- static void netwinder_leds_event(led_event_t evt)
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ tag = dca->ops->get_tag(dca, dev, cpu);
+
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return tag;
+ }
+
+@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_pro
{
+ int err;
unsigned long flags;
+- struct dca_domain *domain;
++ struct dca_domain *domain, *newdomain = NULL;
-- spin_lock_irqsave(&leds_lock, flags);
-+ raw_spin_lock_irqsave(&leds_lock, flags);
-
- switch (evt) {
- case led_start:
-@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_eve
- break;
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
}
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
-- spin_unlock_irqrestore(&leds_lock, flags);
-+ raw_spin_unlock_irqrestore(&leds_lock, flags);
+ err = dca_sysfs_add_provider(dca, dev);
+ if (err)
+ return err;
- if (led_state & LED_STATE_ENABLED) {
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ domain = dca_get_domain(dev);
+ if (!domain) {
++ struct pci_bus *rc;
++
+ if (dca_providers_blocked) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+- } else {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ return -ENODEV;
++ }
++
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
++ rc = dca_pci_rc_from_dev(dev);
++ newdomain = dca_allocate_domain(rc);
++ if (!newdomain)
++ return -ENODEV;
++ raw_spin_lock_irqsave(&dca_lock, flags);
++ /* Recheck, we might have raced after dropping the lock */
++ domain = dca_get_domain(dev);
++ if (!domain) {
++ domain = newdomain;
++ newdomain = NULL;
++ list_add(&domain->node, &dca_domains);
+ }
+- return -ENODEV;
}
+ list_add(&dca->node, &domain->dca_providers);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_ADD, NULL);
++ kfree(newdomain);
+ return 0;
}
+ EXPORT_SYMBOL_GPL(register_dca_provider);
+@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
-Index: linux-2.6/arch/arm/mach-integrator/core.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-integrator/core.c
-+++ linux-2.6/arch/arm/mach-integrator/core.c
-@@ -205,7 +205,7 @@ static struct amba_pl010_data integrator
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
- #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
+ list_del(&dca->node);
--static DEFINE_SPINLOCK(cm_lock);
-+static DEFINE_RAW_SPINLOCK(cm_lock);
+@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_
+ if (list_empty(&domain->dca_providers))
+ dca_free_domain(domain);
- /**
- * cm_control - update the CM_CTRL register.
-@@ -217,10 +217,10 @@ void cm_control(u32 mask, u32 set)
- unsigned long flags;
- u32 val;
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
-- spin_lock_irqsave(&cm_lock, flags);
-+ raw_spin_lock_irqsave(&cm_lock, flags);
- val = readl(CM_CTRL) & ~mask;
- writel(val | set, CM_CTRL);
-- spin_unlock_irqrestore(&cm_lock, flags);
-+ raw_spin_unlock_irqrestore(&cm_lock, flags);
+ dca_sysfs_remove_provider(dca);
}
-
- EXPORT_SYMBOL(cm_control);
-Index: linux-2.6/arch/arm/mach-integrator/pci_v3.c
+Index: linux-2.6/arch/arm/common/gic.c
===================================================================
---- linux-2.6.orig/arch/arm/mach-integrator/pci_v3.c
-+++ linux-2.6/arch/arm/mach-integrator/pci_v3.c
-@@ -163,7 +163,7 @@
- * 7:2 register number
- *
- */
--static DEFINE_SPINLOCK(v3_lock);
-+static DEFINE_RAW_SPINLOCK(v3_lock);
-
- #define PCI_BUS_NONMEM_START 0x00000000
- #define PCI_BUS_NONMEM_SIZE SZ_256M
-@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus
- unsigned long flags;
- u32 v;
+--- linux-2.6.orig/arch/arm/common/gic.c
++++ linux-2.6/arch/arm/common/gic.c
+@@ -33,7 +33,7 @@
+ #include <asm/mach/irq.h>
+ #include <asm/hardware/gic.h>
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
+-static DEFINE_SPINLOCK(irq_controller_lock);
++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
- switch (size) {
-@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus
- }
+ /* Address of GIC 0 CPU interface */
+ void __iomem *gic_cpu_base_addr __read_mostly;
+@@ -88,30 +88,30 @@ static void gic_mask_irq(struct irq_data
+ {
+ u32 mask = 1 << (d->irq % 32);
- v3_close_config_window();
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
+ }
- *val = v;
- return PCIBIOS_SUCCESSFUL;
-@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bu
- unsigned long addr;
- unsigned long flags;
+ static void gic_unmask_irq(struct irq_data *d)
+ {
+ u32 mask = 1 << (d->irq % 32);
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
+ }
- switch (size) {
-@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bu
+ static void gic_eoi_irq(struct irq_data *d)
+ {
+ if (gic_arch_extn.irq_eoi) {
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ gic_arch_extn.irq_eoi(d);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
}
- v3_close_config_window();
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
- return PCIBIOS_SUCCESSFUL;
- }
-@@ -510,7 +510,7 @@ void __init pci_v3_preinit(void)
- hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
- hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
- /*
- * Unlock V3 registers, but only if they were previously locked.
-@@ -583,7 +583,7 @@ void __init pci_v3_preinit(void)
- printk(KERN_ERR "PCI: unable to grab PCI error "
- "interrupt: %d\n", ret);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+ return 0;
}
+@@ -188,11 +188,11 @@ static int gic_set_affinity(struct irq_d
+ mask = 0xff << shift;
+ bit = 1 << (cpu + shift);
- void __init pci_v3_postinit(void)
-Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-ixp4xx/common-pci.c
-+++ linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
-@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
- * these transactions are atomic or we will end up
- * with corrupt data on the bus or in a driver.
- */
--static DEFINE_SPINLOCK(ixp4xx_pci_lock);
-+static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ d->node = cpu;
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
- /*
- * Read from PCI config space
-@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
- static void crp_read(u32 ad_cbe, u32 *data)
- {
- unsigned long flags;
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- *PCI_CRP_AD_CBE = ad_cbe;
- *data = *PCI_CRP_RDATA;
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return 0;
}
+@@ -222,9 +222,9 @@ static void gic_handle_cascade_irq(unsig
- /*
-@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *da
- static void crp_write(u32 ad_cbe, u32 data)
- {
- unsigned long flags;
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
- *PCI_CRP_WDATA = data;
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- }
+ chained_irq_enter(chip, desc);
- static inline int check_master_abort(void)
-@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
- int retval = 0;
- int i;
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ gic_irq = (status & 0x3ff);
+ if (gic_irq == 1023)
+Index: linux-2.6/arch/arm/include/asm/dma.h
+===================================================================
+--- linux-2.6.orig/arch/arm/include/asm/dma.h
++++ linux-2.6/arch/arm/include/asm/dma.h
+@@ -33,18 +33,18 @@
+ #define DMA_MODE_CASCADE 0xc0
+ #define DMA_AUTOINIT 0x10
- *PCI_NP_AD = addr;
+-extern spinlock_t dma_spin_lock;
++extern raw_spinlock_t dma_spin_lock;
-@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
- if(check_master_abort())
- retval = 1;
+ static inline unsigned long claim_dma_lock(void)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&dma_spin_lock, flags);
++ raw_spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+ }
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
+ static inline void release_dma_lock(unsigned long flags)
+ {
+- spin_unlock_irqrestore(&dma_spin_lock, flags);
++ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}
-@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
- unsigned long flags;
- int retval = 0;
+ /* Clear the 'DMA Pointer Flip Flop'.
+Index: linux-2.6/arch/arm/include/asm/mmu.h
+===================================================================
+--- linux-2.6.orig/arch/arm/include/asm/mmu.h
++++ linux-2.6/arch/arm/include/asm/mmu.h
+@@ -6,7 +6,7 @@
+ typedef struct {
+ #ifdef CONFIG_CPU_HAS_ASID
+ unsigned int id;
+- spinlock_t id_lock;
++ raw_spinlock_t id_lock;
+ #endif
+ unsigned int kvm_seq;
+ } mm_context_t;
+@@ -16,7 +16,7 @@ typedef struct {
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ /* init_mm.context.id_lock should be initialized. */
+ #define INIT_MM_CONTEXT(name) \
+- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
++ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ #else
+ #define ASID(mm) (0)
+ #endif
+Index: linux-2.6/arch/arm/kernel/dma.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/dma.c
++++ linux-2.6/arch/arm/kernel/dma.c
+@@ -23,7 +23,7 @@
- *PCI_NP_AD = addr;
+ #include <asm/mach/dma.h>
-@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
- if(check_master_abort())
- retval = 1;
+-DEFINE_SPINLOCK(dma_spin_lock);
++DEFINE_RAW_SPINLOCK(dma_spin_lock);
+ EXPORT_SYMBOL(dma_spin_lock);
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
+ static dma_t *dma_chan[MAX_DMA_CHANNELS];
+Index: linux-2.6/arch/arm/kernel/traps.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/traps.c
++++ linux-2.6/arch/arm/kernel/traps.c
+@@ -255,7 +255,7 @@ static int __die(const char *str, int er
+ return ret;
}
-@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
- unsigned long flags;
- int retval = 0;
+-static DEFINE_SPINLOCK(die_lock);
++static DEFINE_RAW_SPINLOCK(die_lock);
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ /*
+ * This function is protected against re-entrancy.
+@@ -267,7 +267,7 @@ void die(const char *str, struct pt_regs
- *PCI_NP_AD = addr;
+ oops_enter();
-@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
- if(check_master_abort())
- retval = 1;
+- spin_lock_irq(&die_lock);
++ raw_spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+ ret = __die(str, err, thread, regs);
+@@ -277,7 +277,7 @@ void die(const char *str, struct pt_regs
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
- }
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+- spin_unlock_irq(&die_lock);
++ raw_spin_unlock_irq(&die_lock);
+ oops_exit();
-Index: linux-2.6/arch/arm/mach-shark/leds.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-shark/leds.c
-+++ linux-2.6/arch/arm/mach-shark/leds.c
-@@ -36,7 +36,7 @@ static char led_state;
- static short hw_led_state;
- static short saved_state;
+ if (in_interrupt())
+@@ -302,24 +302,24 @@ void arm_notify_die(const char *str, str
+ }
--static DEFINE_SPINLOCK(leds_lock);
-+static DEFINE_RAW_SPINLOCK(leds_lock);
+ static LIST_HEAD(undef_hook);
+-static DEFINE_SPINLOCK(undef_lock);
++static DEFINE_RAW_SPINLOCK(undef_lock);
- short sequoia_read(int addr) {
- outw(addr,0x24);
-@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event
+ void register_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
-- spin_lock_irqsave(&leds_lock, flags);
-+ raw_spin_lock_irqsave(&leds_lock, flags);
-
- hw_led_state = sequoia_read(0x09);
-
-@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event
- if (led_state & LED_STATE_ENABLED)
- sequoia_write(hw_led_state,0x09);
-
-- spin_unlock_irqrestore(&leds_lock, flags);
-+ raw_spin_unlock_irqrestore(&leds_lock, flags);
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_add(&hook->node, &undef_hook);
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
- static int __init leds_init(void)
-Index: linux-2.6/arch/arm/mm/cache-l2x0.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/cache-l2x0.c
-+++ linux-2.6/arch/arm/mm/cache-l2x0.c
-@@ -26,7 +26,7 @@
- #define CACHE_LINE_SIZE 32
-
- static void __iomem *l2x0_base;
--static DEFINE_SPINLOCK(l2x0_lock);
-+static DEFINE_RAW_SPINLOCK(l2x0_lock);
- static uint32_t l2x0_way_mask; /* Bitmask of active ways */
- static uint32_t l2x0_size;
-
-@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
+ void unregister_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_del(&hook->node);
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
- static void __l2x0_flush_all(void)
-@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
+ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+@@ -328,12 +328,12 @@ static int call_undef_hook(struct pt_reg
unsigned long flags;
+ int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
- /* clean all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_for_each_entry(hook, &undef_hook, node)
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
+ fn = hook->fn;
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
+
+ return fn ? fn(regs, instr) : 1;
}
+Index: linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-footbridge/include/mach/hardware.h
++++ linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
+@@ -93,7 +93,7 @@
+ #define CPLD_FLASH_WR_ENABLE 1
- static void l2x0_clean_all(void)
-@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
- unsigned long flags;
+ #ifndef __ASSEMBLY__
+-extern spinlock_t nw_gpio_lock;
++extern raw_spinlock_t nw_gpio_lock;
+ extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
+ extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
+ extern unsigned int nw_gpio_read(void);
+Index: linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-hw.c
++++ linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
+@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int
+ /*
+ * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
+ */
+-DEFINE_SPINLOCK(nw_gpio_lock);
++DEFINE_RAW_SPINLOCK(nw_gpio_lock);
+ EXPORT_SYMBOL(nw_gpio_lock);
- /* clean all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
- cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ static unsigned int current_gpio_op;
+@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
+ /*
+ * Set Group1/Group2 outputs
+ */
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
- static void l2x0_inv_all(void)
-@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
+ /*
+@@ -390,9 +390,9 @@ static void __init cpld_init(void)
+ {
unsigned long flags;
- /* invalidate all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- /* Invalidating when L2 is enabled is a nono */
- BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
- cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
- static void l2x0_inv_range(unsigned long start, unsigned long end)
-@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long
- void __iomem *base = l2x0_base;
- unsigned long flags;
-
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- if (start & (CACHE_LINE_SIZE - 1)) {
- start &= ~(CACHE_LINE_SIZE - 1);
- debug_writel(0x03);
-@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long
- }
+ static unsigned char rwa_unlock[] __initdata =
+@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
+ cpld_init();
+ rwa010_init();
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
- cache_wait(base + L2X0_INV_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ return 0;
}
+Index: linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-leds.c
++++ linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
+@@ -31,13 +31,13 @@
+ static char led_state;
+ static char hw_led_state;
- static void l2x0_clean_range(unsigned long start, unsigned long end)
-@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned lo
- return;
- }
+-static DEFINE_SPINLOCK(leds_lock);
++static DEFINE_RAW_SPINLOCK(leds_lock);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- start &= ~(CACHE_LINE_SIZE - 1);
- while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
-@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned lo
- }
+ static void netwinder_leds_event(led_event_t evt)
+ {
+ unsigned long flags;
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
- }
- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
- }
+- spin_lock_irqsave(&leds_lock, flags);
++ raw_spin_lock_irqsave(&leds_lock, flags);
- static void l2x0_flush_range(unsigned long start, unsigned long end)
-@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned lo
- return;
+ switch (evt) {
+ case led_start:
+@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_eve
+ break;
}
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- start &= ~(CACHE_LINE_SIZE - 1);
- while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
-@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned lo
- debug_writel(0x00);
+- spin_unlock_irqrestore(&leds_lock, flags);
++ raw_spin_unlock_irqrestore(&leds_lock, flags);
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
+ if (led_state & LED_STATE_ENABLED) {
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- static void l2x0_disable(void)
- {
+Index: linux-2.6/arch/arm/mach-integrator/core.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-integrator/core.c
++++ linux-2.6/arch/arm/mach-integrator/core.c
+@@ -205,7 +205,7 @@ static struct amba_pl010_data integrator
+
+ #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
+
+-static DEFINE_SPINLOCK(cm_lock);
++static DEFINE_RAW_SPINLOCK(cm_lock);
+
+ /**
+ * cm_control - update the CM_CTRL register.
+@@ -217,10 +217,10 @@ void cm_control(u32 mask, u32 set)
unsigned long flags;
+ u32 val;
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- writel_relaxed(0, l2x0_base + L2X0_CTRL);
- dsb();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&cm_lock, flags);
++ raw_spin_lock_irqsave(&cm_lock, flags);
+ val = readl(CM_CTRL) & ~mask;
+ writel(val | set, CM_CTRL);
+- spin_unlock_irqrestore(&cm_lock, flags);
++ raw_spin_unlock_irqrestore(&cm_lock, flags);
}
- void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
-Index: linux-2.6/arch/arm/mm/context.c
+ EXPORT_SYMBOL(cm_control);
+Index: linux-2.6/arch/arm/mach-integrator/pci_v3.c
===================================================================
---- linux-2.6.orig/arch/arm/mm/context.c
-+++ linux-2.6/arch/arm/mm/context.c
-@@ -16,7 +16,7 @@
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
-
--static DEFINE_SPINLOCK(cpu_asid_lock);
-+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
- unsigned int cpu_last_asid = ASID_FIRST_VERSION;
- #ifdef CONFIG_SMP
- DEFINE_PER_CPU(struct mm_struct *, current_mm);
-@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, curre
- void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
- {
- mm->context.id = 0;
-- spin_lock_init(&mm->context.id_lock);
-+ raw_spin_lock_init(&mm->context.id_lock);
- }
+--- linux-2.6.orig/arch/arm/mach-integrator/pci_v3.c
++++ linux-2.6/arch/arm/mach-integrator/pci_v3.c
+@@ -163,7 +163,7 @@
+ * 7:2 register number
+ *
+ */
+-static DEFINE_SPINLOCK(v3_lock);
++static DEFINE_RAW_SPINLOCK(v3_lock);
- static void flush_context(void)
-@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_str
- * the broadcast. This function is also called via IPI so the
- * mm->context.id_lock has to be IRQ-safe.
- */
-- spin_lock_irqsave(&mm->context.id_lock, flags);
-+ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
- if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
- /*
- * Old version of ASID found. Set the new one and
-@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_str
- mm->context.id = asid;
- cpumask_clear(mm_cpumask(mm));
- }
-- spin_unlock_irqrestore(&mm->context.id_lock, flags);
-+ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
+ #define PCI_BUS_NONMEM_START 0x00000000
+ #define PCI_BUS_NONMEM_SIZE SZ_256M
+@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus
+ unsigned long flags;
+ u32 v;
- /*
- * Set the mm_cpumask(mm) bit for the current CPU.
-@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
- {
- unsigned int asid;
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+ addr = v3_open_config_window(bus, devfn, where);
-- spin_lock(&cpu_asid_lock);
-+ raw_spin_lock(&cpu_asid_lock);
- #ifdef CONFIG_SMP
- /*
- * Check the ASID again, in case the change was broadcast from
-@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
- */
- if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-- spin_unlock(&cpu_asid_lock);
-+ raw_spin_unlock(&cpu_asid_lock);
- return;
+ switch (size) {
+@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus
}
- #endif
-@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
+
+ v3_close_config_window();
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
+
+ *val = v;
+ return PCIBIOS_SUCCESSFUL;
+@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bu
+ unsigned long addr;
+ unsigned long flags;
+
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+ addr = v3_open_config_window(bus, devfn, where);
+
+ switch (size) {
+@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bu
}
- set_mm_context(mm, asid);
-- spin_unlock(&cpu_asid_lock);
-+ raw_spin_unlock(&cpu_asid_lock);
+ v3_close_config_window();
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
}
-Index: linux-2.6/arch/arm/mm/copypage-v4mc.c
+@@ -510,7 +510,7 @@ void __init pci_v3_preinit(void)
+ hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
+ hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
+
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+@@ -583,7 +583,7 @@ void __init pci_v3_preinit(void)
+ printk(KERN_ERR "PCI: unable to grab PCI error "
+ "interrupt: %d\n", ret);
+
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
+ }
+
+ void __init pci_v3_postinit(void)
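
The ixp4xx conversions that follow protect split-transaction register
accesses: each config-space access is an address/command store followed by a
data access, and the pair must not be interleaved between CPUs. A worked
interleaving (hypothetical addresses A0/A1 and datum D0) shows what the lock,
now raw, still has to prevent on RT:

    /* Without ixp4xx_pci_lock held across both stores:
     *
     *   CPU0: *PCI_CRP_AD_CBE = A0;
     *   CPU1: *PCI_CRP_AD_CBE = A1;   <- clobbers CPU0's address phase
     *   CPU0: *PCI_CRP_WDATA  = D0;   <- D0 now lands at address A1
     *
     * Every helper below therefore brackets the pair with
     * raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore().
     */
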
+Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-v4mc.c
-+++ linux-2.6/arch/arm/mm/copypage-v4mc.c
-@@ -30,7 +30,7 @@
- #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_MT_MINICACHE)
+--- linux-2.6.orig/arch/arm/mach-ixp4xx/common-pci.c
++++ linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
+@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
+ * these transactions are atomic or we will end up
+ * with corrupt data on the bus or in a driver.
+ */
+-static DEFINE_SPINLOCK(ixp4xx_pci_lock);
++static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
--static DEFINE_SPINLOCK(minicache_lock);
-+static DEFINE_RAW_SPINLOCK(minicache_lock);
+ /*
+ * Read from PCI config space
+@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
+ static void crp_read(u32 ad_cbe, u32 *data)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ *PCI_CRP_AD_CBE = ad_cbe;
+ *data = *PCI_CRP_RDATA;
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ }
/*
- * ARMv4 mini-dcache optimised copy_user_highpage
-@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct pag
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping(from), from);
+@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *da
+ static void crp_write(u32 ad_cbe, u32 data)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
+ *PCI_CRP_WDATA = data;
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ }
-- spin_lock(&minicache_lock);
-+ raw_spin_lock(&minicache_lock);
+ static inline int check_master_abort(void)
+@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
+ int retval = 0;
+ int i;
- set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(0xffff8000);
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- mc_copy_user_page((void *)0xffff8000, kto);
+ *PCI_NP_AD = addr;
-- spin_unlock(&minicache_lock);
-+ raw_spin_unlock(&minicache_lock);
+@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
+ if(check_master_abort())
+ retval = 1;
- kunmap_atomic(kto, KM_USER1);
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
}
-Index: linux-2.6/arch/arm/mm/copypage-v6.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-v6.c
-+++ linux-2.6/arch/arm/mm/copypage-v6.c
-@@ -27,7 +27,7 @@
- #define from_address (0xffff8000)
- #define to_address (0xffffc000)
--static DEFINE_SPINLOCK(v6_lock);
-+static DEFINE_RAW_SPINLOCK(v6_lock);
+@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
+ unsigned long flags;
+ int retval = 0;
- /*
- * Copy the user page. No aliasing to deal with so we can just
-@@ -89,7 +89,7 @@ static void v6_copy_user_highpage_aliasi
- * Now copy the page using the same cache colour as the
- * pages ultimate destination.
- */
-- spin_lock(&v6_lock);
-+ raw_spin_lock(&v6_lock);
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-@@ -102,7 +102,7 @@ static void v6_copy_user_highpage_aliasi
+ *PCI_NP_AD = addr;
- copy_page((void *)kto, (void *)kfrom);
+@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
+ if(check_master_abort())
+ retval = 1;
-- spin_unlock(&v6_lock);
-+ raw_spin_unlock(&v6_lock);
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
}
- /*
-@@ -122,13 +122,13 @@ static void v6_clear_user_highpage_alias
- * Now clear the page using the same cache colour as
- * the pages ultimate destination.
- */
-- spin_lock(&v6_lock);
-+ raw_spin_lock(&v6_lock);
+@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
+ unsigned long flags;
+ int retval = 0;
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
- clear_page((void *)to);
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-- spin_unlock(&v6_lock);
-+ raw_spin_unlock(&v6_lock);
+ *PCI_NP_AD = addr;
+
+@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
+ if(check_master_abort())
+ retval = 1;
+
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
}
- struct cpu_user_fns v6_user_fns __initdata = {
-Index: linux-2.6/arch/arm/mm/copypage-xscale.c
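
In the shark leds hunks below, sequoia_leds_event() does a read-modify-write
of the LED state behind an index/data register pair, and LED events can fire
from timer context; condensed shape of the handler (led_event_t handling
elided, register 0x09 as in the hunks):

    static void leds_event_like(led_event_t evt)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&leds_lock, flags);
            hw_led_state = sequoia_read(0x09);
            /* ... adjust hw_led_state according to evt ... */
            if (led_state & LED_STATE_ENABLED)
                    sequoia_write(hw_led_state, 0x09);
            raw_spin_unlock_irqrestore(&leds_lock, flags);
    }
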
+Index: linux-2.6/arch/arm/mach-shark/leds.c
===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-xscale.c
-+++ linux-2.6/arch/arm/mm/copypage-xscale.c
-@@ -32,7 +32,7 @@
- #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_MT_MINICACHE)
-
--static DEFINE_SPINLOCK(minicache_lock);
-+static DEFINE_RAW_SPINLOCK(minicache_lock);
+--- linux-2.6.orig/arch/arm/mach-shark/leds.c
++++ linux-2.6/arch/arm/mach-shark/leds.c
+@@ -36,7 +36,7 @@ static char led_state;
+ static short hw_led_state;
+ static short saved_state;
- /*
- * XScale mini-dcache optimised copy_user_highpage
-@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping(from), from);
+-static DEFINE_SPINLOCK(leds_lock);
++static DEFINE_RAW_SPINLOCK(leds_lock);
-- spin_lock(&minicache_lock);
-+ raw_spin_lock(&minicache_lock);
+ short sequoia_read(int addr) {
+ outw(addr,0x24);
+@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event
+ {
+ unsigned long flags;
- set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+- spin_lock_irqsave(&leds_lock, flags);
++ raw_spin_lock_irqsave(&leds_lock, flags);
- mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+ hw_led_state = sequoia_read(0x09);
-- spin_unlock(&minicache_lock);
-+ raw_spin_unlock(&minicache_lock);
+@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event
+ if (led_state & LED_STATE_ENABLED)
+ sequoia_write(hw_led_state,0x09);
- kunmap_atomic(kto, KM_USER1);
+- spin_unlock_irqrestore(&leds_lock, flags);
++ raw_spin_unlock_irqrestore(&leds_lock, flags);
}
-Index: linux-2.6/drivers/dma/ipu/ipu_irq.c
+
+ static int __init leds_init(void)
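
The cache-l2x0 hunks below are the textbook raw-lock case: outer-cache
maintenance is called from low-level code with preemption disabled. Note how
the range loops keep IRQ-off time bounded by working in 4 KiB blocks and
dropping the lock between blocks; a stripped-down rendering (the real
maintenance-register writes are elided):

    static void l2x0_op_range_like(unsigned long start, unsigned long end)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&l2x0_lock, flags);
            while (start < end) {
                    unsigned long blk_end = start + min(end - start, 4096UL);

                    while (start < blk_end) {
                            /* writel_relaxed(start, <maintenance reg>); */
                            start += CACHE_LINE_SIZE;
                    }
                    if (blk_end < end) {
                            /* let IRQs (and, on RT, waiters) get a look in */
                            raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                            raw_spin_lock_irqsave(&l2x0_lock, flags);
                    }
            }
            raw_spin_unlock_irqrestore(&l2x0_lock, flags);
    }
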
+Index: linux-2.6/arch/arm/mm/cache-l2x0.c
===================================================================
---- linux-2.6.orig/drivers/dma/ipu/ipu_irq.c
-+++ linux-2.6/drivers/dma/ipu/ipu_irq.c
-@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG
- /* Protects allocations from the above array of maps */
- static DEFINE_MUTEX(map_lock);
- /* Protects register accesses and individual mappings */
--static DEFINE_SPINLOCK(bank_lock);
-+static DEFINE_RAW_SPINLOCK(bank_lock);
+--- linux-2.6.orig/arch/arm/mm/cache-l2x0.c
++++ linux-2.6/arch/arm/mm/cache-l2x0.c
+@@ -26,7 +26,7 @@
+ #define CACHE_LINE_SIZE 32
- static struct ipu_irq_map *src2map(unsigned int src)
- {
-@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_da
- uint32_t reg;
- unsigned long lock_flags;
+ static void __iomem *l2x0_base;
+-static DEFINE_SPINLOCK(l2x0_lock);
++static DEFINE_RAW_SPINLOCK(l2x0_lock);
+ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+ static uint32_t l2x0_size;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
+ {
+ unsigned long flags;
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_da
- reg |= (1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ static void __l2x0_flush_all(void)
+@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
+ unsigned long flags;
+
+ /* clean all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2x0_flush_all();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- static void ipu_irq_mask(struct irq_data *d)
-@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data
- uint32_t reg;
- unsigned long lock_flags;
+ static void l2x0_clean_all(void)
+@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
+ unsigned long flags;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ /* clean all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data
- reg &= ~(1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
+ static void l2x0_inv_all(void)
+@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
+ unsigned long flags;
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ /* invalidate all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ /* Invalidating when L2 is enabled is a nono */
+ BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- static void ipu_irq_ack(struct irq_data *d)
-@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
+ static void l2x0_inv_range(unsigned long start, unsigned long end)
+@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ debug_writel(0x03);
+@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long
+ }
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
}
-
- ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- /**
-@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
- unsigned long lock_flags;
- bool ret;
+ static void l2x0_clean_range(unsigned long start, unsigned long end)
+@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned lo
+ return;
+ }
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- bank = map->bank;
- ret = bank && ipu_read_reg(bank->ipu, bank->status) &
- (1UL << (map->source & 31));
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned lo
+ }
- return ret;
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
- if (irq_map[i].source < 0) {
- unsigned long lock_flags;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = source;
- irq_map[i].bank = irq_bank + source / 32;
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ static void l2x0_flush_range(unsigned long start, unsigned long end)
+@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned lo
+ return;
+ }
- ret = irq_map[i].irq;
- pr_debug("IPU: mapped source %u to IRQ %u\n",
-@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
- pr_debug("IPU: unmapped source %u from IRQ %u\n",
- source, irq_map[i].irq);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned lo
+ debug_writel(0x00);
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = -EINVAL;
- irq_map[i].bank = NULL;
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
- ret = 0;
- break;
-@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq
- for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
+ static void l2x0_disable(void)
+ {
+ unsigned long flags;
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /*
- * Don't think we have to clear all interrupts here, they will
-@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq
- * might want to clear unhandled interrupts after the loop...
- */
- status &= ipu_read_reg(ipu, bank->control);
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2x0_flush_all();
+ writel_relaxed(0, l2x0_base + L2X0_CTRL);
+ dsb();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
- line--;
- status &= ~(1UL << line);
+ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
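
The ARM context.c hunks below cover ASID allocation, which runs on the
context-switch path and via IPI broadcast; neither may sleep, so both
cpu_asid_lock and mm->context.id_lock go raw. Condensed shape of the
IPI-safe update from the hunks:

    static void set_mm_context_like(struct mm_struct *mm, unsigned int asid)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&mm->context.id_lock, flags);
            if ((mm->context.id ^ cpu_last_asid) >> ASID_BITS) {
                    mm->context.id = asid;    /* stale ASID: install new one */
                    cpumask_clear(mm_cpumask(mm));
            }
            raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
    }
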
+Index: linux-2.6/arch/arm/mm/context.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/context.c
++++ linux-2.6/arch/arm/mm/context.c
+@@ -16,7 +16,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/tlbflush.h>
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
+-static DEFINE_SPINLOCK(cpu_asid_lock);
++static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+ #ifdef CONFIG_SMP
+ DEFINE_PER_CPU(struct mm_struct *, current_mm);
+@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, curre
+ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ mm->context.id = 0;
+- spin_lock_init(&mm->context.id_lock);
++ raw_spin_lock_init(&mm->context.id_lock);
+ }
- if (!map) {
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq,
- for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
-
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /* Not clearing all interrupts, see above */
- status &= ipu_read_reg(ipu, bank->control);
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
+ static void flush_context(void)
+@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_str
+ * the broadcast. This function is also called via IPI so the
+ * mm->context.id_lock has to be IRQ-safe.
+ */
+- spin_lock_irqsave(&mm->context.id_lock, flags);
++ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
+ if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+ /*
+ * Old version of ASID found. Set the new one and
+@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_str
+ mm->context.id = asid;
+ cpumask_clear(mm_cpumask(mm));
+ }
+- spin_unlock_irqrestore(&mm->context.id_lock, flags);
++ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
- line--;
- status &= ~(1UL << line);
+ /*
+ * Set the mm_cpumask(mm) bit for the current CPU.
+@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
+ {
+ unsigned int asid;
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
+- spin_lock(&cpu_asid_lock);
++ raw_spin_lock(&cpu_asid_lock);
+ #ifdef CONFIG_SMP
+ /*
+ * Check the ASID again, in case the change was broadcast from
+@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
+ */
+ if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+- spin_unlock(&cpu_asid_lock);
++ raw_spin_unlock(&cpu_asid_lock);
+ return;
+ }
+ #endif
+@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
+ }
- if (!map) {
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-Index: linux-2.6/drivers/pci/dmar.c
+ set_mm_context(mm, asid);
+- spin_unlock(&cpu_asid_lock);
++ raw_spin_unlock(&cpu_asid_lock);
+ }
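
The copypage-v4mc hunks below guard a fixed temporary mapping used for the
mini-dcache copy. The lock is only ever taken from process context under
kmap_atomic(), never from IRQ, which is why the cheaper non-irqsave form
suffices; condensed from the hunks:

    static void mc_copy_like(struct page *from, void *kto)
    {
            raw_spin_lock(&minicache_lock);
            /* map 'from' at the fixed window, invalidate the old TLB entry */
            set_pte_ext(TOP_PTE(0xffff8000),
                        pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
            flush_tlb_kernel_page(0xffff8000);
            mc_copy_user_page((void *)0xffff8000, kto);
            raw_spin_unlock(&minicache_lock);
    }
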
+Index: linux-2.6/arch/arm/mm/copypage-v4mc.c
===================================================================
---- linux-2.6.orig/drivers/pci/dmar.c
-+++ linux-2.6/drivers/pci/dmar.c
-@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *d
- (unsigned long long)iommu->cap,
- (unsigned long long)iommu->ecap);
-
-- spin_lock_init(&iommu->register_lock);
-+ raw_spin_lock_init(&iommu->register_lock);
-
- drhd->iommu = iommu;
- return 0;
-@@ -921,11 +921,11 @@ int qi_submit_sync(struct qi_desc *desc,
- restart:
- rc = 0;
-
-- spin_lock_irqsave(&qi->q_lock, flags);
-+ raw_spin_lock_irqsave(&qi->q_lock, flags);
- while (qi->free_cnt < 3) {
-- spin_unlock_irqrestore(&qi->q_lock, flags);
-+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
- cpu_relax();
-- spin_lock_irqsave(&qi->q_lock, flags);
-+ raw_spin_lock_irqsave(&qi->q_lock, flags);
- }
+--- linux-2.6.orig/arch/arm/mm/copypage-v4mc.c
++++ linux-2.6/arch/arm/mm/copypage-v4mc.c
+@@ -30,7 +30,7 @@
+ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+ L_PTE_MT_MINICACHE)
- index = qi->free_head;
-@@ -965,15 +965,15 @@ restart:
- if (rc)
- break;
+-static DEFINE_SPINLOCK(minicache_lock);
++static DEFINE_RAW_SPINLOCK(minicache_lock);
-- spin_unlock(&qi->q_lock);
-+ raw_spin_unlock(&qi->q_lock);
- cpu_relax();
-- spin_lock(&qi->q_lock);
-+ raw_spin_lock(&qi->q_lock);
- }
+ /*
+ * ARMv4 mini-dcache optimised copy_user_highpage
+@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct pag
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
- qi->desc_status[index] = QI_DONE;
+- spin_lock(&minicache_lock);
++ raw_spin_lock(&minicache_lock);
- reclaim_free_desc(qi);
-- spin_unlock_irqrestore(&qi->q_lock, flags);
-+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+ flush_tlb_kernel_page(0xffff8000);
- if (rc == -EAGAIN)
- goto restart;
-@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu
- if (!ecap_qis(iommu->ecap))
- return;
+ mc_copy_user_page((void *)0xffff8000, kto);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+- spin_unlock(&minicache_lock);
++ raw_spin_unlock(&minicache_lock);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_QIES))
-@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
- !(sts & DMA_GSTS_QIES), sts);
- end:
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ kunmap_atomic(kto, KM_USER1);
}
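
copypage-v6 is the aliasing-VIPT variant: the copy has to go through a
window with the destination page's cache colour, and v6_lock serialises the
two fixed windows. Outline only; offsets, TLB flushes and kmap handling are
as in the hunks below:

    static void v6_copy_like(struct page *to, struct page *from,
                             unsigned long offset)
    {
            raw_spin_lock(&v6_lock);
            set_pte_ext(TOP_PTE(from_address) + offset,
                        pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
            set_pte_ext(TOP_PTE(to_address) + offset,
                        pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
            /* ... flush_tlb_kernel_page() on both windows, copy_page() ... */
            raw_spin_unlock(&v6_lock);
    }
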
+Index: linux-2.6/arch/arm/mm/copypage-v6.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/copypage-v6.c
++++ linux-2.6/arch/arm/mm/copypage-v6.c
+@@ -27,7 +27,7 @@
+ #define from_address (0xffff8000)
+ #define to_address (0xffffc000)
+
+-static DEFINE_SPINLOCK(v6_lock);
++static DEFINE_RAW_SPINLOCK(v6_lock);
/*
-@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct inte
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
+ * Copy the user page. No aliasing to deal with so we can just
+@@ -89,7 +89,7 @@ static void v6_copy_user_highpage_aliasi
+ * Now copy the page using the same cache colour as the
+ * pages ultimate destination.
+ */
+- spin_lock(&v6_lock);
++ raw_spin_lock(&v6_lock);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
+@@ -102,7 +102,7 @@ static void v6_copy_user_highpage_aliasi
- /* write zero to the tail reg */
- writel(0, iommu->reg + DMAR_IQT_REG);
-@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct inte
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+ copy_page((void *)kto, (void *)kfrom);
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+- spin_unlock(&v6_lock);
++ raw_spin_unlock(&v6_lock);
}
/*
-@@ -1159,7 +1159,7 @@ int dmar_enable_qi(struct intel_iommu *i
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
+@@ -122,13 +122,13 @@ static void v6_clear_user_highpage_alias
+ * Now clear the page using the same cache colour as
+ * the pages ultimate destination.
+ */
+- spin_lock(&v6_lock);
++ raw_spin_lock(&v6_lock);
-- spin_lock_init(&qi->q_lock);
-+ raw_spin_lock_init(&qi->q_lock);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
+ flush_tlb_kernel_page(to);
+ clear_page((void *)to);
- __dmar_enable_qi(iommu);
+- spin_unlock(&v6_lock);
++ raw_spin_unlock(&v6_lock);
+ }
-@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *da
- unsigned long flag;
+ struct cpu_user_fns v6_user_fns __initdata = {
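
The xscale copypage hunks repeat the v4mc pattern; the key point, and the
reason none of these locks may sleep, is that they nest inside a
kmap_atomic() section, which is atomic even on RT. Nesting sketch (the
set_pte_ext() mapping of 'from' is elided):

    static void copy_highpage_like(struct page *to, struct page *from)
    {
            void *kto = kmap_atomic(to, KM_USER1);  /* enters atomic context */

            raw_spin_lock(&minicache_lock);         /* must never sleep here */
            mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
            raw_spin_unlock(&minicache_lock);

            kunmap_atomic(kto, KM_USER1);           /* leaves atomic context */
    }
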
+Index: linux-2.6/arch/arm/mm/copypage-xscale.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/copypage-xscale.c
++++ linux-2.6/arch/arm/mm/copypage-xscale.c
+@@ -32,7 +32,7 @@
+ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+ L_PTE_MT_MINICACHE)
- /* unmask it */
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(0, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+-static DEFINE_SPINLOCK(minicache_lock);
++static DEFINE_RAW_SPINLOCK(minicache_lock);
- void dmar_msi_mask(struct irq_data *data)
-@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data
- struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+ /*
+ * XScale mini-dcache optimised copy_user_highpage
+@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
- /* mask it */
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+- spin_lock(&minicache_lock);
++ raw_spin_lock(&minicache_lock);
- void dmar_msi_write(int irq, struct msi_msg *msg)
-@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_
- struct intel_iommu *iommu = irq_get_handler_data(irq);
- unsigned long flag;
+ set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+ flush_tlb_kernel_page(COPYPAGE_MINICACHE);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
- writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
- writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+ mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
- void dmar_msi_read(int irq, struct msi_msg *msg)
-@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_m
- struct intel_iommu *iommu = irq_get_handler_data(irq);
- unsigned long flag;
+- spin_unlock(&minicache_lock);
++ raw_spin_unlock(&minicache_lock);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
- msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
- msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ kunmap_atomic(kto, KM_USER1);
}
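
In ipu_irq.c below, bank_lock is taken from irq_chip callbacks (mask,
unmask, ack), which genirq invokes with the raw irq_desc lock held and IRQs
off, so the lock must be raw as well. Condensed shape of the mask callback;
how the map is fetched from the irq_data is simplified here:

    static void ipu_irq_mask_like(struct irq_data *d)
    {
            struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
            unsigned long lock_flags;
            uint32_t reg;

            raw_spin_lock_irqsave(&bank_lock, lock_flags);
            if (!map->bank) {
                    raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
                    return;
            }
            reg = ipu_read_reg(map->bank->ipu, map->bank->control);
            reg &= ~(1UL << (map->source & 31));  /* clear this source's bit */
            ipu_write_reg(map->bank->ipu, reg, map->bank->control);
            raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
    }
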
+Index: linux-2.6/drivers/dma/ipu/ipu_irq.c
+===================================================================
+--- linux-2.6.orig/drivers/dma/ipu/ipu_irq.c
++++ linux-2.6/drivers/dma/ipu/ipu_irq.c
+@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG
+ /* Protects allocations from the above array of maps */
+ static DEFINE_MUTEX(map_lock);
+ /* Protects register accesses and individual mappings */
+-static DEFINE_SPINLOCK(bank_lock);
++static DEFINE_RAW_SPINLOCK(bank_lock);
- static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
-@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *de
- u32 fault_status;
- unsigned long flag;
-
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *de
- writel(DMA_FRCD_F, iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
+ static struct ipu_irq_map *src2map(unsigned int src)
+ {
+@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_da
+ uint32_t reg;
+ unsigned long lock_flags;
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- dmar_fault_do_one(iommu, type, fault_reason,
- source_id, guest_addr);
-@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *de
- fault_index++;
- if (fault_index >= cap_num_fault_regs(iommu->cap))
- fault_index = 0;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
}
- clear_rest:
- /* clear all the other faults */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_da
+ reg |= (1UL << (map->source & 31));
+ ipu_write_reg(bank->ipu, reg, bank->control);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- return IRQ_HANDLED;
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
-Index: linux-2.6/drivers/pci/intel-iommu.c
-===================================================================
---- linux-2.6.orig/drivers/pci/intel-iommu.c
-+++ linux-2.6/drivers/pci/intel-iommu.c
-@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct
-
- addr = iommu->root_entry;
+ static void ipu_irq_mask(struct irq_data *d)
+@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data
+ uint32_t reg;
+ unsigned long lock_flags;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
-@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
+ }
+@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data
+ reg &= ~(1UL << (map->source & 31));
+ ipu_write_reg(bank->ipu, reg, bank->control);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
- static void iommu_flush_write_buffer(struct intel_iommu *iommu)
-@@ -953,14 +953,14 @@ static void iommu_flush_write_buffer(str
- if (!rwbf_quirk && !cap_rwbf(iommu->cap))
- return;
+ static void ipu_irq_ack(struct irq_data *d)
+@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data
+ struct ipu_irq_bank *bank;
+ unsigned long lock_flags;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
+ }
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
- /* return value determine if we need a write buffer flush */
-@@ -987,14 +987,14 @@ static void __iommu_flush_context(struct
- }
- val |= DMA_CCMD_ICC;
-
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
+ /**
+@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
+ unsigned long lock_flags;
+ bool ret;
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
- dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ bank = map->bank;
+ ret = bank && ipu_read_reg(bank->ipu, bank->status) &
+ (1UL << (map->source & 31));
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return ret;
}
+@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
+ if (irq_map[i].source < 0) {
+ unsigned long lock_flags;
- /* return value determine if we need a write buffer flush */
-@@ -1033,7 +1033,7 @@ static void __iommu_flush_iotlb(struct i
- if (cap_write_drain(iommu->cap))
- val |= DMA_TLB_WRITE_DRAIN;
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ irq_map[i].source = source;
+ irq_map[i].bank = irq_bank + source / 32;
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- /* Note: Only uses first TLB reg currently */
- if (val_iva)
- dmar_writeq(iommu->reg + tlb_offset, val_iva);
-@@ -1043,7 +1043,7 @@ static void __iommu_flush_iotlb(struct i
- IOMMU_WAIT_OP(iommu, tlb_offset + 8,
- dmar_readq, (!(val & DMA_TLB_IVT)), val);
+ ret = irq_map[i].irq;
+ pr_debug("IPU: mapped source %u to IRQ %u\n",
+@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
+ pr_debug("IPU: unmapped source %u from IRQ %u\n",
+ source, irq_map[i].irq);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ irq_map[i].source = -EINVAL;
+ irq_map[i].bank = NULL;
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- /* check IOTLB invalidation granularity */
- if (DMA_TLB_IAIG(val) == 0)
-@@ -1159,7 +1159,7 @@ static void iommu_disable_protect_mem_re
- u32 pmen;
- unsigned long flags;
+ ret = 0;
+ break;
+@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq
+ for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
+ struct ipu_irq_bank *bank = irq_bank + i;
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- pmen = readl(iommu->reg + DMAR_PMEN_REG);
- pmen &= ~DMA_PMEN_EPM;
- writel(pmen, iommu->reg + DMAR_PMEN_REG);
-@@ -1168,7 +1168,7 @@ static void iommu_disable_protect_mem_re
- IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
- readl, !(pmen & DMA_PMEN_PRS), pmen);
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ status = ipu_read_reg(ipu, bank->status);
+ /*
+ * Don't think we have to clear all interrupts here, they will
+@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq
+ * might want to clear unhandled interrupts after the loop...
+ */
+ status &= ipu_read_reg(ipu, bank->control);
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
+ while ((line = ffs(status))) {
+ struct ipu_irq_map *map;
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
+ line--;
+ status &= ~(1UL << line);
- static int iommu_enable_translation(struct intel_iommu *iommu)
-@@ -1176,7 +1176,7 @@ static int iommu_enable_translation(stru
- u32 sts;
- unsigned long flags;
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ map = src2map(32 * i + line);
+ if (map)
+ irq = map->irq;
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ if (!map) {
+ pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
+@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq,
+ for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
+ struct ipu_irq_bank *bank = irq_bank + i;
-@@ -1184,7 +1184,7 @@ static int iommu_enable_translation(stru
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ status = ipu_read_reg(ipu, bank->status);
+ /* Not clearing all interrupts, see above */
+ status &= ipu_read_reg(ipu, bank->control);
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
+ while ((line = ffs(status))) {
+ struct ipu_irq_map *map;
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- return 0;
- }
+ line--;
+ status &= ~(1UL << line);
-@@ -1193,7 +1193,7 @@ static int iommu_disable_translation(str
- u32 sts;
- unsigned long flag;
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ map = src2map(32 * i + line);
+ if (map)
+ irq = map->irq;
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->gcmd &= ~DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ if (!map) {
+ pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
+Index: linux-2.6/drivers/pci/dmar.c
+===================================================================
+--- linux-2.6.orig/drivers/pci/dmar.c
++++ linux-2.6/drivers/pci/dmar.c
+@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *d
+ (unsigned long long)iommu->cap,
+ (unsigned long long)iommu->ecap);
-@@ -1201,7 +1201,7 @@ static int iommu_disable_translation(str
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+- spin_lock_init(&iommu->register_lock);
++ raw_spin_lock_init(&iommu->register_lock);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ drhd->iommu = iommu;
return 0;
- }
-
-@@ -3321,7 +3321,7 @@ static int iommu_suspend(void)
- for_each_active_iommu(iommu, drhd) {
- iommu_disable_translation(iommu);
+@@ -921,11 +921,11 @@ int qi_submit_sync(struct qi_desc *desc,
+ restart:
+ rc = 0;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+- spin_lock_irqsave(&qi->q_lock, flags);
++ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ while (qi->free_cnt < 3) {
+- spin_unlock_irqrestore(&qi->q_lock, flags);
++ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ cpu_relax();
+- spin_lock_irqsave(&qi->q_lock, flags);
++ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ }
- iommu->iommu_state[SR_DMAR_FECTL_REG] =
- readl(iommu->reg + DMAR_FECTL_REG);
-@@ -3332,7 +3332,7 @@ static int iommu_suspend(void)
- iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
- readl(iommu->reg + DMAR_FEUADDR_REG);
+ index = qi->free_head;
+@@ -965,15 +965,15 @@ restart:
+ if (rc)
+ break;
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_unlock(&qi->q_lock);
++ raw_spin_unlock(&qi->q_lock);
+ cpu_relax();
+- spin_lock(&qi->q_lock);
++ raw_spin_lock(&qi->q_lock);
}
- return 0;
-@@ -3359,7 +3359,7 @@ static void iommu_resume(void)
-
- for_each_active_iommu(iommu, drhd) {
+ qi->desc_status[index] = QI_DONE;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ reclaim_free_desc(qi);
+- spin_unlock_irqrestore(&qi->q_lock, flags);
++ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
- writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
- iommu->reg + DMAR_FECTL_REG);
-@@ -3370,7 +3370,7 @@ static void iommu_resume(void)
- writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
- iommu->reg + DMAR_FEUADDR_REG);
+ if (rc == -EAGAIN)
+ goto restart;
+@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu
+ if (!ecap_qis(iommu->ecap))
+ return;
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- for_each_active_iommu(iommu, drhd)
-Index: linux-2.6/drivers/pci/intr_remapping.c
-===================================================================
---- linux-2.6.orig/drivers/pci/intr_remapping.c
-+++ linux-2.6/drivers/pci/intr_remapping.c
-@@ -46,7 +46,7 @@ static __init int setup_intremap(char *s
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_QIES))
+@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+ !(sts & DMA_GSTS_QIES), sts);
+ end:
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
- early_param("intremap", setup_intremap);
-
--static DEFINE_SPINLOCK(irq_2_ir_lock);
-+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
- static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
- {
-@@ -63,12 +63,12 @@ int get_irte(int irq, struct irte *entry
- if (!entry || !irq_iommu)
- return -1;
+ /*
+@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct inte
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- *entry = *(irq_iommu->iommu->ir_table->base + index);
+ /* write zero to the tail reg */
+ writel(0, iommu->reg + DMAR_IQT_REG);
+@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct inte
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return 0;
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-@@ -102,7 +102,7 @@ int alloc_irte(struct intel_iommu *iommu
- return -1;
- }
+ /*
+@@ -1159,7 +1159,7 @@ int dmar_enable_qi(struct intel_iommu *i
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
-@@ -114,7 +114,7 @@ int alloc_irte(struct intel_iommu *iommu
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+- spin_lock_init(&qi->q_lock);
++ raw_spin_lock_init(&qi->q_lock);
- if (index == start_index) {
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
-@@ -128,7 +128,7 @@ int alloc_irte(struct intel_iommu *iommu
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
+ __dmar_enable_qi(iommu);
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *da
+ unsigned long flag;
- return index;
+ /* unmask it */
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(0, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16
- if (!irq_iommu)
- return -1;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- *sub_handle = irq_iommu->sub_handle;
- index = irq_iommu->irte_index;
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return index;
- }
+ void dmar_msi_mask(struct irq_data *data)
+@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data
+ struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
-@@ -168,14 +168,14 @@ int set_irte_irq(int irq, struct intel_i
- if (!irq_iommu)
- return -1;
+ /* mask it */
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ void dmar_msi_write(int irq, struct msi_msg *msg)
+@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
+ unsigned long flag;
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = subhandle;
- irq_iommu->irte_mask = 0;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+ writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+ writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ void dmar_msi_read(int irq, struct msi_msg *msg)
+@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_m
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
+ unsigned long flag;
- return 0;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+ msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+ msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-@@ -191,7 +191,7 @@ int modify_irte(int irq, struct irte *ir
- if (!irq_iommu)
- return -1;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *de
+ u32 fault_status;
+ unsigned long flag;
- iommu = irq_iommu->iommu;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault_status)
+ printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *de
+ writel(DMA_FRCD_F, iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
-@@ -203,7 +203,7 @@ int modify_irte(int irq, struct irte *ir
- __iommu_flush_cache(iommu, irte, sizeof(*irte));
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- rc = qi_flush_iec(iommu, index, 0);
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ dmar_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
+@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *de
+ fault_index++;
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
+ fault_index = 0;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ }
+ clear_rest:
+ /* clear all the other faults */
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ writel(fault_status, iommu->reg + DMAR_FSTS_REG);
- return rc;
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return IRQ_HANDLED;
}
-@@ -271,7 +271,7 @@ int free_irte(int irq)
- if (!irq_iommu)
- return -1;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
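
The interrupt-remapping hunks interleaved above, plus the intel-iommu.c
hunks below, complete the VT-d side: irq_2_ir_lock guards the IRTE table and
is taken under the raw irq_desc lock (set_affinity and friends), and the
struct q_inval / struct intel_iommu lock fields change type to match in
include/linux/intel-iommu.h further down. Shape of an IRTE lookup per the
hunks (a condensed rendering, not the exact function):

    static int get_irte_like(struct irq_2_iommu *irq_iommu, struct irte *entry)
    {
            unsigned long flags;
            int index;

            if (!entry || !irq_iommu)
                    return -1;

            raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
            index = irq_iommu->irte_index + irq_iommu->sub_handle;
            *entry = *(irq_iommu->iommu->ir_table->base + index);
            raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

            return 0;
    }
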
+Index: linux-2.6/drivers/pci/intel-iommu.c
+===================================================================
+--- linux-2.6.orig/drivers/pci/intel-iommu.c
++++ linux-2.6/drivers/pci/intel-iommu.c
+@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct
- rc = clear_entries(irq_iommu);
+ addr = iommu->root_entry;
-@@ -280,7 +280,7 @@ int free_irte(int irq)
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = 0;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
+@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_RTPS), sts);
- return rc;
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-@@ -410,7 +410,7 @@ static void iommu_set_intr_remapping(str
-
- addr = virt_to_phys((void *)iommu->ir_table->base);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+@@ -953,14 +953,14 @@ static void iommu_flush_write_buffer(str
+ if (!rwbf_quirk && !cap_rwbf(iommu->cap))
+ return;
- dmar_writeq(iommu->reg + DMAR_IRTA_REG,
- (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
-@@ -421,7 +421,7 @@ static void iommu_set_intr_remapping(str
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
+ /* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRTPS), sts);
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ readl, (!(val & DMA_GSTS_WBFS)), val);
- /*
- * global invalidation of interrupt entry cache before enabling
-@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(str
- */
- qi_global_iec(iommu);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ /* return value determine if we need a write buffer flush */
+@@ -987,14 +987,14 @@ static void __iommu_flush_context(struct
+ }
+ val |= DMA_CCMD_ICC;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
+ dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ /* return value determine if we need a write buffer flush */
+@@ -1033,7 +1033,7 @@ static void __iommu_flush_iotlb(struct i
+ if (cap_write_drain(iommu->cap))
+ val |= DMA_TLB_WRITE_DRAIN;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ /* Note: Only uses first TLB reg currently */
+ if (val_iva)
+ dmar_writeq(iommu->reg + tlb_offset, val_iva);
+@@ -1043,7 +1043,7 @@ static void __iommu_flush_iotlb(struct i
+ IOMMU_WAIT_OP(iommu, tlb_offset + 8,
+ dmar_readq, (!(val & DMA_TLB_IVT)), val);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ /* check IOTLB invalidation granularity */
+ if (DMA_TLB_IAIG(val) == 0)
+@@ -1159,7 +1159,7 @@ static void iommu_disable_protect_mem_re
+ u32 pmen;
+ unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
-
- /* Enable interrupt-remapping */
- iommu->gcmd |= DMA_GCMD_IRE;
-@@ -438,7 +438,7 @@ static void iommu_set_intr_remapping(str
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRES), sts);
+ pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ pmen &= ~DMA_PMEN_EPM;
+ writel(pmen, iommu->reg + DMAR_PMEN_REG);
+@@ -1168,7 +1168,7 @@ static void iommu_disable_protect_mem_re
+ IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+ readl, !(pmen & DMA_PMEN_PRS), pmen);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-
-@@ -486,7 +486,7 @@ static void iommu_disable_intr_remapping
- */
- qi_global_iec(iommu);
+ static int iommu_enable_translation(struct intel_iommu *iommu)
+@@ -1176,7 +1176,7 @@ static int iommu_enable_translation(stru
+ u32 sts;
+ unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_IRES))
-@@ -499,7 +499,7 @@ static void iommu_disable_intr_remapping
- readl, !(sts & DMA_GSTS_IRES), sts);
+@@ -1184,7 +1184,7 @@ static int iommu_enable_translation(stru
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_TES), sts);
- end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ return 0;
}
- int __init intr_remapping_supported(void)
-Index: linux-2.6/include/linux/intel-iommu.h
-===================================================================
---- linux-2.6.orig/include/linux/intel-iommu.h
-+++ linux-2.6/include/linux/intel-iommu.h
-@@ -271,7 +271,7 @@ struct qi_desc {
- };
-
- struct q_inval {
-- spinlock_t q_lock;
-+ raw_spinlock_t q_lock;
- struct qi_desc *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
-@@ -311,7 +311,7 @@ struct intel_iommu {
- u64 cap;
- u64 ecap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-- spinlock_t register_lock; /* protect register handling */
-+ raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
-Index: linux-2.6/lib/atomic64.c
-===================================================================
---- linux-2.6.orig/lib/atomic64.c
-+++ linux-2.6/lib/atomic64.c
-@@ -29,11 +29,11 @@
- * Ensure each lock is in a separate cacheline.
- */
- static union {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- char pad[L1_CACHE_BYTES];
- } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
-
--static inline spinlock_t *lock_addr(const atomic64_t *v)
-+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
- {
- unsigned long addr = (unsigned long) v;
+@@ -1193,7 +1193,7 @@ static int iommu_disable_translation(str
+ u32 sts;
+ unsigned long flag;
-@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(cons
- long long atomic64_read(const atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ iommu->gcmd &= ~DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
- }
- EXPORT_SYMBOL(atomic64_read);
-@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
- void atomic64_set(atomic64_t *v, long long i)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
+@@ -1201,7 +1201,7 @@ static int iommu_disable_translation(str
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (!(sts & DMA_GSTS_TES)), sts);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- v->counter = i;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return 0;
}
- EXPORT_SYMBOL(atomic64_set);
- void atomic64_add(long long a, atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
+@@ -3321,7 +3321,7 @@ static int iommu_suspend(void)
+ for_each_active_iommu(iommu, drhd) {
+ iommu_disable_translation(iommu);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- v->counter += a;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- }
- EXPORT_SYMBOL(atomic64_add);
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- long long atomic64_add_return(long long a, atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ iommu->iommu_state[SR_DMAR_FECTL_REG] =
+ readl(iommu->reg + DMAR_FECTL_REG);
+@@ -3332,7 +3332,7 @@ static int iommu_suspend(void)
+ iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+ readl(iommu->reg + DMAR_FEUADDR_REG);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter += a;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
- }
- EXPORT_SYMBOL(atomic64_add_return);
-@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
- void atomic64_sub(long long a, atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+ return 0;
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- v->counter -= a;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- }
- EXPORT_SYMBOL(atomic64_sub);
+@@ -3359,7 +3359,7 @@ static void iommu_resume(void)
- long long atomic64_sub_return(long long a, atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ for_each_active_iommu(iommu, drhd) {
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter -= a;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
- }
- EXPORT_SYMBOL(atomic64_sub_return);
-@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
- long long atomic64_dec_if_positive(atomic64_t *v)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter - 1;
- if (val >= 0)
- v->counter = val;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
- }
- EXPORT_SYMBOL(atomic64_dec_if_positive);
-@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
- long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+ iommu->reg + DMAR_FECTL_REG);
+@@ -3370,7 +3370,7 @@ static void iommu_resume(void)
+ writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+ iommu->reg + DMAR_FEUADDR_REG);
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter;
- if (val == o)
- v->counter = n;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ for_each_active_iommu(iommu, drhd)
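
[Note on the intel-iommu.c hunks above: they swap every register_lock user over to the raw_spin_* API. On PREEMPT_RT a plain spinlock_t is substituted by a sleeping rt_mutex, and these sections run with hard interrupts disabled while busy-waiting on IOMMU status registers via IOMMU_WAIT_OP(), where sleeping is not an option. A minimal standalone sketch of the pattern, with invented register and bit names, not taken from the patch:

    #include <linux/spinlock.h>
    #include <asm/io.h>

    /* raw_spinlock_t stays a spinning lock even on PREEMPT_RT, so it
     * may legally be held with IRQs hard-disabled around a register
     * poll like the IOMMU_WAIT_OP() sections above.
     */
    static DEFINE_RAW_SPINLOCK(example_register_lock);

    static u32 example_wait_ready(void __iomem *status_reg)
    {
            unsigned long flags;
            u32 sts;

            raw_spin_lock_irqsave(&example_register_lock, flags);
            do {
                    sts = readl(status_reg); /* poll, IRQs stay off */
            } while (!(sts & 0x1));          /* invented ready bit */
            raw_spin_unlock_irqrestore(&example_register_lock, flags);

            return sts;
    }
]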
+Index: linux-2.6/drivers/pci/intr_remapping.c
+===================================================================
+--- linux-2.6.orig/drivers/pci/intr_remapping.c
++++ linux-2.6/drivers/pci/intr_remapping.c
+@@ -46,7 +46,7 @@ static __init int setup_intremap(char *s
}
- EXPORT_SYMBOL(atomic64_cmpxchg);
-@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
- long long atomic64_xchg(atomic64_t *v, long long new)
+ early_param("intremap", setup_intremap);
+
+-static DEFINE_SPINLOCK(irq_2_ir_lock);
++static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+
+ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- long long val;
+@@ -63,12 +63,12 @@ int get_irte(int irq, struct irte *entry
+ if (!entry || !irq_iommu)
+ return -1;
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- val = v->counter;
- v->counter = new;
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return val;
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+ index = irq_iommu->irte_index + irq_iommu->sub_handle;
+ *entry = *(irq_iommu->iommu->ir_table->base + index);
+
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ return 0;
}
- EXPORT_SYMBOL(atomic64_xchg);
-@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
- int atomic64_add_unless(atomic64_t *v, long long a, long long u)
- {
- unsigned long flags;
-- spinlock_t *lock = lock_addr(v);
-+ raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
-- spin_lock_irqsave(lock, flags);
-+ raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
- v->counter += a;
- ret = 1;
+@@ -102,7 +102,7 @@ int alloc_irte(struct intel_iommu *iommu
+ return -1;
}
-- spin_unlock_irqrestore(lock, flags);
-+ raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ do {
+ for (i = index; i < index + count; i++)
+ if (table->base[i].present)
+@@ -114,7 +114,7 @@ int alloc_irte(struct intel_iommu *iommu
+ index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+
+ if (index == start_index) {
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ printk(KERN_ERR "can't allocate an IRTE\n");
+ return -1;
+ }
+@@ -128,7 +128,7 @@ int alloc_irte(struct intel_iommu *iommu
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = mask;
+
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+ return index;
}
- EXPORT_SYMBOL(atomic64_add_unless);
-@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
- int i;
+@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16
+ if (!irq_iommu)
+ return -1;
- for (i = 0; i < NR_LOCKS; ++i)
-- spin_lock_init(&atomic64_lock[i].lock);
-+ raw_spin_lock_init(&atomic64_lock[i].lock);
- return 0;
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ *sub_handle = irq_iommu->sub_handle;
+ index = irq_iommu->irte_index;
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ return index;
}
-Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
-+++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
-@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
- u32 full;
- };
+@@ -168,14 +168,14 @@ int set_irte_irq(int irq, struct intel_i
+ if (!irq_iommu)
+ return -1;
--struct amd_l3_cache {
-- struct amd_northbridge *nb;
-- unsigned indices;
-- u8 subcaches[4];
--};
--
--struct _cpuid4_info {
-+struct _cpuid4_info_regs {
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned long size;
-- struct amd_l3_cache *l3;
-- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
-+ struct amd_northbridge *nb;
- };
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
--/* subset of above _cpuid4_info w/o shared_cpu_map */
--struct _cpuid4_info_regs {
-- union _cpuid4_leaf_eax eax;
-- union _cpuid4_leaf_ebx ebx;
-- union _cpuid4_leaf_ecx ecx;
-- unsigned long size;
-- struct amd_l3_cache *l3;
-+struct _cpuid4_info {
-+ struct _cpuid4_info_regs base;
-+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
- };
+ irq_iommu->iommu = iommu;
+ irq_iommu->irte_index = index;
+ irq_iommu->sub_handle = subhandle;
+ irq_iommu->irte_mask = 0;
- unsigned short num_cache_leaves;
-@@ -314,12 +303,13 @@ struct _cache_attr {
- /*
- * L3 cache descriptors
- */
--static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
-+static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
- {
-+ struct amd_l3_cache *l3 = &nb->l3_cache;
- unsigned int sc0, sc1, sc2, sc3;
- u32 val = 0;
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-- pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
-+ pci_read_config_dword(nb->misc, 0x1C4, &val);
+ return 0;
+ }
+@@ -191,7 +191,7 @@ int modify_irte(int irq, struct irte *ir
+ if (!irq_iommu)
+ return -1;
- /* calculate subcache sizes */
- l3->subcaches[0] = sc0 = !(val & BIT(0));
-@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
- static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
- int index)
- {
-- static struct amd_l3_cache *__cpuinitdata l3_caches;
- int node;
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- /* only for L3, and not in virtualized environments */
-- if (index < 3 || amd_nb_num() == 0)
-+ if (index < 3)
- return;
-
-- /*
-- * Strictly speaking, the amount in @size below is leaked since it is
-- * never freed but this is done only on shutdown so it doesn't matter.
-- */
-- if (!l3_caches) {
-- int size = amd_nb_num() * sizeof(struct amd_l3_cache);
--
-- l3_caches = kzalloc(size, GFP_ATOMIC);
-- if (!l3_caches)
-- return;
-- }
--
- node = amd_get_nb_id(smp_processor_id());
--
-- if (!l3_caches[node].nb) {
-- l3_caches[node].nb = node_to_amd_nb(node);
-- amd_calc_l3_indices(&l3_caches[node]);
-- }
--
-- this_leaf->l3 = &l3_caches[node];
-+ this_leaf->nb = node_to_amd_nb(node);
-+ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
-+ amd_calc_l3_indices(this_leaf->nb);
- }
-
- /*
-@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
- *
- * @returns: the disabled index if used or negative value if slot free.
- */
--int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
-+int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
- {
- unsigned int reg = 0;
+ iommu = irq_iommu->iommu;
-- pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
-+ pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
+@@ -203,7 +203,7 @@ int modify_irte(int irq, struct irte *ir
+ __iommu_flush_cache(iommu, irte, sizeof(*irte));
- /* check whether this slot is activated already */
- if (reg & (3UL << 30))
-@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
- {
- int index;
+ rc = qi_flush_iec(iommu, index, 0);
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-- if (!this_leaf->l3 ||
-- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
+ return rc;
+ }
+@@ -271,7 +271,7 @@ int free_irte(int irq)
+ if (!irq_iommu)
+ return -1;
-- index = amd_get_l3_disable_slot(this_leaf->l3, slot);
-+ index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
- if (index >= 0)
- return sprintf(buf, "%d\n", index);
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
- SHOW_CACHE_DISABLE(0)
- SHOW_CACHE_DISABLE(1)
+ rc = clear_entries(irq_iommu);
--static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
-+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
- unsigned slot, unsigned long idx)
- {
- int i;
-@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct
- for (i = 0; i < 4; i++) {
- u32 reg = idx | (i << 20);
+@@ -280,7 +280,7 @@ int free_irte(int irq)
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = 0;
-- if (!l3->subcaches[i])
-+ if (!nb->l3_cache.subcaches[i])
- continue;
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
-+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ return rc;
+ }
+@@ -410,7 +410,7 @@ static void iommu_set_intr_remapping(str
- /*
- * We need to WBINVD on a core on the node containing the L3
-@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct
- wbinvd_on_cpu(cpu);
+ addr = virt_to_phys((void *)iommu->ir_table->base);
- reg |= BIT(31);
-- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
-+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- }
- }
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
-@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct
- *
- * @return: 0 on success, error status on failure
- */
--int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
-+int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
- unsigned long index)
- {
- int ret = 0;
+ dmar_writeq(iommu->reg + DMAR_IRTA_REG,
+ (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
+@@ -421,7 +421,7 @@ static void iommu_set_intr_remapping(str
- /* check if @slot is already used or the index is already disabled */
-- ret = amd_get_l3_disable_slot(l3, slot);
-+ ret = amd_get_l3_disable_slot(nb, slot);
- if (ret >= 0)
- return -EINVAL;
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_IRTPS), sts);
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-- if (index > l3->indices)
-+ if (index > nb->l3_cache.indices)
- return -EINVAL;
+ /*
+ * global invalidation of interrupt entry cache before enabling
+@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(str
+ */
+ qi_global_iec(iommu);
- /* check whether the other slot has disabled the same index already */
-- if (index == amd_get_l3_disable_slot(l3, !slot))
-+ if (index == amd_get_l3_disable_slot(nb, !slot))
- return -EINVAL;
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
-- amd_l3_disable_index(l3, cpu, slot, index);
-+ amd_l3_disable_index(nb, cpu, slot, index);
+ /* Enable interrupt-remapping */
+ iommu->gcmd |= DMA_GCMD_IRE;
+@@ -438,7 +438,7 @@ static void iommu_set_intr_remapping(str
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_IRES), sts);
- return 0;
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
-- if (!this_leaf->l3 ||
-- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
- cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
- if (strict_strtoul(buf, 10, &val) < 0)
- return -EINVAL;
-- err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
-+ err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
- if (err) {
- if (err == -EEXIST)
- printk(KERN_WARNING "L3 disable slot %d in use!\n",
-@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
- static ssize_t
- show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
- {
-- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
+@@ -486,7 +486,7 @@ static void iommu_disable_intr_remapping
+ */
+ qi_global_iec(iommu);
- return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
-@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
-- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_IRES))
+@@ -499,7 +499,7 @@ static void iommu_disable_intr_remapping
+ readl, !(sts & DMA_GSTS_IRES), sts);
- if (strict_strtoul(buf, 16, &val) < 0)
-@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
- return;
- }
- this_leaf = CPUID4_INFO_IDX(cpu, index);
-- num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
-+ num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+ end:
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
- if (num_threads_sharing == 1)
- cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
-@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
- for (i = 0; i < num_cache_leaves; i++)
- cache_remove_shared_cpu_map(cpu, i);
+ int __init intr_remapping_supported(void)
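
[Note on the intr_remapping.c conversion above: it covers alloc_irte(), whose allocation loop runs entirely under the now-raw irq_2_ir_lock; these paths are reached from IRQ chip callbacks with interrupts off, so the lock must not sleep on RT. Roughly, the loop scans the remap table circularly for a free run of entries. A simplified standalone sketch, with invented names and sizes, assuming start and count are aligned so a run never crosses the table end (the real code enforces that with a mask):

    #include <linux/spinlock.h>

    #define EX_ENTRIES 256                  /* invented table size */

    static DEFINE_RAW_SPINLOCK(ex_table_lock);
    static bool ex_present[EX_ENTRIES];

    /* Returns the first index of a free run of `count` slots, or -1. */
    static int ex_alloc_entries(int count, int start)
    {
            unsigned long flags;
            int index = start, i;

            raw_spin_lock_irqsave(&ex_table_lock, flags);
            do {
                    for (i = index; i < index + count; i++)
                            if (ex_present[i])
                                    break;
                    if (i == index + count)
                            break;          /* free run found */
                    index = (index + count) % EX_ENTRIES;
                    if (index == start) {
                            raw_spin_unlock_irqrestore(&ex_table_lock, flags);
                            return -1;      /* table exhausted */
                    }
            } while (1);

            for (i = index; i < index + count; i++)
                    ex_present[i] = true;   /* claim the run */
            raw_spin_unlock_irqrestore(&ex_table_lock, flags);

            return index;
    }
]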
+Index: linux-2.6/include/linux/intel-iommu.h
+===================================================================
+--- linux-2.6.orig/include/linux/intel-iommu.h
++++ linux-2.6/include/linux/intel-iommu.h
+@@ -271,7 +271,7 @@ struct qi_desc {
+ };
-- kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
- }
+ struct q_inval {
+- spinlock_t q_lock;
++ raw_spinlock_t q_lock;
+ struct qi_desc *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+@@ -311,7 +311,7 @@ struct intel_iommu {
+ u64 cap;
+ u64 ecap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+- spinlock_t register_lock; /* protect register handling */
++ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
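
[Note on the intel-iommu.h hunks above: changing the declared types is what makes the conversion whole-program safe. Once q_lock and register_lock are raw_spinlock_t, any leftover spin_lock() caller fails to compile instead of silently mixing lock types. A tiny illustration with an invented struct, not from the patch:

    #include <linux/spinlock.h>

    struct example_iommu {
            raw_spinlock_t register_lock; /* protects register handling */
    };

    static void example_iommu_init(struct example_iommu *ex)
    {
            raw_spin_lock_init(&ex->register_lock);
            /* spin_lock(&ex->register_lock) would now be a type error,
             * so no unconverted caller can slip through.
             */
    }
]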
+Index: linux-2.6/lib/atomic64.c
+===================================================================
+--- linux-2.6.orig/lib/atomic64.c
++++ linux-2.6/lib/atomic64.c
+@@ -29,11 +29,11 @@
+ * Ensure each lock is in a separate cacheline.
+ */
+ static union {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+ } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
--static int
--__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
--{
-- struct _cpuid4_info_regs *leaf_regs =
-- (struct _cpuid4_info_regs *)this_leaf;
--
-- return cpuid4_cache_lookup_regs(index, leaf_regs);
--}
--
- static void __cpuinit get_cpu_leaves(void *_retval)
+-static inline spinlock_t *lock_addr(const atomic64_t *v)
++static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
- int j, *retval = _retval, cpu = smp_processor_id();
+ unsigned long addr = (unsigned long) v;
- /* Do cpuid and store the results */
- for (j = 0; j < num_cache_leaves; j++) {
-- struct _cpuid4_info *this_leaf;
-- this_leaf = CPUID4_INFO_IDX(cpu, j);
-- *retval = cpuid4_cache_lookup(j, this_leaf);
-+ struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
-+
-+ *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
- if (unlikely(*retval < 0)) {
- int i;
+@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(cons
+ long long atomic64_read(const atomic64_t *v)
+ {
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
-@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
- return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
}
+ EXPORT_SYMBOL(atomic64_read);
+@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
+ void atomic64_set(atomic64_t *v, long long i)
+ {
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
--show_one_plus(level, eax.split.level, 0);
--show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
--show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
--show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
--show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
-+show_one_plus(level, base.eax.split.level, 0);
-+show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
-+show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
-+show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
-+show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter = i;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ EXPORT_SYMBOL(atomic64_set);
- static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
+ void atomic64_add(long long a, atomic64_t *v)
{
-- return sprintf(buf, "%luK\n", this_leaf->size / 1024);
-+ return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter += a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
}
+ EXPORT_SYMBOL(atomic64_add);
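
[Note on the lib/atomic64.c hunks: this file is the generic fallback for CPUs without native 64-bit atomics, and it protects each atomic64_t with one lock out of a small hashed pool; that pool is what gets turned into raw spinlocks here, since atomic ops must work from any context, including with IRQs off on RT. The hashing works roughly like this sketch (pool size invented; the real code keys on NR_LOCKS and pads each lock to a cache line):

    #include <linux/cache.h>
    #include <linux/spinlock.h>

    #define EX_NR_LOCKS 16                  /* power of two, invented */

    static raw_spinlock_t ex_pool[EX_NR_LOCKS];

    static raw_spinlock_t *ex_lock_for(const void *v)
    {
            unsigned long addr = (unsigned long)v;

            /* drop the offset-in-cacheline bits, then hash */
            addr >>= L1_CACHE_SHIFT;
            return &ex_pool[addr & (EX_NR_LOCKS - 1)];
    }
]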
- static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
-@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
- static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
+ long long atomic64_add_return(long long a, atomic64_t *v)
{
-- switch (this_leaf->eax.split.type) {
-+ switch (this_leaf->base.eax.split.type) {
- case CACHE_TYPE_DATA:
- return sprintf(buf, "Data\n");
- case CACHE_TYPE_INST:
-@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc
-
- ktype_cache.default_attrs = default_attrs;
- #ifdef CONFIG_AMD_NB
-- if (this_leaf->l3)
-+ if (this_leaf->base.nb)
- ktype_cache.default_attrs = amd_l3_attrs();
- #endif
- retval = kobject_init_and_add(&(this_object->kobj),
-Index: linux-2.6/arch/x86/include/asm/amd_nb.h
-===================================================================
---- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
-+++ linux-2.6/arch/x86/include/asm/amd_nb.h
-@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
- extern int amd_get_subcaches(int);
- extern int amd_set_subcaches(int, int);
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
-+struct amd_l3_cache {
-+ unsigned indices;
-+ u8 subcaches[4];
-+};
-+
- struct amd_northbridge {
- struct pci_dev *misc;
- struct pci_dev *link;
-+ struct amd_l3_cache l3_cache;
- };
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter += a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_add_return);
+@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
+ void atomic64_sub(long long a, atomic64_t *v)
+ {
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
- struct amd_northbridge_info {
-Index: linux-2.6/arch/x86/include/asm/irqflags.h
-===================================================================
---- linux-2.6.orig/arch/x86/include/asm/irqflags.h
-+++ linux-2.6/arch/x86/include/asm/irqflags.h
-@@ -60,23 +60,24 @@ static inline void native_halt(void)
- #include <asm/paravirt.h>
- #else
- #ifndef __ASSEMBLY__
-+#include <linux/types.h>
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter -= a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ EXPORT_SYMBOL(atomic64_sub);
--static inline unsigned long arch_local_save_flags(void)
-+static inline notrace unsigned long arch_local_save_flags(void)
+ long long atomic64_sub_return(long long a, atomic64_t *v)
{
- return native_save_fl();
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter -= a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
}
+ EXPORT_SYMBOL(atomic64_sub_return);
+@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
+ long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
--static inline void arch_local_irq_restore(unsigned long flags)
-+static inline notrace void arch_local_irq_restore(unsigned long flags)
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter - 1;
+ if (val >= 0)
+ v->counter = val;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_dec_if_positive);
+@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
+ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
- native_restore_fl(flags);
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val == o)
+ v->counter = n;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
}
+ EXPORT_SYMBOL(atomic64_cmpxchg);
+@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
+ long long atomic64_xchg(atomic64_t *v, long long new)
+ {
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
--static inline void arch_local_irq_disable(void)
-+static inline notrace void arch_local_irq_disable(void)
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ v->counter = new;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_xchg);
+@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
+ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- native_irq_disable();
+ unsigned long flags;
+- spinlock_t *lock = lock_addr(v);
++ raw_spinlock_t *lock = lock_addr(v);
+ int ret = 0;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ if (v->counter != u) {
+ v->counter += a;
+ ret = 1;
+ }
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return ret;
}
+ EXPORT_SYMBOL(atomic64_add_unless);
+@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
+ int i;
--static inline void arch_local_irq_enable(void)
-+static inline notrace void arch_local_irq_enable(void)
- {
- native_irq_enable();
+ for (i = 0; i < NR_LOCKS; ++i)
+- spin_lock_init(&atomic64_lock[i].lock);
++ raw_spin_lock_init(&atomic64_lock[i].lock);
+ return 0;
}
-@@ -102,7 +103,7 @@ static inline void halt(void)
- /*
- * For spinlocks, etc:
- */
--static inline unsigned long arch_local_irq_save(void)
-+static inline notrace unsigned long arch_local_irq_save(void)
- {
- unsigned long flags = arch_local_save_flags();
- arch_local_irq_disable();
-Index: linux-2.6/kernel/signal.c
+
+Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
===================================================================
---- linux-2.6.orig/kernel/signal.c
-+++ linux-2.6/kernel/signal.c
-@@ -300,13 +300,45 @@ static bool task_participate_group_stop(
- return false;
- }
+--- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
+ u32 full;
+ };
-+#ifdef __HAVE_ARCH_CMPXCHG
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ struct sigqueue *q = t->sigqueue_cache;
-+
-+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
-+ return NULL;
-+ return q;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
-+ return 0;
-+ return 1;
-+}
-+
-+#else
-+
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ return NULL;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
+-struct amd_l3_cache {
+- struct amd_northbridge *nb;
+- unsigned indices;
+- u8 subcaches[4];
+-};
+-
+-struct _cpuid4_info {
++struct _cpuid4_info_regs {
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- struct amd_l3_cache *l3;
+- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
++ struct amd_northbridge *nb;
+ };
+
+-/* subset of above _cpuid4_info w/o shared_cpu_map */
+-struct _cpuid4_info_regs {
+- union _cpuid4_leaf_eax eax;
+- union _cpuid4_leaf_ebx ebx;
+- union _cpuid4_leaf_ecx ecx;
+- unsigned long size;
+- struct amd_l3_cache *l3;
++struct _cpuid4_info {
++ struct _cpuid4_info_regs base;
++ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+
+ unsigned short num_cache_leaves;
+@@ -314,12 +303,13 @@ struct _cache_attr {
/*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- * appropriate lock must be held to stop the target task from exiting
+ * L3 cache descriptors
*/
- static struct sigqueue *
--__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
-+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit, int fromslab)
+-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
++static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
- struct sigqueue *q = NULL;
- struct user_struct *user;
-@@ -323,7 +355,10 @@ __sigqueue_alloc(int sig, struct task_st
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
-- q = kmem_cache_alloc(sigqueue_cachep, flags);
-+ if (!fromslab)
-+ q = get_task_cache(t);
-+ if (!q)
-+ q = kmem_cache_alloc(sigqueue_cachep, flags);
- } else {
- print_dropped_signal(sig);
- }
-@@ -340,6 +375,13 @@ __sigqueue_alloc(int sig, struct task_st
- return q;
- }
++ struct amd_l3_cache *l3 = &nb->l3_cache;
+ unsigned int sc0, sc1, sc2, sc3;
+ u32 val = 0;
-+static struct sigqueue *
-+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit)
-+{
-+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-+}
-+
- static void __sigqueue_free(struct sigqueue *q)
- {
- if (q->flags & SIGQUEUE_PREALLOC)
-@@ -349,6 +391,21 @@ static void __sigqueue_free(struct sigqu
- kmem_cache_free(sigqueue_cachep, q);
- }
+- pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
++ pci_read_config_dword(nb->misc, 0x1C4, &val);
-+static void sigqueue_free_current(struct sigqueue *q)
-+{
-+ struct user_struct *up;
-+
-+ if (q->flags & SIGQUEUE_PREALLOC)
-+ return;
-+
-+ up = q->user;
-+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
-+ atomic_dec(&up->sigpending);
-+ free_uid(up);
-+ } else
-+ __sigqueue_free(q);
-+}
-+
- void flush_sigqueue(struct sigpending *queue)
+ /* calculate subcache sizes */
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
+ static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+ int index)
{
- struct sigqueue *q;
-@@ -362,6 +419,21 @@ void flush_sigqueue(struct sigpending *q
+- static struct amd_l3_cache *__cpuinitdata l3_caches;
+ int node;
+
+ /* only for L3, and not in virtualized environments */
+- if (index < 3 || amd_nb_num() == 0)
++ if (index < 3)
+ return;
+
+- /*
+- * Strictly speaking, the amount in @size below is leaked since it is
+- * never freed but this is done only on shutdown so it doesn't matter.
+- */
+- if (!l3_caches) {
+- int size = amd_nb_num() * sizeof(struct amd_l3_cache);
+-
+- l3_caches = kzalloc(size, GFP_ATOMIC);
+- if (!l3_caches)
+- return;
+- }
+-
+ node = amd_get_nb_id(smp_processor_id());
+-
+- if (!l3_caches[node].nb) {
+- l3_caches[node].nb = node_to_amd_nb(node);
+- amd_calc_l3_indices(&l3_caches[node]);
+- }
+-
+- this_leaf->l3 = &l3_caches[node];
++ this_leaf->nb = node_to_amd_nb(node);
++ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
++ amd_calc_l3_indices(this_leaf->nb);
}
/*
-+ * Called from __exit_signal. Flush tsk->pending and
-+ * tsk->sigqueue_cache
-+ */
-+void flush_task_sigqueue(struct task_struct *tsk)
-+{
-+ struct sigqueue *q;
-+
-+ flush_sigqueue(&tsk->pending);
-+
-+ q = get_task_cache(tsk);
-+ if (q)
-+ kmem_cache_free(sigqueue_cachep, q);
-+}
-+
-+/*
- * Flush all pending signals for a task.
+@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
+ *
+ * @returns: the disabled index if used or negative value if slot free.
*/
- void __flush_signals(struct task_struct *t)
-@@ -509,7 +581,7 @@ static void collect_signal(int sig, stru
- still_pending:
- list_del_init(&first->list);
- copy_siginfo(info, &first->info);
-- __sigqueue_free(first);
-+ sigqueue_free_current(first);
- } else {
- /*
- * Ok, it wasn't in the queue. This must be
-@@ -555,6 +627,8 @@ int dequeue_signal(struct task_struct *t
+-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
++int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
- int signr;
+ unsigned int reg = 0;
-+ WARN_ON_ONCE(tsk != current);
-+
- /* We only dequeue private signals from ourselves, we don't let
- * signalfd steal them
- */
-@@ -637,6 +711,9 @@ void signal_wake_up(struct task_struct *
+- pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
++ pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
- set_tsk_thread_flag(t, TIF_SIGPENDING);
+ /* check whether this slot is activated already */
+ if (reg & (3UL << 30))
+@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
+ {
+ int index;
-+ if (unlikely(t == current))
-+ return;
-+
- /*
- * For SIGKILL, we want to wake it up in the stopped/traced/killable
- * case. We don't check t->state here because there is a race with it
-@@ -1179,12 +1256,12 @@ struct sighand_struct *__lock_task_sigha
- struct sighand_struct *sighand;
+- if (!this_leaf->l3 ||
+- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return -EINVAL;
- for (;;) {
-- local_irq_save(*flags);
-+ local_irq_save_nort(*flags);
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- break;
- }
+- index = amd_get_l3_disable_slot(this_leaf->l3, slot);
++ index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
+ if (index >= 0)
+ return sprintf(buf, "%d\n", index);
-@@ -1195,7 +1272,7 @@ struct sighand_struct *__lock_task_sigha
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- }
+@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
+ SHOW_CACHE_DISABLE(0)
+ SHOW_CACHE_DISABLE(1)
- return sighand;
-@@ -1434,7 +1511,8 @@ EXPORT_SYMBOL(kill_pid);
- */
- struct sigqueue *sigqueue_alloc(void)
+-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
++static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
+ unsigned slot, unsigned long idx)
{
-- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
-+ /* Preallocated sigqueue objects always from the slabcache ! */
-+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
-
- if (q)
- q->flags |= SIGQUEUE_PREALLOC;
-@@ -1782,15 +1860,7 @@ static void ptrace_stop(int exit_code, i
- if (gstop_done && !real_parent_is_ptracer(current))
- do_notify_parent_cldstop(current, false, why);
+ int i;
+@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
-- /*
-- * Don't want to allow preemption here, because
-- * sys_ptrace() needs this task to be inactive.
-- *
-- * XXX: implement read_unlock_no_resched().
-- */
-- preempt_disable();
- read_unlock(&tasklist_lock);
-- preempt_enable_no_resched();
- schedule();
- } else {
- /*
-Index: linux-2.6/arch/arm/kernel/perf_event.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/perf_event.c
-+++ linux-2.6/arch/arm/kernel/perf_event.c
-@@ -420,7 +420,7 @@ armpmu_reserve_hardware(void)
+- if (!l3->subcaches[i])
++ if (!nb->l3_cache.subcaches[i])
continue;
- err = request_irq(irq, handle_irq,
-- IRQF_DISABLED | IRQF_NOBALANCING,
-+ IRQF_NOBALANCING | IRQF_NO_THREAD,
- "armpmu", NULL);
- if (err) {
- pr_warning("unable to request IRQ%d for ARM perf "
-Index: linux-2.6/arch/arm/Kconfig
-===================================================================
---- linux-2.6.orig/arch/arm/Kconfig
-+++ linux-2.6/arch/arm/Kconfig
-@@ -29,6 +29,7 @@ config ARM
- select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
- select GENERIC_IRQ_SHOW
-+ select IRQ_FORCED_THREADING
- help
- The ARM series is a line of low-power-consumption RISC chip designs
- licensed by ARM Ltd and targeted at embedded applications and
-@@ -1524,7 +1525,7 @@ config HAVE_ARCH_PFN_VALID
+- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- config HIGHMEM
- bool "High Memory Support"
-- depends on MMU
-+ depends on MMU && !PREEMPT_RT_FULL
- help
- The address space of ARM processors is only 4 Gigabytes large
- and it has to accommodate user address space, kernel address
-Index: linux-2.6/arch/arm/plat-versatile/platsmp.c
-===================================================================
---- linux-2.6.orig/arch/arm/plat-versatile/platsmp.c
-+++ linux-2.6/arch/arm/plat-versatile/platsmp.c
-@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
- }
+ /*
+ * We need to WBINVD on a core on the node containing the L3
+@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct
+ wbinvd_on_cpu(cpu);
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
+ reg |= BIT(31);
+- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ }
+ }
- void __cpuinit platform_secondary_init(unsigned int cpu)
+@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct
+ *
+ * @return: 0 on success, error status on failure
+ */
+-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
++int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
+ unsigned long index)
{
-@@ -57,8 +57,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
+ int ret = 0;
- int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -69,7 +69,7 @@ int __cpuinit boot_secondary(unsigned in
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
+ /* check if @slot is already used or the index is already disabled */
+- ret = amd_get_l3_disable_slot(l3, slot);
++ ret = amd_get_l3_disable_slot(nb, slot);
+ if (ret >= 0)
+ return -EINVAL;
- /*
- * This is really belt and braces; we hold unintended secondary
-@@ -99,7 +99,7 @@ int __cpuinit boot_secondary(unsigned in
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+- if (index > l3->indices)
++ if (index > nb->l3_cache.indices)
+ return -EINVAL;
- return pen_release != -1 ? -ENOSYS : 0;
- }
-Index: linux-2.6/arch/arm/mach-exynos4/platsmp.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-exynos4/platsmp.c
-+++ linux-2.6/arch/arm/mach-exynos4/platsmp.c
-@@ -56,7 +56,7 @@ static void __iomem *scu_base_addr(void)
- return (void __iomem *)(S5P_VA_SCU);
- }
+ /* check whether the other slot has disabled the same index already */
+- if (index == amd_get_l3_disable_slot(l3, !slot))
++ if (index == amd_get_l3_disable_slot(nb, !slot))
+ return -EINVAL;
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
+- amd_l3_disable_index(l3, cpu, slot, index);
++ amd_l3_disable_index(nb, cpu, slot, index);
- void __cpuinit platform_secondary_init(unsigned int cpu)
- {
-@@ -76,8 +76,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ return 0;
}
+@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
- int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -88,7 +88,7 @@ int __cpuinit boot_secondary(unsigned in
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
+- if (!this_leaf->l3 ||
+- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return -EINVAL;
- /*
- * The secondary processor is waiting to be released from
-@@ -120,7 +120,7 @@ int __cpuinit boot_secondary(unsigned in
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
- return pen_release != -1 ? -ENOSYS : 0;
- }
-Index: linux-2.6/arch/arm/mach-msm/platsmp.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-msm/platsmp.c
-+++ linux-2.6/arch/arm/mach-msm/platsmp.c
-@@ -38,7 +38,7 @@ extern void msm_secondary_startup(void);
- */
- volatile int pen_release = -1;
+- err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
++ err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
+ if (err) {
+ if (err == -EEXIST)
+ printk(KERN_WARNING "L3 disable slot %d in use!\n",
+@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
+ static ssize_t
+ show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+ {
+- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
+ return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
- void __cpuinit platform_secondary_init(unsigned int cpu)
- {
-@@ -62,8 +62,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
+- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
- static __cpuinit void prepare_cold_cpu(unsigned int cpu)
-@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned in
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
+ if (strict_strtoul(buf, 16, &val) < 0)
+@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
+ return;
+ }
+ this_leaf = CPUID4_INFO_IDX(cpu, index);
+- num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
++ num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
- /*
- * The secondary processor is waiting to be released from
-@@ -134,7 +134,7 @@ int __cpuinit boot_secondary(unsigned in
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ if (num_threads_sharing == 1)
+ cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
+@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
+ for (i = 0; i < num_cache_leaves; i++)
+ cache_remove_shared_cpu_map(cpu, i);
- return pen_release != -1 ? -ENOSYS : 0;
+- kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
-Index: linux-2.6/arch/arm/mach-omap2/omap-smp.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-omap2/omap-smp.c
-+++ linux-2.6/arch/arm/mach-omap2/omap-smp.c
-@@ -29,7 +29,7 @@
- /* SCU base address */
- static void __iomem *scu_base;
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void __cpuinit platform_secondary_init(unsigned int cpu)
- {
-@@ -43,8 +43,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -53,7 +53,7 @@ int __cpuinit boot_secondary(unsigned in
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return 0;
- }
-Index: linux-2.6/arch/arm/mach-tegra/platsmp.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-tegra/platsmp.c
-+++ linux-2.6/arch/arm/mach-tegra/platsmp.c
-@@ -29,7 +29,7 @@
-
- extern void tegra_secondary_startup(void);
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
-
- #define EVP_CPU_RESET_VECTOR \
-@@ -51,8 +51,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -66,7 +66,7 @@ int __cpuinit boot_secondary(unsigned in
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
+-static int
+-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+-{
+- struct _cpuid4_info_regs *leaf_regs =
+- (struct _cpuid4_info_regs *)this_leaf;
+-
+- return cpuid4_cache_lookup_regs(index, leaf_regs);
+-}
+-
+ static void __cpuinit get_cpu_leaves(void *_retval)
+ {
+ int j, *retval = _retval, cpu = smp_processor_id();
- /* set the reset vector to point to the secondary_startup routine */
-@@ -102,7 +102,7 @@ int __cpuinit boot_secondary(unsigned in
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ /* Do cpuid and store the results */
+ for (j = 0; j < num_cache_leaves; j++) {
+- struct _cpuid4_info *this_leaf;
+- this_leaf = CPUID4_INFO_IDX(cpu, j);
+- *retval = cpuid4_cache_lookup(j, this_leaf);
++ struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
++
++ *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
+ if (unlikely(*retval < 0)) {
+ int i;
- return 0;
- }
-Index: linux-2.6/arch/arm/mach-ux500/platsmp.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-ux500/platsmp.c
-+++ linux-2.6/arch/arm/mach-ux500/platsmp.c
-@@ -57,7 +57,7 @@ static void __iomem *scu_base_addr(void)
- return NULL;
+@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
+ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
+-show_one_plus(level, eax.split.level, 0);
+-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
+-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
+-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
+-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
++show_one_plus(level, base.eax.split.level, 0);
++show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
++show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
++show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
++show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
- void __cpuinit platform_secondary_init(unsigned int cpu)
+ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
{
-@@ -77,8 +77,8 @@ void __cpuinit platform_secondary_init(u
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+- return sprintf(buf, "%luK\n", this_leaf->size / 1024);
++ return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}
- int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -89,7 +89,7 @@ int __cpuinit boot_secondary(unsigned in
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -110,7 +110,7 @@ int __cpuinit boot_secondary(unsigned in
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
+ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
+ {
+- switch (this_leaf->eax.split.type) {
++ switch (this_leaf->base.eax.split.type) {
+ case CACHE_TYPE_DATA:
+ return sprintf(buf, "Data\n");
+ case CACHE_TYPE_INST:
+@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc
- return pen_release != -1 ? -ENOSYS : 0;
- }
-Index: linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+ ktype_cache.default_attrs = default_attrs;
+ #ifdef CONFIG_AMD_NB
+- if (this_leaf->l3)
++ if (this_leaf->base.nb)
+ ktype_cache.default_attrs = amd_l3_attrs();
+ #endif
+ retval = kobject_init_and_add(&(this_object->kobj),
+Index: linux-2.6/arch/x86/include/asm/amd_nb.h
===================================================================
---- linux-2.6.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c
-+++ linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
-@@ -178,7 +178,7 @@ static irqreturn_t mpc85xx_8259_cascade_
+--- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
++++ linux-2.6/arch/x86/include/asm/amd_nb.h
+@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
+ extern int amd_get_subcaches(int);
+ extern int amd_set_subcaches(int, int);
- static struct irqaction mpc85xxcds_8259_irqaction = {
- .handler = mpc85xx_8259_cascade_action,
-- .flags = IRQF_SHARED,
-+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
- .name = "8259 cascade",
++struct amd_l3_cache {
++ unsigned indices;
++ u8 subcaches[4];
++};
++
+ struct amd_northbridge {
+ struct pci_dev *misc;
+ struct pci_dev *link;
++ struct amd_l3_cache l3_cache;
};
- #endif /* PPC_I8259 */
-Index: linux-2.6/arch/powerpc/Kconfig
+
+ struct amd_northbridge_info {
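
[Note on the amd_nb.h change above: it is the pivot of the intel_cacheinfo.c rework. struct amd_l3_cache loses its nb back-pointer and is embedded in struct amd_northbridge, so the per-node descriptor no longer has to be kzalloc()ed at runtime; the old GFP_ATOMIC allocation that could fail or leak simply disappears. Looking up a CPU's L3 descriptor then reduces to something like this sketch (the wrapper function is made up; the calls are the ones the patch uses):

    #include <asm/amd_nb.h>

    /* Illustrative helper: the L3 descriptor now lives inside the
     * northbridge object itself, so lookup cannot fail allocation.
     */
    static struct amd_l3_cache *example_l3_for_cpu(int cpu)
    {
            int node = amd_get_nb_id(cpu);
            struct amd_northbridge *nb = node_to_amd_nb(node);

            return nb ? &nb->l3_cache : NULL;
    }
]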
+Index: linux-2.6/kernel/sched_fair.c
===================================================================
---- linux-2.6.orig/arch/powerpc/Kconfig
-+++ linux-2.6/arch/powerpc/Kconfig
-@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
+--- linux-2.6.orig/kernel/sched_fair.c
++++ linux-2.6/kernel/sched_fair.c
+@@ -1565,7 +1565,7 @@ find_idlest_group(struct sched_domain *s
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpumask_intersects(sched_group_cpus(group),
+- &p->cpus_allowed))
++ tsk_cpus_allowed(p)))
+ continue;
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
+ local_group = cpumask_test_cpu(this_cpu,
+@@ -1611,7 +1611,7 @@ find_idlest_cpu(struct sched_group *grou
+ int i;
- config GENERIC_LOCKBREAK
- bool
-@@ -134,6 +135,7 @@ config PPC
- select GENERIC_IRQ_SHOW_LEVEL
- select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_SYSCALL_TRACEPOINTS
-+ select IRQ_FORCED_THREADING
+ /* Traverse only the allowed CPUs */
+- for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
++ for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+ load = weighted_cpuload(i);
- config EARLY_PRINTK
- bool
-@@ -271,7 +273,7 @@ menu "Kernel options"
+ if (load < min_load || (load == min_load && i == this_cpu)) {
+@@ -1655,7 +1655,7 @@ static int select_idle_sibling(struct ta
+ if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+ break;
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT_FULL
+- for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
++ for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
+ if (idle_cpu(i)) {
+ target = i;
+ break;
+@@ -1698,7 +1698,7 @@ select_task_rq_fair(struct task_struct *
+ int sync = wake_flags & WF_SYNC;
- source kernel/time/Kconfig
- source kernel/Kconfig.hz
-Index: linux-2.6/arch/arm/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/process.c
-+++ linux-2.6/arch/arm/kernel/process.c
-@@ -209,9 +209,7 @@ void cpu_idle(void)
- }
- leds_event(led_idle_end);
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
+ if (sd_flag & SD_BALANCE_WAKE) {
+- if (cpumask_test_cpu(cpu, &p->cpus_allowed))
++ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+ want_affine = 1;
+ new_cpu = prev_cpu;
}
- }
-
-@@ -486,6 +484,31 @@ unsigned long arch_randomize_brk(struct
- }
-
- #ifdef CONFIG_MMU
-+
-+/*
-+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
-+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
-+ * fail.
-+ */
-+static int __init vectors_user_mapping_init_page(void)
-+{
-+ struct page *page;
-+ unsigned long addr = 0xffff0000;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ page = pmd_page(*(pmd));
+@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct
+ * 2) cannot be migrated to this CPU due to cpus_allowed, or
+ * 3) are cache-hot on their current CPU.
+ */
+- if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
++ if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
+ schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+ return 0;
+ }
+@@ -2173,6 +2173,10 @@ balance_tasks(struct rq *this_rq, int th
+ */
+ if (idle == CPU_NEWLY_IDLE)
+ break;
+
-+ pgtable_page_ctor(page);
++ if (raw_spin_is_contended(&this_rq->lock) ||
++ raw_spin_is_contended(&busiest->lock))
++ break;
+ #endif
+
+ /*
+@@ -2277,6 +2281,20 @@ load_balance_fair(struct rq *this_rq, in
+ rem_load_move -= moved_load;
+ if (rem_load_move < 0)
+ break;
+
-+ return 0;
-+}
-+late_initcall(vectors_user_mapping_init_page);
++#ifdef CONFIG_PREEMPT
++ /*
++ * NEWIDLE balancing is a source of latency, so preemptible
++ * kernels will stop after the first task is pulled to minimize
++ * the critical section.
++ */
++ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
++ break;
+
- /*
- * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
-Index: linux-2.6/arch/avr32/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/avr32/kernel/process.c
-+++ linux-2.6/arch/avr32/kernel/process.c
-@@ -38,9 +38,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_idle_sleep();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
++ if (raw_spin_is_contended(&this_rq->lock) ||
++ raw_spin_is_contended(&busiest->lock))
++ break;
++#endif
}
- }
+ rcu_read_unlock();
-Index: linux-2.6/arch/blackfin/kernel/process.c
+@@ -3418,7 +3436,7 @@ redo:
+ * moved to this_cpu
+ */
+ if (!cpumask_test_cpu(this_cpu,
+- &busiest->curr->cpus_allowed)) {
++ tsk_cpus_allowed(busiest->curr))) {
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
+ all_pinned = 1;
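All of the sched_fair.c hunks above make the same substitution: instead of reading p->cpus_allowed directly, the load balancer goes through tsk_cpus_allowed(), so a task inside a migrate_disable() section is treated as pinned to its current CPU. A sketch of how such an accessor can be defined, assuming the 3.0-rt naming; the macro body below is an illustration, not a quote from this patch:

	/* Report a one-CPU mask while migration is disabled. */
	#define tsk_cpus_allowed(p)					\
		(__migrate_disabled(p) ? cpumask_of(task_cpu(p)) :	\
					 &(p)->cpus_allowed)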
+Index: linux-2.6/lib/smp_processor_id.c
===================================================================
---- linux-2.6.orig/arch/blackfin/kernel/process.c
-+++ linux-2.6/arch/blackfin/kernel/process.c
-@@ -92,9 +92,7 @@ void cpu_idle(void)
- while (!need_resched())
- idle();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+--- linux-2.6.orig/lib/smp_processor_id.c
++++ linux-2.6/lib/smp_processor_id.c
+@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor
+ * Kernel threads bound to a single CPU can safely use
+ * smp_processor_id():
+ */
+- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
++ if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
+ goto out;
-Index: linux-2.6/arch/cris/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/cris/kernel/process.c
-+++ linux-2.6/arch/cris/kernel/process.c
-@@ -115,9 +115,7 @@ void cpu_idle (void)
- idle = default_idle;
- idle();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ /*
+@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
+ if (!printk_ratelimit())
+ goto out_enable;
-Index: linux-2.6/arch/frv/kernel/process.c
+- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+- "code: %s/%d\n",
+- preempt_count() - 1, current->comm, current->pid);
++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
++ "code: %s/%d\n", preempt_count() - 1,
++ __migrate_disabled(current), current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
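The second %08x added to the debug message above is the task's migrate-disable depth, which lets the reader of the splat tell a genuine smp_processor_id() misuse apart from a task that is merely migrate-disabled. A hedged sketch of the helper, assuming the MIGRATE_DISABLE_SET_AFFIN masking introduced by the scheduler hunk earlier in this patch:

	/* migrate_disable() nesting depth, without the affinity-updated bit */
	static inline int __migrate_disabled(struct task_struct *p)
	{
	#ifdef CONFIG_PREEMPT_RT_FULL
		return p->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN;
	#else
		return 0;
	#endif
	}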
+Index: linux-2.6/arch/x86/include/asm/irqflags.h
===================================================================
---- linux-2.6.orig/arch/frv/kernel/process.c
-+++ linux-2.6/arch/frv/kernel/process.c
-@@ -92,9 +92,7 @@ void cpu_idle(void)
- idle();
- }
+--- linux-2.6.orig/arch/x86/include/asm/irqflags.h
++++ linux-2.6/arch/x86/include/asm/irqflags.h
+@@ -60,23 +60,24 @@ static inline void native_halt(void)
+ #include <asm/paravirt.h>
+ #else
+ #ifndef __ASSEMBLY__
++#include <linux/types.h>
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+-static inline unsigned long arch_local_save_flags(void)
++static inline notrace unsigned long arch_local_save_flags(void)
+ {
+ return native_save_fl();
}
-Index: linux-2.6/arch/h8300/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/h8300/kernel/process.c
-+++ linux-2.6/arch/h8300/kernel/process.c
-@@ -81,9 +81,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+-static inline void arch_local_irq_restore(unsigned long flags)
++static inline notrace void arch_local_irq_restore(unsigned long flags)
+ {
+ native_restore_fl(flags);
}
-Index: linux-2.6/arch/ia64/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/ia64/kernel/process.c
-+++ linux-2.6/arch/ia64/kernel/process.c
-@@ -330,9 +330,7 @@ cpu_idle (void)
- normal_xtp();
- #endif
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- if (cpu_is_offline(cpu))
- play_dead();
-Index: linux-2.6/arch/m32r/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/m32r/kernel/process.c
-+++ linux-2.6/arch/m32r/kernel/process.c
-@@ -90,9 +90,7 @@ void cpu_idle (void)
-
- idle();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+-static inline void arch_local_irq_disable(void)
++static inline notrace void arch_local_irq_disable(void)
+ {
+ native_irq_disable();
}
-Index: linux-2.6/arch/m68k/kernel/process_mm.c
-===================================================================
---- linux-2.6.orig/arch/m68k/kernel/process_mm.c
-+++ linux-2.6/arch/m68k/kernel/process_mm.c
-@@ -94,9 +94,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+-static inline void arch_local_irq_enable(void)
++static inline notrace void arch_local_irq_enable(void)
+ {
+ native_irq_enable();
}
-
-Index: linux-2.6/arch/m68k/kernel/process_no.c
+@@ -102,7 +103,7 @@ static inline void halt(void)
+ /*
+ * For spinlocks, etc:
+ */
+-static inline unsigned long arch_local_irq_save(void)
++static inline notrace unsigned long arch_local_irq_save(void)
+ {
+ unsigned long flags = arch_local_save_flags();
+ arch_local_irq_disable();
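The notrace annotations keep the function tracer out of these IRQ-flag primitives: the tracer itself manipulates interrupt state, so instrumenting arch_local_irq_save() and friends would recurse. For reference, notrace is the kernel's alias for GCC's no-instrument attribute (include/linux/compiler.h in this era), roughly:

	/* never insert profiling/tracing entry hooks into this function */
	#define notrace __attribute__((no_instrument_function))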
+Index: linux-2.6/kernel/signal.c
===================================================================
---- linux-2.6.orig/arch/m68k/kernel/process_no.c
-+++ linux-2.6/arch/m68k/kernel/process_no.c
-@@ -73,9 +73,7 @@ void cpu_idle(void)
- /* endless idle loop with no priority at all */
- while (1) {
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+--- linux-2.6.orig/kernel/signal.c
++++ linux-2.6/kernel/signal.c
+@@ -300,13 +300,45 @@ static bool task_participate_group_stop(
+ return false;
}
-Index: linux-2.6/arch/microblaze/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/microblaze/kernel/process.c
-+++ linux-2.6/arch/microblaze/kernel/process.c
-@@ -108,9 +108,7 @@ void cpu_idle(void)
- idle();
- tick_nohz_restart_sched_tick();
-
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-Index: linux-2.6/arch/mips/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/mips/kernel/process.c
-+++ linux-2.6/arch/mips/kernel/process.c
-@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void)
- play_dead();
- #endif
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
++#ifdef __HAVE_ARCH_CMPXCHG
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++ return NULL;
++ return q;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ return NULL;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ return 1;
++}
++
++#endif
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, int fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -323,7 +355,10 @@ __sigqueue_alloc(int sig, struct task_st
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = get_task_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
}
+@@ -340,6 +375,13 @@ __sigqueue_alloc(int sig, struct task_st
+ return q;
}
-Index: linux-2.6/arch/mn10300/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/mn10300/kernel/process.c
-+++ linux-2.6/arch/mn10300/kernel/process.c
-@@ -123,9 +123,7 @@ void cpu_idle(void)
- idle();
- }
-
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -349,6 +391,21 @@ static void __sigqueue_free(struct sigqu
+ kmem_cache_free(sigqueue_cachep, q);
}
-Index: linux-2.6/arch/parisc/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/parisc/kernel/process.c
-+++ linux-2.6/arch/parisc/kernel/process.c
-@@ -71,9 +71,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- barrier();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-Index: linux-2.6/arch/powerpc/kernel/idle.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/kernel/idle.c
-+++ linux-2.6/arch/powerpc/kernel/idle.c
-@@ -94,11 +94,11 @@ void cpu_idle(void)
- HMT_medium();
- ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- if (cpu_should_die())
-+ if (cpu_should_die()) {
-+ __preempt_enable_no_resched();
- cpu_die();
-- schedule();
-- preempt_disable();
-+ }
-+ schedule_preempt_disabled();
- }
++static void sigqueue_free_current(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++ atomic_dec(&up->sigpending);
++ free_uid(up);
++ } else
++ __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
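The cmpxchg() pair above forms a one-slot, lock-free per-task cache: get_task_cache() only returns the cached sigqueue if it wins the race to swap the slot to NULL, and put_task_cache() only parks an entry if the slot is currently empty. A hedged sketch of the free-side contract (the function name is illustrative; the real sigqueue_free_current() above also keeps the sigpending accounting when it parks an entry):

	/* Park q in the one-entry cache, or hand it back to the slab. */
	static void example_recycle(struct task_struct *t, struct sigqueue *q)
	{
		if (put_task_cache(t, q))	/* non-zero: slot was occupied */
			kmem_cache_free(sigqueue_cachep, q);
	}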
+@@ -362,6 +419,21 @@ void flush_sigqueue(struct sigpending *q
}
-Index: linux-2.6/arch/powerpc/platforms/iseries/setup.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/platforms/iseries/setup.c
-+++ linux-2.6/arch/powerpc/platforms/iseries/setup.c
-@@ -581,9 +581,7 @@ static void iseries_shared_idle(void)
- if (hvlpevent_is_pending())
- process_iSeries_events();
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = get_task_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+ * Flush all pending signals for a task.
+ */
+ void __flush_signals(struct task_struct *t)
+@@ -509,7 +581,7 @@ static void collect_signal(int sig, stru
+ still_pending:
+ list_del_init(&first->list);
+ copy_siginfo(info, &first->info);
+- __sigqueue_free(first);
++ sigqueue_free_current(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -555,6 +627,8 @@ int dequeue_signal(struct task_struct *t
+ {
+ int signr;
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -637,6 +711,9 @@ void signal_wake_up(struct task_struct *
-@@ -610,9 +608,7 @@ static void iseries_dedicated_idle(void)
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
- ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
++ if (unlikely(t == current))
++ return;
++
+ /*
+ * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+@@ -1179,12 +1256,12 @@ struct sighand_struct *__lock_task_sigha
+ struct sighand_struct *sighand;
-Index: linux-2.6/arch/s390/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/s390/kernel/process.c
-+++ linux-2.6/arch/s390/kernel/process.c
-@@ -94,9 +94,7 @@ void cpu_idle(void)
- while (!need_resched())
- default_idle();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
+ for (;;) {
+- local_irq_save(*flags);
++ local_irq_save_nort(*flags);
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ break;
+ }
+
+@@ -1195,7 +1272,7 @@ struct sighand_struct *__lock_task_sigha
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
}
- }
-Index: linux-2.6/arch/score/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/score/kernel/process.c
-+++ linux-2.6/arch/score/kernel/process.c
-@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void)
- while (!need_resched())
- barrier();
+ return sighand;
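The _nort variants used in __lock_task_sighand() behave like the plain primitives on mainline but avoid hard-disabling interrupts on PREEMPT_RT_FULL, where siglock is a sleeping lock and an irqs-off section around it would be illegal. A sketch of the expected definitions, following the rt series' usual pattern (illustrative, not quoted from this patch):

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif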
+@@ -1434,7 +1511,8 @@ EXPORT_SYMBOL(kill_pid);
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ /* Preallocated sigqueue objects always come from the slab cache! */
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
-- preempt_enable_no_resched();
-- schedule();
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+@@ -1782,15 +1860,7 @@ static void ptrace_stop(int exit_code, i
+ if (gstop_done && !real_parent_is_ptracer(current))
+ do_notify_parent_cldstop(current, false, why);
+
+- /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+- */
- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ schedule();
+ } else {
+ /*
+Index: linux-2.6/arch/arm/kernel/perf_event.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/perf_event.c
++++ linux-2.6/arch/arm/kernel/perf_event.c
+@@ -420,7 +420,7 @@ armpmu_reserve_hardware(void)
+ continue;
-Index: linux-2.6/arch/sh/kernel/idle.c
+ err = request_irq(irq, handle_irq,
+- IRQF_DISABLED | IRQF_NOBALANCING,
++ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ "armpmu", NULL);
+ if (err) {
+ pr_warning("unable to request IRQ%d for ARM perf "
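With IRQ_FORCED_THREADING selected for ARM just below, interrupt handlers are run in threads by default; IRQF_NO_THREAD opts the PMU interrupt out, since perf sampling must execute in hard interrupt context. A hedged sketch of the general pattern (the handler and the device name are placeholders):

	#include <linux/interrupt.h>

	static irqreturn_t my_hardirq_handler(int irq, void *dev_id)
	{
		/* runs in hard interrupt context even with forced threading */
		return IRQ_HANDLED;
	}

	static int example_request(unsigned int irq)
	{
		return request_irq(irq, my_hardirq_handler,
				   IRQF_NOBALANCING | IRQF_NO_THREAD,
				   "example-pmu", NULL);
	}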
+Index: linux-2.6/arch/arm/Kconfig
===================================================================
---- linux-2.6.orig/arch/sh/kernel/idle.c
-+++ linux-2.6/arch/sh/kernel/idle.c
-@@ -110,9 +110,7 @@ void cpu_idle(void)
- }
+--- linux-2.6.orig/arch/arm/Kconfig
++++ linux-2.6/arch/arm/Kconfig
+@@ -29,6 +29,7 @@ config ARM
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_SPARSE_IRQ
+ select GENERIC_IRQ_SHOW
++ select IRQ_FORCED_THREADING
+ help
+ The ARM series is a line of low-power-consumption RISC chip designs
+ licensed by ARM Ltd and targeted at embedded applications and
+@@ -1524,7 +1525,7 @@ config HAVE_ARCH_PFN_VALID
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on MMU
++ depends on MMU && !PREEMPT_RT_FULL
+ help
+ The address space of ARM processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+Index: linux-2.6/arch/arm/plat-versatile/platsmp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/plat-versatile/platsmp.c
++++ linux-2.6/arch/arm/plat-versatile/platsmp.c
+@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
-Index: linux-2.6/arch/sparc/kernel/process_32.c
-===================================================================
---- linux-2.6.orig/arch/sparc/kernel/process_32.c
-+++ linux-2.6/arch/sparc/kernel/process_32.c
-@@ -113,9 +113,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_relax();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -57,8 +57,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
}
-@@ -138,9 +136,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_relax();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -69,7 +69,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -99,7 +99,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
}
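Every platsmp.c conversion in this patch follows the same rule: on PREEMPT_RT_FULL a spinlock_t is backed by a sleeping rtmutex, but boot_lock is taken while a secondary core is being released with interrupts off and nothing to schedule to, so it has to stay a genuinely spinning lock. A minimal sketch of the raw API these hunks switch to (the lock name is hypothetical):

	static DEFINE_RAW_SPINLOCK(example_boot_lock);	/* hypothetical */

	static void example_sync_with_secondary(void)
	{
		raw_spin_lock(&example_boot_lock);	/* busy-waits, never sleeps */
		/* pen_release-style handshake would go here */
		raw_spin_unlock(&example_boot_lock);
	}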
-Index: linux-2.6/arch/sparc/kernel/process_64.c
+Index: linux-2.6/arch/arm/mach-exynos4/platsmp.c
===================================================================
---- linux-2.6.orig/arch/sparc/kernel/process_64.c
-+++ linux-2.6/arch/sparc/kernel/process_64.c
-@@ -102,15 +102,13 @@ void cpu_idle(void)
+--- linux-2.6.orig/arch/arm/mach-exynos4/platsmp.c
++++ linux-2.6/arch/arm/mach-exynos4/platsmp.c
+@@ -56,7 +56,7 @@ static void __iomem *scu_base_addr(void)
+ return (void __iomem *)(S5P_VA_SCU);
+ }
- tick_nohz_restart_sched_tick();
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
-- preempt_enable_no_resched();
--
- #ifdef CONFIG_HOTPLUG_CPU
-- if (cpu_is_offline(cpu))
-+ if (cpu_is_offline(cpu)) {
-+ __preempt_enable_no_resched();
- cpu_play_dead();
-+ }
- #endif
--
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -76,8 +76,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
}
-Index: linux-2.6/arch/tile/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/tile/kernel/process.c
-+++ linux-2.6/arch/tile/kernel/process.c
-@@ -106,9 +106,7 @@ void cpu_idle(void)
- current_thread_info()->status |= TS_POLLING;
- }
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -88,7 +88,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
-Index: linux-2.6/arch/x86/kernel/process_32.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/process_32.c
-+++ linux-2.6/arch/x86/kernel/process_32.c
-@@ -38,6 +38,7 @@
- #include <linux/uaccess.h>
- #include <linux/io.h>
- #include <linux/kdebug.h>
-+#include <linux/highmem.h>
+ /*
+ * The secondary processor is waiting to be released from
+@@ -120,7 +120,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
- #include <asm/pgtable.h>
- #include <asm/system.h>
-@@ -113,9 +114,7 @@ void cpu_idle(void)
- start_critical_timings();
- }
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+ return pen_release != -1 ? -ENOSYS : 0;
}
+Index: linux-2.6/arch/arm/mach-msm/platsmp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-msm/platsmp.c
++++ linux-2.6/arch/arm/mach-msm/platsmp.c
+@@ -38,7 +38,7 @@ extern void msm_secondary_startup(void);
+ */
+ volatile int pen_release = -1;
-@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p,
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+ /*
-+ * Save @prev's kmap_atomic stack
-+ */
-+ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
-+ if (unlikely(prev_p->kmap_idx)) {
-+ int i;
-+
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ pte_t *ptep = kmap_pte - idx;
-+ prev_p->kmap_pte[i] = *ptep;
-+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+ }
-+
-+ __this_cpu_write(__kmap_atomic_idx, 0);
-+ }
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -62,8 +62,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned in
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -134,7 +134,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-2.6/arch/arm/mach-omap2/omap-smp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-omap2/omap-smp.c
++++ linux-2.6/arch/arm/mach-omap2/omap-smp.c
+@@ -29,7 +29,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -53,7 +53,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+Index: linux-2.6/arch/arm/mach-tegra/platsmp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-tegra/platsmp.c
++++ linux-2.6/arch/arm/mach-tegra/platsmp.c
+@@ -29,7 +29,7 @@
+
+ extern void tegra_secondary_startup(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
+
+ #define EVP_CPU_RESET_VECTOR \
+@@ -51,8 +51,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -66,7 +66,7 @@ int __cpuinit boot_secondary(unsigned in
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+
+ /* set the reset vector to point to the secondary_startup routine */
+@@ -102,7 +102,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+Index: linux-2.6/arch/arm/mach-ux500/platsmp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-ux500/platsmp.c
++++ linux-2.6/arch/arm/mach-ux500/platsmp.c
+@@ -57,7 +57,7 @@ static void __iomem *scu_base_addr(void)
+ return NULL;
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -77,8 +77,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@ int __cpuinit boot_secondary(unsigned in
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -110,7 +110,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-2.6/arch/arm/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/process.c
++++ linux-2.6/arch/arm/kernel/process.c
+@@ -209,9 +209,7 @@ void cpu_idle(void)
+ }
+ leds_event(led_idle_end);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
+
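The cpu_idle() hunk above, and the dozens like it across the other architectures in this patch, fold the open-coded enable/schedule/disable triple into schedule_preempt_disabled(), which enters and leaves schedule() with preemption disabled. A sketch of the helper the rt series adds in kernel/sched.c, approximately:

	/* Call schedule() with preemption disabled on entry and on return. */
	void __sched schedule_preempt_disabled(void)
	{
		__preempt_enable_no_resched();	/* drop the count, skip the resched check */
		schedule();
		preempt_disable();
	}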
+@@ -486,6 +484,31 @@ unsigned long arch_randomize_brk(struct
+ }
+
+ #ifdef CONFIG_MMU
+
-+ /*
-+ * Restore @next_p's kmap_atomic stack
-+ */
-+ if (unlikely(next_p->kmap_idx)) {
-+ int i;
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
+
-+ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
+
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
++ pgtable_page_ctor(page);
+
-+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+ }
-+ }
-+#endif
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
+
- /* If we're going to preload the fpu context, make sure clts
- is run while we're batching the cpu state updates. */
- if (preload_fpu)
-Index: linux-2.6/arch/x86/kernel/process_64.c
+ /*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+Index: linux-2.6/arch/avr32/kernel/process.c
===================================================================
---- linux-2.6.orig/arch/x86/kernel/process_64.c
-+++ linux-2.6/arch/x86/kernel/process_64.c
-@@ -146,9 +146,7 @@ void cpu_idle(void)
- }
-
+--- linux-2.6.orig/arch/avr32/kernel/process.c
++++ linux-2.6/arch/avr32/kernel/process.c
+@@ -38,9 +38,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_idle_sleep();
tick_nohz_restart_sched_tick();
- preempt_enable_no_resched();
- schedule();
@@ -10598,14 +10672,14 @@
}
}
-Index: linux-2.6/arch/xtensa/kernel/process.c
+Index: linux-2.6/arch/blackfin/kernel/process.c
===================================================================
---- linux-2.6.orig/arch/xtensa/kernel/process.c
-+++ linux-2.6/arch/xtensa/kernel/process.c
-@@ -113,9 +113,7 @@ void cpu_idle(void)
- while (1) {
+--- linux-2.6.orig/arch/blackfin/kernel/process.c
++++ linux-2.6/arch/blackfin/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
while (!need_resched())
- platform_idle();
+ idle();
+ tick_nohz_restart_sched_tick();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
@@ -10613,3078 +10687,3023 @@
}
}
-Index: linux-2.6/init/main.c
+Index: linux-2.6/arch/cris/kernel/process.c
===================================================================
---- linux-2.6.orig/init/main.c
-+++ linux-2.6/init/main.c
-@@ -68,6 +68,7 @@
- #include <linux/shmem_fs.h>
- #include <linux/slab.h>
- #include <linux/perf_event.h>
-+#include <linux/posix-timers.h>
-
- #include <asm/io.h>
- #include <asm/bugs.h>
-@@ -367,9 +368,7 @@ static noinline void __init_refok rest_i
- * at least once to get things moving:
- */
- init_idle_bootup_task(current);
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
+--- linux-2.6.orig/arch/cris/kernel/process.c
++++ linux-2.6/arch/cris/kernel/process.c
+@@ -115,9 +115,7 @@ void cpu_idle (void)
+ idle = default_idle;
+ idle();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- /* Call into cpu_idle with preempt disabled */
- cpu_idle();
-@@ -501,6 +500,7 @@ asmlinkage void __init start_kernel(void
- parse_args("Booting kernel", static_command_line, __start___param,
- __stop___param - __start___param,
- &unknown_bootoption);
-+ softirq_early_init();
- /*
- * These use large bootmem allocations and must precede
- * kmem_cache_init()
-Index: linux-2.6/kernel/mutex.c
+Index: linux-2.6/arch/frv/kernel/process.c
===================================================================
---- linux-2.6.orig/kernel/mutex.c
-+++ linux-2.6/kernel/mutex.c
-@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock,
+--- linux-2.6.orig/arch/frv/kernel/process.c
++++ linux-2.6/arch/frv/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
+ idle();
+ }
- /* didn't get the lock, go to sleep: */
- spin_unlock_mutex(&lock->wait_lock, flags);
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
+ schedule_preempt_disabled();
- spin_lock_mutex(&lock->wait_lock, flags);
}
+ }
-Index: linux-2.6/kernel/softirq.c
+Index: linux-2.6/arch/h8300/kernel/process.c
===================================================================
---- linux-2.6.orig/kernel/softirq.c
-+++ linux-2.6/kernel/softirq.c
-@@ -24,6 +24,7 @@
- #include <linux/ftrace.h>
- #include <linux/smp.h>
- #include <linux/tick.h>
-+#include <linux/locallock.h>
+--- linux-2.6.orig/arch/h8300/kernel/process.c
++++ linux-2.6/arch/h8300/kernel/process.c
+@@ -81,9 +81,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- #define CREATE_TRACE_POINTS
- #include <trace/events/irq.h>
-@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
- "TASKLET", "SCHED", "HRTIMER", "RCU"
- };
+Index: linux-2.6/arch/ia64/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/process.c
++++ linux-2.6/arch/ia64/kernel/process.c
+@@ -330,9 +330,7 @@ cpu_idle (void)
+ normal_xtp();
+ #endif
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ if (cpu_is_offline(cpu))
+ play_dead();
+Index: linux-2.6/arch/m32r/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/m32r/kernel/process.c
++++ linux-2.6/arch/m32r/kernel/process.c
+@@ -90,9 +90,7 @@ void cpu_idle (void)
-+#ifdef CONFIG_NO_HZ
-+# ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * On preempt-rt a softirq might be blocked on a lock. There might be
-+ * no other runnable task on this CPU because the lock owner runs on
-+ * some other CPU. So we have to go into idle with the pending bit
-+ * set. Therefore we need to check this, otherwise we warn about false
-+ * positives, which confuses users and defeats the whole purpose of
-+ * this test.
-+ *
-+ * This code is called with interrupts disabled.
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+ u32 warnpending = 0, pending = local_softirq_pending();
-+
-+ if (rate_limit >= 10)
-+ return;
-+
-+ if (pending) {
-+ struct task_struct *tsk;
-+
-+ tsk = __get_cpu_var(ksoftirqd);
-+ /*
-+ * The wakeup code in rtmutex.c wakes up the task
-+ * _before_ it sets pi_blocked_on to NULL under
-+ * tsk->pi_lock. So we need to check for both: state
-+ * and pi_blocked_on.
-+ */
-+ raw_spin_lock(&tsk->pi_lock);
-+
-+ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
-+ warnpending = 1;
-+
-+ raw_spin_unlock(&tsk->pi_lock);
-+ }
-+
-+ if (warnpending) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ pending);
-+ rate_limit++;
-+ }
-+}
-+# else
-+/*
-+ * On !PREEMPT_RT we just printk rate limited:
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+
-+ if (rate_limit < 10) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ local_softirq_pending());
-+ rate_limit++;
-+ }
-+}
-+# endif
-+#endif
-+
- /*
- * we cannot loop indefinitely here to avoid userspace starvation,
- * but we also don't want to introduce a worst case 1/HZ latency
-@@ -76,6 +138,35 @@ static void wakeup_softirqd(void)
- wake_up_process(tsk);
+ idle();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
-+static void handle_pending_softirqs(u32 pending, int cpu)
-+{
-+ struct softirq_action *h = softirq_vec;
-+ unsigned int prev_count = preempt_count();
-+
-+ local_irq_enable();
-+ for ( ; pending; h++, pending >>= 1) {
-+ unsigned int vec_nr = h - softirq_vec;
-+
-+ if (!(pending & 1))
-+ continue;
-+
-+ kstat_incr_softirqs_this_cpu(vec_nr);
-+ trace_softirq_entry(vec_nr);
-+ h->action(h);
-+ trace_softirq_exit(vec_nr);
-+ if (unlikely(prev_count != preempt_count())) {
-+ printk(KERN_ERR
-+ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
-+ vec_nr, softirq_to_name[vec_nr], h->action,
-+ prev_count, (unsigned int) preempt_count());
-+ preempt_count() = prev_count;
-+ }
-+ rcu_bh_qs(cpu);
-+ }
-+ local_irq_disable();
-+}
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -206,7 +297,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
-
- asmlinkage void __do_softirq(void)
- {
-- struct softirq_action *h;
- __u32 pending;
- int max_restart = MAX_SOFTIRQ_RESTART;
- int cpu;
-@@ -215,7 +305,7 @@ asmlinkage void __do_softirq(void)
- account_system_vtime(current);
+Index: linux-2.6/arch/m68k/kernel/process_mm.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/kernel/process_mm.c
++++ linux-2.6/arch/m68k/kernel/process_mm.c
+@@ -94,9 +94,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- __local_bh_disable((unsigned long)__builtin_return_address(0),
-- SOFTIRQ_OFFSET);
-+ SOFTIRQ_OFFSET);
- lockdep_softirq_enter();
+Index: linux-2.6/arch/m68k/kernel/process_no.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/kernel/process_no.c
++++ linux-2.6/arch/m68k/kernel/process_no.c
+@@ -73,9 +73,7 @@ void cpu_idle(void)
+ /* endless idle loop with no priority at all */
+ while (1) {
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- cpu = smp_processor_id();
-@@ -223,36 +313,7 @@ restart:
- /* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
+Index: linux-2.6/arch/microblaze/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/microblaze/kernel/process.c
++++ linux-2.6/arch/microblaze/kernel/process.c
+@@ -108,9 +108,7 @@ void cpu_idle(void)
+ idle();
+ tick_nohz_restart_sched_tick();
-- local_irq_enable();
--
-- h = softirq_vec;
--
-- do {
-- if (pending & 1) {
-- unsigned int vec_nr = h - softirq_vec;
-- int prev_count = preempt_count();
--
-- kstat_incr_softirqs_this_cpu(vec_nr);
--
-- trace_softirq_entry(vec_nr);
-- h->action(h);
-- trace_softirq_exit(vec_nr);
-- if (unlikely(prev_count != preempt_count())) {
-- printk(KERN_ERR "huh, entered softirq %u %s %p"
-- "with preempt_count %08x,"
-- " exited with %08x?\n", vec_nr,
-- softirq_to_name[vec_nr], h->action,
-- prev_count, preempt_count());
-- preempt_count() = prev_count;
-- }
--
-- rcu_bh_qs(cpu);
-- }
-- h++;
-- pending >>= 1;
-- } while (pending);
--
-- local_irq_disable();
-+ handle_pending_softirqs(pending, cpu);
-
- pending = local_softirq_pending();
- if (pending && --max_restart)
-@@ -267,6 +328,26 @@ restart:
- __local_bh_enable(SOFTIRQ_OFFSET);
- }
-
-+/*
-+ * Called with preemption disabled from run_ksoftirqd()
-+ */
-+static int ksoftirqd_do_softirq(int cpu)
-+{
-+ /*
-+ * Preempt disable stops cpu going offline.
-+ * If already offline, we'll be on wrong CPU:
-+ * don't process.
-+ */
-+ if (cpu_is_offline(cpu))
-+ return -1;
-+
-+ local_irq_disable();
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_irq_enable();
-+ return 0;
-+}
-+
- #ifndef __ARCH_HAS_DO_SOFTIRQ
-
- asmlinkage void do_softirq(void)
-@@ -289,6 +370,178 @@ asmlinkage void do_softirq(void)
-
- #endif
-
-+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-+static inline void ksoftirqd_set_sched_params(void) { }
-+static inline void ksoftirqd_clr_sched_params(void) { }
-+
-+#else /* !PREEMPT_RT_FULL */
-+
-+/*
-+ * On RT we serialize softirq execution with a cpu local lock
-+ */
-+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
-+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
-+
-+static void __do_softirq(void);
-+
-+void __init softirq_early_init(void)
-+{
-+ local_irq_lock_init(local_softirq_lock);
-+}
-+
-+void local_bh_disable(void)
-+{
-+ migrate_disable();
-+ current->softirq_nestcnt++;
-+}
-+EXPORT_SYMBOL(local_bh_disable);
-+
-+void local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
-+
-+ if ((current->softirq_nestcnt == 1) &&
-+ local_softirq_pending() &&
-+ local_trylock(local_softirq_lock)) {
-+
-+ local_irq_disable();
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_unlock(local_softirq_lock);
-+ local_irq_enable();
-+ WARN_ON(current->softirq_nestcnt != 1);
-+ }
-+ current->softirq_nestcnt--;
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable);
-+
-+void local_bh_enable_ip(unsigned long ip)
-+{
-+ local_bh_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable_ip);
-+
-+/* For tracing */
-+int notrace __in_softirq(void)
-+{
-+ if (__get_cpu_var(local_softirq_lock).owner == current)
-+ return __get_cpu_var(local_softirq_lock).nestcnt;
-+ return 0;
-+}
-+
-+int in_serving_softirq(void)
-+{
-+ int res;
-+
-+ preempt_disable();
-+ res = __get_cpu_var(local_softirq_runner) == current;
-+ preempt_enable();
-+ return res;
-+}
-+
-+/*
-+ * Called with bh and local interrupts disabled. For full RT cpu must
-+ * be pinned.
-+ */
-+static void __do_softirq(void)
-+{
-+ u32 pending = local_softirq_pending();
-+ int cpu = smp_processor_id();
-+
-+ current->softirq_nestcnt++;
-+
-+ /* Reset the pending bitmask before enabling irqs */
-+ set_softirq_pending(0);
-+
-+ __get_cpu_var(local_softirq_runner) = current;
-+
-+ lockdep_softirq_enter();
-+
-+ handle_pending_softirqs(pending, cpu);
-+
-+ pending = local_softirq_pending();
-+ if (pending)
-+ wakeup_softirqd();
-+
-+ lockdep_softirq_exit();
-+ __get_cpu_var(local_softirq_runner) = NULL;
-+
-+ current->softirq_nestcnt--;
-+}
-+
-+static int __thread_do_softirq(int cpu)
-+{
-+ /*
-+ * Prevent the current cpu from going offline.
-+ * pin_current_cpu() can reenable preemption and block on the
-+ * hotplug mutex. When it returns, the current cpu is
-+ * pinned. It might be the wrong one, but the offline check
-+ * below catches that.
-+ */
-+ pin_current_cpu();
-+ /*
-+ * If called from ksoftirqd (cpu >= 0) we need to check
-+ * whether we are on the wrong cpu due to cpu offlining. If
-+ * called via thread_do_softirq() no action required.
-+ */
-+ if (cpu >= 0 && cpu_is_offline(cpu)) {
-+ unpin_current_cpu();
-+ return -1;
-+ }
-+ preempt_enable();
-+ local_lock(local_softirq_lock);
-+ local_irq_disable();
-+ /*
-+ * We cannot switch stacks on RT as we want to be able to
-+ * schedule!
-+ */
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_unlock(local_softirq_lock);
-+ unpin_current_cpu();
-+ preempt_disable();
-+ local_irq_enable();
-+ return 0;
-+}
-+
-+/*
-+ * Called from netif_rx_ni(). Preemption enabled.
-+ */
-+void thread_do_softirq(void)
-+{
-+ if (!in_serving_softirq()) {
-+ preempt_disable();
-+ __thread_do_softirq(-1);
-+ preempt_enable();
-+ }
-+}
-+
-+static int ksoftirqd_do_softirq(int cpu)
-+{
-+ return __thread_do_softirq(cpu);
-+}
-+
-+static inline void local_bh_disable_nort(void) { }
-+static inline void _local_bh_enable_nort(void) { }
-+
-+static inline void ksoftirqd_set_sched_params(void)
-+{
-+ struct sched_param param = { .sched_priority = 1 };
-+
-+ sched_setscheduler(current, SCHED_FIFO, &param);
-+}
-+
-+static inline void ksoftirqd_clr_sched_params(void)
-+{
-+ struct sched_param param = { .sched_priority = 0 };
-+
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
-+}
-+
-+#endif /* PREEMPT_RT_FULL */
- /*
- * Enter an interrupt context.
- */
-@@ -302,9 +555,9 @@ void irq_enter(void)
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
-- local_bh_disable();
-+ local_bh_disable_nort();
- tick_check_idle(cpu);
-- _local_bh_enable();
-+ _local_bh_enable_nort();
- }
-
- __irq_enter();
-@@ -313,6 +566,7 @@ void irq_enter(void)
- #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
- static inline void invoke_softirq(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads)
- __do_softirq();
- else {
-@@ -321,10 +575,14 @@ static inline void invoke_softirq(void)
- wakeup_softirqd();
- __local_bh_enable(SOFTIRQ_OFFSET);
- }
-+#else
-+ wakeup_softirqd();
-+#endif
- }
- #else
- static inline void invoke_softirq(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads)
- do_softirq();
- else {
-@@ -333,6 +591,9 @@ static inline void invoke_softirq(void)
- wakeup_softirqd();
- __local_bh_enable(SOFTIRQ_OFFSET);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
}
-+#else
-+ wakeup_softirqd();
-+#endif
}
- #endif
-
-@@ -353,7 +614,7 @@ void irq_exit(void)
- if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
- tick_nohz_stop_sched_tick(0);
- #endif
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
- }
-
- /*
-@@ -739,29 +1000,21 @@ void __init softirq_init(void)
-
- static int run_ksoftirqd(void * __bind_cpu)
- {
-+ ksoftirqd_set_sched_params();
-+
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!kthread_should_stop()) {
- preempt_disable();
-- if (!local_softirq_pending()) {
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-- }
-+ if (!local_softirq_pending())
-+ schedule_preempt_disabled();
-
- __set_current_state(TASK_RUNNING);
-
- while (local_softirq_pending()) {
-- /* Preempt disable stops cpu going offline.
-- If already offline, we'll be on wrong CPU:
-- don't process */
-- if (cpu_is_offline((long)__bind_cpu))
-+ if (ksoftirqd_do_softirq((long) __bind_cpu))
- goto wait_to_die;
-- local_irq_disable();
-- if (local_softirq_pending())
-- __do_softirq();
-- local_irq_enable();
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
- cond_resched();
- preempt_disable();
- rcu_note_context_switch((long)__bind_cpu);
-@@ -774,6 +1027,7 @@ static int run_ksoftirqd(void * __bind_c
-
- wait_to_die:
- preempt_enable();
-+ ksoftirqd_clr_sched_params();
- /* Wait for kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
-@@ -850,9 +1104,8 @@ static int __cpuinit cpu_callback(struct
- int hotcpu = (unsigned long)hcpu;
- struct task_struct *p;
-
-- switch (action) {
-+ switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
-- case CPU_UP_PREPARE_FROZEN:
- p = kthread_create_on_node(run_ksoftirqd,
- hcpu,
- cpu_to_node(hotcpu),
-@@ -865,19 +1118,16 @@ static int __cpuinit cpu_callback(struct
- per_cpu(ksoftirqd, hotcpu) = p;
- break;
- case CPU_ONLINE:
-- case CPU_ONLINE_FROZEN:
- wake_up_process(per_cpu(ksoftirqd, hotcpu));
- break;
- #ifdef CONFIG_HOTPLUG_CPU
- case CPU_UP_CANCELED:
-- case CPU_UP_CANCELED_FROZEN:
- if (!per_cpu(ksoftirqd, hotcpu))
- break;
- /* Unbind so it can run. Fall thru. */
- kthread_bind(per_cpu(ksoftirqd, hotcpu),
- cpumask_any(cpu_online_mask));
-- case CPU_DEAD:
-- case CPU_DEAD_FROZEN: {
-+ case CPU_POST_DEAD: {
- static const struct sched_param param = {
- .sched_priority = MAX_RT_PRIO-1
- };
-Index: linux-2.6/kernel/posix-timers.c
+Index: linux-2.6/arch/mips/kernel/process.c
===================================================================
---- linux-2.6.orig/kernel/posix-timers.c
-+++ linux-2.6/kernel/posix-timers.c
-@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
- static struct pid *good_sigevent(sigevent_t * event)
- {
- struct task_struct *rtn = current->group_leader;
-+ int sig = event->sigev_signo;
-
- if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
- (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
- return NULL;
-
- if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
-+ sig_kernel_coredump(sig)))
- return NULL;
-
- return task_pid(rtn);
-@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
- return overrun;
- }
-
-+/*
-+ * Protected by RCU!
-+ */
-+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (kc->timer_set == common_timer_set)
-+ hrtimer_wait_for_timer(&timr->it.real.timer);
-+ else
-+ /* FIXME: Whacky hack for posix-cpu-timers */
-+ schedule_timeout(1);
-+#endif
-+}
-+
- /* Set a POSIX.1b interval timer. */
- /* timr->it_lock is taken. */
- static int
-@@ -841,6 +857,7 @@ retry:
- if (!timr)
- return -EINVAL;
-
-+ rcu_read_lock();
- kc = clockid_to_kclock(timr->it_clock);
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
-@@ -849,9 +866,12 @@ retry:
-
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
-+ timer_wait_for_callback(kc, timr);
- rtn = NULL; // We already got the old time...
-+ rcu_read_unlock();
- goto retry;
+--- linux-2.6.orig/arch/mips/kernel/process.c
++++ linux-2.6/arch/mips/kernel/process.c
+@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void)
+ play_dead();
+ #endif
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
-+ rcu_read_unlock();
+ }
- if (old_setting && !error &&
- copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -889,10 +909,15 @@ retry_delete:
- if (!timer)
- return -EINVAL;
+Index: linux-2.6/arch/mn10300/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/mn10300/kernel/process.c
++++ linux-2.6/arch/mn10300/kernel/process.c
+@@ -123,9 +123,7 @@ void cpu_idle(void)
+ idle();
+ }
-+ rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
-+ rcu_read_unlock();
-
- spin_lock(¤t->sighand->siglock);
- list_del(&timer->list);
-@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
+ }
-+ /* On RT we can race with a deletion */
-+ if (!timer->it_signal) {
-+ unlock_timer(timer, flags);
-+ return;
-+ }
-+
- if (timer_delete_hook(timer) == TIMER_RETRY) {
-+ rcu_read_lock();
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
- list_del(&timer->list);
-Index: linux-2.6/include/linux/signal.h
+Index: linux-2.6/arch/parisc/kernel/process.c
===================================================================
---- linux-2.6.orig/include/linux/signal.h
-+++ linux-2.6/include/linux/signal.h
-@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
+--- linux-2.6.orig/arch/parisc/kernel/process.c
++++ linux-2.6/arch/parisc/kernel/process.c
+@@ -71,9 +71,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ barrier();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
}
-
- extern void flush_sigqueue(struct sigpending *queue);
-+extern void flush_task_sigqueue(struct task_struct *tsk);
-
- /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
- static inline int valid_signal(unsigned long sig)
-Index: linux-2.6/kernel/exit.c
+Index: linux-2.6/arch/powerpc/kernel/idle.c
===================================================================
---- linux-2.6.orig/kernel/exit.c
-+++ linux-2.6/kernel/exit.c
-@@ -142,7 +142,7 @@ static void __exit_signal(struct task_st
- * Do this under ->siglock, we can race with another thread
- * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
- */
-- flush_sigqueue(&tsk->pending);
-+ flush_task_sigqueue(tsk);
- tsk->sighand = NULL;
- spin_unlock(&sighand->siglock);
+--- linux-2.6.orig/arch/powerpc/kernel/idle.c
++++ linux-2.6/arch/powerpc/kernel/idle.c
+@@ -94,11 +94,11 @@ void cpu_idle(void)
+ HMT_medium();
+ ppc64_runlatch_on();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- if (cpu_should_die())
++ if (cpu_should_die()) {
++ __preempt_enable_no_resched();
+ cpu_die();
+- schedule();
+- preempt_disable();
++ }
++ schedule_preempt_disabled();
+ }
+ }
-Index: linux-2.6/kernel/fork.c
+Index: linux-2.6/arch/powerpc/platforms/iseries/setup.c
===================================================================
---- linux-2.6.orig/kernel/fork.c
-+++ linux-2.6/kernel/fork.c
-@@ -87,7 +87,7 @@ int max_threads; /* tunable limit on nr
-
- DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-
--__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
-+DEFINE_RWLOCK(tasklist_lock); /* outer */
-
- #ifdef CONFIG_PROVE_RCU
- int lockdep_tasklist_lock_is_held(void)
-@@ -198,7 +198,18 @@ void __put_task_struct(struct task_struc
- if (!profile_handoff_task(tsk))
- free_task(tsk);
- }
-+#ifndef CONFIG_PREEMPT_RT_BASE
- EXPORT_SYMBOL_GPL(__put_task_struct);
-+#else
-+void __put_task_struct_cb(struct rcu_head *rhp)
-+{
-+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
-+
-+ __put_task_struct(tsk);
-+
-+}
-+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-+#endif
+--- linux-2.6.orig/arch/powerpc/platforms/iseries/setup.c
++++ linux-2.6/arch/powerpc/platforms/iseries/setup.c
+@@ -581,9 +581,7 @@ static void iseries_shared_idle(void)
+ if (hvlpevent_is_pending())
+ process_iSeries_events();
- /*
- * macro override instead of weak attribute alias, to workaround
-@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm)
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- EXPORT_SYMBOL_GPL(__mmdrop);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+/*
-+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
-+ * want another facility to make this work.
-+ */
-+void __mmdrop_delayed(struct rcu_head *rhp)
-+{
-+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-+
-+ __mmdrop(mm);
-+}
-+#endif
-+
- /*
- * Decrement the use count and release all resources for an mm.
- */
-@@ -1030,6 +1054,9 @@ void mm_init_owner(struct mm_struct *mm,
- */
- static void posix_cpu_timers_init(struct task_struct *tsk)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ tsk->posix_timer_list = NULL;
-+#endif
- tsk->cputime_expires.prof_exp = cputime_zero;
- tsk->cputime_expires.virt_exp = cputime_zero;
- tsk->cputime_expires.sched_exp = 0;
-@@ -1137,6 +1164,7 @@ static struct task_struct *copy_process(
- spin_lock_init(&p->alloc_lock);
+@@ -610,9 +608,7 @@ static void iseries_dedicated_idle(void)
- init_sigpending(&p->pending);
-+ p->sigqueue_cache = NULL;
+ ppc64_runlatch_on();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- p->utime = cputime_zero;
- p->stime = cputime_zero;
-@@ -1194,6 +1222,9 @@ static struct task_struct *copy_process(
- p->hardirq_context = 0;
- p->softirq_context = 0;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ p->pagefault_disabled = 0;
-+#endif
- #ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
-Index: linux-2.6/kernel/sched_rt.c
+Index: linux-2.6/arch/s390/kernel/process.c
===================================================================
---- linux-2.6.orig/kernel/sched_rt.c
-+++ linux-2.6/kernel/sched_rt.c
-@@ -631,6 +631,7 @@ static int sched_rt_runtime_exceeded(str
-
- if (rt_rq->rt_time > runtime) {
- rt_rq->rt_throttled = 1;
-+ printk_once(KERN_WARNING "sched: RT throttling activated\n");
- if (rt_rq_throttled(rt_rq)) {
- sched_rt_rq_dequeue(rt_rq);
- return 1;
-@@ -1038,7 +1039,7 @@ select_task_rq_rt(struct task_struct *p,
- */
- if (curr && unlikely(rt_task(curr)) &&
- (curr->rt.nr_cpus_allowed < 2 ||
-- curr->prio < p->prio) &&
-+ curr->prio <= p->prio) &&
- (p->rt.nr_cpus_allowed > 1)) {
- int target = find_lowest_rq(p);
-
-@@ -1186,7 +1187,7 @@ static void deactivate_task(struct rq *r
- static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
- {
- if (!task_running(rq, p) &&
-- (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
-+ (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
- (p->rt.nr_cpus_allowed > 1))
- return 1;
- return 0;
-@@ -1331,7 +1332,7 @@ static struct rq *find_lock_lowest_rq(st
- */
- if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(lowest_rq->cpu,
-- &task->cpus_allowed) ||
-+ tsk_cpus_allowed(task)) ||
- task_running(rq, task) ||
- !task->on_rq)) {
-
-@@ -1569,7 +1570,7 @@ static void task_woken_rt(struct rq *rq,
- p->rt.nr_cpus_allowed > 1 &&
- rt_task(rq->curr) &&
- (rq->curr->rt.nr_cpus_allowed < 2 ||
-- rq->curr->prio < p->prio))
-+ rq->curr->prio <= p->prio))
- push_rt_tasks(rq);
+--- linux-2.6.orig/arch/s390/kernel/process.c
++++ linux-2.6/arch/s390/kernel/process.c
+@@ -94,9 +94,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ default_idle();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
-@@ -1614,9 +1615,6 @@ static void set_cpus_allowed_rt(struct t
+Index: linux-2.6/arch/score/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/score/kernel/process.c
++++ linux-2.6/arch/score/kernel/process.c
+@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void)
+ while (!need_resched())
+ barrier();
- update_rt_migration(&rq->rt);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
--
-- cpumask_copy(&p->cpus_allowed, new_mask);
-- p->rt.nr_cpus_allowed = weight;
}
- /* Assumes rq->lock is held */
-Index: linux-2.6/include/asm-generic/cmpxchg-local.h
+Index: linux-2.6/arch/sh/kernel/idle.c
===================================================================
---- linux-2.6.orig/include/asm-generic/cmpxchg-local.h
-+++ linux-2.6/include/asm-generic/cmpxchg-local.h
-@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
- if (size == 8 && sizeof(unsigned long) != 8)
- wrong_size_cmpxchg(ptr);
+--- linux-2.6.orig/arch/sh/kernel/idle.c
++++ linux-2.6/arch/sh/kernel/idle.c
+@@ -110,9 +110,7 @@ void cpu_idle(void)
+ }
-- local_irq_save(flags);
-+ raw_local_irq_save(flags);
- switch (size) {
- case 1: prev = *(u8 *)ptr;
- if (prev == old)
-@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
- default:
- wrong_size_cmpxchg(ptr);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
-- local_irq_restore(flags);
-+ raw_local_irq_restore(flags);
- return prev;
- }
-
-@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
- u64 prev;
- unsigned long flags;
-
-- local_irq_save(flags);
-+ raw_local_irq_save(flags);
- prev = *(u64 *)ptr;
- if (prev == old)
- *(u64 *)ptr = new;
-- local_irq_restore(flags);
-+ raw_local_irq_restore(flags);
- return prev;
}
-Index: linux-2.6/kernel/rtmutex-debug.h
+Index: linux-2.6/arch/sparc/kernel/process_32.c
===================================================================
---- linux-2.6.orig/kernel/rtmutex-debug.h
-+++ linux-2.6/kernel/rtmutex-debug.h
-@@ -17,17 +17,17 @@ extern void debug_rt_mutex_free_waiter(s
- extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
- extern void debug_rt_mutex_lock(struct rt_mutex *lock);
- extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
--extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-- struct task_struct *powner);
-+extern void
-+debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner);
- extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
- extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
- struct rt_mutex *lock);
- extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
--# define debug_rt_mutex_reset_waiter(w) \
-+# define debug_rt_mutex_reset_waiter(w) \
- do { (w)->deadlock_lock = NULL; } while (0)
-
--static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-- int detect)
-+static inline int
-+debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, int detect)
- {
-- return (waiter != NULL);
-+ return waiter != NULL;
+--- linux-2.6.orig/arch/sparc/kernel/process_32.c
++++ linux-2.6/arch/sparc/kernel/process_32.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_relax();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
}
-Index: linux-2.6/drivers/char/random.c
+@@ -138,9 +136,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_relax();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
+ }
+Index: linux-2.6/arch/sparc/kernel/process_64.c
===================================================================
---- linux-2.6.orig/drivers/char/random.c
-+++ linux-2.6/drivers/char/random.c
-@@ -433,7 +433,7 @@ static struct entropy_store input_pool =
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .limit = 1,
-- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
- };
-
-@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo
- .name = "blocking",
- .limit = 1,
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
- .pool = blocking_pool_data
- };
+--- linux-2.6.orig/arch/sparc/kernel/process_64.c
++++ linux-2.6/arch/sparc/kernel/process_64.c
+@@ -102,15 +102,13 @@ void cpu_idle(void)
-@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_
- .poolinfo = &poolinfo_table[1],
- .name = "nonblocking",
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
- .pool = nonblocking_pool_data
- };
+ tick_nohz_restart_sched_tick();
-@@ -633,8 +633,11 @@ static void add_timer_randomness(struct
- preempt_disable();
- /* if over the trickle threshold, use only 1 in 4096 samples */
- if (input_pool.entropy_count > trickle_thresh &&
-- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
-- goto out;
-+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
-+ preempt_enable();
-+ return;
-+ }
-+ preempt_enable();
+- preempt_enable_no_resched();
+-
+ #ifdef CONFIG_HOTPLUG_CPU
+- if (cpu_is_offline(cpu))
++ if (cpu_is_offline(cpu)) {
++ __preempt_enable_no_resched();
+ cpu_play_dead();
++ }
+ #endif
+-
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- sample.jiffies = jiffies;
- sample.cycles = get_cycles();
-@@ -676,8 +679,6 @@ static void add_timer_randomness(struct
- credit_entropy_bits(&input_pool,
- min_t(int, fls(delta>>1), 11));
+Index: linux-2.6/arch/tile/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/tile/kernel/process.c
++++ linux-2.6/arch/tile/kernel/process.c
+@@ -106,9 +106,7 @@ void cpu_idle(void)
+ current_thread_info()->status |= TS_POLLING;
+ }
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
--out:
-- preempt_enable();
}
- void add_input_randomness(unsigned int type, unsigned int code,
-Index: linux-2.6/fs/ioprio.c
+Index: linux-2.6/arch/x86/kernel/process_32.c
===================================================================
---- linux-2.6.orig/fs/ioprio.c
-+++ linux-2.6/fs/ioprio.c
-@@ -226,6 +226,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
- if (!user)
- break;
+--- linux-2.6.orig/arch/x86/kernel/process_32.c
++++ linux-2.6/arch/x86/kernel/process_32.c
+@@ -38,6 +38,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
-+ rcu_read_lock();
- do_each_thread(g, p) {
- if (__task_cred(p)->uid != user->uid)
- continue;
-@@ -237,6 +238,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
- else
- ret = ioprio_best(ret, tmpio);
- } while_each_thread(g, p);
-+ rcu_read_unlock();
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -113,9 +114,7 @@ void cpu_idle(void)
+ start_critical_timings();
+ }
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- if (who)
- free_uid(user);
-Index: linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-at91/at91rm9200_time.c
-+++ linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
-@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod
- last_crtr = read_CRTR();
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
- /* PIT for periodic irqs; fixed rate of 1/HZ */
- irqmask = AT91_ST_PITS;
- at91_sys_write(AT91_ST_PIMR, LATCH);
-@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
-+ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
- case CLOCK_EVT_MODE_RESUME:
- irqmask = 0;
- break;
-Index: linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-at91/at91sam926x_time.c
-+++ linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
-@@ -54,7 +54,7 @@ static struct clocksource pit_clk = {
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- };
+@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p,
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
--
-+static struct irqaction at91sam926x_pit_irq;
- /*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo
- {
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ /* Set up irq handler */
-+ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ /*
++ * Save @prev's kmap_atomic stack
++ */
++ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
++ if (unlikely(prev_p->kmap_idx)) {
++ int i;
+
- /* update clocksource counter */
- pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
-@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo
- case CLOCK_EVT_MODE_UNUSED:
- /* disable irq, leaving the clocksource active */
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
-+ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
-Index: linux-2.6/drivers/clocksource/tcb_clksrc.c
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ pte_t *ptep = kmap_pte - idx;
++ prev_p->kmap_pte[i] = *ptep;
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++
++ __this_cpu_write(__kmap_atomic_idx, 0);
++ }
++
++ /*
++ * Restore @next_p's kmap_atomic stack
++ */
++ if (unlikely(next_p->kmap_idx)) {
++ int i;
++
++ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
++
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++ }
++#endif
++
+ /* If we're going to preload the fpu context, make sure clts
+ is run while we're batching the cpu state updates. */
+ if (preload_fpu)
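For reference, a sketch of the per-task state the two loops above save and
restore. The field names follow the hunk; the actual declaration added to
task_struct lives elsewhere in the patch, so the exact layout is an assumption:

        #if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
                int     kmap_idx;               /* live kmap_atomic slots  */
                pte_t   kmap_pte[KM_TYPE_NR];   /* saved ptes, stack order */
        #endif

With PREEMPT_RT_FULL a task can be preempted while it holds atomic kmaps, so
the per-CPU fixmap slots it occupies are parked in the task at switch-out and
replayed into the fixmap at switch-in, exactly as the hunk does.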
+Index: linux-2.6/arch/x86/kernel/process_64.c
===================================================================
---- linux-2.6.orig/drivers/clocksource/tcb_clksrc.c
-+++ linux-2.6/drivers/clocksource/tcb_clksrc.c
-@@ -21,8 +21,7 @@
- * resolution better than 200 nsec).
- *
- * - The third channel may be used to provide a 16-bit clockevent
-- * source, used in either periodic or oneshot mode. This runs
-- * at 32 KiHZ, and can handle delays of up to two seconds.
-+ * source, used in either periodic or oneshot mode.
- *
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -68,6 +67,7 @@ static struct clocksource clksrc = {
- struct tc_clkevt_device {
- struct clock_event_device clkevt;
- struct clk *clk;
-+ u32 freq;
- void __iomem *regs;
- };
+--- linux-2.6.orig/arch/x86/kernel/process_64.c
++++ linux-2.6/arch/x86/kernel/process_64.c
+@@ -146,9 +146,7 @@ void cpu_idle(void)
+ }
-@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl
- return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
--/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-- * because using one of the divided clocks would usually mean the
-- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-- *
-- * A divided clock could be good for high resolution timers, since
-- * 30.5 usec resolution can seem "low".
-- */
- static u32 timer_clock;
+Index: linux-2.6/arch/xtensa/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/xtensa/kernel/process.c
++++ linux-2.6/arch/xtensa/kernel/process.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ platform_idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
-@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+Index: linux-2.6/init/main.c
+===================================================================
+--- linux-2.6.orig/init/main.c
++++ linux-2.6/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/posix-timers.h>
-- /* slow clock, count up to RC, then irq and restart */
-+ /* count up to RC, then irq and restart */
- __raw_writel(timer_clock
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-+ __raw_writel((tcd->freq + HZ/2)/HZ,
-+ tcaddr + ATMEL_TC_REG(2, RC));
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -367,9 +368,7 @@ static noinline void __init_refok rest_i
+ * at least once to get things moving:
+ */
+ init_idle_bootup_task(current);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ /* Call into cpu_idle with preempt disabled */
+ cpu_idle();
+@@ -501,6 +500,7 @@ asmlinkage void __init start_kernel(void
+ parse_args("Booting kernel", static_command_line, __start___param,
+ __stop___param - __start___param,
+ &unknown_bootoption);
++ softirq_early_init();
+ /*
+ * These use large bootmem allocations and must precede
+ * kmem_cache_init()
+Index: linux-2.6/kernel/mutex.c
+===================================================================
+--- linux-2.6.orig/kernel/mutex.c
++++ linux-2.6/kernel/mutex.c
+@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock,
-- /* slow clock, count up to RC, then irq and stop */
-+ /* count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt =
- .features = CLOCK_EVT_FEAT_PERIODIC
- | CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- /* Should be lower than at91rm9200's system timer */
- .rating = 125,
-+#else
-+ .rating = 200,
-+#endif
- .set_next_event = tc_next_event,
- .set_mode = tc_mode,
- },
-@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = {
- .handler = ch2_irq,
+ /* didn't get the lock, go to sleep: */
+ spin_unlock_mutex(&lock->wait_lock, flags);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ spin_lock_mutex(&lock->wait_lock, flags);
+ }
+
+Index: linux-2.6/kernel/softirq.c
+===================================================================
+--- linux-2.6.orig/kernel/softirq.c
++++ linux-2.6/kernel/softirq.c
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
};
--static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-+static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
- {
-+ unsigned divisor = atmel_tc_divisors[divisor_idx];
- struct clk *t2_clk = tc->clk[2];
- int irq = tc->irq[2];
++#ifdef CONFIG_NO_HZ
++# ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * On preempt-rt a softirq might be blocked on a lock. There might be
++ * no other runnable task on this CPU because the lock owner runs on
++ * some other CPU. So we have to go into idle with the pending bit
++ * set. Therefore we need to check this, otherwise we warn about false
++ * positives, which confuses users and defeats the whole purpose of
++ * this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ u32 warnpending = 0, pending = local_softirq_pending();
++
++ if (rate_limit >= 10)
++ return;
++
++ if (pending) {
++ struct task_struct *tsk;
++
++ tsk = __get_cpu_var(ksoftirqd);
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++
++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
++ warnpending = 1;
++
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ pending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++#endif
++
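The expected caller of softirq_check_pending_idle() is the NOHZ idle path. A
sketch of the call site, assuming the tick-sched hunk (not visible in this part
of the patch) replaces its open-coded rate-limited warning with the helper:

        /* in tick_nohz_stop_sched_tick(), before stopping the tick: */
        if (unlikely(local_softirq_pending() && cpu_online(cpu)))
                softirq_check_pending_idle();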
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+@@ -76,6 +138,36 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
-@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc
- clkevt.clk = t2_clk;
- tc_irqaction.dev_id = &clkevt;
++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
++{
++ struct softirq_action *h = softirq_vec;
++ unsigned int prev_count = preempt_count();
++
++ local_irq_enable();
++ for ( ; pending; h++, pending >>= 1) {
++ unsigned int vec_nr = h - softirq_vec;
++
++ if (!(pending & 1))
++ continue;
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ printk(KERN_ERR
++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, (unsigned int) preempt_count());
++ preempt_count() = prev_count;
++ }
++ if (need_rcu_bh_qs)
++ rcu_bh_qs(cpu);
++ }
++ local_irq_disable();
++}
++
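A worked example of the dispatch loop above, using the vector numbering implied
by the softirq_to_name[] table: with pending == 0x0a only bits 1 and 3 are set,
so exactly two actions run.

        /*
         * pending = 0x0a (1010b), h = softirq_vec:
         *   pending & 1 == 0 -> HI_SOFTIRQ skipped,      pending -> 0x05
         *   pending & 1 == 1 -> TIMER_SOFTIRQ runs,      pending -> 0x02
         *   pending & 1 == 0 -> NET_TX_SOFTIRQ skipped,  pending -> 0x01
         *   pending & 1 == 1 -> NET_RX_SOFTIRQ runs,     pending -> 0x00
         * pending is now 0, the loop exits, interrupts go off again.
         */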
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -206,7 +298,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
-- timer_clock = clk32k_divisor_idx;
-+ timer_clock = divisor_idx;
+ asmlinkage void __do_softirq(void)
+ {
+- struct softirq_action *h;
+ __u32 pending;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+ int cpu;
+@@ -215,7 +306,7 @@ asmlinkage void __do_softirq(void)
+ account_system_vtime(current);
-- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
-- clkevt.clkevt.max_delta_ns
-- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
-+ if (!divisor)
-+ clkevt.freq = 32768;
-+ else
-+ clkevt.freq = clk_get_rate(t2_clk)/divisor;
-+
-+ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
-+ clkevt.clkevt.shift);
-+ clkevt.clkevt.max_delta_ns =
-+ clockevent_delta2ns(0xffff, &clkevt.clkevt);
- clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
- clkevt.clkevt.cpumask = cpumask_of(0);
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+- SOFTIRQ_OFFSET);
++ SOFTIRQ_OFFSET);
+ lockdep_softirq_enter();
-@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void)
- clocksource_register(&clksrc);
+ cpu = smp_processor_id();
+@@ -223,36 +314,7 @@ restart:
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
- /* channel 2: periodic and oneshot timer support */
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- setup_clkevents(tc, clk32k_divisor_idx);
+- local_irq_enable();
-
-+#else
-+ setup_clkevents(tc, best_divisor_idx);
-+#endif
- return 0;
+- h = softirq_vec;
+-
+- do {
+- if (pending & 1) {
+- unsigned int vec_nr = h - softirq_vec;
+- int prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- printk(KERN_ERR "huh, entered softirq %u %s %p"
+- "with preempt_count %08x,"
+- " exited with %08x?\n", vec_nr,
+- softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count() = prev_count;
+- }
+-
+- rcu_bh_qs(cpu);
+- }
+- h++;
+- pending >>= 1;
+- } while (pending);
+-
+- local_irq_disable();
++ handle_pending_softirqs(pending, cpu, 1);
+
+ pending = local_softirq_pending();
+ if (pending && --max_restart)
+@@ -267,6 +329,26 @@ restart:
+ __local_bh_enable(SOFTIRQ_OFFSET);
}
- arch_initcall(tcb_clksrc_init);
-Index: linux-2.6/drivers/misc/Kconfig
-===================================================================
---- linux-2.6.orig/drivers/misc/Kconfig
-+++ linux-2.6/drivers/misc/Kconfig
-@@ -82,6 +82,7 @@ config AB8500_PWM
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
-+ default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
-@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC
- are combined to make a single 32-bit timer.
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
++/*
++ * Called with preemption disabled from run_ksoftirqd()
++ */
++static int ksoftirqd_do_softirq(int cpu)
++{
++ /*
++ * Preempt disable stops cpu going offline.
++ * If already offline, we'll be on wrong CPU:
++ * don't process.
++ */
++ if (cpu_is_offline(cpu))
++ return -1;
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_irq_enable();
++ return 0;
++}
++
+ #ifndef __ARCH_HAS_DO_SOFTIRQ
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
- TC can be used for other purposes, such as PWM generation and
- interval timing.
+ asmlinkage void do_softirq(void)
+@@ -289,6 +371,183 @@ asmlinkage void do_softirq(void)
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y if !PREEMPT_RT_FULL
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
+ #endif
+
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static inline void ksoftirqd_set_sched_params(void) { }
++static inline void ksoftirqd_clr_sched_params(void) { }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock
++ */
++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
++
++static void __do_softirq_common(int need_rcu_bh_qs);
++
++void __do_softirq(void)
++{
++ __do_softirq_common(0);
++}
++
++void __init softirq_early_init(void)
++{
++ local_irq_lock_init(local_softirq_lock);
++}
++
++void local_bh_disable(void)
++{
++ migrate_disable();
++ current->softirq_nestcnt++;
++}
++EXPORT_SYMBOL(local_bh_disable);
++
++void local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ if ((current->softirq_nestcnt == 1) &&
++ local_softirq_pending() &&
++ local_trylock(local_softirq_lock)) {
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_unlock(local_softirq_lock);
++ local_irq_enable();
++ WARN_ON(current->softirq_nestcnt != 1);
++ }
++ current->softirq_nestcnt--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(local_bh_enable);
++
++void local_bh_enable_ip(unsigned long ip)
++{
++ local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
++
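Illustrative nesting behaviour of the RT variants above, assuming an
uncontended local lock: pending softirqs only execute at the outermost enable,
in the context of the enabling task.

        local_bh_disable();             /* nestcnt 0 -> 1, migration pinned */
        local_bh_disable();             /* nestcnt 1 -> 2                   */
        raise_softirq(TASKLET_SOFTIRQ); /* mark work pending                */
        local_bh_enable();              /* nestcnt 2 -> 1, nothing runs     */
        local_bh_enable();              /* nestcnt 1 -> 0, pending work is
                                           run here if the trylock succeeds */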
++/* For tracing */
++int notrace __in_softirq(void)
++{
++ if (__get_cpu_var(local_softirq_lock).owner == current)
++ return __get_cpu_var(local_softirq_lock).nestcnt;
++ return 0;
++}
++
++int in_serving_softirq(void)
++{
++ int res;
++
++ preempt_disable();
++ res = __get_cpu_var(local_softirq_runner) == current;
++ preempt_enable();
++ return res;
++}
++
++/*
++ * Called with bh and local interrupts disabled. For full RT cpu must
++ * be pinned.
++ */
++static void __do_softirq_common(int need_rcu_bh_qs)
++{
++ u32 pending = local_softirq_pending();
++ int cpu = smp_processor_id();
++
++ current->softirq_nestcnt++;
++
++ /* Reset the pending bitmask before enabling irqs */
++ set_softirq_pending(0);
++
++ __get_cpu_var(local_softirq_runner) = current;
++
++ lockdep_softirq_enter();
++
++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
++
++ pending = local_softirq_pending();
++ if (pending)
++ wakeup_softirqd();
++
++ lockdep_softirq_exit();
++ __get_cpu_var(local_softirq_runner) = NULL;
++
++ current->softirq_nestcnt--;
++}
++
++static int __thread_do_softirq(int cpu)
++{
++ /*
++ * Prevent the current cpu from going offline.
++ * pin_current_cpu() can reenable preemption and block on the
++ * hotplug mutex. When it returns, the current cpu is
++ * pinned. It might be the wrong one, but the offline check
++ * below catches that.
++ */
++ pin_current_cpu();
++ /*
++ * If called from ksoftirqd (cpu >= 0) we need to check
++ * whether we are on the wrong cpu due to cpu offlining. If
++ * called via thread_do_softirq() no action required.
++ */
++ if (cpu >= 0 && cpu_is_offline(cpu)) {
++ unpin_current_cpu();
++ return -1;
++ }
++ preempt_enable();
++ local_lock(local_softirq_lock);
++ local_irq_disable();
++ /*
++ * We cannot switch stacks on RT as we want to be able to
++ * schedule!
++ */
++ if (local_softirq_pending())
++ __do_softirq_common(cpu >= 0);
++ local_unlock(local_softirq_lock);
++ unpin_current_cpu();
++ preempt_disable();
++ local_irq_enable();
++ return 0;
++}
+
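The cpu argument of __thread_do_softirq() encodes the caller and only matters
for the offline check. A usage sketch matching the two call sites added below:

        __thread_do_softirq(cpu);   /* ksoftirqd, bound to cpu: returns -1
                                       so the thread can die if cpu is gone */
        __thread_do_softirq(-1);    /* process context (netif_rx_ni() path),
                                       no offline check needed              */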
- config IBM_ASM
- tristate "Device driver for IBM RSA service processor"
- depends on X86 && PCI && INPUT && EXPERIMENTAL
-@@ -133,6 +141,35 @@ config IBM_ASM
- for information on the specific driver level and support statement
- for your IBM server.
-
-+config HWLAT_DETECTOR
-+ tristate "Testing module to detect hardware-induced latencies"
-+ depends on DEBUG_FS
-+ depends on RING_BUFFER
-+ default m
-+ ---help---
-+ A simple hardware latency detector. Use this module to detect
-+ large latencies introduced by the behavior of the underlying
-+ system firmware external to Linux. We do this using periodic
-+ use of stop_machine to grab all available CPUs and measure
-+ for unexplainable gaps in the CPU timestamp counter(s). By
-+ default, the module is not enabled until the "enable" file
-+ within the "hwlat_detector" debugfs directory is toggled.
++/*
++ * Called from netif_rx_ni(). Preemption enabled.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq()) {
++ preempt_disable();
++ __thread_do_softirq(-1);
++ preempt_enable();
++ }
++}
+
-+ This module is often used to detect SMI (System Management
-+ Interrupts) on x86 systems, though it is not x86 specific. To
-+ this end, we default to using a sample window of 1 second,
-+ during which we will sample for 0.5 seconds. If an SMI or
-+ similar event occurs during that time, it is recorded
-+ into an 8K samples global ring buffer until retrieved.
++static int ksoftirqd_do_softirq(int cpu)
++{
++ return __thread_do_softirq(cpu);
++}
+
-+ WARNING: This software should never be enabled (it can be built
-+ but should not be turned on after it is loaded) in a production
-+ environment where high latencies are a concern since the
-+ sampling mechanism actually introduces latencies for
-+ regular tasks while the CPU(s) are being held.
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
+
-+ If unsure, say N
++static inline void ksoftirqd_set_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 1 };
+
- config PHANTOM
- tristate "Sensable PHANToM (PCI)"
- depends on PCI
-Index: linux-2.6/drivers/net/tulip/tulip_core.c
-===================================================================
---- linux-2.6.orig/drivers/net/tulip/tulip_core.c
-+++ linux-2.6/drivers/net/tulip/tulip_core.c
-@@ -1951,6 +1951,7 @@ static void __devexit tulip_remove_one (
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
-+ pci_disable_device (pdev);
- pci_set_drvdata (pdev, NULL);
-
- /* pci_power_off (pdev, -1); */
-Index: linux-2.6/drivers/net/8139too.c
-===================================================================
---- linux-2.6.orig/drivers/net/8139too.c
-+++ linux-2.6/drivers/net/8139too.c
-@@ -2173,7 +2173,11 @@ static irqreturn_t rtl8139_interrupt (in
++ sched_setscheduler(current, SCHED_FIFO, &param);
++}
++
++static inline void ksoftirqd_clr_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
++#endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
*/
- static void rtl8139_poll_controller(struct net_device *dev)
+@@ -302,9 +561,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_check_idle(cpu);
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -313,6 +572,7 @@ void irq_enter(void)
+ #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ static inline void invoke_softirq(void)
{
-- disable_irq(dev->irq);
-+ /*
-+ * use _nosync() variant - might be used by netconsole
-+ * from atomic contexts:
-+ */
-+ disable_irq_nosync(dev->irq);
- rtl8139_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ __do_softirq();
+ else {
+@@ -321,10 +581,14 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
}
-Index: linux-2.6/drivers/net/ehea/ehea_main.c
-===================================================================
---- linux-2.6.orig/drivers/net/ehea/ehea_main.c
-+++ linux-2.6/drivers/net/ehea/ehea_main.c
-@@ -1369,7 +1369,7 @@ static int ehea_reg_interrupts(struct ne
- "%s-queue%d", dev->name, i);
- ret = ibmebus_request_irq(pr->eq->attr.ist1,
- ehea_recv_irq_handler,
-- IRQF_DISABLED, pr->int_send_name,
-+ IRQF_NO_THREAD, pr->int_send_name,
- pr);
- if (ret) {
- netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
-Index: linux-2.6/drivers/net/arm/at91_ether.c
-===================================================================
---- linux-2.6.orig/drivers/net/arm/at91_ether.c
-+++ linux-2.6/drivers/net/arm/at91_ether.c
-@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned int phy;
-+ unsigned long flags;
-
-+ spin_lock_irqsave(&lp->lock, flags);
- /*
- * This hander is triggered on both edges, but the PHY chips expect
- * level-triggering. We therefore have to check if the PHY actually has
-@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
-
- done:
- disable_mdi();
-+ spin_unlock_irqrestore(&lp->lock, flags);
-
- return IRQ_HANDLED;
+ #else
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ do_softirq();
+ else {
+@@ -333,6 +597,9 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
}
-@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
-
-+ spin_lock_irq(&lp->lock);
- enable_mdi();
- update_linkspeed(dev, 1);
- disable_mdi();
-+ spin_unlock_irq(&lp->lock);
+ #endif
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+@@ -353,7 +620,7 @@ void irq_exit(void)
+ if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+ tick_nohz_stop_sched_tick(0);
+ #endif
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
}
-Index: linux-2.6/include/linux/preempt.h
-===================================================================
---- linux-2.6.orig/include/linux/preempt.h
-+++ linux-2.6/include/linux/preempt.h
-@@ -33,12 +33,18 @@ do { \
- barrier(); \
- } while (0)
--#define preempt_enable_no_resched() \
-+#define __preempt_enable_no_resched() \
- do { \
- barrier(); \
- dec_preempt_count(); \
- } while (0)
+ /*
+@@ -739,29 +1006,21 @@ void __init softirq_init(void)
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+# define preempt_enable_no_resched() __preempt_enable_no_resched()
-+#else
-+# define preempt_enable_no_resched() preempt_enable()
-+#endif
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
++ ksoftirqd_set_sched_params();
+
- #define preempt_check_resched() \
- do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-@@ -47,7 +53,7 @@ do { \
+ set_current_state(TASK_INTERRUPTIBLE);
- #define preempt_enable() \
- do { \
-- preempt_enable_no_resched(); \
-+ __preempt_enable_no_resched(); \
- barrier(); \
- preempt_check_resched(); \
- } while (0)
-@@ -83,6 +89,7 @@ do { \
- #else
+ while (!kthread_should_stop()) {
+ preempt_disable();
+- if (!local_softirq_pending()) {
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
+- }
++ if (!local_softirq_pending())
++ schedule_preempt_disabled();
- #define preempt_disable() do { } while (0)
-+#define __preempt_enable_no_resched() do { } while (0)
- #define preempt_enable_no_resched() do { } while (0)
- #define preempt_enable() do { } while (0)
- #define preempt_check_resched() do { } while (0)
-@@ -93,6 +100,27 @@ do { \
+ __set_current_state(TASK_RUNNING);
- #endif
+ while (local_softirq_pending()) {
+- /* Preempt disable stops cpu going offline.
+- If already offline, we'll be on wrong CPU:
+- don't process */
+- if (cpu_is_offline((long)__bind_cpu))
++ if (ksoftirqd_do_softirq((long) __bind_cpu))
+ goto wait_to_die;
+- local_irq_disable();
+- if (local_softirq_pending())
+- __do_softirq();
+- local_irq_enable();
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
+ cond_resched();
+ preempt_disable();
+ rcu_note_context_switch((long)__bind_cpu);
+@@ -774,6 +1033,7 @@ static int run_ksoftirqd(void * __bind_c
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define preempt_disable_rt() preempt_disable()
-+# define preempt_enable_rt() preempt_enable()
-+# define preempt_disable_nort() do { } while (0)
-+# define preempt_enable_nort() do { } while (0)
-+# ifdef CONFIG_SMP
-+ extern void migrate_disable(void);
-+ extern void migrate_enable(void);
-+# else /* CONFIG_SMP */
-+# define migrate_disable() do { } while (0)
-+# define migrate_enable() do { } while (0)
-+# endif /* CONFIG_SMP */
-+#else
-+# define preempt_disable_rt() do { } while (0)
-+# define preempt_enable_rt() do { } while (0)
-+# define preempt_disable_nort() preempt_disable()
-+# define preempt_enable_nort() preempt_enable()
-+# define migrate_disable() preempt_disable()
-+# define migrate_enable() preempt_enable()
-+#endif
-+
- #ifdef CONFIG_PREEMPT_NOTIFIERS
+ wait_to_die:
+ preempt_enable();
++ ksoftirqd_clr_sched_params();
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+@@ -850,9 +1110,8 @@ static int __cpuinit cpu_callback(struct
+ int hotcpu = (unsigned long)hcpu;
+ struct task_struct *p;
- struct preempt_notifier;
-Index: linux-2.6/include/linux/uaccess.h
+- switch (action) {
++ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+- case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create_on_node(run_ksoftirqd,
+ hcpu,
+ cpu_to_node(hotcpu),
+@@ -865,19 +1124,16 @@ static int __cpuinit cpu_callback(struct
+ per_cpu(ksoftirqd, hotcpu) = p;
+ break;
+ case CPU_ONLINE:
+- case CPU_ONLINE_FROZEN:
+ wake_up_process(per_cpu(ksoftirqd, hotcpu));
+ break;
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+- case CPU_UP_CANCELED_FROZEN:
+ if (!per_cpu(ksoftirqd, hotcpu))
+ break;
+ /* Unbind so it can run. Fall thru. */
+ kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ cpumask_any(cpu_online_mask));
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN: {
++ case CPU_POST_DEAD: {
+ static const struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO-1
+ };
+Index: linux-2.6/kernel/posix-timers.c
===================================================================
---- linux-2.6.orig/include/linux/uaccess.h
-+++ linux-2.6/include/linux/uaccess.h
-@@ -6,38 +6,37 @@
-
- /*
- * These routines enable/disable the pagefault handler in that
-- * it will not take any locks and go straight to the fixup table.
-- *
-- * They have great resemblance to the preempt_disable/enable calls
-- * and in fact they are identical; this is because currently there is
-- * no other way to make the pagefault handlers do this. So we do
-- * disable preemption but we don't necessarily care about that.
-+ * it will not take any MM locks and go straight to the fixup table.
- */
--static inline void pagefault_disable(void)
-+static inline void raw_pagefault_disable(void)
+--- linux-2.6.orig/kernel/posix-timers.c
++++ linux-2.6/kernel/posix-timers.c
+@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
+ static struct pid *good_sigevent(sigevent_t * event)
{
- inc_preempt_count();
-- /*
-- * make sure to have issued the store before a pagefault
-- * can hit.
-- */
- barrier();
- }
+ struct task_struct *rtn = current->group_leader;
++ int sig = event->sigev_signo;
--static inline void pagefault_enable(void)
-+static inline void raw_pagefault_enable(void)
- {
-- /*
-- * make sure to issue those last loads/stores before enabling
-- * the pagefault handler again.
-- */
- barrier();
- dec_preempt_count();
-- /*
-- * make sure we do..
-- */
- barrier();
- preempt_check_resched();
+ if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
+ return NULL;
+
+ if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++ sig_kernel_coredump(sig)))
+ return NULL;
+
+ return task_pid(rtn);
+@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+ return overrun;
}
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static inline void pagefault_disable(void)
-+{
-+ raw_pagefault_disable();
-+}
-+
-+static inline void pagefault_enable(void)
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
-+ raw_pagefault_enable();
-+}
-+#else
-+extern void pagefault_disable(void);
-+extern void pagefault_enable(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (kc->timer_set == common_timer_set)
++ hrtimer_wait_for_timer(&timr->it.real.timer);
++ else
++ /* FIXME: Whacky hack for posix-cpu-timers */
++ schedule_timeout(1);
+#endif
++}
+
- #ifndef ARCH_HAS_NOCACHE_UACCESS
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -841,6 +857,7 @@ retry:
+ if (!timr)
+ return -EINVAL;
- static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_
- mm_segment_t old_fs = get_fs(); \
- \
- set_fs(KERNEL_DS); \
-- pagefault_disable(); \
-+ raw_pagefault_disable(); \
- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
-- pagefault_enable(); \
-+ raw_pagefault_enable(); \
- set_fs(old_fs); \
- ret; \
- })
-Index: linux-2.6/arch/alpha/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/alpha/mm/fault.c
-+++ linux-2.6/arch/alpha/mm/fault.c
-@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
++ rcu_read_lock();
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+@@ -849,9 +866,12 @@ retry:
- /* If we're in an interrupt context, or have no user context,
- we must not take the fault. */
-- if (!mm || in_atomic())
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ timer_wait_for_callback(kc, timr);
+ rtn = NULL; // We already got the old time...
++ rcu_read_unlock();
+ goto retry;
+ }
++ rcu_read_unlock();
- #ifdef CONFIG_ALPHA_LARGE_VMALLOC
-Index: linux-2.6/arch/arm/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/fault.c
-+++ linux-2.6/arch/arm/mm/fault.c
-@@ -289,7 +289,7 @@ do_page_fault(unsigned long addr, unsign
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -889,10 +909,15 @@ retry_delete:
+ if (!timer)
+ return -EINVAL;
- /*
-Index: linux-2.6/arch/avr32/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/avr32/mm/fault.c
-+++ linux-2.6/arch/avr32/mm/fault.c
-@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
- * If we're in an interrupt or have no user context, we must
- * not take the fault...
- */
-- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
-+ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
- goto no_context;
++ rcu_read_lock();
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
++ rcu_read_unlock();
- local_irq_enable();
-Index: linux-2.6/arch/cris/mm/fault.c
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
++ /* On RT we can race with a deletion */
++ if (!timer->it_signal) {
++ unlock_timer(timer, flags);
++ return;
++ }
++
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
++ rcu_read_lock();
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
+ list_del(&timer->list);
+Index: linux-2.6/include/linux/signal.h
===================================================================
---- linux-2.6.orig/arch/cris/mm/fault.c
-+++ linux-2.6/arch/cris/mm/fault.c
-@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str
- * user context, we must not take the fault.
- */
+--- linux-2.6.orig/include/linux/signal.h
++++ linux-2.6/include/linux/signal.h
+@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
+ }
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/frv/mm/fault.c
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+Index: linux-2.6/kernel/exit.c
===================================================================
---- linux-2.6.orig/arch/frv/mm/fault.c
-+++ linux-2.6/arch/frv/mm/fault.c
-@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
+--- linux-2.6.orig/kernel/exit.c
++++ linux-2.6/kernel/exit.c
+@@ -142,7 +142,7 @@ static void __exit_signal(struct task_st
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/ia64/mm/fault.c
+Index: linux-2.6/kernel/fork.c
===================================================================
---- linux-2.6.orig/arch/ia64/mm/fault.c
-+++ linux-2.6/arch/ia64/mm/fault.c
-@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres
- /*
- * If we're in an interrupt or have no user context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+--- linux-2.6.orig/kernel/fork.c
++++ linux-2.6/kernel/fork.c
+@@ -87,7 +87,7 @@ int max_threads; /* tunable limit on nr
- #ifdef CONFIG_VIRTUAL_MEM_MAP
-Index: linux-2.6/arch/m32r/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/m32r/mm/fault.c
-+++ linux-2.6/arch/m32r/mm/fault.c
-@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user context or are running in an
- * atomic region then we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
+ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
- /* When running in the kernel we expect faults to occur only to
-Index: linux-2.6/arch/m68k/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/m68k/mm/fault.c
-+++ linux-2.6/arch/m68k/mm/fault.c
-@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
++DEFINE_RWLOCK(tasklist_lock); /* outer */
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/microblaze/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/microblaze/mm/fault.c
-+++ linux-2.6/arch/microblaze/mm/fault.c
-@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
- if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
- is_write = 0;
+ #ifdef CONFIG_PROVE_RCU
+ int lockdep_tasklist_lock_is_held(void)
+@@ -198,7 +198,18 @@ void __put_task_struct(struct task_struc
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(!mm || pagefault_disabled())) {
- if (kernel_mode(regs))
- goto bad_area_nosemaphore;
+ /*
+ * macro override instead of weak attribute alias, to workaround
+@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
-Index: linux-2.6/arch/mips/mm/fault.c
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
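A sketch of the intended consumer, assuming the scheduler side of this patch
defers the final mm drop through RCU whenever calling __mmdrop() directly is
not allowed:

        static inline void mmdrop_delayed(struct mm_struct *mm)
        {
                if (atomic_dec_and_test(&mm->mm_count))
                        call_rcu(&mm->delayed_drop, __mmdrop_delayed);
        }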
+ /*
+ * Decrement the use count and release all resources for an mm.
+ */
+@@ -1030,6 +1054,9 @@ void mm_init_owner(struct mm_struct *mm,
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
++#endif
+ tsk->cputime_expires.prof_exp = cputime_zero;
+ tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.sched_exp = 0;
+@@ -1137,6 +1164,7 @@ static struct task_struct *copy_process(
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = cputime_zero;
+ p->stime = cputime_zero;
+@@ -1194,6 +1222,9 @@ static struct task_struct *copy_process(
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ p->pagefault_disabled = 0;
++#endif
+ #ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+Index: linux-2.6/kernel/sched_features.h
===================================================================
---- linux-2.6.orig/arch/mips/mm/fault.c
-+++ linux-2.6/arch/mips/mm/fault.c
-@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault(
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
+--- linux-2.6.orig/kernel/sched_features.h
++++ linux-2.6/kernel/sched_features.h
+@@ -65,10 +65,15 @@ SCHED_FEAT(OWNER_SPIN, 1)
+ */
+ SCHED_FEAT(NONIRQ_POWER, 1)
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/mn10300/mm/fault.c
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, 1)
++#else
++SCHED_FEAT(TTWU_QUEUE, 0)
++#endif
+
+ SCHED_FEAT(FORCE_SD_OVERLAP, 0)
++SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+Index: linux-2.6/include/asm-generic/cmpxchg-local.h
===================================================================
---- linux-2.6.orig/arch/mn10300/mm/fault.c
-+++ linux-2.6/arch/mn10300/mm/fault.c
-@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+--- linux-2.6.orig/include/asm-generic/cmpxchg-local.h
++++ linux-2.6/include/asm-generic/cmpxchg-local.h
+@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/parisc/mm/fault.c
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
+
+@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
+ u64 prev;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
+
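The raw_ variants keep this primitive on the real hardware interrupt flag,
bypassing the instrumented (and, on RT, potentially substituted) plain helpers;
that inference is not spelled out in the patch itself. An illustrative caller,
with my_counter and my_counter_add as hypothetical names:

        static DEFINE_PER_CPU(unsigned long, my_counter);

        static void my_counter_add(unsigned long n)
        {
                unsigned long *p = &get_cpu_var(my_counter); /* pins the cpu */
                unsigned long old;

                do {
                        old = *p;
                } while (cmpxchg_local(p, old, old + n) != old);
                put_cpu_var(my_counter);
        }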
+Index: linux-2.6/drivers/char/random.c
===================================================================
---- linux-2.6.orig/arch/parisc/mm/fault.c
-+++ linux-2.6/arch/parisc/mm/fault.c
-@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
- unsigned long acc_type;
- int fault;
+--- linux-2.6.orig/drivers/char/random.c
++++ linux-2.6/drivers/char/random.c
+@@ -433,7 +433,7 @@ static struct entropy_store input_pool =
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ .pool = input_pool_data
+ };
+
+@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
+ .pool = blocking_pool_data
+ };
+
+@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
+ .pool = nonblocking_pool_data
+ };
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+@@ -633,8 +633,11 @@ static void add_timer_randomness(struct
+ preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+- goto out;
++ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
++ preempt_enable();
++ return;
++ }
++ preempt_enable();
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/powerpc/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/mm/fault.c
-+++ linux-2.6/arch/powerpc/mm/fault.c
-@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+@@ -676,8 +679,6 @@ static void add_timer_randomness(struct
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
- #endif
+-out:
+- preempt_enable();
+ }
-- if (in_atomic() || mm == NULL) {
-+ if (!mm || pagefault_disabled()) {
- if (!user_mode(regs))
- return SIGSEGV;
- /* in_atomic() in user mode is really bad,
-Index: linux-2.6/arch/s390/mm/fault.c
+ void add_input_randomness(unsigned int type, unsigned int code,
+Index: linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
===================================================================
---- linux-2.6.orig/arch/s390/mm/fault.c
-+++ linux-2.6/arch/s390/mm/fault.c
-@@ -295,7 +295,8 @@ static inline int do_exception(struct pt
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
-- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(trans_exc_code) ||
-+ !mm || pagefault_disabled()))
- goto out;
-
- address = trans_exc_code & __FAIL_ADDR_MASK;
-@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
-- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(trans_exc_code) ||
-+ !mm || pagefault_disabled()))
- goto no_context;
-
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/score/mm/fault.c
+--- linux-2.6.orig/arch/arm/mach-at91/at91rm9200_time.c
++++ linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
+@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ last_crtr = read_CRTR();
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ /* PIT for periodic irqs; fixed rate of 1/HZ */
+ irqmask = AT91_ST_PITS;
+ at91_sys_write(AT91_ST_PIMR, LATCH);
+@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
++ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ case CLOCK_EVT_MODE_RESUME:
+ irqmask = 0;
+ break;
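
Both AT91 timer conversions (this one and the at91sam926x hunk below)
follow the same pattern: rather than leaving the timer irqaction
installed permanently, the handler is registered with setup_irq() when
the clockevent enters periodic mode and removed with remove_irq() on
shutdown/unused. Schematically (a sketch; TIMER_IRQ, timer_irqaction
and the helpers are placeholders for the driver's own symbols):

	static void clkevt_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
	{
		switch (mode) {
		case CLOCK_EVT_MODE_PERIODIC:
			/* install the handler only while it is needed */
			setup_irq(TIMER_IRQ, &timer_irqaction);
			program_periodic_rate();	/* hypothetical */
			break;
		case CLOCK_EVT_MODE_SHUTDOWN:
		case CLOCK_EVT_MODE_UNUSED:
			remove_irq(TIMER_IRQ, &timer_irqaction);
			/* fall through and mask the interrupt source */
		case CLOCK_EVT_MODE_RESUME:
			mask_timer_irq();		/* hypothetical */
			break;
		default:
			break;
		}
	}
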
+Index: linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
===================================================================
---- linux-2.6.orig/arch/score/mm/fault.c
-+++ linux-2.6/arch/score/mm/fault.c
-@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
+--- linux-2.6.orig/arch/arm/mach-at91/at91sam926x_time.c
++++ linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
+@@ -54,7 +54,7 @@ static struct clocksource pit_clk = {
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/sh/mm/fault_32.c
+-
++static struct irqaction at91sam926x_pit_irq;
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo
+ {
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ /* Set up irq handler */
++ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
++
+ /* update clocksource counter */
+ pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
+ at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq, leaving the clocksource active */
+ at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
++ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+Index: linux-2.6/drivers/clocksource/tcb_clksrc.c
===================================================================
---- linux-2.6.orig/arch/sh/mm/fault_32.c
-+++ linux-2.6/arch/sh/mm/fault_32.c
-@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(
- * If we're in an interrupt, have no user context or are running
- * in an atomic region then we must not take the fault:
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+--- linux-2.6.orig/drivers/clocksource/tcb_clksrc.c
++++ linux-2.6/drivers/clocksource/tcb_clksrc.c
+@@ -21,8 +21,7 @@
+ * resolution better than 200 nsec).
+ *
+ * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
++ * source, used in either periodic or oneshot mode.
+ *
+ * A boot clocksource and clockevent source are also currently needed,
+ * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -68,6 +67,7 @@ static struct clocksource clksrc = {
+ struct tc_clkevt_device {
+ struct clock_event_device clkevt;
+ struct clk *clk;
++ u32 freq;
+ void __iomem *regs;
+ };
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/sparc/mm/fault_32.c
-===================================================================
---- linux-2.6.orig/arch/sparc/mm/fault_32.c
-+++ linux-2.6/arch/sparc/mm/fault_32.c
-@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-- goto no_context;
-+ if (!mm || pagefault_disabled())
-+ goto no_context;
+@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl
+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
-Index: linux-2.6/arch/sparc/mm/fault_64.c
-===================================================================
---- linux-2.6.orig/arch/sparc/mm/fault_64.c
-+++ linux-2.6/arch/sparc/mm/fault_64.c
-@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto intr_or_no_mm;
+ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_PERIODIC:
+ clk_enable(tcd->clk);
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
-Index: linux-2.6/arch/tile/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/tile/mm/fault.c
-+++ linux-2.6/arch/tile/mm/fault.c
-@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r
- * If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault.
- */
-- if (in_atomic() || !mm) {
-+ if (!mm || pagefault_disabled()) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
-Index: linux-2.6/arch/um/kernel/trap.c
-===================================================================
---- linux-2.6.orig/arch/um/kernel/trap.c
-+++ linux-2.6/arch/um/kernel/trap.c
-@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr
- * If the fault was during atomic operation, don't take the fault, just
- * fail.
- */
-- if (in_atomic())
-+ if (!mm || pagefault_disabled())
- goto out_nosemaphore;
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ/2)/HZ,
++ tcaddr + ATMEL_TC_REG(2, RC));
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/x86/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/x86/mm/fault.c
-+++ linux-2.6/arch/x86/mm/fault.c
-@@ -1065,7 +1065,7 @@ do_page_fault(struct pt_regs *regs, unsi
- * If we're in an interrupt, have no user context or are running
- * in an atomic region then we must not take the fault:
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(!mm || pagefault_disabled())) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
-Index: linux-2.6/arch/xtensa/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/xtensa/mm/fault.c
-+++ linux-2.6/arch/xtensa/mm/fault.c
-@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
- /* If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm) {
-+ if (!mm || pagefault_disabled()) {
- bad_page_fault(regs, address, SIGSEGV);
- return;
- }
-Index: linux-2.6/mm/filemap.c
-===================================================================
---- linux-2.6.orig/mm/filemap.c
-+++ linux-2.6/mm/filemap.c
-@@ -2040,7 +2040,7 @@ size_t iov_iter_copy_from_user_atomic(st
- char *kaddr;
- size_t copied;
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_ONESHOT:
+ clk_enable(tcd->clk);
-- BUG_ON(!in_atomic());
-+ BUG_ON(!pagefault_disabled());
- kaddr = kmap_atomic(page, KM_USER0);
- if (likely(i->nr_segs == 1)) {
- int left;
-Index: linux-2.6/arch/x86/mm/highmem_32.c
-===================================================================
---- linux-2.6.orig/arch/x86/mm/highmem_32.c
-+++ linux-2.6/arch/x86/mm/highmem_32.c
-@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- BUG_ON(!pte_none(*(kmap_pte-idx)));
-+ WARN_ON(!pte_none(*(kmap_pte-idx)));
- set_pte(kmap_pte-idx, mk_pte(page, prot));
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt =
+ .features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ /* Should be lower than at91rm9200's system timer */
+ .rating = 125,
++#else
++ .rating = 200,
++#endif
+ .set_next_event = tc_next_event,
+ .set_mode = tc_mode,
+ },
+@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = {
+ .handler = ch2_irq,
+ };
- return (void *)vaddr;
-Index: linux-2.6/include/linux/kernel.h
-===================================================================
---- linux-2.6.orig/include/linux/kernel.h
-+++ linux-2.6/include/linux/kernel.h
-@@ -346,7 +346,7 @@ extern enum system_states {
- SYSTEM_HALT,
- SYSTEM_POWER_OFF,
- SYSTEM_RESTART,
-- SYSTEM_SUSPEND_DISK,
-+ SYSTEM_SUSPEND,
- } system_state;
+-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++ unsigned divisor = atmel_tc_divisors[divisor_idx];
+ struct clk *t2_clk = tc->clk[2];
+ int irq = tc->irq[2];
- #define TAINT_PROPRIETARY_MODULE 0
-Index: linux-2.6/kernel/power/hibernate.c
-===================================================================
---- linux-2.6.orig/kernel/power/hibernate.c
-+++ linux-2.6/kernel/power/hibernate.c
-@@ -278,6 +278,8 @@ static int create_image(int platform_mod
+@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc
+ clkevt.clk = t2_clk;
+ tc_irqaction.dev_id = &clkevt;
- local_irq_disable();
+- timer_clock = clk32k_divisor_idx;
++ timer_clock = divisor_idx;
-+ system_state = SYSTEM_SUSPEND;
+- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
+- clkevt.clkevt.max_delta_ns
+- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
++ if (!divisor)
++ clkevt.freq = 32768;
++ else
++ clkevt.freq = clk_get_rate(t2_clk)/divisor;
+
- error = syscore_suspend();
- if (error) {
- printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -305,6 +307,7 @@ static int create_image(int platform_mod
- syscore_resume();
++ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
++ clkevt.clkevt.shift);
++ clkevt.clkevt.max_delta_ns =
++ clockevent_delta2ns(0xffff, &clkevt.clkevt);
+ clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
+ clkevt.clkevt.cpumask = cpumask_of(0);
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
+@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void)
+ clocksource_register(&clksrc);
- Enable_cpus:
-@@ -412,6 +415,7 @@ static int resume_target_kernel(bool pla
- goto Enable_cpus;
+ /* channel 2: periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ setup_clkevents(tc, clk32k_divisor_idx);
+-
++#else
++ setup_clkevents(tc, best_divisor_idx);
++#endif
+ return 0;
+ }
+ arch_initcall(tcb_clksrc_init);
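
The core of the tcb_clksrc.c change is that the clockevent no longer
assumes the 32 KiHz slow clock: setup_clkevents() records the actual
timer frequency in clkevt.freq (32768, or the divided master clock on
PREEMPT_RT_FULL) and derives mult and the RC reload value from it.
The arithmetic is the standard clockevent setup; roughly (a sketch,
with a made-up helper name):

	/* freq is timer ticks per second, shift the fixed-point shift */
	static void setup_clkevt_math(struct clock_event_device *evt, u32 freq)
	{
		/* mult converts ns to ticks: ticks = (ns * mult) >> shift */
		evt->mult = div_sc(freq, NSEC_PER_SEC, evt->shift);
		/* the 16-bit counter bounds the longest programmable delay */
		evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
		evt->min_delta_ns = clockevent_delta2ns(1, evt) + 1;
		/*
		 * periodic reload, one event per 1/HZ seconds, rounded
		 * to nearest: (freq + HZ/2) / HZ -- as written in the hunk.
		 */
	}
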
+Index: linux-2.6/drivers/misc/Kconfig
+===================================================================
+--- linux-2.6.orig/drivers/misc/Kconfig
++++ linux-2.6/drivers/misc/Kconfig
+@@ -82,6 +82,7 @@ config AB8500_PWM
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC
+ are combined to make a single 32-bit timer.
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
- error = syscore_suspend();
- if (error)
-@@ -445,6 +449,7 @@ static int resume_target_kernel(bool pla
- syscore_resume();
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++      Select this to use the 32 KiHz clock as the base clock rate
++      for the TC block clock event source.
++
+ config IBM_ASM
+ tristate "Device driver for IBM RSA service processor"
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
+@@ -133,6 +141,35 @@ config IBM_ASM
+ for information on the specific driver level and support statement
+ for your IBM server.
- Enable_cpus:
-@@ -524,6 +529,7 @@ int hibernation_platform_enter(void)
- goto Platform_finish;
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++      system firmware external to Linux. We do this by periodically
++      using stop_machine to grab all available CPUs and looking
++      for unexplained gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++      Interrupts) on x86 systems, though it is not x86 specific. By
++      default, we use a sample window of 1 second, during which we
++      sample for 0.5 seconds. If an SMI or similar event occurs
++      during that time, it is recorded into an 8K-sample global
++      ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++      environment where high latencies are a concern, since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held.
++
++ If unsure, say N
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+Index: linux-2.6/drivers/net/tulip/tulip_core.c
+===================================================================
+--- linux-2.6.orig/drivers/net/tulip/tulip_core.c
++++ linux-2.6/drivers/net/tulip/tulip_core.c
+@@ -1951,6 +1951,7 @@ static void __devexit tulip_remove_one (
+ pci_iounmap(pdev, tp->base_addr);
+ free_netdev (dev);
+ pci_release_regions (pdev);
++ pci_disable_device (pdev);
+ pci_set_drvdata (pdev, NULL);
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
- syscore_suspend();
- if (pm_wakeup_pending()) {
- error = -EAGAIN;
-@@ -536,6 +542,7 @@ int hibernation_platform_enter(void)
+ /* pci_power_off (pdev, -1); */
+Index: linux-2.6/drivers/net/8139too.c
+===================================================================
+--- linux-2.6.orig/drivers/net/8139too.c
++++ linux-2.6/drivers/net/8139too.c
+@@ -2173,7 +2173,11 @@ static irqreturn_t rtl8139_interrupt (in
+ */
+ static void rtl8139_poll_controller(struct net_device *dev)
+ {
+- disable_irq(dev->irq);
++ /*
++ * use _nosync() variant - might be used by netconsole
++ * from atomic contexts:
++ */
++ disable_irq_nosync(dev->irq);
+ rtl8139_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
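
The 8139too fix matters because netpoll/netconsole may invoke the poll
controller from atomic context: disable_irq() synchronizes with (and
may sleep waiting for) a running handler, while disable_irq_nosync()
only masks the line and returns immediately. In effect (a sketch, with
a hypothetical handler name):

	static void nic_poll_controller(struct net_device *dev)
	{
		disable_irq_nosync(dev->irq);	/* mask only: atomic-safe */
		nic_interrupt(dev->irq, dev);	/* call handler directly */
		enable_irq(dev->irq);
	}

	/*
	 * By contrast, disable_irq(irq) is roughly
	 *	disable_irq_nosync(irq); synchronize_irq(irq);
	 * and synchronize_irq() may sleep, so it is forbidden here.
	 */
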
+Index: linux-2.6/drivers/net/ehea/ehea_main.c
+===================================================================
+--- linux-2.6.orig/drivers/net/ehea/ehea_main.c
++++ linux-2.6/drivers/net/ehea/ehea_main.c
+@@ -1369,7 +1369,7 @@ static int ehea_reg_interrupts(struct ne
+ "%s-queue%d", dev->name, i);
+ ret = ibmebus_request_irq(pr->eq->attr.ist1,
+ ehea_recv_irq_handler,
+- IRQF_DISABLED, pr->int_send_name,
++ IRQF_NO_THREAD, pr->int_send_name,
+ pr);
+ if (ret) {
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
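
IRQF_DISABLED was close to meaningless here, whereas IRQF_NO_THREAD
tells an RT kernel (where handlers are threaded by default) to keep
this handler in hard interrupt context. A minimal request sketch
(handler and names are illustrative, not from the driver):

	static irqreturn_t my_rx_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;	/* placeholder */
	}

	static int register_rx_irq(unsigned int irq, void *dev)
	{
		/* IRQF_NO_THREAD: never run this handler as a kthread */
		return request_irq(irq, my_rx_handler, IRQF_NO_THREAD,
				   "my-rx-queue", dev);
	}
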
+Index: linux-2.6/drivers/net/arm/at91_ether.c
+===================================================================
+--- linux-2.6.orig/drivers/net/arm/at91_ether.c
++++ linux-2.6/drivers/net/arm/at91_ether.c
+@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int phy;
++ unsigned long flags;
+
++ spin_lock_irqsave(&lp->lock, flags);
+ /*
+ * This handler is triggered on both edges, but the PHY chips expect
+ * level-triggering. We therefore have to check if the PHY actually has
+@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
+
+ done:
+ disable_mdi();
++ spin_unlock_irqrestore(&lp->lock, flags);
- Power_up:
- syscore_resume();
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
- enable_nonboot_cpus();
+ return IRQ_HANDLED;
+ }
+@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
-Index: linux-2.6/kernel/power/suspend.c
++ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ update_linkspeed(dev, 1);
+ disable_mdi();
++ spin_unlock_irq(&lp->lock);
+
+ mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+ }
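
The at91_ether change serializes the MDI register accesses: the PHY
interrupt handler and the periodic link-check timer both poke
enable_mdi()/disable_mdi(), so both now take lp->lock. Note the
asymmetry: the hard IRQ handler uses the irqsave form because it
cannot assume the interrupt state at entry, while the timer callback
runs with interrupts enabled and can use spin_lock_irq(). As a sketch
(lp and touch_mdi_registers() stand in for the driver's state):

	static irqreturn_t phy_irq(int irq, void *dev_id)
	{
		unsigned long flags;

		spin_lock_irqsave(&lp->lock, flags);
		touch_mdi_registers();		/* hypothetical */
		spin_unlock_irqrestore(&lp->lock, flags);
		return IRQ_HANDLED;
	}

	static void link_timer(unsigned long data)
	{
		spin_lock_irq(&lp->lock);	/* timer runs with IRQs on */
		touch_mdi_registers();
		spin_unlock_irq(&lp->lock);
	}
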
+Index: linux-2.6/include/linux/preempt.h
===================================================================
---- linux-2.6.orig/kernel/power/suspend.c
-+++ linux-2.6/kernel/power/suspend.c
-@@ -163,6 +163,8 @@ static int suspend_enter(suspend_state_t
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
+--- linux-2.6.orig/include/linux/preempt.h
++++ linux-2.6/include/linux/preempt.h
+@@ -33,12 +33,18 @@ do { \
+ barrier(); \
+ } while (0)
-+ system_state = SYSTEM_SUSPEND;
-+
- error = syscore_suspend();
- if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
-@@ -172,6 +174,8 @@ static int suspend_enter(suspend_state_t
- syscore_resume();
- }
+-#define preempt_enable_no_resched() \
++#define __preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+ dec_preempt_count(); \
+ } while (0)
-+ system_state = SYSTEM_RUNNING;
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched() __preempt_enable_no_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++#endif
+
- arch_suspend_enable_irqs();
- BUG_ON(irqs_disabled());
-
-Index: linux-2.6/drivers/of/base.c
-===================================================================
---- linux-2.6.orig/drivers/of/base.c
-+++ linux-2.6/drivers/of/base.c
-@@ -29,7 +29,7 @@ struct device_node *of_chosen;
- /* use when traversing tree through the allnext, child, sibling,
- * or parent members of struct device_node.
- */
--DEFINE_RWLOCK(devtree_lock);
-+DEFINE_RAW_SPINLOCK(devtree_lock);
+ #define preempt_check_resched() \
+ do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+@@ -47,7 +53,7 @@ do { \
- int of_n_addr_cells(struct device_node *np)
- {
-@@ -138,16 +138,14 @@ void of_node_put(struct device_node *nod
- EXPORT_SYMBOL(of_node_put);
- #endif /* !CONFIG_SPARC */
+ #define preempt_enable() \
+ do { \
+- preempt_enable_no_resched(); \
++ __preempt_enable_no_resched(); \
+ barrier(); \
+ preempt_check_resched(); \
+ } while (0)
+@@ -83,6 +89,7 @@ do { \
+ #else
--struct property *of_find_property(const struct device_node *np,
-- const char *name,
-- int *lenp)
-+static struct property *__of_find_property(const struct device_node *np,
-+ const char *name, int *lenp)
- {
- struct property *pp;
+ #define preempt_disable() do { } while (0)
++#define __preempt_enable_no_resched() do { } while (0)
+ #define preempt_enable_no_resched() do { } while (0)
+ #define preempt_enable() do { } while (0)
+ #define preempt_check_resched() do { } while (0)
+@@ -93,6 +100,27 @@ do { \
- if (!np)
- return NULL;
+ #endif
-- read_lock(&devtree_lock);
- for (pp = np->properties; pp != 0; pp = pp->next) {
- if (of_prop_cmp(pp->name, name) == 0) {
- if (lenp != 0)
-@@ -155,7 +153,20 @@ struct property *of_find_property(const
- break;
- }
- }
-- read_unlock(&devtree_lock);
-+
-+ return pp;
-+}
-+
-+struct property *of_find_property(const struct device_node *np,
-+ const char *name,
-+ int *lenp)
-+{
-+ struct property *pp;
-+ unsigned long flags;
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() do { } while (0)
++# define preempt_enable_nort() do { } while (0)
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++# endif /* CONFIG_SMP */
++#else
++# define preempt_disable_rt() do { } while (0)
++# define preempt_enable_rt() do { } while (0)
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
++#endif
+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ pp = __of_find_property(np, name, lenp);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
- return pp;
- }
-@@ -173,13 +184,13 @@ struct device_node *of_find_all_nodes(st
+ struct preempt_notifier;
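
The new macro families encode intent: preempt_disable_rt() is a real
preempt_disable() only on PREEMPT_RT_FULL, preempt_disable_nort() only
on mainline, and migrate_disable() pins the task to its current CPU on
RT while leaving it preemptible. Typical use in shared code (a sketch,
not from the patch; queue_local_work() is made up):

	static void poke_this_cpu(void)
	{
		int cpu;

		migrate_disable();	/* RT: stay on this CPU, stay preemptible */
		cpu = smp_processor_id();	/* stable until migrate_enable() */
		queue_local_work(cpu);		/* hypothetical per-CPU op */
		migrate_enable();
	}

On mainline kernels migrate_disable() falls back to preempt_disable(),
so such a section must still avoid sleeping unless it is RT-only code.
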
+Index: linux-2.6/include/linux/uaccess.h
+===================================================================
+--- linux-2.6.orig/include/linux/uaccess.h
++++ linux-2.6/include/linux/uaccess.h
+@@ -6,38 +6,37 @@
+
+ /*
+ * These routines enable/disable the pagefault handler in that
+- * it will not take any locks and go straight to the fixup table.
+- *
+- * They have great resemblance to the preempt_disable/enable calls
+- * and in fact they are identical; this is because currently there is
+- * no other way to make the pagefault handlers do this. So we do
+- * disable preemption but we don't necessarily care about that.
++ * it will not take any MM locks and go straight to the fixup table.
+ */
+-static inline void pagefault_disable(void)
++static inline void raw_pagefault_disable(void)
{
- struct device_node *np;
+ inc_preempt_count();
+- /*
+- * make sure to have issued the store before a pagefault
+- * can hit.
+- */
+ barrier();
+ }
-- read_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- np = prev ? prev->allnext : allnodes;
- for (; np != NULL; np = np->allnext)
- if (of_node_get(np))
- break;
- of_node_put(prev);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- return np;
+-static inline void pagefault_enable(void)
++static inline void raw_pagefault_enable(void)
+ {
+- /*
+- * make sure to issue those last loads/stores before enabling
+- * the pagefault handler again.
+- */
+ barrier();
+ dec_preempt_count();
+- /*
+- * make sure we do..
+- */
+ barrier();
+ preempt_check_resched();
}
- EXPORT_SYMBOL(of_find_all_nodes);
-@@ -188,8 +199,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
- * Find a property with a given name for a given node
- * and return the value.
- */
-+static const void *__of_get_property(const struct device_node *np,
-+ const char *name, int *lenp)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline void pagefault_disable(void)
+{
-+ struct property *pp = __of_find_property(np, name, lenp);
++ raw_pagefault_disable();
++}
+
-+ return pp ? pp->value : NULL;
++static inline void pagefault_enable(void)
++{
++ raw_pagefault_enable();
+}
++#else
++extern void pagefault_disable(void);
++extern void pagefault_enable(void);
++#endif
+
-+/*
-+ * Find a property with a given name for a given node
-+ * and return the value.
-+ */
- const void *of_get_property(const struct device_node *np, const char *name,
-- int *lenp)
-+ int *lenp)
- {
- struct property *pp = of_find_property(np, name, lenp);
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
-@@ -200,13 +223,13 @@ EXPORT_SYMBOL(of_get_property);
- /** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
- */
--int of_device_is_compatible(const struct device_node *device,
-- const char *compat)
-+static int __of_device_is_compatible(const struct device_node *device,
-+ const char *compat)
- {
- const char* cp;
-- int cplen, l;
-+ int uninitialized_var(cplen), l;
+ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+- pagefault_disable(); \
++ raw_pagefault_disable(); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ raw_pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
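
uaccess.h now separates raw_pagefault_disable(), which keeps the old
"bump the preempt count" behaviour, from pagefault_disable(), which on
PREEMPT_RT_FULL becomes a real function so that fault handlers can
test pagefault_disabled() instead of in_atomic() -- exactly the
substitution the arch fault-handler hunks below make, file by file.
A plausible shape for the RT side (a sketch; the real implementation
lives elsewhere in the patch, and the task_struct field is assumed):

	void pagefault_disable(void)
	{
		current->pagefault_disabled++;	/* assumed per-task counter */
		barrier();	/* order the store before faulting accesses */
	}

	void pagefault_enable(void)
	{
		barrier();	/* faulting accesses complete before re-enable */
		current->pagefault_disabled--;
	}

	/* the predicate the fault handlers below test: */
	#define pagefault_disabled()	(current->pagefault_disabled != 0)
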
+Index: linux-2.6/arch/alpha/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/alpha/mm/fault.c
++++ linux-2.6/arch/alpha/mm/fault.c
+@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- cp = of_get_property(device, "compatible", &cplen);
-+ cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
-@@ -219,6 +242,21 @@ int of_device_is_compatible(const struct
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+Index: linux-2.6/arch/arm/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/fault.c
++++ linux-2.6/arch/arm/mm/fault.c
+@@ -289,7 +289,7 @@ do_page_fault(unsigned long addr, unsign
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- return 0;
- }
-+
-+/** Checks if the given "compat" string matches one of the strings in
-+ * the device's "compatible" property
-+ */
-+int of_device_is_compatible(const struct device_node *device,
-+ const char *compat)
-+{
-+ unsigned long flags;
-+ int res;
-+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ res = __of_device_is_compatible(device, compat);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-+ return res;
-+}
- EXPORT_SYMBOL(of_device_is_compatible);
+ /*
+Index: linux-2.6/arch/avr32/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/avr32/mm/fault.c
++++ linux-2.6/arch/avr32/mm/fault.c
+@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ goto no_context;
- /**
-@@ -278,13 +316,14 @@ EXPORT_SYMBOL(of_device_is_available);
- struct device_node *of_get_parent(const struct device_node *node)
- {
- struct device_node *np;
-+ unsigned long flags;
+ local_irq_enable();
+Index: linux-2.6/arch/cris/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/cris/mm/fault.c
++++ linux-2.6/arch/cris/mm/fault.c
+@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str
+ * user context, we must not take the fault.
+ */
- if (!node)
- return NULL;
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = of_node_get(node->parent);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_get_parent);
-@@ -303,14 +342,15 @@ EXPORT_SYMBOL(of_get_parent);
- struct device_node *of_get_next_parent(struct device_node *node)
- {
- struct device_node *parent;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/frv/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/frv/mm/fault.c
++++ linux-2.6/arch/frv/mm/fault.c
+@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- if (!node)
- return NULL;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/ia64/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/mm/fault.c
++++ linux-2.6/arch/ia64/mm/fault.c
+@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- parent = of_node_get(node->parent);
- of_node_put(node);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return parent;
- }
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+Index: linux-2.6/arch/m32r/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/m32r/mm/fault.c
++++ linux-2.6/arch/m32r/mm/fault.c
+@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
-@@ -326,14 +366,15 @@ struct device_node *of_get_next_child(co
- struct device_node *prev)
- {
- struct device_node *next;
-+ unsigned long flags;
+ /* When running in the kernel we expect faults to occur only to
+Index: linux-2.6/arch/m68k/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/mm/fault.c
++++ linux-2.6/arch/m68k/mm/fault.c
+@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = prev ? prev->sibling : node->child;
- for (; next; next = next->sibling)
- if (of_node_get(next))
- break;
- of_node_put(prev);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return next;
- }
- EXPORT_SYMBOL(of_get_next_child);
-@@ -348,14 +389,15 @@ EXPORT_SYMBOL(of_get_next_child);
- struct device_node *of_find_node_by_path(const char *path)
- {
- struct device_node *np = allnodes;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/microblaze/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/microblaze/mm/fault.c
++++ linux-2.6/arch/microblaze/mm/fault.c
+@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- for (; np; np = np->allnext) {
- if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
- && of_node_get(np))
- break;
- }
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_path);
-@@ -375,15 +417,16 @@ struct device_node *of_find_node_by_name
- const char *name)
- {
- struct device_node *np;
-+ unsigned long flags;
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext)
- if (np->name && (of_node_cmp(np->name, name) == 0)
- && of_node_get(np))
- break;
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_name);
-@@ -404,15 +447,16 @@ struct device_node *of_find_node_by_type
- const char *type)
- {
- struct device_node *np;
-+ unsigned long flags;
+Index: linux-2.6/arch/mips/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/mips/mm/fault.c
++++ linux-2.6/arch/mips/mm/fault.c
+@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext)
- if (np->type && (of_node_cmp(np->type, type) == 0)
- && of_node_get(np))
- break;
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_type);
-@@ -435,18 +479,20 @@ struct device_node *of_find_compatible_n
- const char *type, const char *compatible)
- {
- struct device_node *np;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/mn10300/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/mn10300/mm/fault.c
++++ linux-2.6/arch/mn10300/mm/fault.c
+@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/parisc/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/parisc/mm/fault.c
++++ linux-2.6/arch/parisc/mm/fault.c
+@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
+ unsigned long acc_type;
+ int fault;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
-- if (of_device_is_compatible(np, compatible) && of_node_get(np))
-+ if (__of_device_is_compatible(np, compatible) &&
-+ of_node_get(np))
- break;
- }
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_compatible_node);
-@@ -468,8 +514,9 @@ struct device_node *of_find_node_with_pr
- {
- struct device_node *np;
- struct property *pp;
-+ unsigned long flags;
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
- for (pp = np->properties; pp != 0; pp = pp->next) {
-@@ -481,20 +528,14 @@ struct device_node *of_find_node_with_pr
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/powerpc/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/mm/fault.c
++++ linux-2.6/arch/powerpc/mm/fault.c
+@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re
}
- out:
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_with_property);
+ #endif
--/**
-- * of_match_node - Tell if an device_node has a matching of_match structure
-- * @matches: array of of device match structures to search in
-- * @node: the of device structure to match against
-- *
-- * Low level utility function used by device matching.
-- */
--const struct of_device_id *of_match_node(const struct of_device_id *matches,
-- const struct device_node *node)
-+static
-+const struct of_device_id *__of_match_node(const struct of_device_id *matches,
-+ const struct device_node *node)
- {
- if (!matches)
- return NULL;
-@@ -508,14 +549,33 @@ const struct of_device_id *of_match_node
- match &= node->type
- && !strcmp(matches->type, node->type);
- if (matches->compatible[0])
-- match &= of_device_is_compatible(node,
-- matches->compatible);
-+ match &= __of_device_is_compatible(node,
-+ matches->compatible);
- if (match)
- return matches;
- matches++;
- }
- return NULL;
- }
-+
-+/**
-+ * of_match_node - Tell if an device_node has a matching of_match structure
-+ * @matches: array of of device match structures to search in
-+ * @node: the of device structure to match against
-+ *
-+ * Low level utility function used by device matching.
-+ */
-+const struct of_device_id *of_match_node(const struct of_device_id *matches,
-+ const struct device_node *node)
-+{
-+ const struct of_device_id *match;
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ match = __of_match_node(matches, node);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-+ return match;
-+}
- EXPORT_SYMBOL(of_match_node);
+- if (in_atomic() || mm == NULL) {
++ if (!mm || pagefault_disabled()) {
+ if (!user_mode(regs))
+ return SIGSEGV;
+ /* in_atomic() in user mode is really bad,
+Index: linux-2.6/arch/s390/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/s390/mm/fault.c
++++ linux-2.6/arch/s390/mm/fault.c
+@@ -295,7 +295,8 @@ static inline int do_exception(struct pt
+ * user context.
+ */
+ fault = VM_FAULT_BADCONTEXT;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) ||
++ !mm || pagefault_disabled()))
+ goto out;
- /**
-@@ -534,15 +594,16 @@ struct device_node *of_find_matching_nod
- const struct of_device_id *matches)
- {
- struct device_node *np;
-+ unsigned long flags;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
-- if (of_match_node(matches, np) && of_node_get(np))
-+ if (__of_match_node(matches, np) && of_node_get(np))
- break;
- }
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_matching_node);
-@@ -585,12 +646,12 @@ struct device_node *of_find_node_by_phan
- {
- struct device_node *np;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) ||
++ !mm || pagefault_disabled()))
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- for (np = allnodes; np; np = np->allnext)
- if (np->phandle == handle)
- break;
- of_node_get(np);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_phandle);
-@@ -745,18 +806,18 @@ int prom_add_property(struct device_node
- unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/score/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/score/mm/fault.c
++++ linux-2.6/arch/score/mm/fault.c
+@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
- prop->next = NULL;
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
- /* duplicate ! don't insert it */
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return -1;
- }
- next = &(*next)->next;
- }
- *next = prop;
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/sh/mm/fault_32.c
+===================================================================
+--- linux-2.6.orig/arch/sh/mm/fault_32.c
++++ linux-2.6/arch/sh/mm/fault_32.c
+@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- #ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
-@@ -781,7 +842,7 @@ int prom_remove_property(struct device_n
- unsigned long flags;
- int found = 0;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/sparc/mm/fault_32.c
+===================================================================
+--- linux-2.6.orig/arch/sparc/mm/fault_32.c
++++ linux-2.6/arch/sparc/mm/fault_32.c
+@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
+- goto no_context;
++ if (!mm || pagefault_disabled())
++ goto no_context;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
+Index: linux-2.6/arch/sparc/mm/fault_64.c
+===================================================================
+--- linux-2.6.orig/arch/sparc/mm/fault_64.c
++++ linux-2.6/arch/sparc/mm/fault_64.c
+@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto intr_or_no_mm;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+Index: linux-2.6/arch/tile/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/tile/mm/fault.c
++++ linux-2.6/arch/tile/mm/fault.c
+@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault.
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ vma = NULL; /* happy compiler */
+ goto bad_area_nosemaphore;
+ }
+Index: linux-2.6/arch/um/kernel/trap.c
+===================================================================
+--- linux-2.6.orig/arch/um/kernel/trap.c
++++ linux-2.6/arch/um/kernel/trap.c
+@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr
+ * If the fault was during atomic operation, don't take the fault, just
+ * fail.
+ */
+- if (in_atomic())
++ if (!mm || pagefault_disabled())
+ goto out_nosemaphore;
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == prop) {
-@@ -794,7 +855,7 @@ int prom_remove_property(struct device_n
- }
- next = &(*next)->next;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/x86/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/x86/mm/fault.c
++++ linux-2.6/arch/x86/mm/fault.c
+@@ -1065,7 +1065,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+Index: linux-2.6/arch/xtensa/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/xtensa/mm/fault.c
++++ linux-2.6/arch/xtensa/mm/fault.c
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+Index: linux-2.6/mm/filemap.c
+===================================================================
+--- linux-2.6.orig/mm/filemap.c
++++ linux-2.6/mm/filemap.c
+@@ -2040,7 +2040,7 @@ size_t iov_iter_copy_from_user_atomic(st
+ char *kaddr;
+ size_t copied;
- if (!found)
- return -ENODEV;
-@@ -824,7 +885,7 @@ int prom_update_property(struct device_n
- unsigned long flags;
- int found = 0;
+- BUG_ON(!in_atomic());
++ BUG_ON(!pagefault_disabled());
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+Index: linux-2.6/arch/x86/mm/highmem_32.c
+===================================================================
+--- linux-2.6.orig/arch/x86/mm/highmem_32.c
++++ linux-2.6/arch/x86/mm/highmem_32.c
+@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
++ WARN_ON(!pte_none(*(kmap_pte-idx)));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == oldprop) {
-@@ -838,7 +899,7 @@ int prom_update_property(struct device_n
- }
- next = &(*next)->next;
- }
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return (void *)vaddr;
+Index: linux-2.6/include/linux/kernel.h
+===================================================================
+--- linux-2.6.orig/include/linux/kernel.h
++++ linux-2.6/include/linux/kernel.h
+@@ -346,7 +346,7 @@ extern enum system_states {
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
+- SYSTEM_SUSPEND_DISK,
++ SYSTEM_SUSPEND,
+ } system_state;
- if (!found)
- return -ENODEV;
-@@ -868,12 +929,12 @@ void of_attach_node(struct device_node *
- {
- unsigned long flags;
+ #define TAINT_PROPRIETARY_MODULE 0
+Index: linux-2.6/kernel/power/hibernate.c
+===================================================================
+--- linux-2.6.orig/kernel/power/hibernate.c
++++ linux-2.6/kernel/power/hibernate.c
+@@ -278,6 +278,8 @@ static int create_image(int platform_mod
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np->sibling = np->parent->child;
- np->allnext = allnodes;
- np->parent->child = np;
- allnodes = np;
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- }
+ local_irq_disable();
- /**
-@@ -887,7 +948,7 @@ void of_detach_node(struct device_node *
- struct device_node *parent;
- unsigned long flags;
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (error) {
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -305,6 +307,7 @@ static int create_image(int platform_mod
+ syscore_resume();
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
- parent = np->parent;
- if (!parent)
-@@ -918,7 +979,7 @@ void of_detach_node(struct device_node *
- of_node_set_flag(np, OF_DETACHED);
+ Enable_cpus:
+@@ -412,6 +415,7 @@ static int resume_target_kernel(bool pla
+ goto Enable_cpus;
- out_unlock:
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- }
- #endif /* defined(CONFIG_OF_DYNAMIC) */
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
-Index: linux-2.6/arch/sparc/kernel/prom_common.c
-===================================================================
---- linux-2.6.orig/arch/sparc/kernel/prom_common.c
-+++ linux-2.6/arch/sparc/kernel/prom_common.c
-@@ -67,7 +67,7 @@ int of_set_property(struct device_node *
- err = -ENODEV;
+ error = syscore_suspend();
+ if (error)
+@@ -445,6 +449,7 @@ static int resume_target_kernel(bool pla
+ syscore_resume();
- mutex_lock(&of_set_property_mutex);
-- write_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- prevp = &dp->properties;
- while (*prevp) {
- struct property *prop = *prevp;
-@@ -94,7 +94,7 @@ int of_set_property(struct device_node *
- }
- prevp = &(*prevp)->next;
- }
-- write_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- mutex_unlock(&of_set_property_mutex);
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
- /* XXX Upate procfs if necessary... */
-Index: linux-2.6/include/linux/of.h
-===================================================================
---- linux-2.6.orig/include/linux/of.h
-+++ linux-2.6/include/linux/of.h
-@@ -68,7 +68,7 @@ struct device_node {
- /* Pointer for first entry in chain of all nodes. */
- extern struct device_node *allnodes;
- extern struct device_node *of_chosen;
--extern rwlock_t devtree_lock;
-+extern raw_spinlock_t devtree_lock;
+ Enable_cpus:
+@@ -524,6 +529,7 @@ int hibernation_platform_enter(void)
+ goto Platform_finish;
- static inline bool of_have_populated_dt(void)
- {
-Index: linux-2.6/include/linux/list.h
-===================================================================
---- linux-2.6.orig/include/linux/list.h
-+++ linux-2.6/include/linux/list.h
-@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
- list_entry((ptr)->next, type, member)
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ syscore_suspend();
+ if (pm_wakeup_pending()) {
+ error = -EAGAIN;
+@@ -536,6 +542,7 @@ int hibernation_platform_enter(void)
- /**
-+ * list_last_entry - get the last element from a list
-+ * @ptr: the list head to take the element from.
-+ * @type: the type of the struct this is embedded in.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Note, that list is expected to be not empty.
-+ */
-+#define list_last_entry(ptr, type, member) \
-+ list_entry((ptr)->prev, type, member)
-+
-+/**
- * list_for_each - iterate over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
-Index: linux-2.6/mm/page_alloc.c
-===================================================================
---- linux-2.6.orig/mm/page_alloc.c
-+++ linux-2.6/mm/page_alloc.c
-@@ -57,6 +57,7 @@
- #include <linux/ftrace_event.h>
- #include <linux/memcontrol.h>
- #include <linux/prefetch.h>
-+#include <linux/locallock.h>
+ Power_up:
+ syscore_resume();
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+ enable_nonboot_cpus();
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
- EXPORT_SYMBOL(nr_online_nodes);
- #endif
+Index: linux-2.6/kernel/power/suspend.c
+===================================================================
+--- linux-2.6.orig/kernel/power/suspend.c
++++ linux-2.6/kernel/power/suspend.c
+@@ -163,6 +163,8 @@ static int suspend_enter(suspend_state_t
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
-+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define cpu_lock_irqsave(cpu, flags) \
-+ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
-+# define cpu_unlock_irqrestore(cpu, flags) \
-+ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
-+#else
-+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
-+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
-+#endif
++ system_state = SYSTEM_SUSPEND;
+
- int page_group_by_mobility_disabled __read_mostly;
+ error = syscore_suspend();
+ if (!error) {
+ if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+@@ -172,6 +174,8 @@ static int suspend_enter(suspend_state_t
+ syscore_resume();
+ }
- static void set_pageblock_migratetype(struct page *page, int migratetype)
-@@ -580,7 +593,7 @@ static inline int free_pages_check(struc
- }
++ system_state = SYSTEM_RUNNING;
++
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
- /*
-- * Frees a number of pages from the PCP lists
-+ * Frees a number of pages which have been collected from the pcp lists.
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- *
-@@ -591,16 +604,42 @@ static inline int free_pages_check(struc
- * pinned" detection logic.
+Index: linux-2.6/drivers/of/base.c
+===================================================================
+--- linux-2.6.orig/drivers/of/base.c
++++ linux-2.6/drivers/of/base.c
+@@ -29,7 +29,7 @@ struct device_node *of_chosen;
+ /* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
*/
- static void free_pcppages_bulk(struct zone *zone, int count,
-- struct per_cpu_pages *pcp)
-+ struct list_head *list)
+-DEFINE_RWLOCK(devtree_lock);
++DEFINE_RAW_SPINLOCK(devtree_lock);
+
+ int of_n_addr_cells(struct device_node *np)
{
-- int migratetype = 0;
-- int batch_free = 0;
- int to_free = count;
-+ unsigned long flags;
+@@ -138,16 +138,14 @@ void of_node_put(struct device_node *nod
+ EXPORT_SYMBOL(of_node_put);
+ #endif /* !CONFIG_SPARC */
-- spin_lock(&zone->lock);
-+ spin_lock_irqsave(&zone->lock, flags);
- zone->all_unreclaimable = 0;
- zone->pages_scanned = 0;
+-struct property *of_find_property(const struct device_node *np,
+- const char *name,
+- int *lenp)
++static struct property *__of_find_property(const struct device_node *np,
++ const char *name, int *lenp)
+ {
+ struct property *pp;
-+ while (!list_empty(list)) {
-+ struct page *page = list_first_entry(list, struct page, lru);
+ if (!np)
+ return NULL;
+
+- read_lock(&devtree_lock);
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+ if (of_prop_cmp(pp->name, name) == 0) {
+ if (lenp != 0)
+@@ -155,7 +153,20 @@ struct property *of_find_property(const
+ break;
+ }
+ }
+- read_unlock(&devtree_lock);
+
-+ /* must delete as __free_one_page list manipulates */
-+ list_del(&page->lru);
-+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-+ __free_one_page(page, zone, 0, page_private(page));
-+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
-+ to_free--;
-+ }
-+ WARN_ON(to_free != 0);
-+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-+ spin_unlock_irqrestore(&zone->lock, flags);
++ return pp;
+}
+
-+/*
-+ * Moves a number of pages from the PCP lists to free list which
-+ * is freed outside of the locked region.
-+ *
-+ * Assumes all pages on list are in same zone, and of same order.
-+ * count is the number of pages to free.
-+ */
-+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
-+ struct list_head *dst)
++struct property *of_find_property(const struct device_node *np,
++ const char *name,
++ int *lenp)
+{
-+ int migratetype = 0, batch_free = 0;
-+
- while (to_free) {
- struct page *page;
- struct list_head *list;
-@@ -616,7 +655,7 @@ static void free_pcppages_bulk(struct zo
- batch_free++;
- if (++migratetype == MIGRATE_PCPTYPES)
- migratetype = 0;
-- list = &pcp->lists[migratetype];
-+ list = &src->lists[migratetype];
- } while (list_empty(list));
-
- /* This is the only non-empty list. Free them all. */
-@@ -624,28 +663,25 @@ static void free_pcppages_bulk(struct zo
- batch_free = to_free;
-
- do {
-- page = list_entry(list->prev, struct page, lru);
-- /* must delete as __free_one_page list manipulates */
-+ page = list_last_entry(list, struct page, lru);
- list_del(&page->lru);
-- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-- __free_one_page(page, zone, 0, page_private(page));
-- trace_mm_page_pcpu_drain(page, 0, page_private(page));
-+ list_add(&page->lru, dst);
- } while (--to_free && --batch_free && !list_empty(list));
- }
-- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-- spin_unlock(&zone->lock);
- }
-
- static void free_one_page(struct zone *zone, struct page *page, int order,
- int migratetype)
- {
-- spin_lock(&zone->lock);
++ struct property *pp;
+ unsigned long flags;
+
-+ spin_lock_irqsave(&zone->lock, flags);
- zone->all_unreclaimable = 0;
- zone->pages_scanned = 0;
-
- __free_one_page(page, zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
-- spin_unlock(&zone->lock);
-+ spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- static bool free_pages_prepare(struct page *page, unsigned int order)
-@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page
- if (!free_pages_prepare(page, order))
- return;
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ pp = __of_find_property(np, name, lenp);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (unlikely(wasMlocked))
- free_page_mlock(page);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, order,
- get_pageblock_migratetype(page));
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
+ return pp;
}
-
- /*
-@@ -1064,16 +1100,18 @@ static int rmqueue_bulk(struct zone *zon
- void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+@@ -173,13 +184,13 @@ struct device_node *of_find_all_nodes(st
{
- unsigned long flags;
-+ LIST_HEAD(dst);
- int to_drain;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (pcp->count >= pcp->batch)
- to_drain = pcp->batch;
- else
- to_drain = pcp->count;
-- free_pcppages_bulk(zone, to_drain, pcp);
-+ isolate_pcp_pages(to_drain, pcp, &dst);
- pcp->count -= to_drain;
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, to_drain, &dst);
- }
- #endif
-
-@@ -1092,16 +1130,21 @@ static void drain_pages(unsigned int cpu
- for_each_populated_zone(zone) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-+ LIST_HEAD(dst);
-+ int count;
-
-- local_irq_save(flags);
-+ cpu_lock_irqsave(cpu, flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
+ struct device_node *np;
- pcp = &pset->pcp;
-- if (pcp->count) {
-- free_pcppages_bulk(zone, pcp->count, pcp);
-+ count = pcp->count;
-+ if (count) {
-+ isolate_pcp_pages(count, pcp, &dst);
- pcp->count = 0;
- }
-- local_irq_restore(flags);
-+ cpu_unlock_irqrestore(cpu, flags);
-+ if (count)
-+ free_pcppages_bulk(zone, count, &dst);
- }
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ np = prev ? prev->allnext : allnodes;
+ for (; np != NULL; np = np->allnext)
+ if (of_node_get(np))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
}
+ EXPORT_SYMBOL(of_find_all_nodes);
+@@ -188,8 +199,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
++static const void *__of_get_property(const struct device_node *np,
++ const char *name, int *lenp)
++{
++ struct property *pp = __of_find_property(np, name, lenp);
++
++ return pp ? pp->value : NULL;
++}
++
++/*
++ * Find a property with a given name for a given node
++ * and return the value.
++ */
+ const void *of_get_property(const struct device_node *np, const char *name,
+- int *lenp)
++ int *lenp)
+ {
+ struct property *pp = of_find_property(np, name, lenp);
-@@ -1118,7 +1161,14 @@ void drain_local_pages(void *arg)
+@@ -200,13 +223,13 @@ EXPORT_SYMBOL(of_get_property);
+ /** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
*/
- void drain_all_pages(void)
+-int of_device_is_compatible(const struct device_node *device,
+- const char *compat)
++static int __of_device_is_compatible(const struct device_node *device,
++ const char *compat)
{
-+#ifndef CONFIG_PREEMPT_RT_BASE
- on_each_cpu(drain_local_pages, NULL, 1);
-+#else
-+ int i;
-+
-+ for_each_online_cpu(i)
-+ drain_pages(i);
-+#endif
+ const char* cp;
+- int cplen, l;
++ int uninitialized_var(cplen), l;
+
+- cp = of_get_property(device, "compatible", &cplen);
++ cp = __of_get_property(device, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+@@ -219,6 +242,21 @@ int of_device_is_compatible(const struct
+
+ return 0;
}
++
++/** Checks if the given "compat" string matches one of the strings in
++ * the device's "compatible" property
++ */
++int of_device_is_compatible(const struct device_node *device,
++ const char *compat)
++{
++ unsigned long flags;
++ int res;
++
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ res = __of_device_is_compatible(device, compat);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
++ return res;
++}
+ EXPORT_SYMBOL(of_device_is_compatible);
- #ifdef CONFIG_HIBERNATION
-@@ -1174,7 +1224,7 @@ void free_hot_cold_page(struct page *pag
+ /**
+@@ -278,13 +316,14 @@ EXPORT_SYMBOL(of_device_is_available);
+ struct device_node *of_get_parent(const struct device_node *node)
+ {
+ struct device_node *np;
++ unsigned long flags;
- migratetype = get_pageblock_migratetype(page);
- set_page_private(page, migratetype);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (unlikely(wasMlocked))
- free_page_mlock(page);
- __count_vm_event(PGFREE);
-@@ -1201,12 +1251,19 @@ void free_hot_cold_page(struct page *pag
- list_add(&page->lru, &pcp->lists[migratetype]);
- pcp->count++;
- if (pcp->count >= pcp->high) {
-- free_pcppages_bulk(zone, pcp->batch, pcp);
-+ LIST_HEAD(dst);
-+ int count;
-+
-+ isolate_pcp_pages(pcp->batch, pcp, &dst);
- pcp->count -= pcp->batch;
-+ count = pcp->batch;
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, count, &dst);
-+ return;
- }
+ if (!node)
+ return NULL;
- out:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = of_node_get(node->parent);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
+ EXPORT_SYMBOL(of_get_parent);
+@@ -303,14 +342,15 @@ EXPORT_SYMBOL(of_get_parent);
+ struct device_node *of_get_next_parent(struct device_node *node)
+ {
+ struct device_node *parent;
++ unsigned long flags;
- /*
-@@ -1301,7 +1358,7 @@ again:
- struct per_cpu_pages *pcp;
- struct list_head *list;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
-@@ -1333,17 +1390,19 @@ again:
- */
- WARN_ON_ONCE(order > 1);
- }
-- spin_lock_irqsave(&zone->lock, flags);
-+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- page = __rmqueue(zone, order, migratetype);
-- spin_unlock(&zone->lock);
-- if (!page)
-+ if (!page) {
-+ spin_unlock(&zone->lock);
- goto failed;
-+ }
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
-+ spin_unlock(&zone->lock);
- }
+ if (!node)
+ return NULL;
- __count_zone_vm_events(PGALLOC, zone, 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ parent = of_node_get(node->parent);
+ of_node_put(node);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return parent;
+ }
- VM_BUG_ON(bad_range(zone, page));
- if (prep_new_page(page, order, gfp_flags))
-@@ -1351,7 +1410,7 @@ again:
- return page;
+@@ -326,14 +366,15 @@ struct device_node *of_get_next_child(co
+ struct device_node *prev)
+ {
+ struct device_node *next;
++ unsigned long flags;
- failed:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- return NULL;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling)
+ if (of_node_get(next))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
}
+ EXPORT_SYMBOL(of_get_next_child);
+@@ -348,14 +389,15 @@ EXPORT_SYMBOL(of_get_next_child);
+ struct device_node *of_find_node_by_path(const char *path)
+ {
+ struct device_node *np = allnodes;
++ unsigned long flags;
-@@ -1912,8 +1971,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
- if (*did_some_progress != COMPACT_SKIPPED) {
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ for (; np; np = np->allnext) {
+ if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
+ && of_node_get(np))
+ break;
+ }
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_path);
+@@ -375,15 +417,16 @@ struct device_node *of_find_node_by_name
+ const char *name)
+ {
+ struct device_node *np;
++ unsigned long flags;
- /* Page migration frees to the PCP lists but we want merging */
-- drain_pages(get_cpu());
-- put_cpu();
-+ drain_pages(get_cpu_light());
-+ put_cpu_light();
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->name && (of_node_cmp(np->name, name) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_name);
+@@ -404,15 +447,16 @@ struct device_node *of_find_node_by_type
+ const char *type)
+ {
+ struct device_node *np;
++ unsigned long flags;
- page = get_page_from_freelist(gfp_mask, nodemask,
- order, zonelist, high_zoneidx,
-@@ -3685,14 +3744,16 @@ static int __zone_pcp_update(void *data)
- for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-+ LIST_HEAD(dst);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->type && (of_node_cmp(np->type, type) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_type);
+@@ -435,18 +479,20 @@ struct device_node *of_find_compatible_n
+ const char *type, const char *compatible)
+ {
+ struct device_node *np;
++ unsigned long flags;
- pset = per_cpu_ptr(zone->pageset, cpu);
- pcp = &pset->pcp;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ if (type
+ && !(np->type && (of_node_cmp(np->type, type) == 0)))
+ continue;
+- if (of_device_is_compatible(np, compatible) && of_node_get(np))
++ if (__of_device_is_compatible(np, compatible) &&
++ of_node_get(np))
+ break;
+ }
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_compatible_node);
+@@ -468,8 +514,9 @@ struct device_node *of_find_node_with_pr
+ {
+ struct device_node *np;
+ struct property *pp;
++ unsigned long flags;
-- local_irq_save(flags);
-- free_pcppages_bulk(zone, pcp->count, pcp);
-+ cpu_lock_irqsave(cpu, flags);
-+ isolate_pcp_pages(pcp->count, pcp, &dst);
-+ free_pcppages_bulk(zone, pcp->count, &dst);
- setup_pageset(pset, batch);
-- local_irq_restore(flags);
-+ cpu_unlock_irqrestore(cpu, flags);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+@@ -481,20 +528,14 @@ struct device_node *of_find_node_with_pr
}
- return 0;
+ out:
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
-@@ -5004,6 +5065,7 @@ static int page_alloc_cpu_notify(struct
- void __init page_alloc_init(void)
+ EXPORT_SYMBOL(of_find_node_with_property);
+
+-/**
+- * of_match_node - Tell if an device_node has a matching of_match structure
+- * @matches: array of of device match structures to search in
+- * @node: the of device structure to match against
+- *
+- * Low level utility function used by device matching.
+- */
+-const struct of_device_id *of_match_node(const struct of_device_id *matches,
+- const struct device_node *node)
++static
++const struct of_device_id *__of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
{
- hotcpu_notifier(page_alloc_cpu_notify, 0);
-+ local_irq_lock_init(pa_lock);
+ if (!matches)
+ return NULL;
+@@ -508,14 +549,33 @@ const struct of_device_id *of_match_node
+ match &= node->type
+ && !strcmp(matches->type, node->type);
+ if (matches->compatible[0])
+- match &= of_device_is_compatible(node,
+- matches->compatible);
++ match &= __of_device_is_compatible(node,
++ matches->compatible);
+ if (match)
+ return matches;
+ matches++;
+ }
+ return NULL;
}
++
++/**
++ * of_match_node - Tell if a device_node has a matching of_match structure
++ * @matches: array of of device match structures to search in
++ * @node: the of device structure to match against
++ *
++ * Low level utility function used by device matching.
++ */
++const struct of_device_id *of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
++{
++ const struct of_device_id *match;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ match = __of_match_node(matches, node);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
++ return match;
++}
+ EXPORT_SYMBOL(of_match_node);
- /*
-Index: linux-2.6/kernel/sched_fair.c
-===================================================================
---- linux-2.6.orig/kernel/sched_fair.c
-+++ linux-2.6/kernel/sched_fair.c
-@@ -1565,7 +1565,7 @@ find_idlest_group(struct sched_domain *s
-
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_cpus(group),
-- &p->cpus_allowed))
-+ tsk_cpus_allowed(p)))
- continue;
-
- local_group = cpumask_test_cpu(this_cpu,
-@@ -1611,7 +1611,7 @@ find_idlest_cpu(struct sched_group *grou
- int i;
-
- /* Traverse only the allowed CPUs */
-- for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
-+ for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- load = weighted_cpuload(i);
+ /**
+@@ -534,15 +594,16 @@ struct device_node *of_find_matching_nod
+ const struct of_device_id *matches)
+ {
+ struct device_node *np;
++ unsigned long flags;
- if (load < min_load || (load == min_load && i == this_cpu)) {
-@@ -1655,7 +1655,7 @@ static int select_idle_sibling(struct ta
- if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+- if (of_match_node(matches, np) && of_node_get(np))
++ if (__of_match_node(matches, np) && of_node_get(np))
break;
-
-- for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
-+ for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- target = i;
- break;
-@@ -1698,7 +1698,7 @@ select_task_rq_fair(struct task_struct *
- int sync = wake_flags & WF_SYNC;
-
- if (sd_flag & SD_BALANCE_WAKE) {
-- if (cpumask_test_cpu(cpu, &p->cpus_allowed))
-+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
- want_affine = 1;
- new_cpu = prev_cpu;
- }
-@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct
- * 2) cannot be migrated to this CPU due to cpus_allowed, or
- * 3) are cache-hot on their current CPU.
- */
-- if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
-+ if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
- return 0;
}
-@@ -2173,6 +2173,10 @@ balance_tasks(struct rq *this_rq, int th
- */
- if (idle == CPU_NEWLY_IDLE)
- break;
-+
-+ if (raw_spin_is_contended(&this_rq->lock) ||
-+ raw_spin_is_contended(&busiest->lock))
-+ break;
- #endif
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_matching_node);
+@@ -585,12 +646,12 @@ struct device_node *of_find_node_by_phan
+ {
+ struct device_node *np;
- /*
-@@ -2277,6 +2281,20 @@ load_balance_fair(struct rq *this_rq, in
- rem_load_move -= moved_load;
- if (rem_load_move < 0)
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ for (np = allnodes; np; np = np->allnext)
+ if (np->phandle == handle)
break;
-+
-+#ifdef CONFIG_PREEMPT
-+ /*
-+ * NEWIDLE balancing is a source of latency, so preemptible
-+ * kernels will stop after the first task is pulled to minimize
-+ * the critical section.
-+ */
-+ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
-+ break;
-+
-+ if (raw_spin_is_contended(&this_rq->lock) ||
-+ raw_spin_is_contended(&busiest->lock))
-+ break;
-+#endif
- }
- rcu_read_unlock();
+ of_node_get(np);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_phandle);
+@@ -745,18 +806,18 @@ int prom_add_property(struct device_node
+ unsigned long flags;
-@@ -3418,7 +3436,7 @@ redo:
- * moved to this_cpu
- */
- if (!cpumask_test_cpu(this_cpu,
-- &busiest->curr->cpus_allowed)) {
-+ tsk_cpus_allowed(busiest->curr))) {
- raw_spin_unlock_irqrestore(&busiest->lock,
- flags);
- all_pinned = 1;
-Index: linux-2.6/lib/smp_processor_id.c
-===================================================================
---- linux-2.6.orig/lib/smp_processor_id.c
-+++ linux-2.6/lib/smp_processor_id.c
-@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
-- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
-+ if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
- goto out;
+ prop->next = NULL;
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return -1;
+ }
+ next = &(*next)->next;
+ }
+ *next = prop;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- /*
-@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
- if (!printk_ratelimit())
- goto out_enable;
+ #ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+@@ -781,7 +842,7 @@ int prom_remove_property(struct device_n
+ unsigned long flags;
+ int found = 0;
-- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-- "code: %s/%d\n",
-- preempt_count() - 1, current->comm, current->pid);
-+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
-+ "code: %s/%d\n", preempt_count() - 1,
-+ __migrate_disabled(current), current->comm, current->pid);
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == prop) {
+@@ -794,7 +855,7 @@ int prom_remove_property(struct device_n
+ }
+ next = &(*next)->next;
+ }
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-Index: linux-2.6/include/linux/pid.h
-===================================================================
---- linux-2.6.orig/include/linux/pid.h
-+++ linux-2.6/include/linux/pid.h
-@@ -2,6 +2,7 @@
- #define _LINUX_PID_H
+ if (!found)
+ return -ENODEV;
+@@ -824,7 +885,7 @@ int prom_update_property(struct device_n
+ unsigned long flags;
+ int found = 0;
- #include <linux/rcupdate.h>
-+#include <linux/atomic.h>
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == oldprop) {
+@@ -838,7 +899,7 @@ int prom_update_property(struct device_n
+ }
+ next = &(*next)->next;
+ }
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- enum pid_type
+ if (!found)
+ return -ENODEV;
+@@ -868,12 +929,12 @@ void of_attach_node(struct device_node *
{
-Index: linux-2.6/include/linux/jbd.h
-===================================================================
---- linux-2.6.orig/include/linux/jbd.h
-+++ linux-2.6/include/linux/jbd.h
-@@ -244,6 +244,7 @@ typedef struct journal_superblock_s
+ unsigned long flags;
+
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np->sibling = np->parent->child;
+ np->allnext = allnodes;
+ np->parent->child = np;
+ allnodes = np;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
- #include <linux/fs.h>
- #include <linux/sched.h>
-+#include <linux/jbd_common.h>
+ /**
+@@ -887,7 +948,7 @@ void of_detach_node(struct device_node *
+ struct device_node *parent;
+ unsigned long flags;
- #define J_ASSERT(assert) BUG_ON(!(assert))
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
-@@ -270,69 +271,6 @@ typedef struct journal_superblock_s
- #define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
- #endif
+ parent = np->parent;
+ if (!parent)
+@@ -918,7 +979,7 @@ void of_detach_node(struct device_node *
+ of_node_set_flag(np, OF_DETACHED);
--enum jbd_state_bits {
-- BH_JBD /* Has an attached ext3 journal_head */
-- = BH_PrivateStart,
-- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
-- BH_Freed, /* Has been freed (truncated) */
-- BH_Revoked, /* Has been revoked from the log */
-- BH_RevokeValid, /* Revoked flag is valid */
-- BH_JBDDirty, /* Is dirty but journaled */
-- BH_State, /* Pins most journal_head state */
-- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
-- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
--};
--
--BUFFER_FNS(JBD, jbd)
--BUFFER_FNS(JWrite, jwrite)
--BUFFER_FNS(JBDDirty, jbddirty)
--TAS_BUFFER_FNS(JBDDirty, jbddirty)
--BUFFER_FNS(Revoked, revoked)
--TAS_BUFFER_FNS(Revoked, revoked)
--BUFFER_FNS(RevokeValid, revokevalid)
--TAS_BUFFER_FNS(RevokeValid, revokevalid)
--BUFFER_FNS(Freed, freed)
--
--static inline struct buffer_head *jh2bh(struct journal_head *jh)
--{
-- return jh->b_bh;
--}
--
--static inline struct journal_head *bh2jh(struct buffer_head *bh)
--{
-- return bh->b_private;
--}
--
--static inline void jbd_lock_bh_state(struct buffer_head *bh)
--{
-- bit_spin_lock(BH_State, &bh->b_state);
--}
--
--static inline int jbd_trylock_bh_state(struct buffer_head *bh)
--{
-- return bit_spin_trylock(BH_State, &bh->b_state);
--}
--
--static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
--{
-- return bit_spin_is_locked(BH_State, &bh->b_state);
--}
--
--static inline void jbd_unlock_bh_state(struct buffer_head *bh)
--{
-- bit_spin_unlock(BH_State, &bh->b_state);
--}
--
--static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
--{
-- bit_spin_lock(BH_JournalHead, &bh->b_state);
--}
--
--static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
--{
-- bit_spin_unlock(BH_JournalHead, &bh->b_state);
--}
--
- struct jbd_revoke_table_s;
+ out_unlock:
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+ #endif /* defined(CONFIG_OF_DYNAMIC) */
- /**
-Index: linux-2.6/include/linux/jbd2.h
+Index: linux-2.6/arch/sparc/kernel/prom_common.c
===================================================================
---- linux-2.6.orig/include/linux/jbd2.h
-+++ linux-2.6/include/linux/jbd2.h
-@@ -275,6 +275,7 @@ typedef struct journal_superblock_s
-
- #include <linux/fs.h>
- #include <linux/sched.h>
-+#include <linux/jbd_common.h>
+--- linux-2.6.orig/arch/sparc/kernel/prom_common.c
++++ linux-2.6/arch/sparc/kernel/prom_common.c
+@@ -67,7 +67,7 @@ int of_set_property(struct device_node *
+ err = -ENODEV;
- #define J_ASSERT(assert) BUG_ON(!(assert))
+ mutex_lock(&of_set_property_mutex);
+- write_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ prevp = &dp->properties;
+ while (*prevp) {
+ struct property *prop = *prevp;
+@@ -94,7 +94,7 @@ int of_set_property(struct device_node *
+ }
+ prevp = &(*prevp)->next;
+ }
+- write_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ mutex_unlock(&of_set_property_mutex);
-@@ -302,70 +303,6 @@ typedef struct journal_superblock_s
- #define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
- #endif
+ /* XXX Upate procfs if necessary... */
+Index: linux-2.6/include/linux/of.h
+===================================================================
+--- linux-2.6.orig/include/linux/of.h
++++ linux-2.6/include/linux/of.h
+@@ -68,7 +68,7 @@ struct device_node {
+ /* Pointer for first entry in chain of all nodes. */
+ extern struct device_node *allnodes;
+ extern struct device_node *of_chosen;
+-extern rwlock_t devtree_lock;
++extern raw_spinlock_t devtree_lock;
--enum jbd_state_bits {
-- BH_JBD /* Has an attached ext3 journal_head */
-- = BH_PrivateStart,
-- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
-- BH_Freed, /* Has been freed (truncated) */
-- BH_Revoked, /* Has been revoked from the log */
-- BH_RevokeValid, /* Revoked flag is valid */
-- BH_JBDDirty, /* Is dirty but journaled */
-- BH_State, /* Pins most journal_head state */
-- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
-- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
-- BH_JBDPrivateStart, /* First bit available for private use by FS */
--};
--
--BUFFER_FNS(JBD, jbd)
--BUFFER_FNS(JWrite, jwrite)
--BUFFER_FNS(JBDDirty, jbddirty)
--TAS_BUFFER_FNS(JBDDirty, jbddirty)
--BUFFER_FNS(Revoked, revoked)
--TAS_BUFFER_FNS(Revoked, revoked)
--BUFFER_FNS(RevokeValid, revokevalid)
--TAS_BUFFER_FNS(RevokeValid, revokevalid)
--BUFFER_FNS(Freed, freed)
--
--static inline struct buffer_head *jh2bh(struct journal_head *jh)
--{
-- return jh->b_bh;
--}
--
--static inline struct journal_head *bh2jh(struct buffer_head *bh)
--{
-- return bh->b_private;
--}
--
--static inline void jbd_lock_bh_state(struct buffer_head *bh)
--{
-- bit_spin_lock(BH_State, &bh->b_state);
--}
--
--static inline int jbd_trylock_bh_state(struct buffer_head *bh)
--{
-- return bit_spin_trylock(BH_State, &bh->b_state);
--}
--
--static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
--{
-- return bit_spin_is_locked(BH_State, &bh->b_state);
--}
--
--static inline void jbd_unlock_bh_state(struct buffer_head *bh)
--{
-- bit_spin_unlock(BH_State, &bh->b_state);
--}
--
--static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
--{
-- bit_spin_lock(BH_JournalHead, &bh->b_state);
--}
--
--static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
--{
-- bit_spin_unlock(BH_JournalHead, &bh->b_state);
--}
--
- /* Flags in jbd_inode->i_flags */
- #define __JI_COMMIT_RUNNING 0
- /* Commit of the inode data in progress. We use this flag to protect us from
-Index: linux-2.6/include/linux/jbd_common.h
+ static inline bool of_have_populated_dt(void)
+ {
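
The devtree_lock conversion above (drivers/of/base.c, arch/sparc/kernel/prom_common.c, include/linux/of.h) follows the usual RT recipe: the rwlock becomes a raw_spinlock_t, which stays a true spinning lock on RT, and each public accessor that used to call another locking accessor is split into a lock-free __helper plus a thin locked wrapper, so the non-recursive raw lock is never taken twice on one call path. A minimal userspace sketch of that refactor, with a pthread spinlock standing in for raw_spinlock_t and hypothetical property helpers standing in for the of_* functions:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct property {
	const char *name;
	const void *value;
	struct property *next;
};

static pthread_spinlock_t tree_lock;

/* Lock-free core: caller must already hold tree_lock. */
static struct property *__find_property(struct property *props, const char *name)
{
	struct property *pp;

	for (pp = props; pp; pp = pp->next)
		if (!strcmp(pp->name, name))
			return pp;
	return NULL;
}

/* Public wrapper: takes the lock exactly once. */
static struct property *find_property(struct property *props, const char *name)
{
	struct property *pp;

	pthread_spin_lock(&tree_lock);
	pp = __find_property(props, name);
	pthread_spin_unlock(&tree_lock);
	return pp;
}

/* A second accessor builds on the __helper, not on the wrapper, so the
 * non-recursive lock cannot be acquired twice on one call path. */
static const void *get_property(struct property *props, const char *name)
{
	struct property *pp;
	const void *val;

	pthread_spin_lock(&tree_lock);
	pp = __find_property(props, name);
	val = pp ? pp->value : NULL;
	pthread_spin_unlock(&tree_lock);
	return val;
}

int main(void)
{
	struct property b = { "model", "sketch-board", NULL };
	struct property a = { "compatible", "acme,board", &b };

	pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
	printf("model = %s\n", (const char *)get_property(&a, "model"));
	printf("compatible found: %s\n", find_property(&a, "compatible") ? "yes" : "no");
	pthread_spin_destroy(&tree_lock);
	return 0;
}
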
+Index: linux-2.6/include/linux/list.h
===================================================================
---- /dev/null
-+++ linux-2.6/include/linux/jbd_common.h
-@@ -0,0 +1,92 @@
-+#ifndef _LINUX_JBD_STATE_H
-+#define _LINUX_JBD_STATE_H
-+
-+enum jbd_state_bits {
-+ BH_JBD /* Has an attached ext3 journal_head */
-+ = BH_PrivateStart,
-+ BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
-+ BH_Freed, /* Has been freed (truncated) */
-+ BH_Revoked, /* Has been revoked from the log */
-+ BH_RevokeValid, /* Revoked flag is valid */
-+ BH_JBDDirty, /* Is dirty but journaled */
-+ BH_State, /* Pins most journal_head state */
-+ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
-+ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
-+ BH_JBDPrivateStart, /* First bit available for private use by FS */
-+};
-+
-+BUFFER_FNS(JBD, jbd)
-+BUFFER_FNS(JWrite, jwrite)
-+BUFFER_FNS(JBDDirty, jbddirty)
-+TAS_BUFFER_FNS(JBDDirty, jbddirty)
-+BUFFER_FNS(Revoked, revoked)
-+TAS_BUFFER_FNS(Revoked, revoked)
-+BUFFER_FNS(RevokeValid, revokevalid)
-+TAS_BUFFER_FNS(RevokeValid, revokevalid)
-+BUFFER_FNS(Freed, freed)
-+
-+static inline struct buffer_head *jh2bh(struct journal_head *jh)
-+{
-+ return jh->b_bh;
-+}
+--- linux-2.6.orig/include/linux/list.h
++++ linux-2.6/include/linux/list.h
+@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
+ list_entry((ptr)->next, type, member)
+
+ /**
++ * list_last_entry - get the last element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note that the list is expected to be non-empty.
++ */
++#define list_last_entry(ptr, type, member) \
++ list_entry((ptr)->prev, type, member)
+
-+static inline struct journal_head *bh2jh(struct buffer_head *bh)
-+{
-+ return bh->b_private;
-+}
++/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
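
list_last_entry() is simply list_entry() applied to head->prev; the mm/page_alloc.c hunks below use it to pop pages from the tail of a pcp list. A self-contained userspace replica (container_of and the list types re-declared here just for the sketch):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

struct page { int id; struct list_head lru; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head q = { &q, &q };
	struct page a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

	list_add_tail(&a.lru, &q);
	list_add_tail(&b.lru, &q);
	/* As the kerneldoc notes, the list must not be empty. */
	printf("last page id: %d\n", list_last_entry(&q, struct page, lru)->id);
	return 0;
}
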
+Index: linux-2.6/mm/page_alloc.c
+===================================================================
+--- linux-2.6.orig/mm/page_alloc.c
++++ linux-2.6/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
-+static inline void jbd_lock_bh_state(struct buffer_head *bh)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_lock(BH_State, &bh->b_state);
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags) \
++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++# define cpu_unlock_irqrestore(cpu, flags) \
++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
+#else
-+ spin_lock(&bh->b_state_lock);
++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
-+}
+
-+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ return bit_spin_trylock(BH_State, &bh->b_state);
-+#else
-+ return spin_trylock(&bh->b_state_lock);
-+#endif
-+}
+ int page_group_by_mobility_disabled __read_mostly;
+
+ static void set_pageblock_migratetype(struct page *page, int migratetype)
+@@ -580,7 +593,7 @@ static inline int free_pages_check(struc
+ }
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -591,16 +604,42 @@ static inline int free_pages_check(struc
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *list)
+ {
+- int migratetype = 0;
+- int batch_free = 0;
+ int to_free = count;
++ unsigned long flags;
+
+- spin_lock(&zone->lock);
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
++ while (!list_empty(list)) {
++ struct page *page = list_first_entry(list, struct page, lru);
+
-+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ return bit_spin_is_locked(BH_State, &bh->b_state);
-+#else
-+ return spin_is_locked(&bh->b_state_lock);
-+#endif
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++ __free_one_page(page, zone, 0, page_private(page));
++ trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ to_free--;
++ }
++ WARN_ON(to_free != 0);
++ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
++ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
-+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++ struct list_head *dst)
+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_unlock(BH_State, &bh->b_state);
-+#else
-+ spin_unlock(&bh->b_state_lock);
-+#endif
-+}
++ int migratetype = 0, batch_free = 0;
+
-+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_lock(BH_JournalHead, &bh->b_state);
-+#else
-+ spin_lock(&bh->b_journal_head_lock);
-+#endif
-+}
+ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+@@ -616,7 +655,7 @@ static void free_pcppages_bulk(struct zo
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+- list = &pcp->lists[migratetype];
++ list = &src->lists[migratetype];
+ } while (list_empty(list));
+
+ /* This is the only non-empty list. Free them all. */
+@@ -624,28 +663,25 @@ static void free_pcppages_bulk(struct zo
+ batch_free = to_free;
+
+ do {
+- page = list_entry(list->prev, struct page, lru);
+- /* must delete as __free_one_page list manipulates */
++ page = list_last_entry(list, struct page, lru);
+ list_del(&page->lru);
+- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+- __free_one_page(page, zone, 0, page_private(page));
+- trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ list_add(&page->lru, dst);
+ } while (--to_free && --batch_free && !list_empty(list));
+ }
+- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone, struct page *page, int order,
+ int migratetype)
+ {
+- spin_lock(&zone->lock);
++ unsigned long flags;
+
-+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-+{
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
+ __free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+- spin_unlock(&zone->lock);
++ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ static bool free_pages_prepare(struct page *page, unsigned int order)
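
The split introduced above is the core of the page_alloc.c rework: isolate_pcp_pages() only detaches a batch from the per-CPU lists under the (local) pcp lock, and free_pcppages_bulk() hands the batch to the buddy lists afterwards under zone->lock alone, so the two locks are never held together and each critical section stays short. A runnable sketch of that shape, with two mutexes standing in for pa_lock and zone->lock and a singly-linked list standing in for the page lists (names illustrative, not the kernel code):

#include <pthread.h>
#include <stdio.h>

struct page { struct page *next; };

static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;  /* plays pa_lock */
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* plays zone->lock */
static struct page *pcp_list;  /* per-CPU cache */
static struct page *buddy;     /* zone free list */
static int freed;

/* Step 1: under the per-CPU lock, only detach the batch. */
static struct page *isolate_pcp_pages(void)
{
	struct page *batch;

	pthread_mutex_lock(&pcp_lock);
	batch = pcp_list;
	pcp_list = NULL;
	pthread_mutex_unlock(&pcp_lock);
	return batch;
}

/* Step 2: under the zone lock only, hand the batch back. */
static void free_pcppages_bulk(struct page *batch)
{
	pthread_mutex_lock(&zone_lock);
	while (batch) {
		struct page *p = batch;

		batch = p->next;
		p->next = buddy;
		buddy = p;
		freed++;
	}
	pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	static struct page pages[3];
	int i;

	for (i = 0; i < 3; i++) {
		pages[i].next = pcp_list;
		pcp_list = &pages[i];
	}
	free_pcppages_bulk(isolate_pcp_pages());
	printf("freed %d pages, locks never nested\n", freed);
	return 0;
}
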
+@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page
+ if (!free_pages_prepare(page, order))
+ return;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, order,
+ get_pageblock_migratetype(page));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -1064,16 +1100,18 @@ static int rmqueue_bulk(struct zone *zon
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
++ LIST_HEAD(dst);
+ int to_drain;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (pcp->count >= pcp->batch)
+ to_drain = pcp->batch;
+ else
+ to_drain = pcp->count;
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
+ pcp->count -= to_drain;
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -1092,16 +1130,21 @@ static void drain_pages(unsigned int cpu
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count) {
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count) {
++ isolate_pcp_pages(count, pcp, &dst);
+ pcp->count = 0;
+ }
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+ }
+
+@@ -1118,7 +1161,14 @@ void drain_local_pages(void *arg)
+ */
+ void drain_all_pages(void)
+ {
+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+ on_each_cpu(drain_local_pages, NULL, 1);
+#else
-+ spin_unlock(&bh->b_journal_head_lock);
++ int i;
++
++ for_each_online_cpu(i)
++ drain_pages(i);
+#endif
-+}
+ }
+
+ #ifdef CONFIG_HIBERNATION
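
The drain_all_pages() change just above is why pa_lock is a per-CPU lock rather than plain interrupt disabling: on RT the drain cannot run from the on_each_cpu() IPI context, so each CPU's pcp cache is drained remotely via cpu_lock_irqsave(cpu, ...), something local_irq_save() could never make safe. A small runnable model of remote draining under per-CPU locks (illustrative names):

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

/* One lock per "CPU": taking pa_lock[cpu] protects that CPU's cache
 * whether we are that CPU or not -- the property local_irq_save() lacks. */
static pthread_mutex_t pa_lock[NR_CPUS];
static int pcp_count[NR_CPUS];

static void drain_pages(int cpu)
{
	pthread_mutex_lock(&pa_lock[cpu]);   /* models cpu_lock_irqsave(cpu, flags) */
	pcp_count[cpu] = 0;
	pthread_mutex_unlock(&pa_lock[cpu]); /* models cpu_unlock_irqrestore() */
}

static void drain_all_pages(void)
{
	int cpu;

	/* The RT variant: no IPIs, just walk the CPUs and take each lock. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		drain_pages(cpu);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&pa_lock[cpu], NULL);
		pcp_count[cpu] = cpu * 10;
	}
	drain_all_pages();
	printf("pcp counts after drain: %d %d %d %d\n",
	       pcp_count[0], pcp_count[1], pcp_count[2], pcp_count[3]);
	return 0;
}
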
+@@ -1174,7 +1224,7 @@ void free_hot_cold_page(struct page *pag
+
+ migratetype = get_pageblock_migratetype(page);
+ set_page_private(page, migratetype);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_event(PGFREE);
+@@ -1201,12 +1251,19 @@ void free_hot_cold_page(struct page *pag
+ list_add(&page->lru, &pcp->lists[migratetype]);
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+- free_pcppages_bulk(zone, pcp->batch, pcp);
++ LIST_HEAD(dst);
++ int count;
+
-+#endif
-Index: linux-2.6/net/netfilter/ipvs/ip_vs_ctl.c
++ isolate_pcp_pages(pcp->batch, pcp, &dst);
+ pcp->count -= pcp->batch;
++ count = pcp->batch;
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, count, &dst);
++ return;
+ }
+
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -1301,7 +1358,7 @@ again:
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ if (list_empty(list)) {
+@@ -1333,17 +1390,19 @@ again:
+ */
+ WARN_ON_ONCE(order > 1);
+ }
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ page = __rmqueue(zone, order, migratetype);
+- spin_unlock(&zone->lock);
+- if (!page)
++ if (!page) {
++ spin_unlock(&zone->lock);
+ goto failed;
++ }
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
++ spin_unlock(&zone->lock);
+ }
+
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+
+ VM_BUG_ON(bad_range(zone, page));
+ if (prep_new_page(page, order, gfp_flags))
+@@ -1351,7 +1410,7 @@ again:
+ return page;
+
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return NULL;
+ }
+
+@@ -1912,8 +1971,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
+ if (*did_some_progress != COMPACT_SKIPPED) {
+
+ /* Page migration frees to the PCP lists but we want merging */
+- drain_pages(get_cpu());
+- put_cpu();
++ drain_pages(get_cpu_light());
++ put_cpu_light();
+
+ page = get_page_from_freelist(gfp_mask, nodemask,
+ order, zonelist, high_zoneidx,
+@@ -3685,14 +3744,16 @@ static int __zone_pcp_update(void *data)
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
+
+ pset = per_cpu_ptr(zone->pageset, cpu);
+ pcp = &pset->pcp;
+
+- local_irq_save(flags);
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ cpu_lock_irqsave(cpu, flags);
++ isolate_pcp_pages(pcp->count, pcp, &dst);
++ free_pcppages_bulk(zone, pcp->count, &dst);
+ setup_pageset(pset, batch);
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
+ }
+ return 0;
+ }
+@@ -5004,6 +5065,7 @@ static int page_alloc_cpu_notify(struct
+ void __init page_alloc_init(void)
+ {
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
++ local_irq_lock_init(pa_lock);
+ }
+
+ /*
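
Taken together, every local_irq_save()/local_irq_restore() pair in this file became local_lock_irqsave()/local_unlock_irqrestore() on pa_lock: on a mainline build that still compiles down to interrupt disabling, while on RT it takes a per-CPU lock and leaves the section preemptible. A compile-time model of the two personalities (stub functions, not the kernel's locallock implementation):

#include <stdio.h>

typedef unsigned long flags_t;

#ifdef PREEMPT_RT_BASE	/* stand-in for CONFIG_PREEMPT_RT_BASE */
/* RT personality: take this CPU's lock, stay preemptible. */
static void cpu_lock(flags_t *f)   { *f = 0; puts("pa_lock taken, still preemptible"); }
static void cpu_unlock(flags_t *f) { (void)f; puts("pa_lock dropped"); }
# define local_lock_irqsave(f)		cpu_lock(&(f))
# define local_unlock_irqrestore(f)	cpu_unlock(&(f))
#else
/* Mainline personality: same call sites, plain interrupt disabling. */
static void disable_irqs(flags_t *f) { *f = 1; puts("irqs off"); }
static void restore_irqs(flags_t *f) { (void)f; puts("irqs restored"); }
# define local_lock_irqsave(f)		disable_irqs(&(f))
# define local_unlock_irqrestore(f)	restore_irqs(&(f))
#endif

int main(void)
{
	flags_t flags;

	local_lock_irqsave(flags);	/* the free_hot_cold_page() pattern */
	/* ... touch this CPU's per-CPU pageset here ... */
	local_unlock_irqrestore(flags);
	return 0;
}
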
+Index: linux-2.6/include/linux/pid.h
===================================================================
---- linux-2.6.orig/net/netfilter/ipvs/ip_vs_ctl.c
-+++ linux-2.6/net/netfilter/ipvs/ip_vs_ctl.c
-@@ -3679,7 +3679,7 @@ int __net_init __ip_vs_control_init(stru
- int idx;
- struct netns_ipvs *ipvs = net_ipvs(net);
+--- linux-2.6.orig/include/linux/pid.h
++++ linux-2.6/include/linux/pid.h
+@@ -2,6 +2,7 @@
+ #define _LINUX_PID_H
-- ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
-+ rwlock_init(&ipvs->rs_lock);
+ #include <linux/rcupdate.h>
++#include <linux/atomic.h>
- /* Initialize rs_table */
- for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+ enum pid_type
+ {
Index: linux-2.6/net/core/sock.c
===================================================================
--- linux-2.6.orig/net/core/sock.c
@@ -13790,7 +13809,7 @@
#endif
}
-@@ -2902,11 +2902,11 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -2902,16 +2902,46 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
@@ -13805,7 +13824,51 @@
return err;
}
-@@ -3276,7 +3276,7 @@ static void flush_backlog(void *arg)
+ EXPORT_SYMBOL(netif_rx_ni);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT runs ksoftirqd as a real time thread and the root_lock is a
++ * "sleeping spinlock". If the trylock fails then we can go into an
++ * infinite loop when ksoftirqd preempted the task which actually
++ * holds the lock, because we requeue q and raise NET_TX softirq
++ * causing ksoftirqd to loop forever.
++ *
++ * It's safe to use spin_lock on RT here as softirqs run in thread
++ * context and cannot deadlock against the thread which is holding
++ * root_lock.
++ *
++ * On !RT the trylock might fail, but there we bail out from the
++ * softirq loop after 10 attempts which we can't do on RT. And the
++ * task holding root_lock cannot be preempted, so the only downside of
++ * that trylock is that we need 10 loops to decide that we should have
++ * given up in the first one :)
++ */
++static inline int take_root_lock(spinlock_t *lock)
++{
++ spin_lock(lock);
++ return 1;
++}
++#else
++static inline int take_root_lock(spinlock_t *lock)
++{
++ return spin_trylock(lock);
++}
++#endif
++
+ static void net_tx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+@@ -2950,7 +2980,7 @@ static void net_tx_action(struct softirq
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+- if (spin_trylock(root_lock)) {
++ if (take_root_lock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
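
The take_root_lock() comment above gives the reasoning; extracted from the diff, the pattern is a config-selected trylock-versus-lock helper. A compilable toggle with a pthread mutex standing in for the qdisc root_lock and PREEMPT_RT_FULL standing in for the config symbol:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef PREEMPT_RT_FULL
/* RT: block. The kernel lock is PI-aware there, so the holder cannot
 * be starved by the thread that is waiting for it. */
static bool take_root_lock(pthread_mutex_t *l)
{
	pthread_mutex_lock(l);
	return true;
}
#else
/* !RT: try once; on failure the caller requeues and the softirq loop
 * gives up after a bounded number of rounds. */
static bool take_root_lock(pthread_mutex_t *l)
{
	return pthread_mutex_trylock(l) == 0;
}
#endif

int main(void)
{
	if (take_root_lock(&root_lock)) {
		puts("got root_lock");
		pthread_mutex_unlock(&root_lock);
	}
	return 0;
}
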
+@@ -3276,7 +3306,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -13814,7 +13877,7 @@
input_queue_head_incr(sd);
}
}
-@@ -3285,10 +3285,13 @@ static void flush_backlog(void *arg)
+@@ -3285,10 +3315,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -13829,7 +13892,7 @@
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -3766,10 +3769,17 @@ static void net_rx_action(struct softirq
+@@ -3766,10 +3799,17 @@ static void net_rx_action(struct softirq
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -13847,7 +13910,7 @@
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
-@@ -6196,6 +6206,9 @@ static int dev_cpu_callback(struct notif
+@@ -6196,6 +6236,9 @@ static int dev_cpu_callback(struct notif
netif_rx(skb);
input_queue_head_incr(oldsd);
}
@@ -13857,7 +13920,7 @@
return NOTIFY_OK;
}
-@@ -6461,8 +6474,9 @@ static int __init net_dev_init(void)
+@@ -6461,8 +6504,9 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
@@ -13901,7 +13964,44 @@
===================================================================
--- linux-2.6.orig/kernel/rcutree.c
+++ linux-2.6/kernel/rcutree.c
-@@ -1153,7 +1153,7 @@ static void __rcu_offline_cpu(int cpu, s
+@@ -166,6 +166,12 @@ void rcu_sched_qs(int cpu)
+ rdp->passed_quiesc = 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_qs(int cpu)
++{
++ rcu_preempt_qs(cpu);
++}
++#else
+ void rcu_bh_qs(int cpu)
+ {
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+@@ -174,6 +180,7 @@ void rcu_bh_qs(int cpu)
+ barrier();
+ rdp->passed_quiesc = 1;
+ }
++#endif
+
+ /*
+ * Note a context switch. This is a quiescent state for RCU-sched,
+@@ -216,6 +223,7 @@ long rcu_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+@@ -233,6 +241,7 @@ void rcu_bh_force_quiescent_state(void)
+ force_quiescent_state(&rcu_bh_state, 0);
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
++#endif
+
+ /*
+ * Record the number of times rcutorture tests have been initiated and
+@@ -1153,7 +1162,7 @@ static void __rcu_offline_cpu(int cpu, s
else
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
@@ -13910,11 +14010,67 @@
rcu_node_kthread_setaffinity(rnp, -1);
}
+@@ -1579,6 +1588,7 @@ void call_rcu_sched(struct rcu_head *hea
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue an RCU for invocation after a quicker grace period.
+ */
+@@ -1587,6 +1597,7 @@ void call_rcu_bh(struct rcu_head *head,
+ __call_rcu(head, func, &rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ /**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+@@ -1628,6 +1639,7 @@ void synchronize_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+@@ -1653,6 +1665,7 @@ void synchronize_rcu_bh(void)
+ destroy_rcu_head_on_stack(&rcu.head);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+
+ /*
+ * Check to see if there is any immediate RCU-related work to be done
+@@ -1806,6 +1819,7 @@ static void _rcu_barrier(struct rcu_stat
+ mutex_unlock(&rcu_barrier_mutex);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+@@ -1814,6 +1828,7 @@ void rcu_barrier_bh(void)
+ _rcu_barrier(&rcu_bh_state, call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+
+ /**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
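
On PREEMPT_RT_FULL softirqs run in threads, so an RCU-bh read-side section is preemptible and the separate bh flavour can no longer provide its guarantees by itself; the rcutree.c hunks above and the header changes further down therefore alias the whole _bh update-side API to the preemptible flavour. A standalone model of that aliasing (the rcu_* bodies are stubs so the sketch runs; they are not the kernel's):

#include <stdio.h>

#define CONFIG_PREEMPT_RT_FULL 1

/* Stub "plain" flavour, so the sketch is runnable. */
static void call_rcu(void (*func)(void)) { func(); }
static void synchronize_rcu(void) { puts("grace period elapsed"); }
static void rcu_barrier(void) { puts("all callbacks done"); }

#ifdef CONFIG_PREEMPT_RT_FULL
/* The patch's mappings: the bh flavour *is* the plain flavour. */
# define call_rcu_bh        call_rcu
# define synchronize_rcu_bh synchronize_rcu
# define rcu_barrier_bh     rcu_barrier
#endif

static void cb(void) { puts("bh callback ran"); }

int main(void)
{
	call_rcu_bh(cb);	/* compiles and behaves as call_rcu() on RT */
	synchronize_rcu_bh();
	rcu_barrier_bh();
	return 0;
}
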
Index: linux-2.6/kernel/rcutree.h
===================================================================
--- linux-2.6.orig/kernel/rcutree.h
+++ linux-2.6/kernel/rcutree.h
-@@ -443,7 +443,8 @@ static void rcu_preempt_check_callbacks(
+@@ -422,6 +422,7 @@ DECLARE_PER_CPU(struct rcu_data, rcu_pre
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
++static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -443,7 +444,8 @@ static void rcu_preempt_check_callbacks(
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
@@ -14044,6 +14200,15 @@
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
+@@ -1890,7 +1892,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
+
+ #endif /* #else #ifndef CONFIG_SMP */
+
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if 1 /* !defined(CONFIG_RCU_FAST_NO_HZ) */
+
+ /*
+ * Check to see if any future RCU-related work will need to be done
Index: linux-2.6/drivers/usb/gadget/ci13xxx_udc.c
===================================================================
--- linux-2.6.orig/drivers/usb/gadget/ci13xxx_udc.c
@@ -19537,8 +19702,6 @@
+
+module_init(detector_init);
+module_exit(detector_exit);
-Index: linux-2.6/localversion-rt
-===================================================================
Index: linux-2.6/arch/arm/kernel/early_printk.c
===================================================================
--- linux-2.6.orig/arch/arm/kernel/early_printk.c
@@ -20829,7 +20992,7 @@
config MAGIC_SYSRQ_DEFAULT_MASK
hex "Default mask for Magic SysRq keys on the console"
depends on MAGIC_SYSRQ
-@@ -159,7 +181,7 @@ config DEBUG_KERNEL
+@@ -151,7 +173,7 @@ config DEBUG_KERNEL
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
@@ -22324,7 +22487,21 @@
===================================================================
--- linux-2.6.orig/include/linux/rcupdate.h
+++ linux-2.6/include/linux/rcupdate.h
-@@ -104,6 +104,11 @@ void synchronize_rcu(void);
+@@ -78,7 +78,13 @@ struct rcu_head {
+ extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
+ extern void synchronize_sched(void);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh rcu_barrier
++#else
+ extern void rcu_barrier_bh(void);
++#endif
++
+ extern void rcu_barrier_sched(void);
+
+ static inline void __rcu_read_lock_bh(void)
+@@ -104,6 +110,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -22336,7 +22513,7 @@
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -127,6 +132,8 @@ static inline int rcu_preempt_depth(void
+@@ -127,6 +138,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -22345,25 +22522,66 @@
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-Index: linux-2.6/kernel/sched_features.h
-===================================================================
---- linux-2.6.orig/kernel/sched_features.h
-+++ linux-2.6/kernel/sched_features.h
-@@ -65,10 +65,14 @@ SCHED_FEAT(OWNER_SPIN, 1)
+@@ -222,7 +235,14 @@ static inline int rcu_read_lock_held(voi
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
*/
- SCHED_FEAT(NONIRQ_POWER, 1)
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++ return rcu_read_lock_held();
++}
++#else
+ extern int rcu_read_lock_bh_held(void);
++#endif
+
+ /**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -631,8 +651,13 @@ static inline void rcu_read_unlock(void)
+ static inline void rcu_read_lock_bh(void)
+ {
+ __rcu_read_lock_bh();
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_lock();
++#else
+ __acquire(RCU_BH);
+ rcu_read_acquire_bh();
++#endif
+ }
-+#ifndef CONFIG_PREEMPT_RT_FULL
/*
- * Queue remote wakeups on the target CPU and process them
- * using the scheduler IPI. Reduces rq->lock contention/bounces.
+@@ -642,8 +667,12 @@ static inline void rcu_read_lock_bh(void
*/
- SCHED_FEAT(TTWU_QUEUE, 1)
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_unlock();
+#else
-+SCHED_FEAT(TTWU_QUEUE, 0)
+ rcu_read_release_bh();
+ __release(RCU_BH);
+#endif
+ __rcu_read_unlock_bh();
+ }
- SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+@@ -750,6 +779,9 @@ extern void call_rcu(struct rcu_head *he
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh call_rcu
++#else
+ /**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+@@ -770,6 +802,7 @@ extern void call_rcu(struct rcu_head *he
+ */
+ extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
++#endif
+
+ /*
+ * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
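
The reader side of the same collapse: on RT, rcu_read_lock_bh() takes a real preemptible-RCU read lock and rcu_read_lock_bh_held() degenerates to rcu_read_lock_held(), since disabled BH no longer implies disabled preemption. A stubbed sketch of the two read-side variants (mainline's implicit preempt_count bump inside local_bh_disable() is modelled by an explicit call here):

#include <stdio.h>

#define CONFIG_PREEMPT_RT_FULL 1

/* Stubs standing in for the kernel primitives. */
static void rcu_read_lock(void)    { puts("enter preemptible RCU read side"); }
static void rcu_read_unlock(void)  { puts("leave preemptible RCU read side"); }
static void local_bh_disable(void) { puts("BH off"); }
static void local_bh_enable(void)  { puts("BH on"); }
static void preempt_disable(void)  { puts("preemption off"); }
static void preempt_enable(void)   { puts("preemption on"); }

static void rcu_read_lock_bh(void)
{
	local_bh_disable();
#ifdef CONFIG_PREEMPT_RT_FULL
	rcu_read_lock();	/* the patch: lean on preemptible RCU */
#else
	preempt_disable();	/* models mainline's preempt_count bump */
#endif
}

static void rcu_read_unlock_bh(void)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	rcu_read_unlock();
#else
	preempt_enable();
#endif
	local_bh_enable();
}

int main(void)
{
	rcu_read_lock_bh();
	/* walk an RCU-protected structure here */
	rcu_read_unlock_bh();
	return 0;
}
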
Index: linux-2.6/kernel/stop_machine.c
===================================================================
--- linux-2.6.orig/kernel/stop_machine.c
@@ -25489,6 +25707,64 @@
}
+
+#endif
+Index: linux-2.6/include/linux/rcutree.h
+===================================================================
+--- linux-2.6.orig/include/linux/rcutree.h
++++ linux-2.6/include/linux/rcutree.h
+@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
+
+ #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void synchronize_rcu_bh(void);
++#else
++# define synchronize_rcu_bh() synchronize_rcu()
++#endif
+ extern void synchronize_sched_expedited(void);
+ extern void synchronize_rcu_expedited(void);
+
+@@ -71,13 +75,19 @@ extern void rcu_barrier(void);
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ extern long rcu_batches_completed(void);
+-extern long rcu_batches_completed_bh(void);
+ extern long rcu_batches_completed_sched(void);
+
+ extern void rcu_force_quiescent_state(void);
+-extern void rcu_bh_force_quiescent_state(void);
+ extern void rcu_sched_force_quiescent_state(void);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++extern void rcu_bh_force_quiescent_state(void);
++extern long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
++# define rcu_batches_completed_bh rcu_batches_completed
++#endif
++
+ /* A context switch is a grace period for RCU-sched and RCU-bh. */
+ static inline int rcu_blocking_is_gp(void)
+ {
+Index: linux-2.6/kernel/rcupdate.c
+===================================================================
+--- linux-2.6.orig/kernel/rcupdate.c
++++ linux-2.6/kernel/rcupdate.c
+@@ -72,6 +72,7 @@ int debug_lockdep_rcu_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+ *
+@@ -91,6 +92,7 @@ int rcu_read_lock_bh_held(void)
+ return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
Index: linux-2.6/include/linux/lglock.h
===================================================================
--- linux-2.6.orig/include/linux/lglock.h
Added: dists/sid/linux-2.6/debian/patches/series/6-extra
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/series/6-extra Fri Oct 7 13:59:16 2011 (r18158)
@@ -0,0 +1 @@
++ features/all/rt/patch-3.0.6-rt17.patch featureset=rt