[kernel] r19485 - in dists/trunk/linux/debian: . patches patches/features/all/rt
Ben Hutchings
benh at alioth.debian.org
Wed Nov 7 02:42:37 UTC 2012
Author: benh
Date: Wed Nov 7 02:42:37 2012
New Revision: 19485
Log:
[rt] bump to 3.6.5-rt15
Added:
dists/trunk/linux/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
dists/trunk/linux/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch
dists/trunk/linux/debian/patches/features/all/rt/fix-random-fallout.patch
dists/trunk/linux/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
dists/trunk/linux/debian/patches/features/all/rt/preempt-lazy-support.patch
dists/trunk/linux/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch
dists/trunk/linux/debian/patches/features/all/rt/x86-preempt-lazy.patch
Modified:
dists/trunk/linux/debian/changelog
dists/trunk/linux/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch
dists/trunk/linux/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch
dists/trunk/linux/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
dists/trunk/linux/debian/patches/features/all/rt/latency-hist.patch
dists/trunk/linux/debian/patches/features/all/rt/localversion.patch
dists/trunk/linux/debian/patches/features/all/rt/mm-enable-slub.patch
dists/trunk/linux/debian/patches/features/all/rt/of-convert-devtree-lock.patch
dists/trunk/linux/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/series
dists/trunk/linux/debian/patches/features/all/rt/skbufhead-raw-lock.patch
dists/trunk/linux/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
dists/trunk/linux/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
dists/trunk/linux/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch
dists/trunk/linux/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch
dists/trunk/linux/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch
dists/trunk/linux/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch
dists/trunk/linux/debian/patches/features/all/rt/timers-preempt-rt-support.patch
dists/trunk/linux/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
dists/trunk/linux/debian/patches/series-rt
Modified: dists/trunk/linux/debian/changelog
==============================================================================
--- dists/trunk/linux/debian/changelog Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/changelog Wed Nov 7 02:42:37 2012 (r19485)
@@ -15,6 +15,7 @@
ZRAM but now a dependency (Closes: #692024)
* fs: Re-enable link security restrictions that are disabled by default
in 3.6.5
+ * [rt] bump to 3.6.5-rt15
-- Uwe Kleine-König <uwe at kleine-koenig.org> Mon, 29 Oct 2012 15:50:12 +0100
Modified: dists/trunk/linux/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -11,7 +11,7 @@
===================================================================
--- linux-stable.orig/arch/arm/Kconfig
+++ linux-stable/arch/arm/Kconfig
-@@ -1737,7 +1737,7 @@ config HAVE_ARCH_PFN_VALID
+@@ -1747,7 +1747,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
Added: dists/trunk/linux/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/arm-preempt-lazy-support.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,105 @@
+Subject: arm-preempt-lazy-support.patch
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 31 Oct 2012 12:04:11 +0100
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/include/asm/thread_info.h | 3 +++
+ arch/arm/kernel/asm-offsets.c | 1 +
+ arch/arm/kernel/entry-armv.S | 8 ++++++++
+ arch/arm/kernel/signal.c | 3 ++-
+ 5 files changed, 15 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/arm/Kconfig
+===================================================================
+--- linux-stable.orig/arch/arm/Kconfig
++++ linux-stable/arch/arm/Kconfig
+@@ -50,6 +50,7 @@ config ARM
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
++ select HAVE_PREEMPT_LAZY
+ help
+ The ARM series is a line of low-power-consumption RISC chip designs
+ licensed by ARM Ltd and targeted at embedded applications and
+Index: linux-stable/arch/arm/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/arm/include/asm/thread_info.h
++++ linux-stable/arch/arm/include/asm/thread_info.h
+@@ -50,6 +50,7 @@ struct cpu_context_save {
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+@@ -146,6 +147,7 @@ extern int vfp_restore_user_hwstate(stru
+ #define TIF_SIGPENDING 0
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 3
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+ #define TIF_POLLING_NRFLAG 16
+@@ -158,6 +160,7 @@ extern int vfp_restore_user_hwstate(stru
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+Index: linux-stable/arch/arm/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/asm-offsets.c
++++ linux-stable/arch/arm/kernel/asm-offsets.c
+@@ -50,6 +50,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+Index: linux-stable/arch/arm/kernel/entry-armv.S
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/entry-armv.S
++++ linux-stable/arch/arm/kernel/entry-armv.S
+@@ -221,6 +221,12 @@ __irq_svc:
+ movne r0, #0 @ force flags to 0
+ tst r0, #_TIF_NEED_RESCHED
+ blne svc_preempt
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ teq r8, #0 @ if preempt lazy count != 0
++ movne r0, #0 @ force flags to 0
++ tst r0, #_TIF_NEED_RESCHED_LAZY
++ blne svc_preempt
+ #endif
+
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -240,6 +246,8 @@ svc_preempt:
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ moveq pc, r8 @ go again
+ b 1b
+ #endif
+Index: linux-stable/arch/arm/kernel/signal.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/signal.c
++++ linux-stable/arch/arm/kernel/signal.c
+@@ -639,7 +639,8 @@ asmlinkage int
+ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
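In C terms, the check this hunk adds to the SVC interrupt return path
mirrors the existing NEED_RESCHED test just above it: preempt only when
the lazy counter is zero and the lazy flag is set. An illustrative
sketch, not part of the commit:

    /* equivalent of the entry-armv.S instructions added above */
    if (ti->preempt_lazy_count == 0 &&
        (ti->flags & _TIF_NEED_RESCHED_LAZY))
            svc_preempt();  /* lazy resched requested, counter permits */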
Modified: dists/trunk/linux/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -13,7 +13,6 @@
FIXME: Create a static initializer for this.
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-[ukl: squash in fix by Milan Broz removing a srcu_init_notifier_head]
---
crypto/Kconfig | 2 +-
crypto/api.c | 7 +++++++
@@ -50,15 +49,3 @@
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
atomic_inc(&alg->cra_refcnt);
-Index: linux-stable/crypto/algapi.c
-===================================================================
---- linux-stable.orig/crypto/algapi.c
-+++ linux-stable/crypto/algapi.c
-@@ -956,7 +956,6 @@ EXPORT_SYMBOL_GPL(crypto_xor);
-
- static int __init crypto_algapi_init(void)
- {
-- srcu_init_notifier_head(&crypto_chain);
- crypto_init_proc();
- return 0;
- }
Modified: dists/trunk/linux/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -37,7 +37,7 @@
/* Free the IRQs */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-@@ -2957,7 +2957,7 @@ static void adjust_link(struct net_devic
+@@ -2938,7 +2938,7 @@ static void adjust_link(struct net_devic
struct phy_device *phydev = priv->phydev;
int new_state = 0;
@@ -46,7 +46,7 @@
lock_tx_qs(priv);
if (phydev->link) {
-@@ -3026,7 +3026,7 @@ static void adjust_link(struct net_devic
+@@ -3007,7 +3007,7 @@ static void adjust_link(struct net_devic
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
unlock_tx_qs(priv);
Added: dists/trunk/linux/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,38 @@
+Subject: crypto: Remove duplicate srcu init
+From: Milan Broz <mbroz at redhat.com>
+Date: Tue, 30 Oct 2012 16:27:18 +0100
+
+In peterz-srcu-crypto-chain.patch the blocking notifier is changed to
+an srcu notifier and its initialization is added to the module init
+function. Later, crypto-make-core-static-and-init-scru-early.patch
+adds that initialization to core_initcall() as well, but does not
+remove it from Peter's patch. So the initializer is called twice,
+which can wipe out already registered notifiers. This causes a failure
+in the initialization of larval algorithms, e.g. cbc(aes).
+
+Remove the old one.
+
+Signed-off-by: Milan Broz <mbroz at redhat.com>
+Cc: Tvrtko Ursulin <tvrtko at ursulin.net>
+Cc: dm-crypt at saout.de
+Cc: okozina at redhat.com
+Cc: u.kleine-koenig at pengutronix.de
+Link: http://lkml.kernel.org/r/508FF1D6.3030900@redhat.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+
+---
+ crypto/algapi.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+Index: linux-stable/crypto/algapi.c
+===================================================================
+--- linux-stable.orig/crypto/algapi.c
++++ linux-stable/crypto/algapi.c
+@@ -956,7 +956,6 @@ EXPORT_SYMBOL_GPL(crypto_xor);
+
+ static int __init crypto_algapi_init(void)
+ {
+- srcu_init_notifier_head(&crypto_chain);
+ crypto_init_proc();
+ return 0;
+ }
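The failure mode is easy to see in miniature: srcu_init_notifier_head()
rebuilds the head from scratch, so calling it again throws away anything
registered in between. A schematic kernel-style sketch (nb stands for
any notifier block, e.g. a larval waiter; this is not code from the
patch):

    static struct srcu_notifier_head crypto_chain;

    srcu_init_notifier_head(&crypto_chain);           /* early init */
    srcu_notifier_chain_register(&crypto_chain, &nb); /* users register */
    srcu_init_notifier_head(&crypto_chain);           /* duplicate init in
                                                         crypto_algapi_init():
                                                         nb is silently lost */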
Added: dists/trunk/linux/debian/patches/features/all/rt/fix-random-fallout.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/fix-random-fallout.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,27 @@
+Subject: genirq: Fix 32bit random changes fallout
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 31 Oct 2012 17:06:19 +0100
+
+On 32bit systems pointers are surprisingly 32bit wide. So gcc complains
+correctly about a cast to a different size. Use a cast to unsigned
+long instead, which handles this correctly for both 32 and 64 bit.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ kernel/irq/manage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/irq/manage.c
+===================================================================
+--- linux-stable.orig/kernel/irq/manage.c
++++ linux-stable/kernel/irq/manage.c
+@@ -855,7 +855,7 @@ static int irq_thread(void *data)
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+ add_interrupt_randomness(action->irq, 0,
+- desc->random_ip ^ (u64) action);
++ desc->random_ip ^ (unsigned long) action);
+ migrate_enable();
+ #endif
+ wake_threads_waitq(desc);
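The underlying width issue is plain C, not kernel-specific. A minimal
userspace illustration (variable names are made up; assumes gcc on a
32-bit target, where, as the changelog notes, the u64 cast draws a
warning):

    #include <stdio.h>

    int main(void)
    {
        int dummy;
        /* sizeof(void *) is 4 on 32-bit targets, so a cast to a 64-bit
         * type is a cast between different sizes.  unsigned long is
         * pointer-sized on both 32- and 64-bit Linux ABIs, which is why
         * the patch casts the action pointer to it before the XOR. */
        unsigned long cookie = (unsigned long) &dummy;
        printf("cookie: %#lx\n", cookie);
        return 0;
    }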
Modified: dists/trunk/linux/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -75,7 +75,7 @@
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3177,6 +3169,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3178,6 +3170,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
Modified: dists/trunk/linux/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -23,7 +23,7 @@
===================================================================
--- linux-stable.orig/kernel/Kconfig.preempt
+++ linux-stable/kernel/Kconfig.preempt
-@@ -67,6 +67,13 @@ config PREEMPT_RTB
+@@ -73,6 +73,13 @@ config PREEMPT_RTB
enables changes which are preliminary for the full preemptible
RT kernel.
Modified: dists/trunk/linux/debian/patches/features/all/rt/latency-hist.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/latency-hist.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/latency-hist.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -17,13 +17,13 @@
include/linux/hrtimer.h | 3
include/linux/sched.h | 6
include/trace/events/hist.h | 69 ++
- include/trace/events/latency_hist.h | 30
+ include/trace/events/latency_hist.h | 29
kernel/hrtimer.c | 23
kernel/trace/Kconfig | 104 +++
kernel/trace/Makefile | 4
kernel/trace/latency_hist.c | 1176 ++++++++++++++++++++++++++++++++++++
kernel/trace/trace_irqsoff.c | 11
- 10 files changed, 1612 insertions(+)
+ 10 files changed, 1611 insertions(+)
Index: linux-stable/Documentation/trace/histograms.txt
===================================================================
Modified: dists/trunk/linux/debian/patches/features/all/rt/localversion.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/localversion.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/localversion.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -14,4 +14,4 @@
--- /dev/null
+++ linux-stable/localversion-rt
@@ -0,0 +1 @@
-+-rt10
++-rt15
Modified: dists/trunk/linux/debian/patches/features/all/rt/mm-enable-slub.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/mm-enable-slub.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/mm-enable-slub.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -6,10 +6,24 @@
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
- init/Kconfig | 1
- mm/slub.c | 64 ++++++++++++++++++++++++++++++++++++-----------------------
- 2 files changed, 40 insertions(+), 25 deletions(-)
+ include/linux/slub_def.h | 2
+ init/Kconfig | 1
+ mm/slub.c | 116 ++++++++++++++++++++++++++++++++++++-----------
+ 3 files changed, 90 insertions(+), 29 deletions(-)
+Index: linux-stable/include/linux/slub_def.h
+===================================================================
+--- linux-stable.orig/include/linux/slub_def.h
++++ linux-stable/include/linux/slub_def.h
+@@ -54,7 +54,7 @@ struct kmem_cache_cpu {
+ };
+
+ struct kmem_cache_node {
+- spinlock_t list_lock; /* Protect partial list and nr_partial */
++ raw_spinlock_t list_lock; /* Protect partial list and nr_partial */
+ unsigned long nr_partial;
+ struct list_head partial;
+ #ifdef CONFIG_SLUB_DEBUG
Index: linux-stable/init/Kconfig
===================================================================
--- linux-stable.orig/init/Kconfig
@@ -26,189 +40,355 @@
===================================================================
--- linux-stable.orig/mm/slub.c
+++ linux-stable/mm/slub.c
-@@ -31,6 +31,7 @@
- #include <linux/fault-inject.h>
- #include <linux/stacktrace.h>
- #include <linux/prefetch.h>
-+#include <linux/locallock.h>
-
- #include <trace/events/kmem.h>
+@@ -1253,6 +1253,12 @@ static inline void slab_free_hook(struct
-@@ -225,6 +226,8 @@ static inline void stat(const struct kme
- #endif
- }
+ #endif /* CONFIG_SLUB_DEBUG */
-+static DEFINE_LOCAL_IRQ_LOCK(slub_lock);
++struct slub_free_list {
++ raw_spinlock_t lock;
++ struct list_head list;
++};
++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
- /********************************************************************
- * Core slab cache functions
- *******************************************************************/
-@@ -1278,7 +1281,7 @@ static struct page *allocate_slab(struct
+ /*
+ * Slab allocation and freeing
+ */
+@@ -1277,7 +1283,11 @@ static struct page *allocate_slab(struct
+
flags &= gfp_allowed_mask;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (system_state == SYSTEM_RUNNING)
++#else
if (flags & __GFP_WAIT)
-- local_irq_enable();
-+ local_unlock_irq(slub_lock);
++#endif
+ local_irq_enable();
flags |= s->allocflags;
-
-@@ -1318,7 +1321,7 @@ static struct page *allocate_slab(struct
+@@ -1317,7 +1327,11 @@ static struct page *allocate_slab(struct
+ kmemcheck_mark_unallocated_pages(page, pages);
}
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (system_state == SYSTEM_RUNNING)
++#else
if (flags & __GFP_WAIT)
-- local_irq_disable();
-+ local_lock_irq(slub_lock);
++#endif
+ local_irq_disable();
if (!page)
return NULL;
+@@ -1409,6 +1423,16 @@ static void __free_slab(struct kmem_cach
+ __free_pages(page, order);
+ }
+
++static void free_delayed(struct kmem_cache *s, struct list_head *h)
++{
++ while(!list_empty(h)) {
++ struct page *page = list_first_entry(h, struct page, lru);
++
++ list_del(&page->lru);
++ __free_slab(s, page);
++ }
++}
++
+ #define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
+@@ -1443,6 +1467,12 @@ static void free_slab(struct kmem_cache
+ }
+
+ call_rcu(head, rcu_free_slab);
++ } else if (irqs_disabled()) {
++ struct slub_free_list *f = &__get_cpu_var(slub_free_list);
++
++ raw_spin_lock(&f->lock);
++ list_add(&page->lru, &f->list);
++ raw_spin_unlock(&f->lock);
+ } else
+ __free_slab(s, page);
+ }
+@@ -1544,7 +1574,7 @@ static void *get_partial_node(struct kme
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ void *t;
+ int available;
+@@ -1569,7 +1599,7 @@ static void *get_partial_node(struct kme
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return object;
+ }
+
+@@ -1811,7 +1841,7 @@ redo:
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+@@ -1822,7 +1852,7 @@ redo:
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ }
+
+@@ -1857,7 +1887,7 @@ redo:
+ goto redo;
-@@ -1871,10 +1874,10 @@ redo:
- *
- * This function must be called with interrupt disabled.
+ if (lock)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+@@ -1886,10 +1916,10 @@ static void unfreeze_partials(struct kme
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ n = n2;
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+
+ do {
+@@ -1918,7 +1948,7 @@ static void unfreeze_partials(struct kme
+ }
+
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+@@ -1939,7 +1969,7 @@ static void unfreeze_partials(struct kme
+ * If we did not find a slot then simply move all the partials to the
+ * per node partial list.
*/
--static void unfreeze_partials(struct kmem_cache *s)
-+static void unfreeze_partials(struct kmem_cache *s, unsigned int cpu)
+-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
++static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
- struct kmem_cache_node *n = NULL, *n2 = NULL;
-- struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- struct page *page, *discard_page = NULL;
-
- while ((page = c->partial)) {
-@@ -1959,9 +1962,9 @@ int put_cpu_partial(struct kmem_cache *s
+ struct page *oldpage;
+ int pages;
+@@ -1954,14 +1984,21 @@ int put_cpu_partial(struct kmem_cache *s
+ pobjects = oldpage->pobjects;
+ pages = oldpage->pages;
+ if (drain && pobjects > s->cpu_partial) {
++ struct slub_free_list *f;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+ /*
* partial array is full. Move the existing
* set to the per node partial list.
*/
-- local_irq_save(flags);
-- unfreeze_partials(s);
-- local_irq_restore(flags);
-+ local_lock_irqsave(slub_lock, flags);
-+ unfreeze_partials(s, smp_processor_id());
-+ local_unlock_irqrestore(slub_lock, flags);
+ local_irq_save(flags);
+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ f = &__get_cpu_var(slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
++ free_delayed(s, &tofree);
pobjects = 0;
pages = 0;
stat(s, CPU_PARTIAL_DRAIN);
-@@ -2002,17 +2005,10 @@ static inline void __flush_cpu_slab(stru
- if (c->page)
- flush_slab(s, c);
+@@ -2023,7 +2060,22 @@ static bool has_cpu_slab(int cpu, void *
-- unfreeze_partials(s);
-+ unfreeze_partials(s, cpu);
- }
- }
-
--static void flush_cpu_slab(void *d)
--{
-- struct kmem_cache *s = d;
--
-- __flush_cpu_slab(s, smp_processor_id());
--}
--
- static bool has_cpu_slab(int cpu, void *info)
- {
- struct kmem_cache *s = info;
-@@ -2021,10 +2017,29 @@ static bool has_cpu_slab(int cpu, void *
- return c->page || c->partial;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static void flush_cpu_slab(void *d)
-+{
-+ struct kmem_cache *s = d;
-+
-+ __flush_cpu_slab(s, smp_processor_id());
-+}
-+
static void flush_all(struct kmem_cache *s)
{
- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
- }
-+#else
-+static void flush_all(struct kmem_cache *s)
-+{
++ LIST_HEAD(tofree);
+ int cpu;
+
+ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+ for_each_online_cpu(cpu) {
-+ if (has_cpu_slab(cpu, s))
-+ __flush_cpu_slab(s, cpu);
++ struct slub_free_list *f;
++
++ if (!has_cpu_slab(cpu, s))
++ continue;
++
++ f = &per_cpu(slub_free_list, cpu);
++ raw_spin_lock_irq(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock_irq(&f->lock);
++ free_delayed(s, &tofree);
+ }
-+}
-+#endif
+ }
/*
- * Check if the objects in a per cpu structure fit numa
-@@ -2201,7 +2216,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2051,10 +2103,10 @@ static unsigned long count_partial(struc
+ unsigned long x = 0;
+ struct page *page;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += get_count(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+ }
+
+@@ -2197,9 +2249,11 @@ static inline void *get_freelist(struct
+ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ unsigned long addr, struct kmem_cache_cpu *c)
+ {
++ struct slub_free_list *f;
+ void *freelist;
struct page *page;
unsigned long flags;
++ LIST_HEAD(tofree);
-- local_irq_save(flags);
-+ local_lock_irqsave(slub_lock, flags);
+ local_irq_save(flags);
#ifdef CONFIG_PREEMPT
- /*
- * We may have been preempted and rescheduled on a different
-@@ -2262,7 +2277,7 @@ load_freelist:
+@@ -2262,7 +2316,13 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(slub_lock, flags);
++out:
++ f = &__get_cpu_var(slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
++ free_delayed(s, &tofree);
return freelist;
new_slab:
-@@ -2281,7 +2296,7 @@ new_slab:
+@@ -2280,9 +2340,7 @@ new_slab:
+ if (unlikely(!freelist)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
-
+-
- local_irq_restore(flags);
-+ local_unlock_irqrestore(slub_lock, flags);
- return NULL;
+- return NULL;
++ goto out;
}
-@@ -2296,7 +2311,7 @@ new_slab:
+ page = c->page;
+@@ -2296,8 +2354,7 @@ new_slab:
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
- local_irq_restore(flags);
-+ local_unlock_irqrestore(slub_lock, flags);
- return freelist;
+- return freelist;
++ goto out;
}
-@@ -2488,7 +2503,8 @@ static void __slab_free(struct kmem_cach
+ /*
+@@ -2488,7 +2545,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- spin_lock_irqsave(&n->list_lock, flags);
-+ local_spin_lock_irqsave(slub_lock,
-+ &n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
}
}
-@@ -2538,7 +2554,7 @@ static void __slab_free(struct kmem_cach
+@@ -2538,7 +2595,7 @@ static void __slab_free(struct kmem_cach
stat(s, FREE_ADD_PARTIAL);
}
}
- spin_unlock_irqrestore(&n->list_lock, flags);
-+ local_spin_unlock_irqrestore(slub_lock, &n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return;
slab_empty:
-@@ -2552,7 +2568,7 @@ slab_empty:
+@@ -2552,7 +2609,7 @@ slab_empty:
/* Slab must be on the full list */
remove_full(s, page);
- spin_unlock_irqrestore(&n->list_lock, flags);
-+ local_spin_unlock_irqrestore(slub_lock, &n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -4002,9 +4018,9 @@ static int __cpuinit slab_cpuup_callback
- case CPU_DEAD_FROZEN:
- mutex_lock(&slab_mutex);
- list_for_each_entry(s, &slab_caches, list) {
-- local_irq_save(flags);
-+ local_lock_irqsave(slub_lock, flags);
- __flush_cpu_slab(s, cpu);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(slub_lock, flags);
- }
- mutex_unlock(&slab_mutex);
- break;
+@@ -2781,7 +2838,7 @@ static void
+ init_kmem_cache_node(struct kmem_cache_node *n)
+ {
+ n->nr_partial = 0;
+- spin_lock_init(&n->list_lock);
++ raw_spin_lock_init(&n->list_lock);
+ INIT_LIST_HEAD(&n->partial);
+ #ifdef CONFIG_SLUB_DEBUG
+ atomic_long_set(&n->nr_slabs, 0);
+@@ -3524,7 +3581,7 @@ int kmem_cache_shrink(struct kmem_cache
+ for (i = 0; i < objects; i++)
+ INIT_LIST_HEAD(slabs_by_inuse + i);
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ /*
+ * Build lists indexed by the items in use in each slab.
+@@ -3545,7 +3602,7 @@ int kmem_cache_shrink(struct kmem_cache
+ for (i = objects - 1; i > 0; i--)
+ list_splice(slabs_by_inuse + i, n->partial.prev);
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ /* Release empty slabs */
+ list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+@@ -3711,10 +3768,15 @@ void __init kmem_cache_init(void)
+ int i;
+ int caches = 0;
+ struct kmem_cache *temp_kmem_cache;
+- int order;
++ int order, cpu;
+ struct kmem_cache *temp_kmem_cache_node;
+ unsigned long kmalloc_size;
+
++ for_each_possible_cpu(cpu) {
++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
++ }
++
+ if (debug_guardpage_minorder())
+ slub_max_order = 0;
+
+@@ -4127,7 +4189,7 @@ static int validate_slab_node(struct kme
+ struct page *page;
+ unsigned long flags;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ list_for_each_entry(page, &n->partial, lru) {
+ validate_slab_slab(s, page, map);
+@@ -4150,7 +4212,7 @@ static int validate_slab_node(struct kme
+ atomic_long_read(&n->nr_slabs));
+
+ out:
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return count;
+ }
+
+@@ -4340,12 +4402,12 @@ static int list_locations(struct kmem_ca
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ process_slab(&t, s, page, alloc, map);
+ list_for_each_entry(page, &n->full, lru)
+ process_slab(&t, s, page, alloc, map);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
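The refreshed patch drops the earlier local-lock scheme (the removed
DEFINE_LOCAL_IRQ_LOCK lines above) in favour of a raw list_lock plus a
per-CPU list of pages whose freeing is deferred until it is safe to
call the page allocator. Condensed from the hunks above (f, page and
tofree stand for the local variables used there; not a drop-in
snippet):

    struct slub_free_list {
        raw_spinlock_t   lock;
        struct list_head list;
    };
    static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);

    /* free path with interrupts disabled: park the page */
    raw_spin_lock(&f->lock);
    list_add(&page->lru, &f->list);
    raw_spin_unlock(&f->lock);

    /* later, in a context that may sleep: drain and really free */
    raw_spin_lock_irq(&f->lock);
    list_splice_init(&f->list, &tofree);
    raw_spin_unlock_irq(&f->lock);
    free_delayed(s, &tofree);    /* __free_slab() on each parked page */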
Modified: dists/trunk/linux/debian/patches/features/all/rt/of-convert-devtree-lock.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/of-convert-devtree-lock.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/of-convert-devtree-lock.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -5,9 +5,9 @@
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
arch/sparc/kernel/prom_common.c | 4 -
- drivers/of/base.c | 92 ++++++++++++++++++++++------------------
+ drivers/of/base.c | 96 ++++++++++++++++++++++------------------
include/linux/of.h | 2
- 3 files changed, 55 insertions(+), 43 deletions(-)
+ 3 files changed, 57 insertions(+), 45 deletions(-)
Index: linux-stable/arch/sparc/kernel/prom_common.c
===================================================================
@@ -142,6 +142,24 @@
return next;
}
EXPORT_SYMBOL(of_get_next_child);
+@@ -412,7 +417,7 @@ struct device_node *of_get_next_availabl
+ {
+ struct device_node *next;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling) {
+ if (!of_device_is_available(next))
+@@ -421,7 +426,7 @@ struct device_node *of_get_next_availabl
+ break;
+ }
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return next;
+ }
+ EXPORT_SYMBOL(of_get_next_available_child);
@@ -436,14 +441,15 @@ EXPORT_SYMBOL(of_get_next_available_chil
struct device_node *of_find_node_by_path(const char *path)
{
Modified: dists/trunk/linux/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -45,7 +45,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -1426,7 +1426,7 @@ void update_process_times(int user_tick)
+@@ -1432,7 +1432,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
@@ -54,7 +54,7 @@
if (in_irq())
irq_work_run();
#endif
-@@ -1440,6 +1440,10 @@ static void run_timer_softirq(struct sof
+@@ -1446,6 +1446,10 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
Added: dists/trunk/linux/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,179 @@
+Subject: powerpc-preempt-lazy-support.patch
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 01 Nov 2012 10:14:11 +0100
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/include/asm/thread_info.h | 7 ++++++-
+ arch/powerpc/kernel/asm-offsets.c | 1 +
+ arch/powerpc/kernel/entry_32.S | 19 +++++++++++++------
+ arch/powerpc/kernel/entry_64.S | 17 +++++++++++------
+ 5 files changed, 32 insertions(+), 13 deletions(-)
+
+Index: linux-stable/arch/powerpc/Kconfig
+===================================================================
+--- linux-stable.orig/arch/powerpc/Kconfig
++++ linux-stable/arch/powerpc/Kconfig
+@@ -140,6 +140,7 @@ config PPC
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+
+ config EARLY_PRINTK
+ bool
+Index: linux-stable/arch/powerpc/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/powerpc/include/asm/thread_info.h
++++ linux-stable/arch/powerpc/include/asm/thread_info.h
+@@ -43,6 +43,8 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
+@@ -102,12 +104,14 @@ static inline struct thread_info *curren
+ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+ #define TIF_NOERROR 12 /* Force successful syscall return */
+ #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 14 /* lazy rescheduling necessary */
+ #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+ #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+ #define _TIF_32BIT (1<<TIF_32BIT)
+ #define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
+@@ -123,8 +127,9 @@ static inline struct thread_info *curren
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+- _TIF_NOTIFY_RESUME)
++ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+Index: linux-stable/arch/powerpc/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/asm-offsets.c
++++ linux-stable/arch/powerpc/kernel/asm-offsets.c
+@@ -124,6 +124,7 @@ int main(void)
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+
+Index: linux-stable/arch/powerpc/kernel/entry_32.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/entry_32.S
++++ linux-stable/arch/powerpc/kernel/entry_32.S
+@@ -843,8 +843,15 @@ resume_kernel:
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
+- andi. r0,r3,MSR_EE /* interrupts off? */
++
++1: andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+@@ -853,11 +860,11 @@ resume_kernel:
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1180,7 +1187,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1201,7 +1208,7 @@ recheck:
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+Index: linux-stable/arch/powerpc/kernel/entry_64.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/entry_64.S
++++ linux-stable/arch/powerpc/kernel/entry_64.S
+@@ -580,7 +580,7 @@ _GLOBAL(ret_from_except_lite)
+ andi. r0,r4,_TIF_USER_WORK_MASK
+ beq restore
+
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 1f
+ bl .restore_interrupts
+ bl .schedule
+@@ -595,11 +595,16 @@ _GLOBAL(ret_from_except_lite)
+ resume_kernel:
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ 1f
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
+- cmpwi cr1,r8,0
++1: cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+ crandc eq,cr1*4+eq,eq
+@@ -610,13 +615,13 @@ resume_kernel:
+ * sure we are soft-disabled first
+ */
+ SOFT_DISABLE_INTS(r3,r4)
+-1: bl .preempt_schedule_irq
++2: bl .preempt_schedule_irq
+
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
+- bne 1b
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
++ bne 2b
+ #endif /* CONFIG_PREEMPT */
+
+ .globl fast_exc_return_irq
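Both the 32- and 64-bit entry paths above fold the two flags into a
single test via the new _TIF_NEED_RESCHED_MASK. The reschedule loop
after preempt_schedule_irq() thus becomes, in rough C (sketch only):

    do {
        preempt_schedule_irq();
        flags = current_thread_info()->flags;
    } while (flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY));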
Added: dists/trunk/linux/debian/patches/features/all/rt/preempt-lazy-support.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/preempt-lazy-support.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,603 @@
+Subject: sched: Add support for lazy preemption
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 26 Oct 2012 18:50:54 +0100
+
+It has become an obsession to mitigate the determinism vs. throughput
+loss of RT. Looking at the mainline semantics of preemption points
+gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER
+tasks. One major issue is the wakeup of tasks which are right away
+preempting the waking task while the waking task holds a lock on which
+the woken task will block right after having preempted the waker. In
+mainline this is prevented due to the implicit preemption disable of
+spin/rw_lock held regions. On RT this is not possible due to the fully
+preemptible nature of sleeping spinlocks.
+
+Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
+is really not a correctness issue. RT folks are concerned about
+SCHED_FIFO/RR task preemption and not about the purely
+fairness-driven SCHED_OTHER preemption latencies.
+
+So I introduced a lazy preemption mechanism which only applies to
+SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside from the
+existing preempt_count, each task now sports a preempt_lazy_count
+which is manipulated on lock acquisition and release. This is slightly
+imprecise, as for laziness reasons I coupled this to
+migrate_disable/enable, so some other mechanisms get the same treatment
+(e.g. get_cpu_light).
+
+Now on the scheduler side, instead of setting NEED_RESCHED this sets
+NEED_RESCHED_LAZY in the case of a SCHED_OTHER/SCHED_OTHER preemption
+and therefore allows the waking task to exit the lock-held region
+before the woken task preempts. That also works better for cross-CPU
+wakeups as the other side can stay in the adaptive spinning loop.
+
+For RT class preemption there is no change. This simply sets
+NEED_RESCHED and forgoes the lazy preemption counter.
+
+Initial tests do not expose any observable latency increase, but
+history shows that I've been proven wrong before :)
+
+The lazy preemption mode is on by default, but with
+CONFIG_SCHED_DEBUG enabled it can be disabled via:
+
+ # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features
+
+and re-enabled via
+
+ # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features
+
+The test results so far are very machine- and workload-dependent, but
+there is a clear trend that it enhances the non-RT workload
+performance.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/ftrace_event.h | 1
+ include/linux/preempt.h | 38 ++++++++++++++++++++++++++-
+ include/linux/sched.h | 51 ++++++++++++++++++++++++++++++++----
+ kernel/Kconfig.preempt | 6 ++++
+ kernel/sched/core.c | 60 ++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/fair.c | 16 +++++------
+ kernel/sched/features.h | 4 ++
+ kernel/sched/sched.h | 9 ++++++
+ kernel/trace/trace.c | 41 +++++++++++++++++------------
+ kernel/trace/trace.h | 2 +
+ kernel/trace/trace_output.c | 13 +++++++--
+ 11 files changed, 207 insertions(+), 34 deletions(-)
+
+Index: linux-stable/include/linux/ftrace_event.h
+===================================================================
+--- linux-stable.orig/include/linux/ftrace_event.h
++++ linux-stable/include/linux/ftrace_event.h
+@@ -51,6 +51,7 @@ struct trace_entry {
+ int pid;
+ unsigned short migrate_disable;
+ unsigned short padding;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define FTRACE_MAX_EVENT \
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -23,15 +23,38 @@
+
+ #define preempt_count() (current_thread_info()->preempt_count)
+
++#ifdef CONFIG_PREEMPT_LAZY
++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
++#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
++#else
++#define add_preempt_lazy_count(val) do { } while (0)
++#define sub_preempt_lazy_count(val) do { } while (0)
++#define inc_preempt_lazy_count() do { } while (0)
++#define dec_preempt_lazy_count() do { } while (0)
++#define preempt_lazy_count() (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT
+
+ asmlinkage void preempt_schedule(void);
+
++# ifdef CONFIG_PREEMPT_LAZY
++#define preempt_check_resched() \
++do { \
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
++ test_thread_flag(TIF_NEED_RESCHED_LAZY))) \
++ preempt_schedule(); \
++} while (0)
++# else
+ #define preempt_check_resched() \
+ do { \
+- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+ } while (0)
++# endif
+
+ #else /* !CONFIG_PREEMPT */
+
+@@ -48,6 +71,12 @@ do { \
+ barrier(); \
+ } while (0)
+
++#define preempt_lazy_disable() \
++do { \
++ inc_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+@@ -69,6 +98,13 @@ do { \
+ preempt_check_resched(); \
+ } while (0)
+
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++ preempt_check_resched(); \
++} while (0)
++
+ /* For debugging and tracer internals only! */
+ #define add_preempt_count_notrace(val) \
+ do { preempt_count() += (val); } while (0)
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -2660,6 +2660,52 @@ static inline int test_tsk_need_resched(
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
++}
++
++static inline int need_resched_lazy(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++static inline int need_resched(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED) ||
++ test_thread_flag(TIF_NEED_RESCHED_LAZY);
++}
++#else
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
++static inline int need_resched_lazy(void) { return 0; }
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++static inline int need_resched(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++#endif
++
+ static inline int restart_syscall(void)
+ {
+ set_tsk_thread_flag(current, TIF_SIGPENDING);
+@@ -2691,11 +2737,6 @@ static inline int signal_pending_state(l
+ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+ }
+
+-static inline int need_resched(void)
+-{
+- return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+-}
+-
+ /*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+Index: linux-stable/kernel/Kconfig.preempt
+===================================================================
+--- linux-stable.orig/kernel/Kconfig.preempt
++++ linux-stable/kernel/Kconfig.preempt
+@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
+ bool
+ select PREEMPT
+
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
++
+ choice
+ prompt "Preemption Model"
+ default PREEMPT_NONE
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -534,6 +534,37 @@ void resched_task(struct task_struct *p)
+ smp_send_reschedule(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++void resched_task_lazy(struct task_struct *p)
++{
++ int cpu;
++
++ if (!sched_feat(PREEMPT_LAZY)) {
++ resched_task(p);
++ return;
++ }
++
++ assert_raw_spin_locked(&task_rq(p)->lock);
++
++ if (test_tsk_need_resched(p))
++ return;
++
++ if (test_tsk_need_resched_lazy(p))
++ return;
++
++ set_tsk_need_resched_lazy(p);
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED_LAZY must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(p))
++ smp_send_reschedule(cpu);
++}
++#endif
++
+ void resched_cpu(int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -650,6 +681,17 @@ void resched_task(struct task_struct *p)
+ assert_raw_spin_locked(&task_rq(p)->lock);
+ set_tsk_need_resched(p);
+ }
++#ifdef CONFIG_PREEMPT_LAZY
++void resched_task_lazy(struct task_struct *p)
++{
++ if (!sched_feat(PREEMPT_LAZY)) {
++ resched_task(p);
++ return;
++ }
++ assert_raw_spin_locked(&task_rq(p)->lock);
++ set_tsk_need_resched_lazy(p);
++}
++#endif
+ #endif /* CONFIG_SMP */
+
+ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
+@@ -1838,6 +1880,9 @@ void sched_fork(struct task_struct *p)
+ /* Want to start with kernel preemption disabled. */
+ task_thread_info(p)->preempt_count = 1;
+ #endif
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ #endif
+@@ -3448,6 +3493,7 @@ void migrate_disable(void)
+ return;
+ }
+
++ preempt_lazy_disable();
+ pin_current_cpu();
+ p->migrate_disable = 1;
+ preempt_enable();
+@@ -3503,6 +3549,7 @@ void migrate_enable(void)
+
+ unpin_current_cpu();
+ preempt_enable();
++ preempt_lazy_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
+ #else
+@@ -3603,6 +3650,7 @@ need_resched:
+ put_prev_task(rq, prev);
+ next = pick_next_task(rq);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ rq->skip_clock_update = 0;
+
+ if (likely(prev != next)) {
+@@ -3724,6 +3772,14 @@ asmlinkage void __sched notrace preempt_
+ if (likely(ti->preempt_count || irqs_disabled()))
+ return;
+
++#ifdef CONFIG_PREEMPT_LAZY
++ /*
++ * Check for lazy preemption
++ */
++ if (ti->preempt_lazy_count && !test_thread_flag(TIF_NEED_RESCHED))
++ return;
++#endif
++
+ do {
+ add_preempt_count_notrace(PREEMPT_ACTIVE);
+ /*
+@@ -5331,7 +5387,9 @@ void __cpuinit init_idle(struct task_str
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ task_thread_info(idle)->preempt_count = 0;
+-
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(idle)->preempt_lazy_count = 0;
++#endif
+ /*
+ * The idle tasks have their own, simple scheduling class:
+ */
+Index: linux-stable/kernel/sched/fair.c
+===================================================================
+--- linux-stable.orig/kernel/sched/fair.c
++++ linux-stable/kernel/sched/fair.c
+@@ -1222,7 +1222,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime) {
+- resched_task(rq_of(cfs_rq)->curr);
++ resched_task_lazy(rq_of(cfs_rq)->curr);
+ /*
+ * The current task ran long enough, ensure it doesn't get
+ * re-elected due to buddy favours.
+@@ -1246,7 +1246,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+ return;
+
+ if (delta > ideal_runtime)
+- resched_task(rq_of(cfs_rq)->curr);
++ resched_task_lazy(rq_of(cfs_rq)->curr);
+ }
+
+ static void
+@@ -1363,7 +1363,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+ * validating it and just reschedule.
+ */
+ if (queued) {
+- resched_task(rq_of(cfs_rq)->curr);
++ resched_task_lazy(rq_of(cfs_rq)->curr);
+ return;
+ }
+ /*
+@@ -1543,7 +1543,7 @@ static void __account_cfs_rq_runtime(str
+ * hierarchy can be throttled
+ */
+ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
+- resched_task(rq_of(cfs_rq)->curr);
++ resched_task_lazy(rq_of(cfs_rq)->curr);
+ }
+
+ static __always_inline
+@@ -2129,7 +2129,7 @@ static void hrtick_start_fair(struct rq
+
+ if (delta < 0) {
+ if (rq->curr == p)
+- resched_task(p);
++ resched_task_lazy(p);
+ return;
+ }
+
+@@ -2954,7 +2954,7 @@ static void check_preempt_wakeup(struct
+ return;
+
+ preempt:
+- resched_task(curr);
++ resched_task_lazy(curr);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+@@ -5027,7 +5027,7 @@ static void task_fork_fair(struct task_s
+ * 'current' within the tree based on its new key value.
+ */
+ swap(curr->vruntime, se->vruntime);
+- resched_task(rq->curr);
++ resched_task_lazy(rq->curr);
+ }
+
+ se->vruntime -= cfs_rq->min_vruntime;
+@@ -5052,7 +5052,7 @@ prio_changed_fair(struct rq *rq, struct
+ */
+ if (rq->curr == p) {
+ if (p->prio > oldprio)
+- resched_task(rq->curr);
++ resched_task_lazy(rq->curr);
+ } else
+ check_preempt_curr(rq, p, 0);
+ }
+Index: linux-stable/kernel/sched/features.h
+===================================================================
+--- linux-stable.orig/kernel/sched/features.h
++++ linux-stable/kernel/sched/features.h
+@@ -68,6 +68,9 @@ SCHED_FEAT(NONTASK_POWER, true)
+ SCHED_FEAT(TTWU_QUEUE, true)
+ #else
+ SCHED_FEAT(TTWU_QUEUE, false)
++# ifdef CONFIG_PREEMPT_LAZY
++SCHED_FEAT(PREEMPT_LAZY, true)
++# endif
+ #endif
+
+ SCHED_FEAT(FORCE_SD_OVERLAP, false)
+Index: linux-stable/kernel/sched/sched.h
+===================================================================
+--- linux-stable.orig/kernel/sched/sched.h
++++ linux-stable/kernel/sched/sched.h
+@@ -876,6 +876,15 @@ extern void init_sched_fair_class(void);
+ extern void resched_task(struct task_struct *p);
+ extern void resched_cpu(int cpu);
+
++#ifdef CONFIG_PREEMPT_LAZY
++extern void resched_task_lazy(struct task_struct *tsk);
++#else
++static inline void resched_task_lazy(struct task_struct *tsk)
++{
++ resched_task(tsk);
++}
++#endif
++
+ extern struct rt_bandwidth def_rt_bandwidth;
+ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+Index: linux-stable/kernel/trace/trace.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.c
++++ linux-stable/kernel/trace/trace.c
+@@ -1152,6 +1152,7 @@ tracing_generic_entry_update(struct trac
+ struct task_struct *tsk = current;
+
+ entry->preempt_count = pc & 0xff;
++ entry->preempt_lazy_count = preempt_lazy_count();
+ entry->pid = (tsk) ? tsk->pid : 0;
+ entry->padding = 0;
+ entry->flags =
+@@ -1162,7 +1163,8 @@ tracing_generic_entry_update(struct trac
+ #endif
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+- (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
++ (need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0);
+
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+@@ -1985,15 +1987,17 @@ get_total_entries(struct trace_array *tr
+
+ static void print_lat_help_header(struct seq_file *m)
+ {
+- seq_puts(m, "# _------=> CPU# \n");
+- seq_puts(m, "# / _-----=> irqs-off \n");
+- seq_puts(m, "# | / _----=> need-resched \n");
+- seq_puts(m, "# || / _---=> hardirq/softirq \n");
+- seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / _--=> migrate-disable\n");
+- seq_puts(m, "# ||||| / delay \n");
+- seq_puts(m, "# cmd pid |||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# _--------=> CPU# \n");
++ seq_puts(m, "# / _-------=> irqs-off \n");
++ seq_puts(m, "# | / _------=> need-resched \n");
++ seq_puts(m, "# || / _-----=> need-resched_lazy \n");
++ seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
++ seq_puts(m, "# |||| / _---=> preempt-depth \n");
++ seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
++ seq_puts(m, "# |||||| / _-=> migrate-disable \n");
++ seq_puts(m, "# ||||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||||| time | caller \n");
++ seq_puts(m, "# \\ / |||||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct trace_array *tr, struct seq_file *m)
+@@ -2017,13 +2021,16 @@ static void print_func_help_header(struc
+ static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+ {
+ print_event_info(tr, m);
+- seq_puts(m, "# _-----=> irqs-off\n");
+- seq_puts(m, "# / _----=> need-resched\n");
+- seq_puts(m, "# | / _---=> hardirq/softirq\n");
+- seq_puts(m, "# || / _--=> preempt-depth\n");
+- seq_puts(m, "# ||| / delay\n");
+- seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+- seq_puts(m, "# | | | |||| | |\n");
++ seq_puts(m, "# _-------=> irqs-off \n");
++ seq_puts(m, "# / _------=> need-resched \n");
++ seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
++ seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
++ seq_puts(m, "# |||/ _---=> preempt-depth \n");
++ seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
++ seq_puts(m, "# ||||| / _-=> migrate-disable \n");
++ seq_puts(m, "# |||||| / delay\n");
++ seq_puts(m, "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n");
++ seq_puts(m, "# | | | ||||||| | |\n");
+ }
+
+ void
+Index: linux-stable/kernel/trace/trace.h
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.h
++++ linux-stable/kernel/trace/trace.h
+@@ -116,6 +116,7 @@ struct uprobe_trace_entry_head {
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
++ * NEED_RESCHED_LAZY - lazy reschedule is requested
+ */
+ enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+@@ -123,6 +124,7 @@ enum trace_flag_type {
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
+ };
+
+ #define TRACE_BUF_SIZE 1024
+Index: linux-stable/kernel/trace/trace_output.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace_output.c
++++ linux-stable/kernel/trace/trace_output.c
+@@ -564,6 +564,7 @@ int trace_print_lat_fmt(struct trace_seq
+ {
+ char hardsoft_irq;
+ char need_resched;
++ char need_resched_lazy;
+ char irqs_off;
+ int hardirq;
+ int softirq;
+@@ -578,14 +579,17 @@ int trace_print_lat_fmt(struct trace_seq
+ '.';
+ need_resched =
+ (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
++ need_resched_lazy =
++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
+ hardsoft_irq =
+ (hardirq && softirq) ? 'H' :
+ hardirq ? 'h' :
+ softirq ? 's' :
+ '.';
+
+- if (!trace_seq_printf(s, "%c%c%c",
+- irqs_off, need_resched, hardsoft_irq))
++ if (!trace_seq_printf(s, "%c%c%c%c",
++ irqs_off, need_resched, need_resched_lazy,
++ hardsoft_irq))
+ return 0;
+
+ if (entry->preempt_count)
+@@ -593,6 +597,11 @@ int trace_print_lat_fmt(struct trace_seq
+ else
+ ret = trace_seq_putc(s, '.');
+
++ if (entry->preempt_lazy_count)
++ ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
++ else
++ ret = trace_seq_putc(s, '.');
++
+ if (entry->migrate_disable)
+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
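
Taken together, the hunks above form the core of preempt-lazy-support.patch: fair-class reschedule requests are routed through resched_task_lazy(), the PREEMPT_LAZY scheduler feature gates the behaviour, and the tracer gains need-resched_lazy and preempt-lazy-depth columns. A minimal sketch of the intended semantics follows; only the names resched_task_lazy(), TIF_NEED_RESCHED_LAZY and preempt_lazy_count() come from the patch, the body is illustrative:

    /* Sketch only: a lazy request sets TIF_NEED_RESCHED_LAZY instead of
     * TIF_NEED_RESCHED, so the current task is not preempted on the spot;
     * the request is honoured at the next preemption point once the
     * task's preempt_lazy_count has dropped to zero. */
    void resched_task_lazy(struct task_struct *p)
    {
    #ifdef CONFIG_PREEMPT_LAZY
            set_tsk_thread_flag(p, TIF_NEED_RESCHED_LAZY);
    #else
            resched_task(p);        /* matches the sched.h fallback above */
    #endif
    }
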
Modified: dists/trunk/linux/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -11,10 +11,10 @@
init/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-Index: linux-3.6/init/Kconfig
+Index: linux-stable/init/Kconfig
===================================================================
---- linux-3.6.orig/init/Kconfig
-+++ linux-3.6/init/Kconfig
+--- linux-stable.orig/init/Kconfig
++++ linux-stable/init/Kconfig
@@ -504,7 +504,7 @@ config RCU_FANOUT_EXACT
config RCU_FAST_NO_HZ
Modified: dists/trunk/linux/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -216,7 +216,7 @@
/*
* Record the number of times rcutorture tests have been initiated and
-@@ -1970,6 +1974,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -1972,6 +1976,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -224,7 +224,7 @@
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -1978,6 +1983,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -1980,6 +1985,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -232,7 +232,7 @@
/*
* Because a context switch is a grace period for RCU-sched and RCU-bh,
-@@ -2034,6 +2040,7 @@ void synchronize_sched(void)
+@@ -2036,6 +2042,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -240,7 +240,7 @@
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -2054,6 +2061,7 @@ void synchronize_rcu_bh(void)
+@@ -2056,6 +2063,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -248,7 +248,7 @@
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-@@ -2460,6 +2468,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -2462,6 +2470,7 @@ static void _rcu_barrier(struct rcu_stat
destroy_rcu_head_on_stack(&rd.barrier_head);
}
@@ -256,7 +256,7 @@
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -2468,6 +2477,7 @@ void rcu_barrier_bh(void)
+@@ -2470,6 +2479,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
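
The hunk-offset refreshes above do not change the substance of the patch: on PREEMPT_RT_FULL the rcu-bh machinery (call_rcu_bh, synchronize_rcu_bh, rcu_barrier_bh) is compiled out and the bh API falls through to the preemptible RCU flavour. A hedged sketch of that mapping, since the defining lines are not part of this diff:

    #ifdef CONFIG_PREEMPT_RT_FULL
    /* Sketch (assumed form): with softirqs preemptible there is no
     * separate bh grace period, so the bh API aliases preemptible RCU. */
    # define call_rcu_bh            call_rcu
    # define synchronize_rcu_bh     synchronize_rcu
    # define rcu_barrier_bh         rcu_barrier
    #endif
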
Modified: dists/trunk/linux/debian/patches/features/all/rt/series
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/series Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/series Wed Nov 7 02:42:37 2012 (r19485)
@@ -586,14 +586,15 @@
net-use-cpu-light-in-ip-send-unicast-reply.patch
peterz-srcu-crypto-chain.patch
crypto-make-core-static-and-init-scru-early.patch
+fix-crypto-api-init-for-3-6-4-rt10.patch
x86-perf-uncore-deal-with-kfree.patch
softirq-make-serving-softirqs-a-task-flag.patch
softirq-split-handling-function.patch
softirq-split-locks.patch
rcu-tiny-solve-rt-mistery.patch
+slub-correct-per-cpu-slab.patch
mm-enable-slub.patch
-cpufreq-powernow-k8-fix-bogus-smp-processor-id-usage.patch
hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -602,10 +603,14 @@
softirq-add-more-debug.patch
net-netif-rx-ni-use-local-bh-disable.patch
-# CHECKME
-#rt-replace-rt-spin-lock-to-raw-one-in-res_counter.patch
+fix-random-fallout.patch
+
+preempt-lazy-support.patch
+x86-preempt-lazy.patch
+arm-preempt-lazy-support.patch
# Enable full RT
+powerpc-preempt-lazy-support.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
Modified: dists/trunk/linux/debian/patches/features/all/rt/skbufhead-raw-lock.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/skbufhead-raw-lock.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/skbufhead-raw-lock.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -33,7 +33,7 @@
};
struct sk_buff;
-@@ -995,6 +996,12 @@ static inline void skb_queue_head_init(s
+@@ -992,6 +993,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -67,7 +67,7 @@
#endif
}
-@@ -3399,7 +3399,7 @@ static void flush_backlog(void *arg)
+@@ -3402,7 +3402,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -76,7 +76,7 @@
input_queue_head_incr(sd);
}
}
-@@ -3408,10 +3408,13 @@ static void flush_backlog(void *arg)
+@@ -3411,10 +3411,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -91,7 +91,7 @@
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -3900,10 +3903,17 @@ static void net_rx_action(struct softirq
+@@ -3903,10 +3906,17 @@ static void net_rx_action(struct softirq
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -109,7 +109,7 @@
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
-@@ -6334,6 +6344,9 @@ static int dev_cpu_callback(struct notif
+@@ -6337,6 +6347,9 @@ static int dev_cpu_callback(struct notif
netif_rx(skb);
input_queue_head_incr(oldsd);
}
@@ -119,7 +119,7 @@
return NOTIFY_OK;
}
-@@ -6598,8 +6611,9 @@ static int __init net_dev_init(void)
+@@ -6601,8 +6614,9 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
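
The refreshed hunks keep the point of skbufhead-raw-lock.patch: flush_backlog() runs with the backlog protected by a raw lock, so skbs removed there are not freed on the spot but parked on a per-cpu list and freed later from net_rx_action(), where sleeping locks are legal again. The added lines are elided by this meta-diff; the sketch below assumes the per-cpu list is the patch's sd->tofree_queue:

    /* Assumed shape of the elided additions. In flush_backlog(),
     * instead of kfree_skb(skb): */
    __skb_queue_tail(&sd->tofree_queue, skb);

    /* ...and later, from net_rx_action(): */
    while ((skb = __skb_dequeue(&sd->tofree_queue)))
            kfree_skb(skb);
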
Added: dists/trunk/linux/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,49 @@
+Date: Tue, 30 Oct 2012 15:29:17 +0000
+From: Christoph Lameter <cl at linux.com>
+Subject: slub: Use correct cpu_slab on dead cpu
+
+Pass a kmem_cache_cpu pointer into unfreeze partials so that a different
+kmem_cache_cpu structure than the local one can be specified.
+
+Reported-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Christoph Lameter <cl at linux.com>
+
+---
+ mm/slub.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/mm/slub.c
+===================================================================
+--- linux-stable.orig/mm/slub.c
++++ linux-stable/mm/slub.c
+@@ -1871,10 +1871,10 @@ redo:
+ *
+ * This function must be called with interrupt disabled.
+ */
+-static void unfreeze_partials(struct kmem_cache *s)
++static void unfreeze_partials(struct kmem_cache *s,
++ struct kmem_cache_cpu *c)
+ {
+ struct kmem_cache_node *n = NULL, *n2 = NULL;
+- struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+ struct page *page, *discard_page = NULL;
+
+ while ((page = c->partial)) {
+@@ -1960,7 +1960,7 @@ int put_cpu_partial(struct kmem_cache *s
+ * set to the per node partial list.
+ */
+ local_irq_save(flags);
+- unfreeze_partials(s);
++ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+ local_irq_restore(flags);
+ pobjects = 0;
+ pages = 0;
+@@ -2002,7 +2002,7 @@ static inline void __flush_cpu_slab(stru
+ if (c->page)
+ flush_slab(s, c);
+
+- unfreeze_partials(s);
++ unfreeze_partials(s, c);
+ }
+ }
+
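
The fix is visible in full above and is purely an interface correction: unfreeze_partials() stops assuming the local CPU and takes the kmem_cache_cpu explicitly. Side by side, from the hunks:

    /* Before: always drained the local per-cpu slab, which is wrong when
     * __flush_cpu_slab() is flushing a CPU that has just gone offline. */
    unfreeze_partials(s);

    /* After: each caller names the kmem_cache_cpu to drain. */
    unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));  /* put_cpu_partial() */
    unfreeze_partials(s, c);                          /* __flush_cpu_slab() */
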
Modified: dists/trunk/linux/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -127,7 +127,7 @@
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -3787,6 +3790,7 @@ static void net_rps_action_and_irq_enabl
+@@ -3790,6 +3793,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -135,7 +135,7 @@
}
static int process_backlog(struct napi_struct *napi, int quota)
-@@ -3859,6 +3863,7 @@ void __napi_schedule(struct napi_struct
+@@ -3862,6 +3866,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
@@ -143,7 +143,7 @@
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -6364,6 +6369,7 @@ static int dev_cpu_callback(struct notif
+@@ -6367,6 +6372,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
Modified: dists/trunk/linux/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -60,7 +60,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -1398,13 +1398,13 @@ void update_process_times(int user_tick)
+@@ -1400,13 +1400,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
Modified: dists/trunk/linux/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -57,7 +57,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -1391,9 +1391,10 @@ unsigned long get_next_timer_interrupt(u
+@@ -1393,9 +1393,10 @@ unsigned long get_next_timer_interrupt(u
/*
* On PREEMPT_RT we cannot sleep here. If the trylock does not
* succeed then we return the worst-case 'expires in 1 tick'
@@ -70,7 +70,7 @@
return now + 1;
#else
spin_lock(&base->lock);
-@@ -1403,7 +1404,7 @@ unsigned long get_next_timer_interrupt(u
+@@ -1405,7 +1406,7 @@ unsigned long get_next_timer_interrupt(u
base->next_timer = __next_timer_interrupt(base);
expires = base->next_timer;
}
Modified: dists/trunk/linux/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -24,11 +24,15 @@
Signed-off-by: Paul Gortmaker <paul.gortmaker at windriver.com>
-diff --git a/kernel/timer.c b/kernel/timer.c
-index 00f1d4f..e81d197 100644
---- a/kernel/timer.c
-+++ b/kernel/timer.c
-@@ -1404,7 +1404,11 @@ unsigned long get_next_timer_interrupt(unsigned long now)
+---
+ kernel/timer.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1406,7 +1406,11 @@ unsigned long get_next_timer_interrupt(u
base->next_timer = __next_timer_interrupt(base);
expires = base->next_timer;
}
@@ -40,6 +44,3 @@
if (time_before_eq(expires, now))
return now;
---
-1.7.0.4
-
Modified: dists/trunk/linux/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -14,7 +14,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -737,6 +737,36 @@ static struct tvec_base *lock_timer_base
+@@ -739,6 +739,36 @@ static struct tvec_base *lock_timer_base
}
}
@@ -51,7 +51,7 @@
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
-@@ -775,14 +805,8 @@ __mod_timer(struct timer_list *timer, un
+@@ -777,14 +807,8 @@ __mod_timer(struct timer_list *timer, un
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
Modified: dists/trunk/linux/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -13,7 +13,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -1400,7 +1400,6 @@ void update_process_times(int user_tick)
+@@ -1402,7 +1402,6 @@ void update_process_times(int user_tick)
account_process_tick(p, user_tick);
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
@@ -21,7 +21,7 @@
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_run();
-@@ -1416,6 +1415,7 @@ static void run_timer_softirq(struct sof
+@@ -1418,6 +1417,7 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
Modified: dists/trunk/linux/debian/patches/features/all/rt/timers-preempt-rt-support.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timers-preempt-rt-support.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timers-preempt-rt-support.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -12,7 +12,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -1363,7 +1363,17 @@ unsigned long get_next_timer_interrupt(u
+@@ -1365,7 +1365,17 @@ unsigned long get_next_timer_interrupt(u
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -30,7 +30,7 @@
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
-@@ -1373,7 +1383,6 @@ unsigned long get_next_timer_interrupt(u
+@@ -1375,7 +1385,6 @@ unsigned long get_next_timer_interrupt(u
if (time_before_eq(expires, now))
return now;
@@ -38,7 +38,7 @@
return cmp_next_hrtimer_event(now, expires);
}
#endif
-@@ -1763,7 +1772,7 @@ static void __cpuinit migrate_timers(int
+@@ -1765,7 +1774,7 @@ static void __cpuinit migrate_timers(int
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
@@ -47,7 +47,7 @@
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
-@@ -1784,7 +1793,7 @@ static void __cpuinit migrate_timers(int
+@@ -1786,7 +1795,7 @@ static void __cpuinit migrate_timers(int
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
Modified: dists/trunk/linux/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
==============================================================================
--- dists/trunk/linux/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -31,7 +31,7 @@
===================================================================
--- linux-stable.orig/kernel/timer.c
+++ linux-stable/kernel/timer.c
-@@ -75,6 +75,7 @@ struct tvec_root {
+@@ -76,6 +76,7 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
@@ -39,7 +39,7 @@
unsigned long timer_jiffies;
unsigned long next_timer;
unsigned long active_timers;
-@@ -755,12 +756,15 @@ __mod_timer(struct timer_list *timer, un
+@@ -757,12 +758,15 @@ __mod_timer(struct timer_list *timer, un
debug_activate(timer, expires);
@@ -55,7 +55,7 @@
new_base = per_cpu(tvec_bases, cpu);
if (base != new_base) {
-@@ -961,6 +965,29 @@ void add_timer_on(struct timer_list *tim
+@@ -963,6 +967,29 @@ void add_timer_on(struct timer_list *tim
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -85,7 +85,7 @@
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1018,7 +1045,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1020,7 +1047,7 @@ int try_to_del_timer_sync(struct timer_l
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -94,7 +94,7 @@
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1078,7 +1105,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1080,7 +1107,7 @@ int del_timer_sync(struct timer_list *ti
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -103,7 +103,7 @@
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1192,10 +1219,11 @@ static inline void __run_timers(struct t
+@@ -1194,10 +1221,11 @@ static inline void __run_timers(struct t
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
@@ -116,7 +116,7 @@
spin_unlock_irq(&base->lock);
}
-@@ -1696,6 +1724,7 @@ static int __cpuinit init_timers_cpu(int
+@@ -1698,6 +1726,7 @@ static int __cpuinit init_timers_cpu(int
}
spin_lock_init(&base->lock);
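
All of the timer.c hunks above are two-line offset refreshes; the mechanism of timers-prepare-for-full-preemption.patch is unchanged: tvec_base records the running timer, and on PREEMPT_RT_FULL del_timer_sync() sleeps until the handler has finished instead of spinning. The added lines themselves are elided by this meta-diff, so the following is an assumption about their shape, not a quote:

    /* Sketch (assumed): the timer softirq publishes base->running_timer
     * around call_timer_fn() and wakes the waitqueue when clearing it,
     * letting del_timer_sync() sleep instead of calling cpu_relax(). */
    static void wait_for_running_timer(struct timer_list *timer)
    {
            struct tvec_base *base = tbase_get_base(timer->base);

            wait_event(base->wait_for_running_timer,
                       base->running_timer != timer);
    }
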
Added: dists/trunk/linux/debian/patches/features/all/rt/x86-preempt-lazy.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/all/rt/x86-preempt-lazy.patch Wed Nov 7 02:42:37 2012 (r19485)
@@ -0,0 +1,187 @@
+Subject: x86-preempt-lazy.patch
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 01 Nov 2012 11:03:47 +0100
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/include/asm/thread_info.h | 6 ++++++
+ arch/x86/kernel/asm-offsets.c | 1 +
+ arch/x86/kernel/entry_32.S | 18 +++++++++++++-----
+ arch/x86/kernel/entry_64.S | 24 +++++++++++++++---------
+ 5 files changed, 36 insertions(+), 14 deletions(-)
+
+Index: linux-stable/arch/x86/Kconfig
+===================================================================
+--- linux-stable.orig/arch/x86/Kconfig
++++ linux-stable/arch/x86/Kconfig
+@@ -97,6 +97,7 @@ config X86
+ select KTIME_SCALAR if X86_32
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+
+ config INSTRUCTION_DECODER
+ def_bool (KPROBES || PERF_EVENTS || UPROBES)
+Index: linux-stable/arch/x86/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/thread_info.h
++++ linux-stable/arch/x86/include/asm/thread_info.h
+@@ -31,6 +31,8 @@ struct thread_info {
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => lazy preemptable,
++ <0 => BUG */
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+@@ -83,6 +85,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+@@ -108,6 +111,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+@@ -155,6 +159,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define PREEMPT_ACTIVE 0x10000000
+
+ #ifdef CONFIG_X86_32
+Index: linux-stable/arch/x86/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/asm-offsets.c
++++ linux-stable/arch/x86/kernel/asm-offsets.c
+@@ -33,6 +33,7 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+Index: linux-stable/arch/x86/kernel/entry_32.S
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/entry_32.S
++++ linux-stable/arch/x86/kernel/entry_32.S
+@@ -349,14 +349,22 @@ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_all
+-need_resched:
+ movl TI_flags(%ebp), %ecx # need_resched set ?
+ testb $_TIF_NEED_RESCHED, %cl
++ jnz 1f
++
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++ testl $_TIF_NEED_RESCHED_LAZY, %ecx
+ jz restore_all
+- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++
++1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+- jmp need_resched
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
++ jnz 1b
++ jmp restore_all
+ END(resume_kernel)
+ #endif
+ CFI_ENDPROC
+@@ -589,7 +597,7 @@ ENDPROC(system_call)
+ ALIGN
+ RING0_PTREGS_FRAME # can't unwind into user space anyway
+ work_pending:
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jz work_notifysig
+ work_resched:
+ call schedule
+@@ -602,7 +610,7 @@ work_resched:
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+Index: linux-stable/arch/x86/kernel/entry_64.S
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/entry_64.S
++++ linux-stable/arch/x86/kernel/entry_64.S
+@@ -560,8 +560,8 @@ sysret_check:
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+ sysret_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc sysret_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz sysret_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -673,8 +673,8 @@ GLOBAL(int_with_check)
+ /* First do a reschedule test. */
+ /* edx: work, edi: workmask */
+ int_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc int_very_careful
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz int_very_careful
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -969,8 +969,8 @@ bad_iret:
+ /* edi: workmask, edx: work */
+ retint_careful:
+ CFI_RESTORE_STATE
+- bt $TIF_NEED_RESCHED,%edx
+- jnc retint_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz retint_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -1003,9 +1003,15 @@ retint_signal:
+ ENTRY(retint_kernel)
+ cmpl $0,TI_preempt_count(%rcx)
+ jnz retint_restore_args
+- bt $TIF_NEED_RESCHED,TI_flags(%rcx)
++ bt $TIF_NEED_RESCHED,TI_flags(%rcx)
++ jc 1f
++
++ cmpl $0,TI_preempt_lazy_count(%rcx)
++ jnz retint_restore_args
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+ jnc retint_restore_args
+- bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
++
++1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jnc retint_restore_args
+ call preempt_schedule_irq
+ jmp exit_intr
+@@ -1437,7 +1443,7 @@ paranoid_userspace:
+ movq %rsp,%rdi /* &pt_regs */
+ call sync_regs
+ movq %rax,%rsp /* switch stack for scheduling */
+- testl $_TIF_NEED_RESCHED,%ebx
++ testl $_TIF_NEED_RESCHED_MASK,%ebx
+ jnz paranoid_schedule
+ movl %ebx,%edx /* arg3: thread flags */
+ TRACE_IRQS_ON
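
Every assembly change in this patch encodes one rule: take the reschedule when TIF_NEED_RESCHED is set, or when TIF_NEED_RESCHED_LAZY is set while preempt_lazy_count is zero; kernel-mode return paths additionally require that the interrupted context had interrupts enabled. As a C paraphrase of the retint_kernel hunk (illustrative only, the patch itself is the assembly above):

    /* 'ti' is the thread_info reached through %rcx, 'regs' the saved
     * pt_regs. Paraphrase of retint_kernel, not code from the patch. */
    if (ti->preempt_count)
            return;                         /* restore args, no preemption */
    if (!(ti->flags & _TIF_NEED_RESCHED)) {
            if (ti->preempt_lazy_count)
                    return;                 /* lazy requests held back */
            if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
                    return;
    }
    if (!(regs->flags & X86_EFLAGS_IF))
            return;                         /* interrupts were off */
    preempt_schedule_irq();
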
Modified: dists/trunk/linux/debian/patches/series-rt
==============================================================================
--- dists/trunk/linux/debian/patches/series-rt Wed Nov 7 02:19:27 2012 (r19484)
+++ dists/trunk/linux/debian/patches/series-rt Wed Nov 7 02:42:37 2012 (r19485)
@@ -586,18 +586,31 @@
features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch
features/all/rt/peterz-srcu-crypto-chain.patch
features/all/rt/crypto-make-core-static-and-init-scru-early.patch
+features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch
features/all/rt/x86-perf-uncore-deal-with-kfree.patch
features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch
features/all/rt/softirq-split-handling-function.patch
features/all/rt/softirq-split-locks.patch
-# Needs more thought
-# block-wrap-raise-softirq-in-local-bh-to-avoid-context-switches.patch
-# nohz-fix-sirq-fallout.patch
-
-# Enable full RT
features/all/rt/rcu-tiny-solve-rt-mistery.patch
+features/all/rt/slub-correct-per-cpu-slab.patch
features/all/rt/mm-enable-slub.patch
+features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
+
+features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch
+features/all/rt/softirq-add-more-debug.patch
+features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch
+
+features/all/rt/fix-random-fallout.patch
+
+features/all/rt/preempt-lazy-support.patch
+features/all/rt/x86-preempt-lazy.patch
+features/all/rt/arm-preempt-lazy-support.patch
+
+# Enable full RT
+features/all/rt/powerpc-preempt-lazy-support.patch
features/all/rt/kconfig-disable-a-few-options-rt.patch
features/all/rt/kconfig-preempt-rt-full.patch
-#rt-replace-rt-spin-lock-to-raw-one-in-res_counter.patch
+