[kernel] r18172 - in dists/sid/linux-2.6/debian: . patches/features/all/rt patches/series
Uwe Kleine-König
ukleinek-guest at alioth.debian.org
Wed Oct 19 19:05:02 UTC 2011
Author: ukleinek-guest
Date: Wed Oct 19 19:05:00 2011
New Revision: 18172
Log:
[amd64] Update rt featureset to 3.0.7-rt20
Added:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.7-rt20.patch
- copied, changed from r18171, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt18.patch
Deleted:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt18.patch
Modified:
dists/sid/linux-2.6/debian/changelog
dists/sid/linux-2.6/debian/patches/series/6-extra
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Wed Oct 19 02:36:03 2011 (r18171)
+++ dists/sid/linux-2.6/debian/changelog Wed Oct 19 19:05:00 2011 (r18172)
@@ -1,7 +1,7 @@
linux-2.6 (3.0.0-6) UNRELEASED; urgency=low
[ Uwe Kleine-König ]
- * [amd64] Update rt featureset to 3.0.6-rt18
+ * [amd64] Update rt featureset to 3.0.7-rt20
[ Bastian Blank ]
* Add stable 3.0.7, including:
@@ -9,7 +9,7 @@
calculation (Closes: #585130)
- ipv6: fix NULL dereference in udp6_ufo_fragment() (Closes: #643817)
For the complete list of changes, see:
- http://www.kernel.org/pub/linux/kernel/v3.0/ChangeLog-3.0.7
+ http://www.kernel.org/pub/linux/kernel/v3.0/ChangeLog-3.0.7
[ Ben Hutchings ]
* cputimer: Cure lock inversion
Copied and modified: dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.7-rt20.patch (from r18171, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt18.patch)
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.6-rt18.patch Wed Oct 19 02:36:03 2011 (r18171, copy source)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.7-rt20.patch Wed Oct 19 19:05:00 2011 (r18172)
@@ -1,6 +1,6 @@
[bwh: Dropped fix to kernel/taskstats.c applied separately in
bugfix/all/Make-TASKSTATS-require-root-access.patch]
-[bwh: Updated raw spinlock changes in kernel/posix-cpu-timers.c to apply after
+[bwh/ukl: Updated raw spinlock changes in kernel/posix-cpu-timers.c to apply after
bugfix/all/cputime-Cure-lock-inversion.patch]
Index: linux-2.6/mm/memory.c
@@ -1234,210 +1234,482 @@
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
-Index: linux-2.6/kernel/trace/ftrace.c
+Index: linux-2.6/kernel/posix-cpu-timers.c
===================================================================
---- linux-2.6.orig/kernel/trace/ftrace.c
-+++ linux-2.6/kernel/trace/ftrace.c
-@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits
- return NULL;
+--- linux-2.6.orig/kernel/posix-cpu-timers.c
++++ linux-2.6/kernel/posix-cpu-timers.c
+@@ -282,13 +282,13 @@ void thread_group_cputimer(struct task_s
+ * it.
+ */
+ thread_group_cputime(tsk, &sum);
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 1;
+ update_gt_cputime(&cputimer->cputime, &sum);
+ } else
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ *times = cputimer->cputime;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
-+static void
-+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
-+static void
-+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
-+
- static int
--ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
-+ftrace_hash_move(struct ftrace_ops *ops, int enable,
-+ struct ftrace_hash **dst, struct ftrace_hash *src)
- {
- struct ftrace_func_entry *entry;
- struct hlist_node *tp, *tn;
-@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **ds
- unsigned long key;
- int size = src->count;
- int bits = 0;
-+ int ret;
- int i;
-
+ /*
+@@ -700,7 +701,7 @@ static int posix_cpu_timer_set(struct k_
/*
-+ * Remove the current set, update the hash and add
-+ * them back.
-+ */
-+ ftrace_hash_rec_disable(ops, enable);
-+
-+ /*
- * If the new source is empty, just free dst and assign it
- * the empty_hash.
+ * Disarm any old timer after extracting its expiry time.
*/
-@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **ds
- if (bits > FTRACE_HASH_MAX_BITS)
- bits = FTRACE_HASH_MAX_BITS;
-
-+ ret = -ENOMEM;
- new_hash = alloc_ftrace_hash(bits);
- if (!new_hash)
-- return -ENOMEM;
-+ goto out;
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
- size = 1 << src->size_bits;
- for (i = 0; i < size; i++) {
-@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **ds
- rcu_assign_pointer(*dst, new_hash);
- free_ftrace_hash_rcu(old_hash);
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -998,9 +999,9 @@ static void stop_process_timers(struct s
+ struct thread_group_cputimer *cputimer = &sig->cputimer;
+ unsigned long flags;
-- return 0;
-+ ret = 0;
-+ out:
-+ /*
-+ * Enable regardless of ret:
-+ * On success, we enable the new hash.
-+ * On failure, we re-enable the original hash.
-+ */
-+ ftrace_hash_rec_enable(ops, enable);
-+
-+ return ret;
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 0;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
- /*
-@@ -2877,7 +2900,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
- ftrace_match_records(hash, buf, len);
-
- mutex_lock(&ftrace_lock);
-- ret = ftrace_hash_move(orig_hash, hash);
-+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- mutex_unlock(&ftrace_lock);
-
- mutex_unlock(&ftrace_regex_lock);
-@@ -3060,18 +3083,12 @@ ftrace_regex_release(struct inode *inode
- orig_hash = &iter->ops->notrace_hash;
-
- mutex_lock(&ftrace_lock);
-- /*
-- * Remove the current set, update the hash and add
-- * them back.
-- */
-- ftrace_hash_rec_disable(iter->ops, filter_hash);
-- ret = ftrace_hash_move(orig_hash, iter->hash);
-- if (!ret) {
-- ftrace_hash_rec_enable(iter->ops, filter_hash);
-- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-- && ftrace_enabled)
-- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-- }
-+ ret = ftrace_hash_move(iter->ops, filter_hash,
-+ orig_hash, iter->hash);
-+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-+ && ftrace_enabled)
-+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-+
- mutex_unlock(&ftrace_lock);
- }
- free_ftrace_hash(iter->hash);
-Index: linux-2.6/drivers/gpu/drm/drm_irq.c
-===================================================================
---- linux-2.6.orig/drivers/gpu/drm/drm_irq.c
-+++ linux-2.6/drivers/gpu/drm/drm_irq.c
-@@ -109,10 +109,7 @@ static void vblank_disable_and_save(stru
- /* Prevent vblank irq processing while disabling vblank irqs,
- * so no updates of timestamps or count can happen after we've
- * disabled. Needed to prevent races in case of delayed irq's.
-- * Disable preemption, so vblank_time_lock is held as short as
-- * possible, even under a kernel with PREEMPT_RT patches.
+ static u32 onecputick;
+@@ -1222,7 +1223,7 @@ void posix_cpu_timer_schedule(struct k_i
+ /*
+ * Now re-arm for the new expiry time.
*/
-- preempt_disable();
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
- dev->driver->disable_vblank(dev, crtc);
-@@ -163,7 +160,6 @@ static void vblank_disable_and_save(stru
- clear_vblank_timestamps(dev, crtc);
+@@ -1289,10 +1290,11 @@ static inline int fastpath_timer_check(s
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-- preempt_enable();
- }
+- spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
- static void vblank_disable_fn(unsigned long arg)
-@@ -875,10 +871,6 @@ int drm_vblank_get(struct drm_device *de
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-- /* Disable preemption while holding vblank_time_lock. Do
-- * it explicitely to guard against PREEMPT_RT kernel.
-- */
-- preempt_disable();
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank_enabled[crtc]) {
- /* Enable vblank irqs under vblank_time_lock protection.
-@@ -898,7 +890,6 @@ int drm_vblank_get(struct drm_device *de
- }
- }
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-- preempt_enable();
- } else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
-Index: linux-2.6/arch/x86/kernel/kprobes.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/kprobes.c
-+++ linux-2.6/arch/x86/kernel/kprobes.c
-@@ -475,7 +475,6 @@ static void __kprobes setup_singlestep(s
- * stepping.
- */
- regs->ip = (unsigned long)p->ainsn.insn;
-- preempt_enable_no_resched();
- return;
- }
- #endif
-Index: linux-2.6/drivers/ide/ide_platform.c
-===================================================================
---- linux-2.6.orig/drivers/ide/ide_platform.c
-+++ linux-2.6/drivers/ide/ide_platform.c
-@@ -95,7 +95,7 @@ static int __devinit plat_ide_probe(stru
- plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
- hw.dev = &pdev->dev;
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1306,13 +1308,13 @@ static inline int fastpath_timer_check(s
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
-- d.irq_flags = res_irq->flags;
-+ d.irq_flags = 0;
- if (mmio)
- d.host_flags |= IDE_HFLAG_MMIO;
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
-Index: linux-2.6/arch/x86/kernel/hpet.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/hpet.c
-+++ linux-2.6/arch/x86/kernel/hpet.c
-@@ -7,6 +7,7 @@
- #include <linux/slab.h>
- #include <linux/hpet.h>
- #include <linux/init.h>
-+#include <linux/dmi.h>
- #include <linux/cpu.h>
- #include <linux/pm.h>
- #include <linux/io.h>
-@@ -566,6 +567,30 @@ static void init_one_hpet_msi_clockevent
- #define RESERVE_TIMERS 0
- #endif
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1370,6 +1372,190 @@ void run_posix_cpu_timers(struct task_st
+ }
+ }
-+static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d)
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
+{
-+ hpet_msi_disable = 1;
-+ return 0;
-+}
++ int cpu = (long)data;
+
-+static struct dmi_system_id __initdata dmi_hpet_table[] = {
-+ /*
-+ * MSI based per cpu timers lose interrupts when intel_idle()
-+ * is enabled - independent of the c-state. With idle=poll the
-+ * problem cannot be observed. We have no idea yet, whether
-+ * this is a W510 specific issue or a general chipset oddity.
-+ */
-+ {
-+ .callback = dmi_disable_hpet_msi,
-+ .ident = "Lenovo W510",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
-+ },
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++ /* its possible the list is empty, just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unneccessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task,cpu),
++ any_online_cpu(cpu_online_map));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_cpu_mask(cpu, cpu_possible_map)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
+Index: linux-2.6/kernel/trace/ftrace.c
+===================================================================
+--- linux-2.6.orig/kernel/trace/ftrace.c
++++ linux-2.6/kernel/trace/ftrace.c
+@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits
+ return NULL;
+ }
+
++static void
++ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
++static void
++ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
++
+ static int
+-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
++ftrace_hash_move(struct ftrace_ops *ops, int enable,
++ struct ftrace_hash **dst, struct ftrace_hash *src)
+ {
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tp, *tn;
+@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ unsigned long key;
+ int size = src->count;
+ int bits = 0;
++ int ret;
+ int i;
+
+ /*
++ * Remove the current set, update the hash and add
++ * them back.
++ */
++ ftrace_hash_rec_disable(ops, enable);
++
++ /*
+ * If the new source is empty, just free dst and assign it
+ * the empty_hash.
+ */
+@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **ds
+ if (bits > FTRACE_HASH_MAX_BITS)
+ bits = FTRACE_HASH_MAX_BITS;
+
++ ret = -ENOMEM;
+ new_hash = alloc_ftrace_hash(bits);
+ if (!new_hash)
+- return -ENOMEM;
++ goto out;
+
+ size = 1 << src->size_bits;
+ for (i = 0; i < size; i++) {
+@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ rcu_assign_pointer(*dst, new_hash);
+ free_ftrace_hash_rcu(old_hash);
+
+- return 0;
++ ret = 0;
++ out:
++ /*
++ * Enable regardless of ret:
++ * On success, we enable the new hash.
++ * On failure, we re-enable the original hash.
++ */
++ ftrace_hash_rec_enable(ops, enable);
++
++ return ret;
+ }
+
+ /*
+@@ -2877,7 +2900,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
+ ftrace_match_records(hash, buf, len);
+
+ mutex_lock(&ftrace_lock);
+- ret = ftrace_hash_move(orig_hash, hash);
++ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ mutex_unlock(&ftrace_lock);
+
+ mutex_unlock(&ftrace_regex_lock);
+@@ -3060,18 +3083,12 @@ ftrace_regex_release(struct inode *inode
+ orig_hash = &iter->ops->notrace_hash;
+
+ mutex_lock(&ftrace_lock);
+- /*
+- * Remove the current set, update the hash and add
+- * them back.
+- */
+- ftrace_hash_rec_disable(iter->ops, filter_hash);
+- ret = ftrace_hash_move(orig_hash, iter->hash);
+- if (!ret) {
+- ftrace_hash_rec_enable(iter->ops, filter_hash);
+- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
+- && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+- }
++ ret = ftrace_hash_move(iter->ops, filter_hash,
++ orig_hash, iter->hash);
++ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
++ && ftrace_enabled)
++ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++
+ mutex_unlock(&ftrace_lock);
+ }
+ free_ftrace_hash(iter->hash);
+Index: linux-2.6/drivers/gpu/drm/drm_irq.c
+===================================================================
+--- linux-2.6.orig/drivers/gpu/drm/drm_irq.c
++++ linux-2.6/drivers/gpu/drm/drm_irq.c
+@@ -109,10 +109,7 @@ static void vblank_disable_and_save(stru
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+- * Disable preemption, so vblank_time_lock is held as short as
+- * possible, even under a kernel with PREEMPT_RT patches.
+ */
+- preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+@@ -163,7 +160,6 @@ static void vblank_disable_and_save(stru
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+- preempt_enable();
+ }
+
+ static void vblank_disable_fn(unsigned long arg)
+@@ -875,10 +871,6 @@ int drm_vblank_get(struct drm_device *de
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+- /* Disable preemption while holding vblank_time_lock. Do
+- * it explicitely to guard against PREEMPT_RT kernel.
+- */
+- preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+ if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+@@ -898,7 +890,6 @@ int drm_vblank_get(struct drm_device *de
+ }
+ }
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+- preempt_enable();
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
+ atomic_dec(&dev->vblank_refcount[crtc]);
+Index: linux-2.6/arch/x86/kernel/kprobes.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/kprobes.c
++++ linux-2.6/arch/x86/kernel/kprobes.c
+@@ -475,7 +475,6 @@ static void __kprobes setup_singlestep(s
+ * stepping.
+ */
+ regs->ip = (unsigned long)p->ainsn.insn;
+- preempt_enable_no_resched();
+ return;
+ }
+ #endif
+Index: linux-2.6/drivers/ide/ide_platform.c
+===================================================================
+--- linux-2.6.orig/drivers/ide/ide_platform.c
++++ linux-2.6/drivers/ide/ide_platform.c
+@@ -95,7 +95,7 @@ static int __devinit plat_ide_probe(stru
+ plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
+ hw.dev = &pdev->dev;
+
+- d.irq_flags = res_irq->flags;
++ d.irq_flags = 0;
+ if (mmio)
+ d.host_flags |= IDE_HFLAG_MMIO;
+
+Index: linux-2.6/arch/x86/kernel/hpet.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/hpet.c
++++ linux-2.6/arch/x86/kernel/hpet.c
+@@ -7,6 +7,7 @@
+ #include <linux/slab.h>
+ #include <linux/hpet.h>
+ #include <linux/init.h>
++#include <linux/dmi.h>
+ #include <linux/cpu.h>
+ #include <linux/pm.h>
+ #include <linux/io.h>
+@@ -566,6 +567,30 @@ static void init_one_hpet_msi_clockevent
+ #define RESERVE_TIMERS 0
+ #endif
+
++static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d)
++{
++ hpet_msi_disable = 1;
++ return 0;
++}
++
++static struct dmi_system_id __initdata dmi_hpet_table[] = {
++ /*
++ * MSI based per cpu timers lose interrupts when intel_idle()
++ * is enabled - independent of the c-state. With idle=poll the
++ * problem cannot be observed. We have no idea yet, whether
++ * this is a W510 specific issue or a general chipset oddity.
++ */
++ {
++ .callback = dmi_disable_hpet_msi,
++ .ident = "Lenovo W510",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
++ },
+ },
+ {}
+};
@@ -1699,7 +1971,7 @@
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
-@@ -4206,6 +4207,126 @@ static inline void schedule_debug(struct
+@@ -4182,6 +4183,126 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -1826,7 +2098,7 @@
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
-@@ -4265,6 +4386,8 @@ need_resched:
+@@ -4241,6 +4362,8 @@ need_resched:
raw_spin_lock_irq(&rq->lock);
@@ -1835,7 +2107,7 @@
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -4272,19 +4395,6 @@ need_resched:
+@@ -4248,19 +4371,6 @@ need_resched:
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -1855,7 +2127,7 @@
}
switch_count = &prev->nvcsw;
}
-@@ -4318,15 +4428,23 @@ need_resched:
+@@ -4294,15 +4404,23 @@ need_resched:
post_schedule(rq);
@@ -1881,7 +2153,7 @@
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -4335,15 +4453,37 @@ static inline void sched_submit_work(str
+@@ -4311,15 +4429,37 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -2279,12 +2551,12 @@
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
++
++ if (WARN_ON_ONCE(worker->sleeping))
++ return;
- /* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
-+ if (WARN_ON_ONCE(worker->sleeping))
-+ return;
-+
+ worker->sleeping = 1;
+ cpu = smp_processor_id();
@@ -2903,14 +3175,14 @@
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
-+ gcwq = get_gcwq(get_cpu());
++ gcwq = get_gcwq(get_cpu_light());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
+ spin_unlock_irq(&gcwq->lock);
-+ put_cpu();
++ put_cpu_light();
}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
@@ -3821,15 +4093,75 @@
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
-@@ -631,6 +634,7 @@ static int sched_rt_runtime_exceeded(str
+@@ -553,12 +556,9 @@ static inline int balance_runtime(struct
+
+ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+ {
+- int i, idle = 1;
++ int i, idle = 1, throttled = 0;
+ const struct cpumask *span;
+
+- if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+- return 1;
+-
+ span = sched_rt_period_mask();
+ for_each_cpu(i, span) {
+ int enqueue = 0;
+@@ -593,12 +593,17 @@ static int do_sched_rt_period_timer(stru
+ if (!rt_rq_throttled(rt_rq))
+ enqueue = 1;
+ }
++ if (rt_rq->rt_throttled)
++ throttled = 1;
+
+ if (enqueue)
+ sched_rt_rq_enqueue(rt_rq);
+ raw_spin_unlock(&rq->lock);
+ }
+
++ if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
++ return 1;
++
+ return idle;
+ }
+
+@@ -630,7 +635,24 @@ static int sched_rt_runtime_exceeded(str
+ return 0;
if (rt_rq->rt_time > runtime) {
- rt_rq->rt_throttled = 1;
-+ printk_once(KERN_WARNING "sched: RT throttling activated\n");
+- rt_rq->rt_throttled = 1;
++ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
++
++ /*
++ * Don't actually throttle groups that have no runtime assigned
++ * but accrue some time due to boosting.
++ */
++ if (likely(rt_b->rt_runtime)) {
++ rt_rq->rt_throttled = 1;
++ printk_once(KERN_WARNING "sched: RT throttling activated\n");
++ } else {
++ /*
++ * In case we did anyway, make it go away,
++ * replenishment is a joke, since it will replenish us
++ * with exactly 0 ns.
++ */
++ rt_rq->rt_time = 0;
++ }
++
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
-@@ -1186,7 +1190,7 @@ static void deactivate_task(struct rq *r
+@@ -658,7 +680,8 @@ static void update_curr_rt(struct rq *rq
+ if (unlikely((s64)delta_exec < 0))
+ delta_exec = 0;
+
+- schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
++ schedstat_set(curr->se.statistics.exec_max,
++ max(curr->se.statistics.exec_max, delta_exec));
+
+ curr->se.sum_exec_runtime += delta_exec;
+ account_group_exec_runtime(curr, delta_exec);
+@@ -1186,7 +1209,7 @@ static void deactivate_task(struct rq *r
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -3838,7 +4170,7 @@
(p->rt.nr_cpus_allowed > 1))
return 1;
return 0;
-@@ -1331,7 +1335,7 @@ static struct rq *find_lock_lowest_rq(st
+@@ -1331,7 +1354,7 @@ static struct rq *find_lock_lowest_rq(st
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu,
@@ -3847,7 +4179,7 @@
task_running(rq, task) ||
!task->on_rq)) {
-@@ -1614,9 +1618,6 @@ static void set_cpus_allowed_rt(struct t
+@@ -1614,9 +1637,6 @@ static void set_cpus_allowed_rt(struct t
update_rt_migration(&rq->rt);
}
@@ -5428,8 +5760,8 @@
+ va_start(ap, fmt);
+ early_vprintk(fmt, ap);
+ va_end(ap);
-+}
-+
+ }
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
@@ -5448,8 +5780,8 @@
+void printk_kill(void)
+{
+ printk_killswitch = 1;
- }
-
++}
++
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+ if (!printk_killswitch)
@@ -5536,16 +5868,27 @@
- if (console_trylock_for_printk(this_cpu))
+ if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
-+ console_unlock();
+ console_unlock();
+#else
+ raw_local_irq_restore(flags);
- console_unlock();
++ console_unlock();
+ raw_local_irq_save(flags);
+#endif
+ }
lockdep_on();
out_restore_irqs:
+@@ -1213,8 +1282,8 @@ void printk_tick(void)
+
+ int printk_needs_cpu(int cpu)
+ {
+- if (cpu_is_offline(cpu))
+- printk_tick();
++ if (unlikely(cpu_is_offline(cpu)))
++ __this_cpu_write(printk_pending, 0);
+ return __this_cpu_read(printk_pending);
+ }
+
@@ -1252,18 +1321,23 @@ void console_unlock(void)
console_may_schedule = 0;
@@ -5869,371 +6212,99 @@
extern bool yield_to(struct task_struct *p, bool preempt);
@@ -2109,6 +2169,7 @@ extern void xtime_update(unsigned long t
- extern int wake_up_state(struct task_struct *tsk, unsigned int state);
- extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct * tsk);
- extern void wake_up_new_task(struct task_struct *tsk);
- #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
-@@ -2198,12 +2259,24 @@ extern struct mm_struct * mm_alloc(void)
-
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
- {
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
-+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-+}
-+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
-+#endif
-+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
-@@ -2509,7 +2582,7 @@ extern int _cond_resched(void);
-
- extern int __cond_resched_lock(spinlock_t *lock);
-
--#ifdef CONFIG_PREEMPT
-+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
- #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
- #else
- #define PREEMPT_LOCK_OFFSET 0
-@@ -2520,12 +2593,16 @@ extern int __cond_resched_lock(spinlock_
- __cond_resched_lock(lock); \
- })
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- extern int __cond_resched_softirq(void);
-
- #define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
- })
-+#else
-+# define cond_resched_softirq() cond_resched()
-+#endif
-
- /*
- * Does a critical section need to be broken due to another
-@@ -2549,7 +2626,7 @@ void thread_group_cputimer(struct task_s
-
- static inline void thread_group_cputime_init(struct signal_struct *sig)
- {
-- spin_lock_init(&sig->cputimer.lock);
-+ raw_spin_lock_init(&sig->cputimer.lock);
- }
-
- /*
-@@ -2588,6 +2665,26 @@ static inline void set_task_cpu(struct t
-
- #endif /* CONFIG_SMP */
-
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return p->migrate_disable;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (p->migrate_disable)
-+ return cpumask_of(task_cpu(p));
-+#endif
-+
-+ return &p->cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-
-Index: linux-2.6/kernel/posix-cpu-timers.c
-===================================================================
---- linux-2.6.orig/kernel/posix-cpu-timers.c
-+++ linux-2.6/kernel/posix-cpu-timers.c
-@@ -280,13 +280,13 @@ void thread_group_cputimer(struct task_s
- * it.
- */
- thread_group_cputime(tsk, &sum);
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- cputimer->running = 1;
- update_gt_cputime(&cputimer->cputime, &sum);
- } else
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- *times = cputimer->cputime;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
- }
-
- /*
-@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
- /*
- * Disarm any old timer after extracting its expiry time.
- */
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
-
- ret = 0;
- old_incr = timer->it.cpu.incr;
-@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
- struct thread_group_cputimer *cputimer = &sig->cputimer;
- unsigned long flags;
-
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- cputimer->running = 0;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
- }
-
- static u32 onecputick;
-@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
- /*
- * Now re-arm for the new expiry time.
- */
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
- arm_timer(timer);
- spin_unlock(&p->sighand->siglock);
-
-@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
- sig = tsk->signal;
- if (sig->cputimer.running) {
- struct task_cputime group_sample;
-+ unsigned long flags;
-
-- spin_lock(&sig->cputimer.lock);
-+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
- group_sample = sig->cputimer.cputime;
-- spin_unlock(&sig->cputimer.lock);
-+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
-
- if (task_cputime_expired(&group_sample, &sig->cputime_expires))
- return 1;
-@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
--void run_posix_cpu_timers(struct task_struct *tsk)
-+static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
-
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+@@ -2198,12 +2259,24 @@ extern struct mm_struct * mm_alloc(void)
- /*
- * The fast path checks that there are no expired thread or thread
-@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
- }
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
-+#include <linux/kthread.h>
-+#include <linux/cpu.h>
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-+
-+static int posix_cpu_timers_thread(void *data)
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
+{
-+ int cpu = (long)data;
-+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
-+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
-+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
-+
-+ /* its possible the list is empty, just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
-+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
-+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
-+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
-+ }
-+ return 0;
-+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
+
-+static inline int __fastpath_timer_check(struct task_struct *tsk)
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+@@ -2509,7 +2582,7 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT
++#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
+@@ -2520,12 +2593,16 @@ extern int __cond_resched_lock(spinlock_
+ __cond_resched_lock(lock); \
+ })
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ /*
+ * Does a critical section need to be broken due to another
+@@ -2549,7 +2626,7 @@ void thread_group_cputimer(struct task_s
+
+ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ {
+- spin_lock_init(&sig->cputimer.lock);
++ raw_spin_lock_init(&sig->cputimer.lock);
+ }
+
+ /*
+@@ -2588,6 +2665,26 @@ static inline void set_task_cpu(struct t
+
+ #endif /* CONFIG_SMP */
+
++static inline int __migrate_disabled(struct task_struct *p)
+{
-+ /* tsk == current, ensure it is safe to use ->signal/sighand */
-+ if (unlikely(tsk->exit_state))
-+ return 0;
-+
-+ if (!task_cputime_zero(&tsk->cputime_expires))
-+ return 1;
-+
-+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
-+ return 1;
-+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
+ return 0;
-+}
-+
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ unsigned long cpu = smp_processor_id();
-+ struct task_struct *tasklist;
-+
-+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
-+ return;
-+ /* get per-cpu references */
-+ tasklist = per_cpu(posix_timer_tasklist, cpu);
-+
-+ /* check to see if we're already queued */
-+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-+ get_task_struct(tsk);
-+ if (tasklist) {
-+ tsk->posix_timer_list = tasklist;
-+ } else {
-+ /*
-+ * The list is terminated by a self-pointing
-+ * task_struct
-+ */
-+ tsk->posix_timer_list = tsk;
-+ }
-+ per_cpu(posix_timer_tasklist, cpu) = tsk;
-+
-+ wake_up_process(per_cpu(posix_timer_task, cpu));
-+ }
-+}
-+
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
-+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
-+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, &param);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+ /* Strictly unneccessary, as first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task,cpu),
-+ any_online_cpu(cpu_online_map));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
+#endif
-+ }
-+ return NOTIFY_OK;
+}
+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
-+};
-+
-+static int __init posix_cpu_thread_init(void)
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
-+ void *hcpu = (void *)(long)smp_processor_id();
-+ /* Start one for boot CPU. */
-+ unsigned long cpu;
-+
-+ /* init the per-cpu posix_timer_tasklets */
-+ for_each_cpu_mask(cpu, cpu_possible_map)
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++#endif
+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
-+ return 0;
-+}
-+early_initcall(posix_cpu_thread_init);
-+#else /* CONFIG_PREEMPT_RT_BASE */
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ __run_posix_cpu_timers(tsk);
++ return &p->cpus_allowed;
+}
-+#endif /* CONFIG_PREEMPT_RT_BASE */
+
- /*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
Index: linux-2.6/kernel/sched_stats.h
===================================================================
--- linux-2.6.orig/kernel/sched_stats.h
@@ -22908,7 +22979,7 @@
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
-+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
Modified: dists/sid/linux-2.6/debian/patches/series/6-extra
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/6-extra Wed Oct 19 02:36:03 2011 (r18171)
+++ dists/sid/linux-2.6/debian/patches/series/6-extra Wed Oct 19 19:05:00 2011 (r18172)
@@ -1 +1 @@
-+ features/all/rt/patch-3.0.6-rt18.patch featureset=rt
++ features/all/rt/patch-3.0.7-rt20.patch featureset=rt