[kernel] r19970 - in dists/squeeze-backports/linux: . debian debian/bin debian/config debian/config/amd64 debian/config/i386 debian/config/ia64 debian/config/kernelarch-mips debian/config/kernelarch-x86 debian/config/mips debian/config/mipsel debian/config/powerpc debian/patches debian/patches/bugfix/all debian/patches/bugfix/powerpc debian/patches/bugfix/s390 debian/patches/bugfix/x86 debian/patches/debian debian/patches/features/all/drm debian/patches/features/all/rt debian/patches/features/x86/efi-stub debian/patches/features/x86/hyperv
Ben Hutchings
benh at alioth.debian.org
Sun Apr 7 23:16:12 UTC 2013
Author: benh
Date: Sun Apr 7 23:16:11 2013
New Revision: 19970
Log:
Merge changes from sid up to 3.2.41-2
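(The entries below are patches copied unchanged from r19949 of the sid packaging. As a rough illustration of how such an update could be replayed against a local checkout, here is a minimal Subversion sketch; the repository URL, base revision, and working-copy path are illustrative assumptions, not details taken from this commit.)

# Hypothetical sketch only: replay a "merge changes from sid" style update
# into a squeeze-backports working copy. URL, base revision and paths are assumed.
import subprocess

SID_URL = "svn://svn.debian.org/kernel/dists/sid/linux"   # assumed repository URL
BACKPORTS_WC = "dists/squeeze-backports/linux"            # assumed local working copy

# "svn merge -r N:M SOURCE WCPATH" applies the delta between revisions N and M
# of SOURCE to the working copy; 19800 below is a placeholder base revision.
subprocess.run(["svn", "merge", "-r", "19800:19949", SID_URL, BACKPORTS_WC], check=True)

# Review the resulting changes before committing.
subprocess.run(["svn", "status", BACKPORTS_WC], check=True)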
Added:
dists/squeeze-backports/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch
dists/squeeze-backports/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
dists/squeeze-backports/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch
dists/squeeze-backports/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch
dists/squeeze-backports/linux/debian/patches/debian/efi-autoload-efivars.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/debian/efi-autoload-efivars.patch
dists/squeeze-backports/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch
dists/squeeze-backports/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0224-epoll.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0224-epoll.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch
dists/squeeze-backports/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch
Replaced:
dists/squeeze-backports/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch
- copied unchanged from r19949, dists/sid/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch
Deleted:
dists/squeeze-backports/linux/debian/patches/bugfix/all/USB-usb-storage-unusual_devs-update-for-Super-TOP-SA.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-fix-hole-punch-failure-when-depth-is-greater-th.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-fix-kernel-BUG-on-large-scale-rm-rf-commands.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-rewrite-punch-hole-to-use-ext4_ext_remove_space.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/fs-cachefiles-add-support-for-large-files-in-filesys.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/md-protect-against-crash-upon-fsync-on-ro-array.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/mm-fix-pageblock-bitmap-allocation.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/speakup-lower-default-software-speech-rate.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/usb-Add-USB_QUIRK_RESET_RESUME-for-all-Logitech-UVC-.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/usb-Add-quirk-detection-based-on-interface-informati.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/asus-laptop-Do-not-call-HWRS-on-init.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-EBUSY-status-handling-added-to-i915_gem_fau.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/efi-Clear-EFI_RUNTIME_SERVICES-rather-than-EFI_BOOT-.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/x86-efi-Make-noefi-really-disable-EFI-runtime-serivc.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0136-hrtimer-Add-missing-debug_activate-aid-Was-Re-ANNOUN.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0137-hrtimer-fix-reprogram-madness.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0138-timer-fd-Prevent-live-lock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0139-posix-timers-thread-posix-cpu-timers-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0140-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0141-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0142-sched-delay-put-task.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0143-sched-limit-nr-migrate.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0144-sched-mmdrop-delayed.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0145-sched-rt-mutex-wakeup.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0146-sched-prevent-idle-boost.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0147-sched-might-sleep-do-not-account-rcu-depth.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0148-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0149-sched-cond-resched.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0150-cond-resched-softirq-fix.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0151-sched-no-work-when-pi-blocked.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0152-cond-resched-lock-rt-tweak.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0153-sched-disable-ttwu-queue.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0154-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0155-sched-ttwu-Return-success-when-only-changing-the-sav.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0156-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0157-stomp-machine-mark-stomper-thread.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0158-stomp-machine-raw-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0159-hotplug-Lightweight-get-online-cpus.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0160-hotplug-sync_unplug-No.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0161-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0162-sched-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0163-hotplug-use-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0164-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0165-ftrace-migrate-disable-tracing.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0166-tracing-Show-padding-as-unsigned-short.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0167-migrate-disable-rt-variant.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0168-sched-Optimize-migrate_disable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0169-sched-Generic-migrate_disable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0170-sched-rt-Fix-migrate_enable-thinko.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0171-sched-teach-migrate_disable-about-atomic-contexts.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0172-sched-Postpone-actual-migration-disalbe-to-schedule.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0173-sched-Do-not-compare-cpu-masks-in-scheduler.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0174-sched-Have-migrate_disable-ignore-bounded-threads.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0175-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0176-ftrace-crap.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0177-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0178-net-netif_rx_ni-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0179-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-lockdep-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0181-mutex-no-spin-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0182-softirq-local-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0183-softirq-Export-in_serving_softirq.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0184-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0185-softirq-Fix-unplug-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0186-softirq-disable-softirq-stacks-for-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0187-softirq-make-fifo.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0188-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0189-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0190-local-vars-migrate-disable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0191-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0192-rtmutex-lock-killable.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0193-rtmutex-futex-prepare-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0194-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0195-rt-mutex-add-sleeping-spinlocks-support.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0196-spinlock-types-separate-raw.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0197-rtmutex-avoid-include-hell.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0198-rt-add-rt-spinlocks.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0199-rt-add-rt-to-mutex-headers.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0200-rwsem-add-rt-variant.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0201-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0202-rwlocks-Fix-section-mismatch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0203-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0204-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0205-rcu-Frob-softirq-test.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0206-rcu-Merge-RCU-bh-into-RCU-preempt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0207-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0208-rcu-more-fallout.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0209-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0210-rt-rcutree-Move-misplaced-prototype.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0211-lglocks-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0212-serial-8250-Clean-up-the-locking-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0213-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0214-drivers-tty-fix-omap-lock-crap.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0215-rt-Improve-the-serial-console-PASS_LIMIT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0216-fs-namespace-preemption-fix.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0217-mm-protect-activate-switch-mm.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0218-fs-block-rt-support.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0219-fs-ntfs-disable-interrupt-only-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0220-x86-Convert-mce-timer-to-hrtimer.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0221-x86-stackprotector-Avoid-random-pool-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0222-x86-Use-generic-rwsem_spinlocks-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0223-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0224-workqueue-use-get-cpu-light.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0225-epoll.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0226-mm-vmalloc.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0227-debugobjects-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0228-jump-label-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0229-skbufhead-raw-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0230-x86-no-perf-irq-work-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0231-console-make-rt-friendly.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0232-printk-Disable-migration-instead-of-preemption.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0233-power-use-generic-rwsem-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0234-power-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0235-arm-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0236-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0237-mips-disable-highmem-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0238-net-Avoid-livelock-in-net_tx_action-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0239-ping-sysrq.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0240-kgdb-serial-Short-term-workaround.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0241-add-sys-kernel-realtime-entry.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0242-mm-rt-kmap_atomic-scheduling.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0243-ipc-sem-Rework-semaphore-wakeups.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0244-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0245-x86-kvm-require-const-tsc-for-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0246-scsi-fcoe-rt-aware.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0247-x86-crypto-Reduce-preempt-disabled-regions.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0248-dm-Make-rt-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0249-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0250-seqlock-Prevent-rt-starvation.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0251-timer-Fix-hotplug-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0252-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0253-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0254-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0255-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0256-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0257-softirq-Check-preemption-after-reenabling-interrupts.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0258-rt-Introduce-cpu_chill.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0259-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0260-net-Use-cpu_chill-instead-of-cpu_relax.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0261-kconfig-disable-a-few-options-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0262-kconfig-preempt-rt-full.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0263-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0264-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0265-net-RT-REmove-preemption-disabling-in-netif_rx.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0266-mips-remove-smp-reserve-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0267-Latency-histogramms-Cope-with-backwards-running-loca.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0268-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0269-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0270-Latency-histograms-Detect-another-yet-overlooked-sha.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0271-slab-Prevent-local-lock-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0272-fs-jbd-pull-your-plug-when-waiting-for-space.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0273-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0274-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0275-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0276-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-fix-printk-flush-of-messages.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0279-random-Make-it-work-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0280-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0281-mm-slab-Fix-potential-deadlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0282-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0283-rt-rwsem-rwlock-lockdep-annotations.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0284-sched-Better-debug-output-for-might-sleep.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0285-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0286-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0287-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0288-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0289-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0290-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0291-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0292-sched-Consider-pi-boosting-in-setscheduler.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0293-drivers-tty-pl011-irq-disable-madness.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0294-mmci-Remove-bogus-local_irq_save.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0295-sched-Init-idle-on_rq-in-init_idle.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0296-sched-Check-for-idle-task-in-might_sleep.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0297-mm-swap-Initialize-local-locks-early.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0298-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0299-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0300-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0301-serial-Imx-Fix-recursive-locking-bug.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0302-wait-simple-Simple-waitqueue-implementation.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0303-rcutiny-Use-simple-waitqueue.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0304-Linux-3.2.39-rt59-REBASE.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0001-NLS-improve-UTF8-UTF16-string-conversion-routine.patch
Modified:
dists/squeeze-backports/linux/ (props changed)
dists/squeeze-backports/linux/debian/bin/test-patches
dists/squeeze-backports/linux/debian/changelog
dists/squeeze-backports/linux/debian/config/amd64/defines
dists/squeeze-backports/linux/debian/config/defines
dists/squeeze-backports/linux/debian/config/i386/defines
dists/squeeze-backports/linux/debian/config/ia64/config
dists/squeeze-backports/linux/debian/config/kernelarch-mips/config
dists/squeeze-backports/linux/debian/config/kernelarch-x86/config
dists/squeeze-backports/linux/debian/config/mips/config
dists/squeeze-backports/linux/debian/config/mips/config.4kc-malta
dists/squeeze-backports/linux/debian/config/mips/config.5kc-malta
dists/squeeze-backports/linux/debian/config/mips/config.r4k-ip22
dists/squeeze-backports/linux/debian/config/mips/config.r5k-ip32
dists/squeeze-backports/linux/debian/config/mips/config.sb1-bcm91250a
dists/squeeze-backports/linux/debian/config/mips/config.sb1a-bcm91480b
dists/squeeze-backports/linux/debian/config/mips/defines
dists/squeeze-backports/linux/debian/config/mipsel/config
dists/squeeze-backports/linux/debian/config/mipsel/config.loongson-2f
dists/squeeze-backports/linux/debian/config/mipsel/config.r5k-cobalt
dists/squeeze-backports/linux/debian/config/mipsel/defines
dists/squeeze-backports/linux/debian/config/powerpc/config.powerpc64
dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0001-Revert-workqueue-skip-nr_running-sanity-check-in-wor.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0002-x86-Call-idle-notifier-after-irq_enter.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0003-slab-lockdep-Annotate-all-slab-caches.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0004-x86-kprobes-Remove-remove-bogus-preempt_enable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0005-x86-hpet-Disable-MSI-on-Lenovo-W510.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0006-block-Shorten-interrupt-disabled-regions.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0007-sched-Distangle-worker-accounting-from-rq-3Elock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0008-mips-enable-interrupts-in-signal.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0009-arm-enable-interrupts-in-signal-code.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0010-powerpc-85xx-Mark-cascade-irq-IRQF_NO_THREAD.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0011-powerpc-wsp-Mark-opb-cascade-handler-IRQF_NO_THREAD.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0012-powerpc-Mark-IPI-interrupts-IRQF_NO_THREAD.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0013-powerpc-Allow-irq-threading.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0014-sched-Keep-period-timer-ticking-when-throttling-acti.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0015-sched-Do-not-throttle-due-to-PI-boosting.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0016-time-Remove-bogus-comments.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0017-x86-vdso-Remove-bogus-locking-in-update_vsyscall_tz.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0018-x86-vdso-Use-seqcount-instead-of-seqlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0019-ia64-vsyscall-Use-seqcount-instead-of-seqlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0020-seqlock-Remove-unused-functions.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0021-seqlock-Use-seqcount.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0022-vfs-fs_struct-Move-code-out-of-seqcount-write-sectio.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0023-timekeeping-Split-xtime_lock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0024-intel_idle-Convert-i7300_idle_lock-to-raw-spinlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0025-mm-memcg-shorten-preempt-disabled-section-around-eve.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0026-tracing-Account-for-preempt-off-in-preempt_schedule.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0027-signal-revert-ptrace-preempt-magic.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0028-arm-Mark-pmu-interupt-IRQF_NO_THREAD.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0029-arm-Allow-forced-irq-threading.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0030-preempt-rt-Convert-arm-boot_lock-to-raw.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0031-sched-Create-schedule_preempt_disabled.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0032-sched-Use-schedule_preempt_disabled.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0033-signals-Do-not-wakeup-self.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0034-posix-timers-Prevent-broadcast-signals.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0035-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0036-signal-x86-Delay-calling-signals-in-atomic.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0037-generic-Use-raw-local-irq-variant-for-generic-cmpxch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0038-drivers-random-Reduce-preempt-disabled-region.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0039-ARM-AT91-PIT-Remove-irq-handler-when-clock-event-is-.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0040-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0041-drivers-net-tulip_remove_one-needs-to-call-pci_disab.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0042-drivers-net-Use-disable_irq_nosync-in-8139too.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0043-drivers-net-ehea-Make-rx-irq-handler-non-threaded-IR.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0044-drivers-net-at91_ether-Make-mdio-protection-rt-safe.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0045-preempt-mark-legitimated-no-resched-sites.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0046-mm-Prepare-decoupling-the-page-fault-disabling-logic.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0047-mm-Fixup-all-fault-handlers-to-check-current-pagefau.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0048-mm-pagefault_disabled.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0049-mm-raw_pagefault_disable.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0050-filemap-fix-up.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0051-mm-Remove-preempt-count-from-pagefault-disable-enabl.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0052-x86-highmem-Replace-BUG_ON-by-WARN_ON.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0053-suspend-Prevent-might-sleep-splats.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0054-OF-Fixup-resursive-locking-code-paths.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0055-of-convert-devtree-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0056-list-add-list-last-entry.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0057-mm-page-alloc-use-list-last-entry.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0058-mm-slab-move-debug-out.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0059-rwsem-inlcude-fix.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0060-sysctl-include-fix.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0061-net-flip-lock-dep-thingy.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0062-softirq-thread-do-softirq.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0063-softirq-split-out-code.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0064-x86-Do-not-unmask-io_apic-when-interrupt-is-in-progr.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0065-x86-32-fix-signal-crap.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0066-x86-Do-not-disable-preemption-in-int3-on-32bit.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0067-rcu-Reduce-lock-section.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0068-locking-various-init-fixes.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0069-wait-Provide-__wake_up_all_locked.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0070-pci-Use-__wake_up_all_locked-pci_unblock_user_cfg_ac.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0071-latency-hist.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0072-hwlatdetect.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0073-localversion.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0074-early-printk-consolidate.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0075-printk-kill.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0076-printk-force_early_printk-boot-param-to-help-with-de.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0077-rt-preempt-base-config.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0078-bug-BUG_ON-WARN_ON-variants-dependend-on-RT-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0079-rt-local_irq_-variants-depending-on-RT-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0080-preempt-Provide-preempt_-_-no-rt-variants.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0081-ata-Do-not-disable-interrupts-in-ide-code-for-preemp.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0082-ide-Do-not-disable-interrupts-for-PREEMPT-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0083-infiniband-Mellanox-IB-driver-patch-use-_nort-primit.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0084-input-gameport-Do-not-disable-interrupts-on-PREEMPT_.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0085-acpi-Do-not-disable-interrupts-on-PREEMPT_RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0086-core-Do-not-disable-interrupts-on-RT-in-kernel-users.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0087-core-Do-not-disable-interrupts-on-RT-in-res_counter..patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0088-usb-Use-local_irq_-_nort-variants.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0089-tty-Do-not-disable-interrupts-in-put_ldisc-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0090-mm-scatterlist-dont-disable-irqs-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0091-signal-fix-up-rcu-wreckage.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0092-net-wireless-warn-nort.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0093-mm-Replace-cgroup_page-bit-spinlock.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0094-buffer_head-Replace-bh_uptodate_lock-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0095-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0096-genirq-Disable-DEBUG_SHIRQ-for-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0097-genirq-Disable-random-call-on-preempt-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0098-genirq-disable-irqpoll-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0099-genirq-force-threading.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0100-drivers-net-fix-livelock-issues.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0101-drivers-net-vortex-fix-locking-issues.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0102-drivers-net-gianfar-Make-RT-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0103-USB-Fix-the-mouse-problem-when-copying-large-amounts.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0104-local-var.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0105-rt-local-irq-lock.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0106-cpu-rt-variants.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0107-mm-slab-wrap-functions.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0108-slab-Fix-__do_drain-to-use-the-right-array-cache.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0109-mm-More-lock-breaks-in-slab.c.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0110-mm-page_alloc-rt-friendly-per-cpu-pages.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0111-mm-page_alloc-reduce-lock-sections-further.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0112-mm-page-alloc-fix.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0113-mm-convert-swap-to-percpu-locked.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0114-mm-vmstat-fix-the-irq-lock-asymetry.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0115-mm-make-vmstat-rt-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0116-mm-shrink-the-page-frame-to-rt-size.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0117-ARM-Initialize-ptl-lock-for-vector-page.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0118-mm-Allow-only-slab-on-RT.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0119-radix-tree-rt-aware.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0120-panic-disable-random-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0121-ipc-Make-the-ipc-code-rt-aware.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0122-ipc-mqueue-Add-a-critical-section-to-avoid-a-deadloc.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0123-relay-fix-timer-madness.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0124-net-ipv4-route-use-locks-on-up-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0125-workqueue-avoid-the-lock-in-cpu-dying.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0126-timers-prepare-for-full-preemption.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0127-timers-preempt-rt-support.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0128-timers-fix-timer-hotplug-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0129-timers-mov-printk_tick-to-soft-interrupt.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0130-timer-delay-waking-softirqs-from-the-jiffy-tick.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0131-timers-Avoid-the-switch-timers-base-set-to-NULL-tric.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0132-printk-Don-t-call-printk_tick-in-printk_needs_cpu-on.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0133-hrtimers-prepare-full-preemption.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/series
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch
dists/squeeze-backports/linux/debian/patches/series
dists/squeeze-backports/linux/debian/patches/series-rt
dists/squeeze-backports/linux/debian/rules.real
Modified: dists/squeeze-backports/linux/debian/bin/test-patches
==============================================================================
--- dists/squeeze-backports/linux/debian/bin/test-patches Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/bin/test-patches Sun Apr 7 23:16:11 2013 (r19970)
@@ -19,12 +19,15 @@
featureset=none
fi
-eval "set -- $(getopt -n "$0" -- "f:j:s:" "$@")"
+fuzz=0
+
+eval "set -- $(getopt -n "$0" -o "f:j:s:" -l "fuzz:" -- "$@")"
while true; do
case "$1" in
-f) flavour="$2"; shift 2 ;;
-j) export MAKEFLAGS="$MAKEFLAGS -j$2"; shift 2 ;;
-s) featureset="$2"; shift 2 ;;
+ --fuzz) fuzz="$2"; shift 2;;
--) shift 1; break ;;
esac
done
@@ -36,6 +39,7 @@
-f <flavour> specify the 'flavour' of kernel to build, e.g. 686-pae
-j <jobs> specify number of compiler jobs to run in parallel
-s <featureset> specify an optional featureset to apply, e.g. rt
+ --fuzz <num> set the maximum patch fuzz factor (default: 0)
EOF
exit 2
fi
@@ -63,8 +67,9 @@
# Try to clean up any previous test patches
if [ "$featureset" = none ]; then
- while quilt top 2>/dev/null | grep -q ^test/; do
- quilt delete
+ while patch="$(quilt next 2>/dev/null || quilt top 2>/dev/null)" && \
+ [ "${patch#test/}" != "$patch" ]; do
+ quilt delete -r "$patch"
done
else
sed -i '/^test\//d' debian/patches/series-${featureset}
@@ -89,7 +94,7 @@
patch_abs="$(readlink -f "$patch")"
(cd "debian/build/source_${featureset}" && \
quilt import -P "test/$(basename "$patch")" "$patch_abs" && \
- QUILT_PATCH_OPTS='--fuzz=0' quilt push)
+ QUILT_PATCH_OPTS="--fuzz=$fuzz" quilt push)
done
# Build selected binaries
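
For illustration, a minimal sketch of how the updated test-patches script
might be invoked once the --fuzz option above is in place. The flavour and
featureset values come from the script's own usage text; the patch file
names are hypothetical and not part of this commit:

    # test-apply two local patches against the 686-pae flavour with the
    # rt featureset, allowing a patch fuzz factor of 2
    debian/bin/test-patches -f 686-pae -s rt --fuzz 2 \
        ../fix-foo.patch ../fix-bar.patch
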
Modified: dists/squeeze-backports/linux/debian/changelog
==============================================================================
--- dists/squeeze-backports/linux/debian/changelog Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/changelog Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,4 +1,4 @@
-linux (3.2.39-2~bpo60+1) squeeze-backports; urgency=high
+linux (3.2.41-2~bpo60+1) squeeze-backports; urgency=low
* Rebuild for squeeze:
- Use gcc-4.4 for all architectures
@@ -11,7 +11,155 @@
- Make build target depend on build-arch only, so we don't redundantly
build documentation on each architecture
- -- Ben Hutchings <ben at decadent.org.uk> Wed, 27 Feb 2013 05:50:49 +0000
+ -- Ben Hutchings <ben at decadent.org.uk> Mon, 08 Apr 2013 00:04:06 +0100
+
+linux (3.2.41-2) unstable; urgency=low
+
+ * [ia64] udeb: Remove efi-modules package; make kernel-image provide
+ efi-modules (fixes FTBFS)
+ * linux-headers: Fix file installation on architectures without
+ Kbuild.platforms (Closes: #703800)
+ * [x86] drm/i915: bounds check execbuffer relocation count (CVE-2013-0913)
+ * [x86] drm: Enable DRM_GMA500 as module, replacing DRM_PSB (Closes: #703506)
+ - Enable DRM_GMA600, DRM_GMA3600, DRM_MEDFIELD
+ * [x86] KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME
+ (CVE-2013-1796)
+ * [x86] KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache
+ functions (CVE-2013-1797)
+ * KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
+
+ -- Ben Hutchings <ben at decadent.org.uk> Mon, 25 Mar 2013 15:17:44 +0000
+
+linux (3.2.41-1) unstable; urgency=low
+
+ * New upstream stable update:
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.40
+ - ext4: return ENOMEM if sb_getblk() fails
+ - ext4: fix possible use-after-free with AIO
+ - s390/kvm: Fix store status for ACRS/FPRS
+ - staging: comedi: disallow COMEDI_DEVCONFIG on non-board minors
+ - ext4: fix race in ext4_mb_add_n_trim()
+ - UBIFS: fix double free of ubifs_orphan objects
+ - hrtimer: Prevent hrtimer_enqueue_reprogram race
+ - nfsd: Fix memleak
+ - x86: Do not leak kernel page mapping locations
+ - USB: usb-storage: unusual_devs update for Super TOP SATA bridge
+ - posix-cpu-timers: Fix nanosleep task_struct leak
+ - NFSv4.1: Don't decode skipped layoutgets
+ - cgroup: fix exit() vs rmdir() race
+ - cpuset: fix cpuset_print_task_mems_allowed() vs rename() race
+ - ext4: fix xattr block allocation/release with bigalloc
+ - mm: fix pageblock bitmap allocation
+ - target: Add missing mapped_lun bounds checking during make_mappedlun
+ setup
+ - b43: Increase number of RX DMA slots
+ - posix-timer: Don't call idr_find() with out-of-range ID
+ - fs: Fix possible use-after-free with AIO
+ - powerpc/kexec: Disable hard IRQ before kexec
+ - mmu_notifier_unregister NULL Pointer deref and multiple ->release()
+ callouts
+ - tmpfs: fix use-after-free of mempolicy object (CVE-2013-1767)
+ - ocfs2: fix possible use-after-free with AIO
+ - ocfs2: fix ocfs2_init_security_and_acl() to initialize acl correctly
+ - ocfs2: ac->ac_allow_chain_relink=0 won't disable group relink
+ - idr: fix a subtle bug in idr_get_next()
+ - idr: make idr_get_next() good for rcu_read_lock()
+ - idr: fix top layer handling
+ - sysctl: fix null checking in bin_dn_node_address()
+ - nbd: fsync and kill block device on shutdown
+ - s390/timer: avoid overflow when programming clock comparator
+ (regression in 3.2.38)
+ - xen-pciback: rate limit error messages from xen_pcibk_enable_msi{,x}()
+ (CVE-2013-0231)
+ - xen-netback: correctly return errors from netbk_count_requests()
+ - xen-netback: cancel the credit timer when taking the vif down
+ - ipv6: use a stronger hash for tcp
+ - staging: comedi: ni_labpc: correct differential channel sequence for
+ AI commands
+ - staging: comedi: ni_labpc: set up command4 register *after* command3
+ - vhost: fix length for cross region descriptor (CVE-2013-0311)
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.41
+ - NFS: Don't allow NFS silly-renamed files to be deleted, no signal
+ - ARM: VFP: fix emulation of second VFP instruction
+ - md: fix two bugs when attempting to resize RAID0 array.
+ - proc connector: reject unprivileged listener bumps
+ - cifs: ensure that cifs_get_root() only traverses directories
+ - dm: fix truncated status strings
+ - hw_random: make buffer usable in scatterlist. (real fix for #701784)
+ - efi_pstore: Check remaining space with QueryVariableInfo() before
+ writing data
+ - efi: be more paranoid about available space when creating variables
+ (Closes: #703574)
+ - vfs: fix pipe counter breakage
+ - xen/pciback: Don't disable a PCI device that is already disabled.
+ - ALSA: seq: Fix missing error handling in snd_seq_timer_open()
+ - ext3: Fix format string issues (CVE-2013-1848)
+ - keys: fix race with concurrent install_user_keyrings() (CVE-2013-1792)
+ - USB: cdc-wdm: fix buffer overflow (CVE-2013-1860)
+ - signal: always clear sa_restorer on execve (CVE-2013-0914)
+ - crypto: user - fix info leaks in report API (CVE-2013-2546,
+ CVE-2013-2547, CVE-2013-2548)
+ - Fix: compat_rw_copy_check_uvector() misuse in aio, readv, writev, and
+ security keys
+ - batman-adv: bat_socket_read missing checks
+ - batman-adv: Only write requested number of byte to user buffer
+ - mm/hotplug: correctly add new zone to all other nodes' zone lists
+ (CVE-2012-5517)
+ - btrfs: use rcu_barrier() to wait for bdev puts at unmount
+
+ [ Aurelien Jarno]
+ * [mips,mipsel] Disable VGA_CONSOLE and ignore the corresponding ABI
+ change. It is completely broken on MIPS.
+ * headers: Include Kbuild.platforms and Platform files in -common to
+ fix out-of-tree building on mips and mipsel.
+ * [{mips,mipsel}/{4,5}kc-malta] Enable HW_RANDOM as module so that both
+ flavours have a consistent configuration.
+
+ [ Ben Hutchings ]
+ * [x86] ata_piix: reenable MS Virtual PC guests (fixes regression in
+ 3.2.19-1)
+ * test-patches: Clean up all previous test patches, whether or not they
+ were applied
+ * test-patches: Add --fuzz option to allow testing patches that have fuzz
+ * [x86] efi: Fix processor-specific memcpy() build error (Closes: #698581)
+ * udeb: Add hid-topseed to input-modules (Closes: #702611)
+ * [x86] drm/i915: Unconditionally initialise the interrupt workers,
+ thanks to Bjørn Mork (Closes: #692607)
+ * efi: Ensure efivars is loaded on EFI systems (Closes: #703363)
+ - [x86] Use a platform device to trigger loading of efivars
+ - [ia64] Change EFI_VARS from module to built-in
+ * efivars: Work around serious firmware bugs
+ - Allow disabling use as a pstore backend
+ - Add module parameter to disable use as a pstore backend
+ * [x86] Set EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+ - explicitly calculate length of VariableName
+ - Handle duplicate names from get_next_variable()
+ * efi_pstore: Introducing workqueue updating sysfs
+ * efivars: pstore: Do not check size when erasing variable
+ * efivars: Remove check for 50% full on write
+ * kmsg_dump: Only dump kernel log in error cases (Closes: #703386)
+ - kexec: remove KMSG_DUMP_KEXEC
+ - kmsg_dump: don't run on non-error paths by default
+ * [x86] i915: initialize CADL in opregion (Closes: #703271)
+ * drm, agp: Update to 3.4.37:
+ - drm/radeon/dce6: fix display powergating
+ - drm: don't add inferred modes for monitors that don't support them
+ - drm/i915: Increase the RC6p threshold.
+ * signal: Fix use of missing sa_restorer field (build regression
+ introduced by fix for CVE-2013-0914)
+ * rds: limit the size allocated by rds_message_alloc()
+ * rtnl: fix info leak on RTM_GETLINK request for VF devices
+ * dcbnl: fix various netlink info leaks
+ * [s390] mm: fix flush_tlb_kernel_range()
+ * [powerpc] Fix cputable entry for 970MP rev 1.0
+ * vhost/net: fix heads usage of ubuf_info
+ * udf: avoid info leak on export (CVE-2012-6548)
+ * isofs: avoid info leak on export (CVE-2012-6549)
+ * [x86,powerpc/powerpc64] random: Change HW_RANDOM back from built-in to
+ module, as we now have a real fix for #701784
+ * [rt] Update to 3.2.40-rt60
+
+ -- Ben Hutchings <ben at decadent.org.uk> Sat, 23 Mar 2013 03:54:34 +0000
linux (3.2.39-2) unstable; urgency=high
Modified: dists/squeeze-backports/linux/debian/config/amd64/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/amd64/defines Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/amd64/defines Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,3 +1,8 @@
+[abi]
+ignore-changes:
+# Only for use by the vendor-specific KVM modules
+ module:arch/x86/kvm/kvm
+
[base]
featuresets:
none
Modified: dists/squeeze-backports/linux/debian/config/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/defines Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/defines Sun Apr 7 23:16:11 2013 (r19970)
@@ -39,6 +39,14 @@
vring_*
# No-one should depend on staging from OOT
module:drivers/staging/*
+# Private to this family of drivers
+ module:drivers/net/wireless/rtlwifi/*
+# Should not be used from OOT
+ kmsg_dump_register
+ kmsg_dump_unregister
+# Only used by Google firmware module
+ register_efivars
+ unregister_efivars
[base]
arches:
Modified: dists/squeeze-backports/linux/debian/config/i386/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/i386/defines Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/i386/defines Sun Apr 7 23:16:11 2013 (r19970)
@@ -2,6 +2,8 @@
ignore-changes:
disable_hlt
enable_hlt
+# Only for use by the vendor-specific KVM modules
+ module:arch/x86/kvm/kvm
[base]
featuresets:
Modified: dists/squeeze-backports/linux/debian/config/ia64/config
==============================================================================
--- dists/squeeze-backports/linux/debian/config/ia64/config Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/ia64/config Sun Apr 7 23:16:11 2013 (r19970)
@@ -131,7 +131,8 @@
##
## file: drivers/firmware/Kconfig
##
-CONFIG_EFI_VARS=m
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE=y
CONFIG_DMIID=y
##
Modified: dists/squeeze-backports/linux/debian/config/kernelarch-mips/config
==============================================================================
--- dists/squeeze-backports/linux/debian/config/kernelarch-mips/config Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/kernelarch-mips/config Sun Apr 7 23:16:11 2013 (r19970)
@@ -9,6 +9,11 @@
# CONFIG_NIU is not set
##
+## file: drivers/video/console/Kconfig
+##
+# CONFIG_VGA_CONSOLE is not set
+
+##
## file: fs/ext2/Kconfig
##
# CONFIG_EXT2_FS is not set
Modified: dists/squeeze-backports/linux/debian/config/kernelarch-x86/config
==============================================================================
--- dists/squeeze-backports/linux/debian/config/kernelarch-x86/config Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/kernelarch-x86/config Sun Apr 7 23:16:11 2013 (r19970)
@@ -293,7 +293,7 @@
##
## file: drivers/char/hw_random/Kconfig
##
-CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM=m
CONFIG_HW_RANDOM_INTEL=m
CONFIG_HW_RANDOM_AMD=m
CONFIG_HW_RANDOM_GEODE=m
@@ -384,6 +384,9 @@
CONFIG_EDD=m
# CONFIG_EDD_OFF is not set
CONFIG_EFI_VARS=m
+CONFIG_EFI_VARS_PSTORE=y
+#. Runtime-disabled by default
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
CONFIG_DELL_RBU=m
CONFIG_DCDBAS=m
CONFIG_DMIID=y
@@ -415,6 +418,14 @@
CONFIG_DRM_SIS=m
##
+## file: drivers/staging/gma500/Kconfig
+##
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_GMA600=y
+CONFIG_DRM_GMA3600=y
+CONFIG_DRM_MEDFIELD=y
+
+##
## file: drivers/gpu/drm/nouveau/Kconfig
##
CONFIG_DRM_NOUVEAU=m
@@ -1267,11 +1278,6 @@
CONFIG_ET131X=m
##
-## file: drivers/staging/gma500/Kconfig
-##
-CONFIG_DRM_PSB=m
-
-##
## file: drivers/staging/hv/Kconfig
##
CONFIG_HYPERV_STORAGE=m
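
A note on the EFI_VARS_PSTORE_DEFAULT_DISABLE=y setting above: assuming the
runtime knob added by the efivars patches is the pstore_disable module
parameter (an assumption based on the patch titles, not shown in this diff),
pstore backing could still be enabled at runtime, for example:

    # hypothetical example; parameter name assumed from the efivars patches
    modprobe efivars pstore_disable=0
    # for a built-in efivars (as on ia64), the equivalent boot parameter
    # would be efivars.pstore_disable=0
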
Modified: dists/squeeze-backports/linux/debian/config/mips/config
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config Sun Apr 7 23:16:11 2013 (r19970)
@@ -6,4 +6,3 @@
# CONFIG_CPU_LITTLE_ENDIAN is not set
## end choice
# CONFIG_RAPIDIO is not set
-
Modified: dists/squeeze-backports/linux/debian/config/mips/config.4kc-malta
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.4kc-malta Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.4kc-malta Sun Apr 7 23:16:11 2013 (r19970)
@@ -197,6 +197,11 @@
CONFIG_APPLICOM=m
##
+## file: drivers/char/hw_random/Kconfig
+##
+CONFIG_HW_RANDOM=m
+
+##
## file: drivers/char/ipmi/Kconfig
##
CONFIG_IPMI_HANDLER=m
@@ -1034,8 +1039,6 @@
##
## file: drivers/video/console/Kconfig
##
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
# CONFIG_FONTS is not set
Modified: dists/squeeze-backports/linux/debian/config/mips/config.5kc-malta
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.5kc-malta Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.5kc-malta Sun Apr 7 23:16:11 2013 (r19970)
@@ -200,6 +200,11 @@
CONFIG_APPLICOM=m
##
+## file: drivers/char/hw_random/Kconfig
+##
+CONFIG_HW_RANDOM=m
+
+##
## file: drivers/char/ipmi/Kconfig
##
CONFIG_IPMI_HANDLER=m
@@ -1038,8 +1043,6 @@
##
## file: drivers/video/console/Kconfig
##
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
# CONFIG_FONTS is not set
Modified: dists/squeeze-backports/linux/debian/config/mips/config.r4k-ip22
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.r4k-ip22 Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.r4k-ip22 Sun Apr 7 23:16:11 2013 (r19970)
@@ -457,7 +457,6 @@
##
## file: drivers/video/console/Kconfig
##
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_MDA_CONSOLE is not set
CONFIG_SGI_NEWPORT_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
Modified: dists/squeeze-backports/linux/debian/config/mips/config.r5k-ip32
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.r5k-ip32 Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.r5k-ip32 Sun Apr 7 23:16:11 2013 (r19970)
@@ -572,7 +572,6 @@
##
## file: drivers/video/console/Kconfig
##
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
Modified: dists/squeeze-backports/linux/debian/config/mips/config.sb1-bcm91250a
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.sb1-bcm91250a Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.sb1-bcm91250a Sun Apr 7 23:16:11 2013 (r19970)
@@ -723,7 +723,6 @@
##
## file: drivers/video/console/Kconfig
##
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
Modified: dists/squeeze-backports/linux/debian/config/mips/config.sb1a-bcm91480b
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/config.sb1a-bcm91480b Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/config.sb1a-bcm91480b Sun Apr 7 23:16:11 2013 (r19970)
@@ -729,7 +729,6 @@
##
## file: drivers/video/console/Kconfig
##
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
Modified: dists/squeeze-backports/linux/debian/config/mips/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mips/defines Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mips/defines Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,3 +1,8 @@
+[abi]
+ignore-changes:
+# vgacon is broken and unusable on MIPS
+ vgacon_*
+
[base]
flavours:
r4k-ip22
Modified: dists/squeeze-backports/linux/debian/config/mipsel/config
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mipsel/config Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mipsel/config Sun Apr 7 23:16:11 2013 (r19970)
@@ -5,4 +5,3 @@
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
## end choice
-
Modified: dists/squeeze-backports/linux/debian/config/mipsel/config.loongson-2f
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mipsel/config.loongson-2f Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mipsel/config.loongson-2f Sun Apr 7 23:16:11 2013 (r19970)
@@ -128,6 +128,5 @@
##
## file: drivers/video/console/Kconfig
##
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
Modified: dists/squeeze-backports/linux/debian/config/mipsel/config.r5k-cobalt
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mipsel/config.r5k-cobalt Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mipsel/config.r5k-cobalt Sun Apr 7 23:16:11 2013 (r19970)
@@ -696,11 +696,6 @@
CONFIG_FB_COBALT=m
##
-## file: drivers/video/console/Kconfig
-##
-# CONFIG_VGA_CONSOLE is not set
-
-##
## file: drivers/w1/Kconfig
##
# CONFIG_W1 is not set
Modified: dists/squeeze-backports/linux/debian/config/mipsel/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/mipsel/defines Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/mipsel/defines Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,3 +1,8 @@
+[abi]
+ignore-changes:
+# vgacon is broken and unusable on MIPS
+ vgacon_*
+
[base]
flavours:
r5k-cobalt
Modified: dists/squeeze-backports/linux/debian/config/powerpc/config.powerpc64
==============================================================================
--- dists/squeeze-backports/linux/debian/config/powerpc/config.powerpc64 Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/config/powerpc/config.powerpc64 Sun Apr 7 23:16:11 2013 (r19970)
@@ -95,7 +95,7 @@
##
## file: drivers/char/hw_random/Kconfig
##
-CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM=m
CONFIG_HW_RANDOM_PASEMI=m
##
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch)
@@ -0,0 +1,42 @@
+From: Andy Honig <ahonig at google.com>
+Date: Wed, 20 Feb 2013 14:49:16 -0800
+Subject: KVM: Fix bounds checking in ioapic indirect register reads
+ (CVE-2013-1798)
+
+commit a2c118bfab8bc6b8bb213abfc35201e441693d55 upstream.
+
+If the guest specifies a IOAPIC_REG_SELECT with an invalid value and follows
+that with a read of the IOAPIC_REG_WINDOW KVM does not properly validate
+that request. ioapic_read_indirect contains an
+ASSERT(redir_index < IOAPIC_NUM_PINS), but the ASSERT has no effect in
+non-debug builds. In recent kernels this allows a guest to cause a kernel
+oops by reading invalid memory. In older kernels (pre-3.3) this allows a
+guest to read from large ranges of host memory.
+
+Tested: tested against apic unit tests.
+
+Signed-off-by: Andrew Honig <ahonig at google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti at redhat.com>
+---
+ virt/kvm/ioapic.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index ce82b94..5ba005c 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
+ u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+ u64 redir_content;
+
+- ASSERT(redir_index < IOAPIC_NUM_PINS);
++ if (redir_index < IOAPIC_NUM_PINS)
++ redir_content =
++ ioapic->redirtbl[redir_index].bits;
++ else
++ redir_content = ~0ULL;
+
+- redir_content = ioapic->redirtbl[redir_index].bits;
+ result = (ioapic->ioregsel & 0x1) ?
+ (redir_content >> 32) & 0xffffffff :
+ redir_content & 0xffffffff;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch)
@@ -0,0 +1,83 @@
+From: Mathias Krause <minipli at googlemail.com>
+Date: Sat, 9 Mar 2013 05:52:21 +0000
+Subject: dcbnl: fix various netlink info leaks
+
+[ Upstream commit 29cd8ae0e1a39e239a3a7b67da1986add1199fc0 ]
+
+The dcb netlink interface leaks stack memory in various places:
+* perm_addr[] buffer is only filled at max with 12 of the 32 bytes but
+ copied completely,
+* no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand,
+ so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes
+ for ieee_pfc structs, etc.,
+* the same is true for CEE -- no in-kernel driver fills the whole
+ struct,
+
+Prevent all of the above stack info leaks by properly initializing the
+buffers/structures involved.
+
+Signed-off-by: Mathias Krause <minipli at googlemail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ net/dcb/dcbnl.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -336,6 +336,7 @@ static int dcbnl_getperm_hwaddr(struct n
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_GPERM_HWADDR;
+
++ memset(perm_addr, 0, sizeof(perm_addr));
+ netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
+
+ ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
+@@ -1238,6 +1239,7 @@ static int dcbnl_ieee_fill(struct sk_buf
+
+ if (ops->ieee_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+@@ -1245,6 +1247,7 @@ static int dcbnl_ieee_fill(struct sk_buf
+
+ if (ops->ieee_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+@@ -1277,6 +1280,7 @@ static int dcbnl_ieee_fill(struct sk_buf
+ /* get peer info if available */
+ if (ops->ieee_peer_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_peer_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+@@ -1284,6 +1288,7 @@ static int dcbnl_ieee_fill(struct sk_buf
+
+ if (ops->ieee_peer_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+@@ -1463,6 +1468,7 @@ static int dcbnl_cee_fill(struct sk_buff
+ /* peer info if available */
+ if (ops->cee_peer_getpg) {
+ struct cee_pg pg;
++ memset(&pg, 0, sizeof(pg));
+ err = ops->cee_peer_getpg(netdev, &pg);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+@@ -1470,6 +1476,7 @@ static int dcbnl_cee_fill(struct sk_buff
+
+ if (ops->cee_peer_getpfc) {
+ struct cee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->cee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch)
@@ -0,0 +1,176 @@
+From: Seiji Aguchi <seiji.aguchi at hds.com>
+Date: Tue, 12 Feb 2013 13:04:41 -0800
+Subject: efi_pstore: Introducing workqueue updating sysfs
+
+commit a93bc0c6e07ed9bac44700280e65e2945d864fd4 upstream.
+
+[Problem]
+efi_pstore creates sysfs entries, which enable users to access to NVRAM,
+in a write callback. If a kernel panic happens in an interrupt context,
+it may fail because it could sleep due to dynamic memory allocations during
+creating sysfs entries.
+
+[Patch Description]
+This patch removes sysfs operations from a write callback by introducing
+a workqueue updating sysfs entries which is scheduled after the write
+callback is called.
+
+Also, the workqueue is kicked in a just oops case.
+A system will go down in other cases such as panic, clean shutdown and emergency
+restart. And we don't need to create sysfs entries because there is no chance for
+users to access to them.
+
+efi_pstore will be robust against a kernel panic in an interrupt context with this patch.
+
+Signed-off-by: Seiji Aguchi <seiji.aguchi at hds.com>
+Acked-by: Matt Fleming <matt.fleming at intel.com>
+Signed-off-by: Tony Luck <tony.luck at intel.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Don't check reason in efi_pstore_write(), as it is not given as a
+ parameter
+ - Move up declaration of __efivars]
+---
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -128,6 +128,8 @@ struct efivar_attribute {
+ ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+ };
+
++static struct efivars __efivars;
++
+ #define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+@@ -152,6 +154,13 @@ efivar_create_sysfs_entry(struct efivars
+ efi_char16_t *variable_name,
+ efi_guid_t *vendor_guid);
+
++/*
++ * Prototype for workqueue functions updating sysfs entry
++ */
++
++static void efivar_update_sysfs_entries(struct work_struct *);
++static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
++
+ /* Return the number of unicode characters in data */
+ static unsigned long
+ utf16_strnlen(efi_char16_t *s, size_t maxlength)
+@@ -834,11 +843,7 @@ static int efi_pstore_write(enum pstore_
+ if (found)
+ efivar_unregister(found);
+
+- if (size)
+- ret = efivar_create_sysfs_entry(efivars,
+- utf16_strsize(efi_name,
+- DUMP_NAME_LEN * 2),
+- efi_name, &vendor);
++ schedule_work(&efivar_work);
+
+ *id = part;
+ return ret;
+@@ -1017,6 +1022,75 @@ static ssize_t efivar_delete(struct file
+ return count;
+ }
+
++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
++{
++ struct efivar_entry *entry, *n;
++ struct efivars *efivars = &__efivars;
++ unsigned long strsize1, strsize2;
++ bool found = false;
++
++ strsize1 = utf16_strsize(variable_name, 1024);
++ list_for_each_entry_safe(entry, n, &efivars->list, list) {
++ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
++ if (strsize1 == strsize2 &&
++ !memcmp(variable_name, &(entry->var.VariableName),
++ strsize2) &&
++ !efi_guidcmp(entry->var.VendorGuid,
++ *vendor)) {
++ found = true;
++ break;
++ }
++ }
++ return found;
++}
++
++static void efivar_update_sysfs_entries(struct work_struct *work)
++{
++ struct efivars *efivars = &__efivars;
++ efi_guid_t vendor;
++ efi_char16_t *variable_name;
++ unsigned long variable_name_size = 1024;
++ efi_status_t status = EFI_NOT_FOUND;
++ bool found;
++
++ /* Add new sysfs entries */
++ while (1) {
++ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
++ if (!variable_name) {
++ pr_err("efivars: Memory allocation failed.\n");
++ return;
++ }
++
++ spin_lock_irq(&efivars->lock);
++ found = false;
++ while (1) {
++ variable_name_size = 1024;
++ status = efivars->ops->get_next_variable(
++ &variable_name_size,
++ variable_name,
++ &vendor);
++ if (status != EFI_SUCCESS) {
++ break;
++ } else {
++ if (!variable_is_present(variable_name,
++ &vendor)) {
++ found = true;
++ break;
++ }
++ }
++ }
++ spin_unlock_irq(&efivars->lock);
++
++ if (!found) {
++ kfree(variable_name);
++ break;
++ } else
++ efivar_create_sysfs_entry(efivars,
++ variable_name_size,
++ variable_name, &vendor);
++ }
++}
++
+ /*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver
+@@ -1273,7 +1347,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+
+-static struct efivars __efivars;
+ static struct efivar_operations ops;
+
+ /*
+@@ -1331,6 +1404,8 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
++ cancel_work_sync(&efivar_work);
++
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -620,7 +620,8 @@ struct efivars {
+ * 1) ->list - adds, removals, reads, writes
+ * 2) ops.[gs]et_variable() calls.
+ * It must not be held when creating sysfs entries or calling kmalloc.
+- * ops.get_next_variable() is only called from register_efivars(),
++ * ops.get_next_variable() is only called from register_efivars()
++ * or efivar_update_sysfs_entries(),
+ * which is protected by the BKL, so that path is safe.
+ */
+ spinlock_t lock;
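The mechanism the patch relies on is a statically declared work item: the atomic write path only queues the work, and the sleeping operations (allocation, sysfs/kobject creation) run later in process context. A minimal sketch of the same pattern in a hypothetical module, not the efivars code itself:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void update_entries(struct work_struct *work)
    {
        /* runs in process context; may sleep, allocate, touch sysfs */
        pr_info("deferred update running\n");
    }
    static DECLARE_WORK(update_work, update_entries);

    /* called from atomic context (e.g. a pstore write during an oops) */
    static void on_event(void)
    {
        schedule_work(&update_work);    /* safe here: only queues the work */
    }

    static int __init demo_init(void)
    {
        on_event();
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_work_sync(&update_work); /* as the patch does in efivars_exit() */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");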
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch)
@@ -0,0 +1,72 @@
+From: Seth Forshee <seth.forshee at canonical.com>
+Date: Mon, 11 Mar 2013 16:17:50 -0500
+Subject: efivars: Add module parameter to disable use as a pstore backend
+
+commit ec0971ba5372a4dfa753f232449d23a8fd98490e upstream.
+
+We know that with some firmware implementations writing too much data to
+UEFI variables can lead to bricking machines. Recent changes attempt to
+address this issue, but for some it may still be prudent to avoid
+writing large amounts of data until the solution has been proven on a
+wide variety of hardware.
+
+Crash dumps or other data from pstore can potentially be a large data
+source. Add a pstore_module parameter to efivars to allow disabling its
+use as a backend for pstore. Also add a config option,
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE, to allow setting the default
+value of this parameter to true (i.e. disabled by default).
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Cc: Josh Boyer <jwboyer at redhat.com>
+Cc: Matthew Garrett <mjg59 at srcf.ucam.org>
+Cc: Seiji Aguchi <seiji.aguchi at hds.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Signed-off-by: Matt Fleming <matt.fleming at intel.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ drivers/firmware/Kconfig | 9 +++++++++
+ drivers/firmware/efivars.c | 8 +++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -62,6 +62,15 @@ config EFI_VARS_PSTORE
+ will allow writing console messages, crash dumps, or anything
+ else supported by pstore to EFI variables.
+
++config EFI_VARS_PSTORE_DEFAULT_DISABLE
++ bool "Disable using efivars as a pstore backend by default"
++ depends on EFI_VARS_PSTORE
++ default n
++ help
++ Saying Y here will disable the use of efivars as a storage
++ backend for pstore by default. This setting can be overridden
++ using the efivars module's pstore_disable parameter.
++
+ config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -93,6 +93,11 @@ MODULE_ALIAS("platform:efivars");
+
+ #define DUMP_NAME_LEN 52
+
++static bool efivars_pstore_disable =
++ IS_ENABLED(EFI_VARS_PSTORE_DEFAULT_DISABLE);
++
++module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
++
+ /*
+ * The maximum size of VariableName + Data = 1024
+ * Therefore, it's reasonable to save that much
+@@ -1258,7 +1263,8 @@ int register_efivars(struct efivars *efi
+ if (error)
+ unregister_efivars(efivars);
+
+- efivar_pstore_register(efivars);
++ if (!efivars_pstore_disable)
++ efivar_pstore_register(efivars);
+
+ out:
+ kfree(variable_name);
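module_param_named() is what exposes the knob: the internal variable keeps its own name while the parameter appears under a shorter one, and mode 0644 makes it writable through /sys/module/.../parameters/. A hypothetical module using the same pattern (names and the Kconfig symbol are illustrative; note the full CONFIG_ prefix, which a later patch in this series has to fix in efivars):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical Kconfig symbol controlling the default value. */
    static bool demo_backend_disable = IS_ENABLED(CONFIG_DEMO_DISABLE_DEFAULT);
    module_param_named(disable, demo_backend_disable, bool, 0644);

    static int __init demo_init(void)
    {
        if (!demo_backend_disable)
            pr_info("demo: registering optional backend\n");
        return 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");

Loaded as "modprobe demo disable=1", or flipped at runtime via /sys/module/demo/parameters/disable.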
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch)
@@ -0,0 +1,142 @@
+From: Seth Forshee <seth.forshee at canonical.com>
+Date: Thu, 7 Mar 2013 11:40:17 -0600
+Subject: efivars: Allow disabling use as a pstore backend
+
+commit ed9dc8ce7a1c8115dba9483a9b51df8b63a2e0ef upstream.
+
+Add a new option, CONFIG_EFI_VARS_PSTORE, which can be set to N to
+avoid using efivars as a backend to pstore, as some users may want to
+compile out the code completely.
+
+Set the default to Y to maintain backwards compatibility, since this
+feature has always been enabled until now.
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Cc: Josh Boyer <jwboyer at redhat.com>
+Cc: Matthew Garrett <mjg59 at srcf.ucam.org>
+Cc: Seiji Aguchi <seiji.aguchi at hds.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Signed-off-by: Matt Fleming <matt.fleming at intel.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ drivers/firmware/Kconfig | 9 +++++++
+ drivers/firmware/efivars.c | 64 ++++++++++++++------------------------------
+ 2 files changed, 29 insertions(+), 44 deletions(-)
+
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -53,6 +53,15 @@ config EFI_VARS
+ Subsequent efibootmgr releases may be found at:
+ <http://linux.dell.com/efibootmgr>
+
++config EFI_VARS_PSTORE
++ bool "Register efivars backend for pstore"
++ depends on EFI_VARS && PSTORE
++ default y
++ help
++ Say Y here to enable use efivars as a backend to pstore. This
++ will allow writing console messages, crash dumps, or anything
++ else supported by pstore to EFI variables.
++
+ config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -660,8 +660,6 @@ static struct kobj_type efivar_ktype = {
+ .default_attrs = def_attrs,
+ };
+
+-static struct pstore_info efi_pstore_info;
+-
+ static inline void
+ efivar_unregister(struct efivar_entry *var)
+ {
+@@ -698,7 +696,7 @@ static int efi_status_to_err(efi_status_
+ return err;
+ }
+
+-#ifdef CONFIG_PSTORE
++#ifdef CONFIG_EFI_VARS_PSTORE
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+@@ -848,36 +846,6 @@ static int efi_pstore_erase(enum pstore_
+
+ return 0;
+ }
+-#else
+-static int efi_pstore_open(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_close(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+- struct timespec *timespec,
+- char **buf, struct pstore_info *psi)
+-{
+- return -1;
+-}
+-
+-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+- unsigned int part, size_t size, struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+- struct pstore_info *psi)
+-{
+- return 0;
+-}
+-#endif
+
+ static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+@@ -889,6 +857,24 @@ static struct pstore_info efi_pstore_inf
+ .erase = efi_pstore_erase,
+ };
+
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ efivars->efi_pstore_info = efi_pstore_info;
++ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
++ if (efivars->efi_pstore_info.buf) {
++ efivars->efi_pstore_info.bufsize = 1024;
++ efivars->efi_pstore_info.data = efivars;
++ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
++ pstore_register(&efivars->efi_pstore_info);
++ }
++}
++#else
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ return;
++}
++#endif
++
+ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+@@ -1272,15 +1258,7 @@ int register_efivars(struct efivars *efi
+ if (error)
+ unregister_efivars(efivars);
+
+- efivars->efi_pstore_info = efi_pstore_info;
+-
+- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+- if (efivars->efi_pstore_info.buf) {
+- efivars->efi_pstore_info.bufsize = 1024;
+- efivars->efi_pstore_info.data = efivars;
+- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+- pstore_register(&efivars->efi_pstore_info);
+- }
++ efivar_pstore_register(efivars);
+
+ out:
+ kfree(variable_name);
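The refactoring follows a common kernel pattern: the optional feature body lives under #ifdef CONFIG_..., and the #else branch provides an empty stub with the same signature so callers such as register_efivars() need no conditionals. A reduced, hypothetical illustration:

    /* Illustrative only -- not the efivars code. */
    struct demo_dev;

    #ifdef CONFIG_DEMO_PSTORE
    static void demo_pstore_register(struct demo_dev *dev)
    {
        /* allocate the buffer, fill in ops, call pstore_register() */
    }
    #else
    static void demo_pstore_register(struct demo_dev *dev)
    {
        /* backend compiled out: nothing to do */
    }
    #endif

    static int demo_probe(struct demo_dev *dev)
    {
        /* no #ifdef needed at the call site */
        demo_pstore_register(dev);
        return 0;
    }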
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch)
@@ -0,0 +1,24 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Fri, 22 Mar 2013 19:43:53 +0000
+Subject: efivars: Fix check for CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE
+
+The 'CONFIG_' prefix is not implicit in IS_ENABLED().
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Cc: Seth Forshee <seth.forshee at canonical.com>
+Cc: <stable at vger.kernel.org>
+---
+ drivers/firmware/efivars.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -94,7 +94,7 @@ MODULE_ALIAS("platform:efivars");
+ #define DUMP_NAME_LEN 52
+
+ static bool efivars_pstore_disable =
+- IS_ENABLED(EFI_VARS_PSTORE_DEFAULT_DISABLE);
++ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
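The pitfall this one-liner fixes: IS_ENABLED() tests the literal macro name it is given, so the CONFIG_ prefix must be written out. Without it, the test silently evaluates to 0 regardless of the Kconfig setting. Roughly (illustrative symbol names):

    #include <linux/types.h>
    #include <linux/kconfig.h>

    static bool a = IS_ENABLED(CONFIG_FOO); /* 1 when CONFIG_FOO=y or =m */
    static bool b = IS_ENABLED(FOO);        /* always 0: the macro FOO is never defined */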
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch)
@@ -0,0 +1,156 @@
+From: Matt Fleming <matt.fleming at intel.com>
+Date: Thu, 7 Mar 2013 11:59:14 +0000
+Subject: efivars: Handle duplicate names from get_next_variable()
+
+commit e971318bbed610e28bb3fde9d548e6aaf0a6b02e upstream.
+
+Some firmware exhibits a bug where the same VariableName and
+VendorGuid values are returned on multiple invocations of
+GetNextVariableName(). See,
+
+ https://bugzilla.kernel.org/show_bug.cgi?id=47631
+
+As a consequence of such a bug, Andre reports hitting the following
+WARN_ON() in the sysfs code after updating the BIOS on his, "Gigabyte
+Technology Co., Ltd. To be filled by O.E.M./Z77X-UD3H, BIOS F19e
+11/21/2012)" machine,
+
+[ 0.581554] EFI Variables Facility v0.08 2004-May-17
+[ 0.584914] ------------[ cut here ]------------
+[ 0.585639] WARNING: at /home/andre/linux/fs/sysfs/dir.c:536 sysfs_add_one+0xd4/0x100()
+[ 0.586381] Hardware name: To be filled by O.E.M.
+[ 0.587123] sysfs: cannot create duplicate filename '/firmware/efi/vars/SbAslBufferPtrVar-01f33c25-764d-43ea-aeea-6b5a41f3f3e8'
+[ 0.588694] Modules linked in:
+[ 0.589484] Pid: 1, comm: swapper/0 Not tainted 3.8.0+ #7
+[ 0.590280] Call Trace:
+[ 0.591066] [<ffffffff81208954>] ? sysfs_add_one+0xd4/0x100
+[ 0.591861] [<ffffffff810587bf>] warn_slowpath_common+0x7f/0xc0
+[ 0.592650] [<ffffffff810588bc>] warn_slowpath_fmt+0x4c/0x50
+[ 0.593429] [<ffffffff8134dd85>] ? strlcat+0x65/0x80
+[ 0.594203] [<ffffffff81208954>] sysfs_add_one+0xd4/0x100
+[ 0.594979] [<ffffffff81208b78>] create_dir+0x78/0xd0
+[ 0.595753] [<ffffffff81208ec6>] sysfs_create_dir+0x86/0xe0
+[ 0.596532] [<ffffffff81347e4c>] kobject_add_internal+0x9c/0x220
+[ 0.597310] [<ffffffff81348307>] kobject_init_and_add+0x67/0x90
+[ 0.598083] [<ffffffff81584a71>] ? efivar_create_sysfs_entry+0x61/0x1c0
+[ 0.598859] [<ffffffff81584b2b>] efivar_create_sysfs_entry+0x11b/0x1c0
+[ 0.599631] [<ffffffff8158517e>] register_efivars+0xde/0x420
+[ 0.600395] [<ffffffff81d430a7>] ? edd_init+0x2f5/0x2f5
+[ 0.601150] [<ffffffff81d4315f>] efivars_init+0xb8/0x104
+[ 0.601903] [<ffffffff8100215a>] do_one_initcall+0x12a/0x180
+[ 0.602659] [<ffffffff81d05d80>] kernel_init_freeable+0x13e/0x1c6
+[ 0.603418] [<ffffffff81d05586>] ? loglevel+0x31/0x31
+[ 0.604183] [<ffffffff816a6530>] ? rest_init+0x80/0x80
+[ 0.604936] [<ffffffff816a653e>] kernel_init+0xe/0xf0
+[ 0.605681] [<ffffffff816ce7ec>] ret_from_fork+0x7c/0xb0
+[ 0.606414] [<ffffffff816a6530>] ? rest_init+0x80/0x80
+[ 0.607143] ---[ end trace 1609741ab737eb29 ]---
+
+There's not much we can do to work around and keep traversing the
+variable list once we hit this firmware bug. Our only solution is to
+terminate the loop because, as Lingzhu reports, some machines get
+stuck when they encounter duplicate names,
+
+ > I had an IBM System x3100 M4 and x3850 X5 on which kernel would
+ > get stuck in infinite loop creating duplicate sysfs files because,
+ > for some reason, there are several duplicate boot entries in nvram
+ > getting GetNextVariableName into a circle of iteration (with
+ > period > 2).
+
+Also disable the workqueue, as efivar_update_sysfs_entries() uses
+GetNextVariableName() to figure out which variables have been created
+since the last iteration. That algorithm isn't going to work if
+GetNextVariableName() returns duplicates. Note that we don't disable
+EFI variable creation completely on the affected machines, it's just
+that any pstore dump-* files won't appear in sysfs until the next
+boot.
+
+Reported-by: Andre Heider <a.heider at gmail.com>
+Reported-by: Lingzhu Xiang <lxiang at redhat.com>
+Tested-by: Lingzhu Xiang <lxiang at redhat.com>
+Cc: Seiji Aguchi <seiji.aguchi at hds.com>
+Signed-off-by: Matt Fleming <matt.fleming at intel.com>
+[bwh: Backported to 3.2: reason is not checked in efi_pstore_write()]
+---
+ drivers/firmware/efivars.c | 48 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 47 insertions(+), 1 deletion(-)
+
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -160,6 +160,7 @@ efivar_create_sysfs_entry(struct efivars
+
+ static void efivar_update_sysfs_entries(struct work_struct *);
+ static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
++static bool efivar_wq_enabled = true;
+
+ /* Return the number of unicode characters in data */
+ static unsigned long
+@@ -843,7 +844,8 @@ static int efi_pstore_write(enum pstore_
+ if (found)
+ efivar_unregister(found);
+
+- schedule_work(&efivar_work);
++ if (efivar_wq_enabled)
++ schedule_work(&efivar_work);
+
+ *id = part;
+ return ret;
+@@ -1306,6 +1308,35 @@ void unregister_efivars(struct efivars *
+ }
+ EXPORT_SYMBOL_GPL(unregister_efivars);
+
++/*
++ * Print a warning when duplicate EFI variables are encountered and
++ * disable the sysfs workqueue since the firmware is buggy.
++ */
++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
++ unsigned long len16)
++{
++ size_t i, len8 = len16 / sizeof(efi_char16_t);
++ char *s8;
++
++ /*
++ * Disable the workqueue since the algorithm it uses for
++ * detecting new variables won't work with this buggy
++ * implementation of GetNextVariableName().
++ */
++ efivar_wq_enabled = false;
++
++ s8 = kzalloc(len8, GFP_KERNEL);
++ if (!s8)
++ return;
++
++ for (i = 0; i < len8; i++)
++ s8[i] = s16[i];
++
++ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
++ s8, vendor_guid);
++ kfree(s8);
++}
++
+ int register_efivars(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *parent_kobj)
+@@ -1348,6 +1379,22 @@ int register_efivars(struct efivars *efi
+ case EFI_SUCCESS:
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
++
++ /*
++ * Some firmware implementations return the
++ * same variable name on multiple calls to
++ * get_next_variable(). Terminate the loop
++ * immediately as there is no guarantee that
++ * we'll ever see a different variable name,
++ * and may end up looping here forever.
++ */
++ if (variable_is_present(variable_name, &vendor_guid)) {
++ dup_variable_bug(variable_name, &vendor_guid,
++ variable_name_size);
++ status = EFI_NOT_FOUND;
++ break;
++ }
++
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name,
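The defensive idea generalizes: when walking an external enumerator that should return each key once, remember what has already been seen and stop as soon as a key repeats instead of looping forever. A small userspace sketch of that idea (toy enumerator, not the EFI interface):

    #include <stdio.h>
    #include <string.h>

    /* Toy enumerator standing in for GetNextVariableName(): note the repeat. */
    static const char *next_name(void)
    {
        static const char *names[] = { "Boot0000", "Boot0001", "Boot0001", NULL };
        static int i;
        return names[i] ? names[i++] : NULL;
    }

    static int already_seen(char seen[][32], int n, const char *name)
    {
        for (int i = 0; i < n; i++)
            if (!strcmp(seen[i], name))
                return 1;
        return 0;
    }

    int main(void)
    {
        char seen[64][32];
        int n = 0;
        const char *name;

        while ((name = next_name()) && n < 64) {
            if (already_seen(seen, n, name)) {
                fprintf(stderr, "duplicate '%s': buggy enumerator, stopping\n", name);
                break;  /* terminate instead of spinning forever */
            }
            snprintf(seen[n++], sizeof(seen[0]), "%s", name);
            printf("registering %s\n", name);
        }
        return 0;
    }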
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch)
@@ -0,0 +1,101 @@
+From: Matt Fleming <matt.fleming at intel.com>
+Date: Fri, 1 Mar 2013 14:49:12 +0000
+Subject: efivars: explicitly calculate length of VariableName
+
+commit ec50bd32f1672d38ddce10fb1841cbfda89cfe9a upstream.
+
+It's not wise to assume VariableNameSize represents the length of
+VariableName, as not all firmware updates VariableNameSize in the same
+way (some don't update it at all if EFI_SUCCESS is returned). There
+are even implementations out there that update VariableNameSize with
+values that are both larger than the string returned in VariableName
+and smaller than the buffer passed to GetNextVariableName(), which
+resulted in the following bug report from Michael Schroeder,
+
+ > On HP z220 system (firmware version 1.54), some EFI variables are
+ > incorrectly named :
+ >
+ > ls -d /sys/firmware/efi/vars/*8be4d* | grep -v -- -8be returns
+ > /sys/firmware/efi/vars/dbxDefault-pport8be4df61-93ca-11d2-aa0d-00e098032b8c
+ > /sys/firmware/efi/vars/KEKDefault-pport8be4df61-93ca-11d2-aa0d-00e098032b8c
+ > /sys/firmware/efi/vars/SecureBoot-pport8be4df61-93ca-11d2-aa0d-00e098032b8c
+ > /sys/firmware/efi/vars/SetupMode-Information8be4df61-93ca-11d2-aa0d-00e098032b8c
+
+The issue here is that because we blindly use VariableNameSize without
+verifying its value, we can potentially read garbage values from the
+buffer containing VariableName if VariableNameSize is larger than the
+length of VariableName.
+
+Since VariableName is a string, we can calculate its size by searching
+for the terminating NULL character.
+
+Reported-by: Frederic Crozat <fcrozat at suse.com>
+Cc: Matthew Garrett <mjg59 at srcf.ucam.org>
+Cc: Josh Boyer <jwboyer at redhat.com>
+Cc: Michael Schroeder <mls at suse.com>
+Cc: Lee, Chun-Yi <jlee at suse.com>
+Cc: Lingzhu Xiang <lxiang at redhat.com>
+Cc: Seiji Aguchi <seiji.aguchi at hds.com>
+Signed-off-by: Matt Fleming <matt.fleming at intel.com>
+---
+ drivers/firmware/efivars.c | 32 +++++++++++++++++++++++++++++++-
+ 1 file changed, 31 insertions(+), 1 deletion(-)
+
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -1044,6 +1044,31 @@ static bool variable_is_present(efi_char
+ return found;
+ }
+
++/*
++ * Returns the size of variable_name, in bytes, including the
++ * terminating NULL character, or variable_name_size if no NULL
++ * character is found among the first variable_name_size bytes.
++ */
++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
++ unsigned long variable_name_size)
++{
++ unsigned long len;
++ efi_char16_t c;
++
++ /*
++ * The variable name is, by definition, a NULL-terminated
++ * string, so make absolutely sure that variable_name_size is
++ * the value we expect it to be. If not, return the real size.
++ */
++ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
++ c = variable_name[(len / sizeof(c)) - 1];
++ if (!c)
++ break;
++ }
++
++ return min(len, variable_name_size);
++}
++
+ static void efivar_update_sysfs_entries(struct work_struct *work)
+ {
+ struct efivars *efivars = &__efivars;
+@@ -1084,10 +1109,13 @@ static void efivar_update_sysfs_entries(
+ if (!found) {
+ kfree(variable_name);
+ break;
+- } else
++ } else {
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name, &vendor);
++ }
+ }
+ }
+
+@@ -1318,6 +1346,8 @@ int register_efivars(struct efivars *efi
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name,
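The helper bounds the firmware-reported size by scanning for the terminating NUL in the UTF-16 name. A userspace re-implementation of the same logic makes the behaviour easy to check against a deliberately over-reported size (uint16_t stands in for efi_char16_t):

    #include <stdint.h>
    #include <stdio.h>

    /* Same logic as the kernel helper: size in bytes including the NUL,
     * or the reported size if no NUL is found within it. */
    static unsigned long name_strnsize(const uint16_t *name, unsigned long reported)
    {
        unsigned long len;

        for (len = 2; len <= reported; len += sizeof(uint16_t))
            if (!name[(len / sizeof(uint16_t)) - 1])
                break;
        return len < reported ? len : reported;
    }

    int main(void)
    {
        /* "Boot" plus NUL is 10 bytes, but pretend the firmware reported 32. */
        uint16_t name[16] = { 'B', 'o', 'o', 't', 0 };

        printf("%lu\n", name_strnsize(name, 32));   /* prints 10 */
        return 0;
    }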
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch)
@@ -0,0 +1,49 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: efivars: pstore: Do not check size when erasing variable
+Date: Sat, 23 Mar 2013 03:49:53 +0000
+
+In 3.2, unlike mainline, efi_pstore_erase() calls efi_pstore_write()
+with a size of 0, as the underlying EFI interface treats a size of 0
+as meaning deletion.
+
+This was not taken into account in my backport of commit d80a361d779a
+'efi_pstore: Check remaining space with QueryVariableInfo() before
+writing data'. The size check should be omitted when erasing.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -788,19 +788,21 @@ static int efi_pstore_write(enum pstore_
+
+ spin_lock_irqsave(&efivars->lock, flags);
+
+- /*
+- * Check if there is a space enough to log.
+- * size: a size of logging data
+- * DUMP_NAME_LEN * 2: a maximum size of variable name
+- */
++ if (size) {
++ /*
++ * Check if there is a space enough to log.
++ * size: a size of logging data
++ * DUMP_NAME_LEN * 2: a maximum size of variable name
++ */
+
+- status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
+- size + DUMP_NAME_LEN * 2);
++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
++ size + DUMP_NAME_LEN * 2);
+
+- if (status) {
+- spin_unlock_irqrestore(&efivars->lock, flags);
+- *id = part;
+- return -ENOSPC;
++ if (status) {
++ spin_unlock_irqrestore(&efivars->lock, flags);
++ *id = part;
++ return -ENOSPC;
++ }
+ }
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/i915-initialize-CADL-in-opregion.patch)
@@ -0,0 +1,66 @@
+From: Lekensteyn <lekensteyn at gmail.com>
+Date: Tue, 26 Jun 2012 00:36:24 +0200
+Subject: i915: initialize CADL in opregion
+
+commit d627b62ff8d4d36761adbcd90ff143d79c94ab22 upstream.
+
+This is rather a hack to fix brightness hotkeys on a Clevo laptop. CADL is not
+used anywhere in the driver code at the moment, but it could be used in BIOS as
+is the case with the Clevo laptop.
+
+The Clevo B7130 requires the CADL field to contain at least the ID of
+the LCD device. If this field is empty, the ACPI methods that are called
+on pressing brightness / display switching hotkeys will not trigger a
+notification. As a result, it appears as no hotkey has been pressed.
+
+Reference: https://bugs.freedesktop.org/show_bug.cgi?id=45452
+Tested-by: Peter Wu <lekensteyn at gmail.com>
+Signed-off-by: Peter Wu <lekensteyn at gmail.com>
+Acked-by: Jesse Barnes <jbarnes at virtuousgeek.org>
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+---
+ drivers/gpu/drm/i915/intel_opregion.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index 18bd0af..e27c170 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -427,6 +427,25 @@ blind_set:
+ goto end;
+ }
+
++static void intel_setup_cadls(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ int i = 0;
++ u32 disp_id;
++
++ /* Initialize the CADL field by duplicating the DIDL values.
++ * Technically, this is not always correct as display outputs may exist,
++ * but not active. This initialization is necessary for some Clevo
++ * laptops that check this field before processing the brightness and
++ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
++ * there are less than eight devices. */
++ do {
++ disp_id = ioread32(&opregion->acpi->didl[i]);
++ iowrite32(disp_id, &opregion->acpi->cadl[i]);
++ } while (++i < 8 && disp_id != 0);
++}
++
+ void intel_opregion_init(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
+ return;
+
+ if (opregion->acpi) {
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_didl_outputs(dev);
++ intel_setup_cadls(dev);
++ }
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/isofs-avoid-info-leak-on-export.patch)
@@ -0,0 +1,26 @@
+From: Mathias Krause <minipli at googlemail.com>
+Date: Thu, 12 Jul 2012 08:46:54 +0200
+Subject: isofs: avoid info leak on export
+
+commit fe685aabf7c8c9f138e5ea900954d295bf229175 upstream.
+
+For type 1 the parent_offset member in struct isofs_fid gets copied
+uninitialized to userland. Fix this by initializing it to 0.
+
+Signed-off-by: Mathias Krause <minipli at googlemail.com>
+Signed-off-by: Jan Kara <jack at suse.cz>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ fs/isofs/export.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -135,6 +135,7 @@ isofs_export_encode_fh(struct dentry *de
+ len = 3;
+ fh32[0] = ei->i_iget5_block;
+ fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
++ fh16[3] = 0; /* avoid leaking uninitialized data */
+ fh32[2] = inode->i_generation;
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch)
@@ -0,0 +1,36 @@
+From: Andrew Morton <akpm at linux-foundation.org>
+Date: Wed, 13 Mar 2013 14:59:34 -0700
+Subject: kernel/signal.c: use __ARCH_HAS_SA_RESTORER instead of SA_RESTORER
+
+commit 522cff142d7d2f9230839c9e1f21a4d8bcc22a4a upstream.
+
+__ARCH_HAS_SA_RESTORER is the preferred conditional for use in 3.9 and
+later kernels, per Kees.
+
+Cc: Emese Revfy <re.emese at gmail.com>
+Cc: Emese Revfy <re.emese at gmail.com>
+Cc: PaX Team <pageexec at freemail.hu>
+Cc: Al Viro <viro at zeniv.linux.org.uk>
+Cc: Oleg Nesterov <oleg at redhat.com>
+Cc: "Eric W. Biederman" <ebiederm at xmission.com>
+Cc: Serge Hallyn <serge.hallyn at canonical.com>
+Cc: Julien Tinnes <jln at google.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ kernel/signal.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 43b0d4a..dd72567 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -485,7 +485,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ if (force_default || ka->sa.sa_handler != SIG_IGN)
+ ka->sa.sa_handler = SIG_DFL;
+ ka->sa.sa_flags = 0;
+-#ifdef SA_RESTORER
++#ifdef __ARCH_HAS_SA_RESTORER
+ ka->sa.sa_restorer = NULL;
+ #endif
+ sigemptyset(&ka->sa.sa_mask);
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch)
@@ -0,0 +1,100 @@
+From: WANG Cong <xiyou.wangcong at gmail.com>
+Date: Thu, 12 Jan 2012 17:20:11 -0800
+Subject: kexec: remove KMSG_DUMP_KEXEC
+
+commit a3dd3323058d281abd584b15ad4c5b65064d7a61 upstream.
+
+KMSG_DUMP_KEXEC is useless because we already save kernel messages inside
+/proc/vmcore, and it is unsafe to allow modules to do other stuffs in a
+crash dump scenario.
+
+[akpm at linux-foundation.org: fix powerpc build]
+Signed-off-by: WANG Cong <xiyou.wangcong at gmail.com>
+Reported-by: Vivek Goyal <vgoyal at redhat.com>
+Acked-by: Vivek Goyal <vgoyal at redhat.com>
+Acked-by: Jarod Wilson <jarod at redhat.com>
+Cc: "Eric W. Biederman" <ebiederm at xmission.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro at jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ arch/powerpc/platforms/pseries/nvram.c | 1 -
+ drivers/char/ramoops.c | 3 +--
+ drivers/mtd/mtdoops.c | 3 +--
+ include/linux/kmsg_dump.h | 1 -
+ kernel/kexec.c | 3 ---
+ 5 files changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
+index 330a57b..36f957f 100644
+--- a/arch/powerpc/platforms/pseries/nvram.c
++++ b/arch/powerpc/platforms/pseries/nvram.c
+@@ -638,7 +638,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+- case KMSG_DUMP_KEXEC:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
+index 7c7f42a1f8..feda90c 100644
+--- a/drivers/char/ramoops.c
++++ b/drivers/char/ramoops.c
+@@ -83,8 +83,7 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
+ struct timeval timestamp;
+
+ if (reason != KMSG_DUMP_OOPS &&
+- reason != KMSG_DUMP_PANIC &&
+- reason != KMSG_DUMP_KEXEC)
++ reason != KMSG_DUMP_PANIC)
+ return;
+
+ /* Only dump oopses if dump_oops is set */
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index db8e827..3ce99e0 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -315,8 +315,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ char *dst;
+
+ if (reason != KMSG_DUMP_OOPS &&
+- reason != KMSG_DUMP_PANIC &&
+- reason != KMSG_DUMP_KEXEC)
++ reason != KMSG_DUMP_PANIC)
+ return;
+
+ /* Only dump oopses if dump_oops is set */
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index ee0c952..fee6631 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -18,7 +18,6 @@
+ enum kmsg_dump_reason {
+ KMSG_DUMP_OOPS,
+ KMSG_DUMP_PANIC,
+- KMSG_DUMP_KEXEC,
+ KMSG_DUMP_RESTART,
+ KMSG_DUMP_HALT,
+ KMSG_DUMP_POWEROFF,
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 090ee10..20ed47a 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -32,7 +32,6 @@
+ #include <linux/console.h>
+ #include <linux/vmalloc.h>
+ #include <linux/swap.h>
+-#include <linux/kmsg_dump.h>
+ #include <linux/syscore_ops.h>
+
+ #include <asm/page.h>
+@@ -1094,8 +1093,6 @@ void crash_kexec(struct pt_regs *regs)
+ if (kexec_crash_image) {
+ struct pt_regs fixed_regs;
+
+- kmsg_dump(KMSG_DUMP_KEXEC);
+-
+ crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
+ machine_crash_shutdown(&fixed_regs);
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch)
@@ -0,0 +1,105 @@
+From: Matthew Garrett <mjg at redhat.com>
+Date: Mon, 5 Mar 2012 14:59:10 -0800
+Subject: kmsg_dump: don't run on non-error paths by default
+
+commit c22ab332902333f83766017478c1ef6607ace681 upstream.
+
+Since commit 04c6862c055f ("kmsg_dump: add kmsg_dump() calls to the
+reboot, halt, poweroff and emergency_restart paths"), kmsg_dump() gets
+run on normal paths including poweroff and reboot.
+
+This is less than ideal given pstore implementations that can only
+represent single backtraces, since a reboot may overwrite a stored oops
+before it's been picked up by userspace. In addition, some pstore
+backends may have low performance and provide a significant delay in
+reboot as a result.
+
+This patch adds a printk.always_kmsg_dump kernel parameter (which can also
+be changed from userspace). Without it, the code will only be run on
+failure paths rather than on normal paths. The option can be enabled in
+environments where there's a desire to attempt to audit whether or not a
+reboot was cleanly requested.
+
+Signed-off-by: Matthew Garrett <mjg at redhat.com>
+Acked-by: Seiji Aguchi <seiji.aguchi at hds.com>
+Cc: Seiji Aguchi <seiji.aguchi at hds.com>
+Cc: David Woodhouse <dwmw2 at infradead.org>
+Cc: Marco Stornelli <marco.stornelli at gmail.com>
+Cc: Artem Bityutskiy <Artem.Bityutskiy at nokia.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro at jp.fujitsu.com>
+Cc: Vivek Goyal <vgoyal at redhat.com>
+Cc: Don Zickus <dzickus at redhat.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ Documentation/kernel-parameters.txt | 6 ++++++
+ include/linux/kmsg_dump.h | 9 +++++++--
+ kernel/printk.c | 6 ++++++
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 033d4e6..d99fd9c 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ default: off.
+
++ printk.always_kmsg_dump=
++ Trigger kmsg_dump for cases other than kernel oops or
++ panics
++ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
++ default: disabled
++
+ printk.time= Show timing data prefixed to each printk message line
+ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
+
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index fee6631..35f7237 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -15,13 +15,18 @@
+ #include <linux/errno.h>
+ #include <linux/list.h>
+
++/*
++ * Keep this list arranged in rough order of priority. Anything listed after
++ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
++ * is passed to the kernel.
++ */
+ enum kmsg_dump_reason {
+- KMSG_DUMP_OOPS,
+ KMSG_DUMP_PANIC,
++ KMSG_DUMP_OOPS,
++ KMSG_DUMP_EMERG,
+ KMSG_DUMP_RESTART,
+ KMSG_DUMP_HALT,
+ KMSG_DUMP_POWEROFF,
+- KMSG_DUMP_EMERG,
+ };
+
+ /**
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 13c0a11..32690a0 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -702,6 +702,9 @@ static bool printk_time = 0;
+ #endif
+ module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
+
++static bool always_kmsg_dump;
++module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
++
+ /* Check if we have any console registered that can be called early in boot. */
+ static int have_callable_console(void)
+ {
+@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ unsigned long l1, l2;
+ unsigned long flags;
+
++ if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
++ return;
++
+ /* Theoretically, the log could move on after we do this, but
+ there's not a lot we can do about that. The new messages
+ will overwrite the start of what we dump. */
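Two details make the gating cheap: the reason enum is now ordered by priority, and a writable bool parameter can override the filter, so the whole policy is a single comparison. A standalone sketch with stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Ordered by priority: anything after OOPS is a "normal path" reason. */
    enum dump_reason { DUMP_PANIC, DUMP_OOPS, DUMP_EMERG,
                       DUMP_RESTART, DUMP_HALT, DUMP_POWEROFF };

    static bool always_dump;    /* the printk.always_kmsg_dump equivalent */

    static void maybe_dump(enum dump_reason reason)
    {
        if (reason > DUMP_OOPS && !always_dump)
            return;     /* skip clean reboot/halt/poweroff by default */
        printf("dumping for reason %d\n", reason);
    }

    int main(void)
    {
        maybe_dump(DUMP_OOPS);      /* dumped */
        maybe_dump(DUMP_RESTART);   /* skipped unless always_dump is set */
        always_dump = true;
        maybe_dump(DUMP_RESTART);   /* now dumped */
        return 0;
    }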
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch)
@@ -0,0 +1,67 @@
+From: Cong Wang <amwang at redhat.com>
+Date: Sun, 3 Mar 2013 16:18:11 +0000
+Subject: rds: limit the size allocated by rds_message_alloc()
+
+[ Upstream commit ece6b0a2b25652d684a7ced4ae680a863af041e0 ]
+
+Dave Jones reported the following bug:
+
+"When fed mangled socket data, rds will trust what userspace gives it,
+and tries to allocate enormous amounts of memory larger than what
+kmalloc can satisfy."
+
+WARNING: at mm/page_alloc.c:2393 __alloc_pages_nodemask+0xa0d/0xbe0()
+Hardware name: GA-MA78GM-S2H
+Modules linked in: vmw_vsock_vmci_transport vmw_vmci vsock fuse bnep dlci bridge 8021q garp stp mrp binfmt_misc l2tp_ppp l2tp_core rfcomm s
+Pid: 24652, comm: trinity-child2 Not tainted 3.8.0+ #65
+Call Trace:
+ [<ffffffff81044155>] warn_slowpath_common+0x75/0xa0
+ [<ffffffff8104419a>] warn_slowpath_null+0x1a/0x20
+ [<ffffffff811444ad>] __alloc_pages_nodemask+0xa0d/0xbe0
+ [<ffffffff8100a196>] ? native_sched_clock+0x26/0x90
+ [<ffffffff810b2128>] ? trace_hardirqs_off_caller+0x28/0xc0
+ [<ffffffff810b21cd>] ? trace_hardirqs_off+0xd/0x10
+ [<ffffffff811861f8>] alloc_pages_current+0xb8/0x180
+ [<ffffffff8113eaaa>] __get_free_pages+0x2a/0x80
+ [<ffffffff811934fe>] kmalloc_order_trace+0x3e/0x1a0
+ [<ffffffff81193955>] __kmalloc+0x2f5/0x3a0
+ [<ffffffff8104df0c>] ? local_bh_enable_ip+0x7c/0xf0
+ [<ffffffffa0401ab3>] rds_message_alloc+0x23/0xb0 [rds]
+ [<ffffffffa04043a1>] rds_sendmsg+0x2b1/0x990 [rds]
+ [<ffffffff810b21cd>] ? trace_hardirqs_off+0xd/0x10
+ [<ffffffff81564620>] sock_sendmsg+0xb0/0xe0
+ [<ffffffff810b2052>] ? get_lock_stats+0x22/0x70
+ [<ffffffff810b24be>] ? put_lock_stats.isra.23+0xe/0x40
+ [<ffffffff81567f30>] sys_sendto+0x130/0x180
+ [<ffffffff810b872d>] ? trace_hardirqs_on+0xd/0x10
+ [<ffffffff816c547b>] ? _raw_spin_unlock_irq+0x3b/0x60
+ [<ffffffff816cd767>] ? sysret_check+0x1b/0x56
+ [<ffffffff810b8695>] ? trace_hardirqs_on_caller+0x115/0x1a0
+ [<ffffffff81341d8e>] ? trace_hardirqs_on_thunk+0x3a/0x3f
+ [<ffffffff816cd742>] system_call_fastpath+0x16/0x1b
+---[ end trace eed6ae990d018c8b ]---
+
+Reported-by: Dave Jones <davej at redhat.com>
+Cc: Dave Jones <davej at redhat.com>
+Cc: David S. Miller <davem at davemloft.net>
+Cc: Venkat Venkatsubra <venkat.x.venkatsubra at oracle.com>
+Signed-off-by: Cong Wang <amwang at redhat.com>
+Acked-by: Venkat Venkatsubra <venkat.x.venkatsubra at oracle.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ net/rds/message.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -197,6 +197,9 @@ struct rds_message *rds_message_alloc(un
+ {
+ struct rds_message *rm;
+
++ if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
++ return NULL;
++
+ rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
+ if (!rm)
+ goto out;
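The added check is the usual way to cap a user-controlled length before it feeds an allocation: reject anything that, once the header is added, would exceed what kmalloc() can provide, which also keeps the addition from wrapping. A reduced, hypothetical version of such an allocator:

    #include <linux/slab.h>

    struct demo_msg {
        unsigned long len;
        /* payload follows */
    };

    /* extra_len comes straight from userspace and must be validated. */
    static struct demo_msg *demo_msg_alloc(size_t extra_len, gfp_t gfp)
    {
        struct demo_msg *m;

        if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct demo_msg))
            return NULL;    /* too big for kmalloc; the sum below cannot wrap */

        m = kzalloc(sizeof(struct demo_msg) + extra_len, gfp);
        if (m)
            m->len = extra_len;
        return m;
    }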
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch)
@@ -0,0 +1,29 @@
+From: Mathias Krause <minipli at googlemail.com>
+Date: Sat, 9 Mar 2013 05:52:20 +0000
+Subject: rtnl: fix info leak on RTM_GETLINK request for VF devices
+
+[ Upstream commit 84d73cd3fb142bf1298a8c13fd4ca50fd2432372 ]
+
+Initialize the mac address buffer with 0 as the driver specific function
+will probably not fill the whole buffer. In fact, all in-kernel drivers
+fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
+bytes. Therefore we currently leak 26 bytes of stack memory to userland
+via the netlink interface.
+
+Signed-off-by: Mathias Krause <minipli at googlemail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ net/core/rtnetlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -973,6 +973,7 @@ static int rtnl_fill_ifinfo(struct sk_bu
+ * report anything.
+ */
+ ivi.spoofchk = -1;
++ memset(ivi.mac, 0, sizeof(ivi.mac));
+ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
+ break;
+ vf_mac.vf =
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch)
@@ -0,0 +1,148 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sun, 25 Nov 2012 22:24:19 -0500
+Subject: signal: Fix use of missing sa_restorer field
+
+flush_signal_handlers() needs to know whether sigaction::sa_restorer
+is defined, not whether SA_RESTORER is defined. Define the
+__ARCH_HAS_SA_RESTORER macro to indicate this.
+
+Vaguely based on upstream commit 574c4866e33d 'consolidate kernel-side
+struct sigaction declarations'.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Cc: Al Viro <viro at zeniv.linux.org.uk>
+---
+--- a/arch/arm/include/asm/signal.h
++++ b/arch/arm/include/asm/signal.h
+@@ -127,6 +127,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/avr32/include/asm/signal.h
++++ b/arch/avr32/include/asm/signal.h
+@@ -128,6 +128,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/cris/include/asm/signal.h
++++ b/arch/cris/include/asm/signal.h
+@@ -122,6 +122,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/h8300/include/asm/signal.h
++++ b/arch/h8300/include/asm/signal.h
+@@ -121,6 +121,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/m32r/include/asm/signal.h
++++ b/arch/m32r/include/asm/signal.h
+@@ -123,6 +123,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -119,6 +119,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/mn10300/include/asm/signal.h
++++ b/arch/mn10300/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/powerpc/include/asm/signal.h
++++ b/arch/powerpc/include/asm/signal.h
+@@ -109,6 +109,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/s390/include/asm/signal.h
++++ b/arch/s390/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/arch/sparc/include/asm/signal.h
++++ b/arch/sparc/include/asm/signal.h
+@@ -191,6 +191,7 @@ struct __old_sigaction {
+ unsigned long sa_flags;
+ void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ typedef struct sigaltstack {
+ void __user *ss_sp;
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
+ extern void do_notify_resume(struct pt_regs *, void *, __u32);
+ # endif /* __KERNEL__ */
+
++#define __ARCH_HAS_SA_RESTORER
++
+ #ifdef __i386__
+ # ifdef __KERNEL__
+ struct old_sigaction {
+--- a/arch/xtensa/include/asm/signal.h
++++ b/arch/xtensa/include/asm/signal.h
+@@ -133,6 +133,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+--- a/include/asm-generic/signal.h
++++ b/include/asm-generic/signal.h
+@@ -99,6 +99,10 @@ typedef unsigned long old_sigset_t;
+
+ #include <asm-generic/signal-defs.h>
+
++#ifdef SA_RESTORER
++#define __ARCH_HAS_SA_RESTORER
++#endif
++
+ struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/udf-avoid-info-leak-on-export.patch)
@@ -0,0 +1,26 @@
+From: Mathias Krause <minipli at googlemail.com>
+Date: Thu, 12 Jul 2012 08:46:55 +0200
+Subject: udf: avoid info leak on export
+
+commit 0143fc5e9f6f5aad4764801015bc8d4b4a278200 upstream.
+
+For type 0x51 the udf.parent_partref member in struct fid gets copied
+uninitialized to userland. Fix this by initializing it to 0.
+
+Signed-off-by: Mathias Krause <minipli at googlemail.com>
+Signed-off-by: Jan Kara <jack at suse.cz>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ fs/udf/namei.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1293,6 +1293,7 @@ static int udf_encode_fh(struct dentry *
+ *lenp = 3;
+ fid->udf.block = location.logicalBlockNum;
+ fid->udf.partref = location.partitionReferenceNum;
++ fid->udf.parent_partref = 0;
+ fid->udf.generation = inode->i_generation;
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch (from r19949, dists/sid/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch)
@@ -0,0 +1,31 @@
+From: "Michael S. Tsirkin" <mst at redhat.com>
+Date: Sun, 17 Mar 2013 02:46:09 +0000
+Subject: vhost/net: fix heads usage of ubuf_info
+
+commit 46aa92d1ba162b4b3d6b7102440e459d4e4ee255 upstream.
+
+ubuf info allocator uses guest controlled head as an index,
+so a malicious guest could put the same head entry in the ring twice,
+and we will get two callbacks on the same value.
+To fix this, use upend_idx, which is guaranteed to be unique.
+
+Reported-by: Rusty Russell <rusty at rustcorp.com.au>
+Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ drivers/vhost/net.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -234,7 +234,8 @@ static void handle_tx(struct vhost_net *
+ msg.msg_controllen = 0;
+ ubufs = NULL;
+ } else {
+- struct ubuf_info *ubuf = &vq->ubuf_info[head];
++ struct ubuf_info *ubuf;
++ ubuf = vq->ubuf_info + vq->upend_idx;
+
+ vq->heads[vq->upend_idx].len = len;
+ ubuf->callback = vhost_zerocopy_callback;
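The underlying rule is not to index host-side bookkeeping with a value the guest controls, since the guest can repeat it; instead, use a counter only the host advances. A minimal sketch of the difference (hypothetical ring, not the vhost structures):

    #define RING_SIZE 256

    struct cb_slot { int in_use; };

    static struct cb_slot slots[RING_SIZE];
    static unsigned int upend_idx;  /* advanced only by the host */

    /* BAD: 'head' is guest-controlled; two requests with the same head
     * would hand out the same slot twice. */
    static struct cb_slot *get_slot_bad(unsigned int head)
    {
        return &slots[head % RING_SIZE];
    }

    /* GOOD: index with a host-owned counter, unique per in-flight request. */
    static struct cb_slot *get_slot(void)
    {
        struct cb_slot *s = &slots[upend_idx];

        upend_idx = (upend_idx + 1) % RING_SIZE;
        return s;
    }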
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch (from r19949, dists/sid/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch)
@@ -0,0 +1,29 @@
+From: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Date: Wed, 13 Mar 2013 09:55:02 +1100
+Subject: powerpc: Fix cputable entry for 970MP rev 1.0
+
+commit d63ac5f6cf31c8a83170a9509b350c1489a7262b upstream.
+
+Commit 44ae3ab3358e962039c36ad4ae461ae9fb29596c forgot to update
+the entry for the 970MP rev 1.0 processor when moving some CPU
+features bits to the MMU feature bit mask. This breaks booting
+on some rare G5 models using that chip revision.
+
+Reported-by: Phileas Fogg <phileas-fogg at mail.ru>
+Signed-off-by: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ arch/powerpc/kernel/cputable.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -268,7 +268,7 @@ static struct cpu_spec __initdata cpu_sp
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+- .mmu_features = MMU_FTR_HPTE_TABLE,
++ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch (from r19949, dists/sid/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch)
@@ -0,0 +1,40 @@
+From: Heiko Carstens <heiko.carstens at de.ibm.com>
+Date: Mon, 4 Mar 2013 14:14:11 +0100
+Subject: s390/mm: fix flush_tlb_kernel_range()
+
+commit f6a70a07079518280022286a1dceb797d12e1edf upstream.
+
+Our flush_tlb_kernel_range() implementation calls __tlb_flush_mm() with
+&init_mm as argument. __tlb_flush_mm(), however, will only flush TLBs
+for the passed-in mm if its mm_cpumask is not empty.
+
+For the init_mm, however, the mm_cpumask never has any bits set, which
+in turn means that our flush_tlb_kernel_range() implementation doesn't
+work at all.
+
+This can be easily verified with a vmalloc/vfree loop which allocates
+a page, writes to it and then frees the page again. A crash will follow
+almost instantly.
+
+To fix this, remove the cpumask_empty() check in __tlb_flush_mm(), since
+there shouldn't be too many mms with a zero mm_cpumask besides the
+init_mm, of course.
+
+Signed-off-by: Heiko Carstens <heiko.carstens at de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky at de.ibm.com>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ arch/s390/include/asm/tlbflush.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsi
+
+ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ {
+- if (unlikely(cpumask_empty(mm_cpumask(mm))))
+- return;
+ /*
+ * If the machine has IDTE we prefer to do a per mm flush
+ * on all cpus instead of doing a local flush if the mm
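
The vmalloc/vfree loop mentioned in the description is easy to sketch as a throwaway test module. The following is only an approximation of such a reproducer, not the author's actual test; on an affected s390 kernel the stale kernel-space translations left behind by the broken flush_tlb_kernel_range() make this crash almost immediately.

/* Sketch of the reproducer described in the commit message above
 * (illustrative; not the author's actual test).  Each iteration maps a
 * fresh page, writes to it and frees it again; vfree() relies on
 * flush_tlb_kernel_range() to drop the stale translation. */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>

static int __init vmalloc_loop_init(void)
{
	unsigned int i;

	for (i = 0; i < 100000; i++) {
		char *p = vmalloc(PAGE_SIZE);

		if (!p)
			return -ENOMEM;
		memset(p, 0xaa, PAGE_SIZE);
		vfree(p);
	}
	return 0;
}

static void __exit vmalloc_loop_exit(void)
{
}

module_init(vmalloc_loop_init);
module_exit(vmalloc_loop_exit);
MODULE_LICENSE("GPL");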
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch (from r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch)
@@ -0,0 +1,134 @@
+From: Andy Honig <ahonig at google.com>
+Date: Wed, 20 Feb 2013 14:48:10 -0800
+Subject: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache
+ functions (CVE-2013-1797)
+
+commit 0b79459b482e85cb7426aa7da683a9f2c97aeae1 upstream.
+
+There is a potential use-after-free issue with the handling of
+MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in movable or removable
+memory, such as frame buffers, then KVM might continue to write to that
+address even after it's removed via KVM_SET_USER_MEMORY_REGION. KVM pins
+the page in memory, so it's unlikely to cause an issue, but if the user
+space component re-purposes the memory previously used for the guest, then
+the guest will be able to corrupt that memory.
+
+Tested: Tested against kvmclock unit test
+
+Signed-off-by: Andrew Honig <ahonig at google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - We do not implement the PVCLOCK_GUEST_STOPPED flag]
+---
+ arch/x86/include/asm/kvm_host.h | 4 ++--
+ arch/x86/kvm/x86.c | 47 +++++++++++++++++----------------------
+ 2 files changed, 22 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -393,8 +393,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+
+ struct {
+ u64 msr_val;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1105,7 +1105,6 @@ static int kvm_guest_time_update(struct
+ {
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+- void *shared_kaddr;
+ unsigned long this_tsc_khz;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;
+@@ -1141,7 +1140,7 @@ static int kvm_guest_time_update(struct
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1199,14 +1198,9 @@ static int kvm_guest_time_update(struct
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+-
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr, KM_USER0);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1496,10 +1490,7 @@ static int kvm_pv_enable_async_pf(struct
+
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1591,6 +1582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1600,21 +1592,17 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+ /* Check that the address is 32-byte aligned. */
+- if (vcpu->arch.time_offset &
+- (sizeof(struct pvclock_vcpu_time_info) - 1))
++ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+-
+- if (is_error_page(vcpu->arch.time_page)) {
+- kvm_release_page_clean(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+ break;
+ }
+ case MSR_KVM_ASYNC_PF_EN:
+@@ -6559,6 +6547,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+ goto fail_free_mce_banks;
+
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+
+ return 0;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch (from r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch)
@@ -0,0 +1,39 @@
+From: Andy Honig <ahonig at google.com>
+Date: Mon, 11 Mar 2013 09:34:52 -0700
+Subject: KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME
+ (CVE-2013-1796)
+
+commit c300aa64ddf57d9c5d9c898a64b36877345dd4a9 upstream.
+
+If the guest sets the GPA of the time_page so that the request to update the
+time straddles a page, then KVM will write onto an incorrect page. The
+write is done by using kmap_atomic to get a pointer to the page for the time
+structure and then performing a memcpy to that page, starting at an offset
+that the guest controls. Well-behaved guests always provide a 32-byte-aligned
+address; however, a malicious guest could use this to corrupt host kernel
+memory.
+
+Tested: Tested against kvmclock unit test.
+
+Signed-off-by: Andrew Honig <ahonig at google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti at redhat.com>
+---
+ arch/x86/kvm/x86.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f7c850b..2ade60c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1959,6 +1959,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ /* ...but clean it before doing the actual write */
+ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
++ /* Check that the address is 32-byte aligned. */
++ if (vcpu->arch.time_offset &
++ (sizeof(struct pvclock_vcpu_time_info) - 1))
++ break;
++
+ vcpu->arch.time_page =
+ gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+
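
A small worked example of the straddling case that the added check rejects (ordinary userspace C; it assumes 4 KiB pages and a 32-byte pvclock_vcpu_time_info as on x86, and the guest MSR value is made up):

/* Worked example of the straddling case the added check rejects.
 * Assumes 4 KiB pages and sizeof(struct pvclock_vcpu_time_info) == 32;
 * the guest-written MSR value below is arbitrary. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PVCLOCK_SIZE	32UL

int main(void)
{
	unsigned long data = 0x12345ff1UL;		/* GPA | enable bit, as written by the guest */
	unsigned long offset = data & ~(PAGE_MASK | 1);	/* 0xff0: offset within the page */

	printf("offset        = %#lx\n", offset);
	printf("write ends at = %#lx (page ends at %#lx)\n",
	       offset + PVCLOCK_SIZE, PAGE_SIZE);	/* 0x1010 > 0x1000: straddles */
	printf("32-byte aligned: %s\n",
	       (offset & (PVCLOCK_SIZE - 1)) ? "no (rejected by the check)" : "yes");
	return 0;
}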
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch (from r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch)
@@ -0,0 +1,59 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Tue, 24 Apr 2012 22:59:41 +0100
+Subject: drm/i915: Unconditionally initialise the interrupt workers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 8b2e326dc7c5aa6952c88656d04d0d81fd85a6f8 upstream.
+
+Rather than duplicate similar code across the IRQ installers, perform
+the initialisation of the workers upfront. This will lead to simpler
+teardown and quiescent code as we can assume that the workers have
+been initialised.
+
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Reviewed-by: Jesse Barnes <jbarnes at virtuousgeek.org>
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+[bmork: deleted valleyview hunk for 3.2 backport]
+Signed-off-by: Bjørn Mork <bjorn at mork.no>
+---
+ drivers/gpu/drm/i915/i915_irq.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1806,10 +1806,6 @@ static void ironlake_irq_preinstall(stru
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+- if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+@@ -1983,9 +1979,6 @@ static void i915_driver_irq_preinstall(s
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+-
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+@@ -2290,6 +2283,12 @@ static void i8xx_irq_uninstall(struct dr
+
+ void intel_irq_init(struct drm_device *dev)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
++ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
++ INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
++
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch (from r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch)
@@ -0,0 +1,49 @@
+From: Kees Cook <keescook at chromium.org>
+Date: Mon, 11 Mar 2013 17:31:45 -0700
+Subject: drm/i915: bounds check execbuffer relocation count
+
+commit 3118a4f652c7b12c752f3222af0447008f9b2368 upstream.
+
+It is possible to wrap the counter used to allocate the buffer for
+relocation copies. This could lead to heap writing overflows.
+
+CVE-2013-0913
+
+v3: collapse test, improve comment
+v2: move check into validate_exec_list
+
+Signed-off-by: Kees Cook <keescook at chromium.org>
+Reported-by: Pinkie Pie
+Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -907,15 +907,20 @@ validate_exec_list(struct drm_i915_gem_e
+ int count)
+ {
+ int i;
++ int relocs_total = 0;
++ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+- /* First check for malicious input causing overflow */
+- if (exec[i].relocation_count >
+- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
++ /* First check for malicious input causing overflow in
++ * the worst case where we need to allocate the entire
++ * relocation tree as a single array.
++ */
++ if (exec[i].relocation_count > relocs_max - relocs_total)
+ return -EINVAL;
++ relocs_total += exec[i].relocation_count;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
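
A stripped-down sketch of the arithmetic being guarded against (plain C with stand-in types and entry size, not the i915 code): every individual relocation_count can pass a per-entry limit, yet the running total multiplied by the entry size can still exceed INT_MAX, which is exactly what the remaining-headroom comparison above prevents.

/* Toy version of the overflow the patch guards against.  Types and
 * the entry size are stand-ins, not the real i915 definitions. */
#include <limits.h>
#include <stdio.h>

#define ENTRY_SIZE 32u	/* pretend sizeof(drm_i915_gem_relocation_entry) */

struct fake_exec { unsigned int relocation_count; };

static int validate(const struct fake_exec *exec, int count)
{
	int relocs_total = 0;
	int relocs_max = INT_MAX / ENTRY_SIZE;
	int i;

	for (i = 0; i < count; i++) {
		/* A per-entry "count > relocs_max" test passes here, but the
		 * running total could still wrap once multiplied by the
		 * entry size.  Checking against the remaining headroom
		 * keeps the sum bounded. */
		if (exec[i].relocation_count > (unsigned int)(relocs_max - relocs_total))
			return -1;
		relocs_total += exec[i].relocation_count;
	}
	return 0;
}

int main(void)
{
	struct fake_exec exec[3] = {
		{ 40000000u }, { 40000000u }, { 40000000u }
	};

	/* Each entry is below relocs_max (about 67 million), but the second
	 * one already exceeds the remaining headroom, so validation fails. */
	printf("validate() = %d\n", validate(exec, 3));
	return 0;
}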
Copied: dists/squeeze-backports/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch (from r19949, dists/sid/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/debian/dm-avoid-ABI-change-in-3.2.41.patch)
@@ -0,0 +1,264 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: dm: Avoid ABI change in 3.2.41
+
+Commit fd7c092e711e 'dm: fix truncated status strings', backported
+into 3.2.41, changed the return type of target_type::status from int
+to void. But we can just as easily leave it as int and ignore the
+returned value.
+
+Also, genksyms is too stupid to understand that 'unsigned' is the
+same thing as 'unsigned int'.
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1725,7 +1725,7 @@ static int crypt_map(struct dm_target *t
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static void crypt_status(struct dm_target *ti, status_type_t type,
++static int crypt_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct crypt_config *cc = ti->private;
+@@ -1753,6 +1753,7 @@ static void crypt_status(struct dm_targe
+
+ break;
+ }
++ return 0;
+ }
+
+ static void crypt_postsuspend(struct dm_target *ti)
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -293,7 +293,7 @@ static int delay_map(struct dm_target *t
+ return delay_bio(dc, dc->read_delay, bio);
+ }
+
+-static void delay_status(struct dm_target *ti, status_type_t type,
++static int delay_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct delay_c *dc = ti->private;
+@@ -314,6 +314,8 @@ static void delay_status(struct dm_targe
+ dc->write_delay);
+ break;
+ }
++
++ return 0;
+ }
+
+ static int delay_iterate_devices(struct dm_target *ti,
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -331,7 +331,7 @@ static int flakey_end_io(struct dm_targe
+ return error;
+ }
+
+-static void flakey_status(struct dm_target *ti, status_type_t type,
++static int flakey_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+@@ -362,6 +362,7 @@ static void flakey_status(struct dm_targ
+
+ break;
+ }
++ return 0;
+ }
+
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -94,7 +94,7 @@ static int linear_map(struct dm_target *
+ return DM_MAPIO_REMAPPED;
+ }
+
+-static void linear_status(struct dm_target *ti, status_type_t type,
++static int linear_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct linear_c *lc = (struct linear_c *) ti->private;
+@@ -109,6 +109,7 @@ static void linear_status(struct dm_targ
+ (unsigned long long)lc->start);
+ break;
+ }
++ return 0;
+ }
+
+ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1323,7 +1323,7 @@ static void multipath_resume(struct dm_t
+ * [priority selector-name num_ps_args [ps_args]*
+ * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
+ */
+-static void multipath_status(struct dm_target *ti, status_type_t type,
++static int multipath_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ int sz = 0;
+@@ -1427,6 +1427,8 @@ static void multipath_status(struct dm_t
+ }
+
+ spin_unlock_irqrestore(&m->lock, flags);
++
++ return 0;
+ }
+
+ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1017,7 +1017,7 @@ static int raid_map(struct dm_target *ti
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static void raid_status(struct dm_target *ti, status_type_t type,
++static int raid_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct raid_set *rs = ti->private;
+@@ -1153,6 +1153,8 @@ static void raid_status(struct dm_target
+ DMEMIT(" -");
+ }
+ }
++
++ return 0;
+ }
+
+ static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1358,7 +1358,7 @@ static char device_status_char(struct mi
+ }
+
+
+-static void mirror_status(struct dm_target *ti, status_type_t type,
++static int mirror_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ unsigned int m, sz = 0;
+@@ -1394,6 +1394,8 @@ static void mirror_status(struct dm_targ
+ if (ms->features & DM_RAID1_HANDLE_ERRORS)
+ DMEMIT(" 1 handle_errors");
+ }
++
++ return 0;
+ }
+
+ static int mirror_iterate_devices(struct dm_target *ti,
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1845,7 +1845,7 @@ static void snapshot_merge_resume(struct
+ start_merge(s);
+ }
+
+-static void snapshot_status(struct dm_target *ti, status_type_t type,
++static int snapshot_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+@@ -1892,6 +1892,8 @@ static void snapshot_status(struct dm_ta
+ maxlen - sz);
+ break;
+ }
++
++ return 0;
+ }
+
+ static int snapshot_iterate_devices(struct dm_target *ti,
+@@ -2146,7 +2148,7 @@ static void origin_resume(struct dm_targ
+ ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+ }
+
+-static void origin_status(struct dm_target *ti, status_type_t type,
++static int origin_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct dm_dev *dev = ti->private;
+@@ -2160,6 +2162,8 @@ static void origin_status(struct dm_targ
+ snprintf(result, maxlen, "%s", dev->name);
+ break;
+ }
++
++ return 0;
+ }
+
+ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -301,7 +301,7 @@ static int stripe_map(struct dm_target *
+ *
+ */
+
+-static void stripe_status(struct dm_target *ti, status_type_t type,
++static int stripe_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
+@@ -329,6 +329,7 @@ static void stripe_status(struct dm_targ
+ (unsigned long long)sc->stripe[i].physical_start);
+ break;
+ }
++ return 0;
+ }
+
+ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2090,7 +2090,7 @@ static int pool_message(struct dm_target
+ * <transaction id> <used metadata sectors>/<total metadata sectors>
+ * <used data sectors>/<total data sectors> <held metadata root>
+ */
+-static void pool_status(struct dm_target *ti, status_type_t type,
++static int pool_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ int r;
+@@ -2171,10 +2171,11 @@ static void pool_status(struct dm_target
+ DMEMIT("skip_block_zeroing ");
+ break;
+ }
+- return;
++ return 0;
+
+ err:
+ DMEMIT("Error");
++ return 0;
+ }
+
+ static int pool_iterate_devices(struct dm_target *ti,
+@@ -2350,7 +2351,7 @@ static void thin_postsuspend(struct dm_t
+ /*
+ * <nr mapped sectors> <highest mapped sector>
+ */
+-static void thin_status(struct dm_target *ti, status_type_t type,
++static int thin_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+ {
+ int r;
+@@ -2392,10 +2393,11 @@ static void thin_status(struct dm_target
+ }
+ }
+
+- return;
++ return 0;
+
+ err:
+ DMEMIT("Error");
++ return 0;
+ }
+
+ static int thin_iterate_devices(struct dm_target *ti,
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -72,8 +72,8 @@ typedef void (*dm_postsuspend_fn) (struc
+ typedef int (*dm_preresume_fn) (struct dm_target *ti);
+ typedef void (*dm_resume_fn) (struct dm_target *ti);
+
+-typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+- char *result, unsigned maxlen);
++typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
++ char *result, unsigned int maxlen);
+
+ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+
Copied: dists/squeeze-backports/linux/debian/patches/debian/efi-autoload-efivars.patch (from r19949, dists/sid/linux/debian/patches/debian/efi-autoload-efivars.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/efi-autoload-efivars.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/debian/efi-autoload-efivars.patch)
@@ -0,0 +1,56 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: x86/efi: Autoload efivars
+Date: Mon, 18 Mar 2013 22:59:14 +0000
+Bug-Debian: http://bugs.debian.org/703363
+
+efivars is generally useful to have on EFI systems, and in some cases
+it may be impossible to load it after a kernel upgrade in order to
+complete a boot loader update. At the same time we don't want to
+waste memory on non-EFI systems by making it built-in.
+
+Instead, give it a module alias as if it's a platform driver, and
+register a corresponding platform device whenever EFI runtime services
+are available. This should trigger udev to load it.
+
+---
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -38,6 +38,7 @@
+ #include <linux/io.h>
+ #include <linux/reboot.h>
+ #include <linux/bcd.h>
++#include <linux/platform_device.h>
+
+ #include <asm/setup.h>
+ #include <asm/efi.h>
+@@ -612,6 +613,20 @@ void __init efi_init(void)
+ #endif
+ }
+
++#ifdef CONFIG_EFI_VARS_MODULE
++static int __init efi_load_efivars(void)
++{
++ struct platform_device *pdev;
++
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
++ return 0;
++
++ pdev = platform_device_register_simple("efivars", 0, NULL, 0);
++ return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
++}
++device_initcall(efi_load_efivars);
++#endif
++
+ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
+ {
+ u64 addr, npages;
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -89,6 +89,7 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@
+ MODULE_DESCRIPTION("sysfs interface to EFI Variables");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(EFIVARS_VERSION);
++MODULE_ALIAS("platform:efivars");
+
+ #define DUMP_NAME_LEN 52
+
Copied: dists/squeeze-backports/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch (from r19949, dists/sid/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/debian/efivars-remove-check-for-50-full-on-write.patch)
@@ -0,0 +1,29 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: efivars: Remove check for 50% full on write
+Date: Sat, 23 Mar 2013 02:18:42 +0000
+
+On my EFI-booting system (AMI firmware/Asus board), the firmware does
+not garbage-collect the variable store until it is rather more than
+50% full, and it also updates a variable at every boot. This check
+means that variable writes are guaranteed to fail after the system has
+booted more than a few hundred times.
+
+Since pstore integration is now disabled by default in Debian, we will
+not normally write that much data before rebooting and giving the
+firmware a chance to garbage-collect the variable store. Therefore,
+until the check can be restricted to known-bad systems, it seems less
+risky to disable it for now.
+
+---
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -439,8 +439,7 @@ check_var_size_locked(struct efivars *ef
+ if (status != EFI_SUCCESS)
+ return status;
+
+- if (!storage_size || size > remaining_size || size > max_size ||
+- (remaining_size - size) < (storage_size / 2))
++ if (!storage_size || size > remaining_size || size > max_size)
+ return EFI_OUT_OF_RESOURCES;
+
+ return status;
Copied: dists/squeeze-backports/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch (from r19949, dists/sid/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/debian/pps-avoid-abi-change-in-3.2.40.patch)
@@ -0,0 +1,27 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: pps: avoid ABI change in 3.2.40
+Date: Wed, 06 Mar 2013 14:15:34 +0000
+
+Move the new member pps_device::lookup_cookie to the end of the
+structure and hide it from genksyms. This structure is always
+allocated by pps_register_source().
+
+---
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -69,11 +69,14 @@ struct pps_device {
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
+- void const *lookup_cookie; /* pps_lookup_dev only */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
++
++#ifndef __GENKSYMS__
++ void const *lookup_cookie; /* pps_lookup_dev only */
++#endif
+ };
+
+ /*
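
For readers unfamiliar with the trick: genksyms computes the exported-symbol CRCs from source preprocessed with __GENKSYMS__ defined, so a member guarded by #ifndef __GENKSYMS__ never enters the version calculation while normal builds still see it. A generic sketch of the pattern, with made-up struct and member names:

/* Generic form of the ABI-preserving trick used above (struct and
 * member names are made up).  genksyms runs on source preprocessed
 * with -D__GENKSYMS__, so the appended member does not change the
 * computed symbol version, while ordinary builds still compile it in. */
struct some_exported_struct {
	unsigned int	id;
	void		*private_data;
#ifndef __GENKSYMS__
	void const	*new_member;	/* appended at the end, invisible to genksyms */
#endif
};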
Modified: dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -351,7 +351,7 @@
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 3f1799b..4fd363f 100644
+index 3f1799b..c61e672 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -36,11 +36,7 @@
@@ -1373,7 +1373,7 @@
+
+ switch (bpp) {
+ case 8:
-+ fmt = DRM_FORMAT_RGB332;
++ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
@@ -1923,7 +1923,7 @@
}
out:
-@@ -2777,3 +3400,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+@@ -2777,3 +3400,72 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
@@ -1936,6 +1936,7 @@
+ int *bpp)
+{
+ switch (format) {
++ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
@@ -2237,20 +2238,10 @@
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
-index bb95d59..9d9835a 100644
+index 9080eb7..384edc6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
-@@ -87,9 +87,6 @@ static struct edid_quirk {
- int product_id;
- u32 quirks;
- } edid_quirk_list[] = {
-- /* ASUS VW222S */
-- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
--
- /* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
- /* Acer F51 */
-@@ -157,8 +154,7 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
+@@ -154,8 +154,7 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
@@ -2260,7 +2251,7 @@
{
int i;
u8 csum = 0;
-@@ -211,6 +207,7 @@ bad:
+@@ -208,6 +207,7 @@ bad:
}
return 0;
}
@@ -2268,7 +2259,7 @@
/**
* drm_edid_is_valid - sanity check EDID data
-@@ -234,7 +231,6 @@ bool drm_edid_is_valid(struct edid *edid)
+@@ -231,7 +231,6 @@ bool drm_edid_is_valid(struct edid *edid)
}
EXPORT_SYMBOL(drm_edid_is_valid);
@@ -2276,7 +2267,7 @@
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
-@@ -521,25 +517,10 @@ static void
+@@ -518,25 +517,10 @@ static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
@@ -2304,7 +2295,7 @@
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
-@@ -773,7 +754,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
+@@ -770,7 +754,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
@@ -2313,7 +2304,7 @@
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
-@@ -1341,6 +1322,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+@@ -1338,6 +1322,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
@@ -2321,7 +2312,7 @@
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
-@@ -1371,6 +1353,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
+@@ -1368,6 +1353,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
@@ -2369,7 +2360,7 @@
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
{
-@@ -1454,26 +1477,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+@@ -1451,26 +1477,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
@@ -2418,10 +2409,10 @@
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
-@@ -1744,6 +1770,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
- num_modes += add_standard_modes(connector, edid);
+@@ -1742,6 +1771,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -4317,7 +4308,7 @@
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
-index 445003f..37c9a52 100644
+index 471f453..767782a 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,8 +1,7 @@
@@ -42875,7 +42866,7 @@
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
-index 10fe480..e6162a1 100644
+index 5620192..e6162a1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,6 @@
@@ -43063,7 +43054,7 @@
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
-- seq_printf(m, "Kernel: " UTS_RELEASE);
+- seq_printf(m, "Kernel: " UTS_RELEASE "\n");
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
@@ -44479,7 +44470,7 @@
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index b0186b8..a230a93 100644
+index b0186b8..eb33945 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
@@ -44988,19 +44979,7 @@
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
-@@ -1259,11 +1186,6 @@ out:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
-- case -EBUSY:
-- /*
-- * EBUSY is ok: this just means that another thread
-- * already did the job.
-- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
-@@ -1687,12 +1609,20 @@ i915_add_request(struct intel_ring_buffer *ring,
+@@ -1687,12 +1614,20 @@ i915_add_request(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
@@ -45021,7 +45000,7 @@
ret = ring->add_request(ring, &seqno);
if (ret)
return ret;
-@@ -1701,6 +1631,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+@@ -1701,6 +1636,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = seqno;
request->ring = ring;
@@ -45029,7 +45008,7 @@
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
-@@ -1715,7 +1646,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+@@ -1715,7 +1651,7 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
@@ -45038,7 +45017,7 @@
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
-@@ -1837,7 +1768,7 @@ void i915_gem_reset(struct drm_device *dev)
+@@ -1837,7 +1773,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
@@ -45047,7 +45026,7 @@
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
-@@ -1865,6 +1796,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+@@ -1865,6 +1801,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
@@ -45060,7 +45039,7 @@
list_del(&request->list);
i915_gem_request_remove_from_client(request);
-@@ -1977,7 +1914,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
+@@ -1977,7 +1919,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
@@ -45070,7 +45049,7 @@
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
-@@ -2040,9 +1978,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
+@@ -2040,9 +1983,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
|| atomic_read(&dev_priv->mm.wedged));
ring->irq_put(ring);
@@ -45083,7 +45062,7 @@
ret = -EBUSY;
ring->waiting_seqno = 0;
-@@ -2051,17 +1989,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
+@@ -2051,17 +1994,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
@@ -45102,7 +45081,7 @@
i915_gem_retire_requests_ring(ring);
return ret;
-@@ -2085,7 +2018,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+@@ -2085,7 +2023,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
@@ -45112,7 +45091,7 @@
if (ret)
return ret;
}
-@@ -2123,6 +2057,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+@@ -2123,6 +2062,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
@@ -45120,7 +45099,7 @@
int ret = 0;
if (obj->gtt_space == NULL)
-@@ -2167,6 +2102,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+@@ -2167,6 +2107,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
@@ -45132,7 +45111,7 @@
i915_gem_object_put_pages_gtt(obj);
list_del_init(&obj->gtt_list);
-@@ -2206,7 +2146,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
+@@ -2206,7 +2151,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
@@ -45141,7 +45120,7 @@
{
int ret;
-@@ -2220,18 +2160,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
+@@ -2220,18 +2165,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
@@ -45164,7 +45143,7 @@
if (ret)
return ret;
}
-@@ -2434,7 +2374,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+@@ -2434,7 +2379,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
@@ -45174,7 +45153,7 @@
if (ret)
return ret;
}
-@@ -2466,6 +2407,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+@@ -2466,6 +2412,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -45183,7 +45162,7 @@
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
-@@ -2490,7 +2433,7 @@ i915_find_fence_reg(struct drm_device *dev,
+@@ -2490,7 +2438,7 @@ i915_find_fence_reg(struct drm_device *dev,
if (!reg->obj)
return reg;
@@ -45192,7 +45171,7 @@
avail = reg;
}
-@@ -2500,7 +2443,7 @@ i915_find_fence_reg(struct drm_device *dev,
+@@ -2500,7 +2448,7 @@ i915_find_fence_reg(struct drm_device *dev,
/* None available, try to steal one or wait for a user to finish */
avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -45201,7 +45180,7 @@
continue;
if (first == NULL)
-@@ -2575,7 +2518,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+@@ -2575,7 +2523,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
@@ -45211,7 +45190,7 @@
if (ret)
return ret;
}
-@@ -2594,7 +2538,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+@@ -2594,7 +2543,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = i915_find_fence_reg(dev, pipelined);
if (reg == NULL)
@@ -45220,7 +45199,7 @@
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
-@@ -2694,6 +2638,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
+@@ -2694,6 +2643,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
@@ -45228,7 +45207,7 @@
}
/**
-@@ -2980,6 +2925,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+@@ -2980,6 +2930,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
@@ -45237,7 +45216,7 @@
int ret;
if (obj->cache_level == cache_level)
-@@ -3008,6 +2955,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+@@ -3008,6 +2960,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
i915_gem_gtt_rebind_object(obj, cache_level);
@@ -45247,7 +45226,7 @@
}
if (cache_level == I915_CACHE_NONE) {
-@@ -3346,8 +3296,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+@@ -3346,8 +3301,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
@@ -45258,7 +45237,7 @@
atomic_read(&dev_priv->mm.wedged), 3000)) {
ret = -EBUSY;
}
-@@ -3456,15 +3406,14 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+@@ -3456,15 +3411,14 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
@@ -45277,7 +45256,7 @@
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
-@@ -3658,8 +3607,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+@@ -3658,8 +3612,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -45288,7 +45267,7 @@
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
* display scanout are coherent with the CPU in
-@@ -3749,7 +3698,7 @@ i915_gem_idle(struct drm_device *dev)
+@@ -3749,7 +3703,7 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
@@ -45297,7 +45276,7 @@
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
-@@ -3784,12 +3733,91 @@ i915_gem_idle(struct drm_device *dev)
+@@ -3784,12 +3738,91 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
@@ -45390,7 +45369,7 @@
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
-@@ -3808,6 +3836,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
+@@ -3808,6 +3841,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
dev_priv->next_seqno = 1;
@@ -45399,7 +45378,7 @@
return 0;
cleanup_bsd_ring:
-@@ -3845,7 +3875,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+@@ -3845,7 +3880,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -45408,7 +45387,7 @@
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
-@@ -4240,7 +4270,7 @@ rescan:
+@@ -4240,7 +4275,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
@@ -47730,7 +47709,7 @@
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 7817429..3c9b9c5 100644
+index 4591582..7ccf896 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -75,7 +75,7 @@ struct intel_limit {
@@ -48066,7 +48045,7 @@
intel_fdi_normal_train(crtc);
-@@ -3331,10 +3392,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+@@ -3339,10 +3400,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -48080,7 +48059,7 @@
mutex_unlock(&dev->struct_mutex);
}
}
-@@ -3408,10 +3471,10 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+@@ -3416,10 +3479,10 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return false;
}
@@ -48095,7 +48074,7 @@
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
-@@ -4527,10 +4590,11 @@ static void ironlake_update_wm(struct drm_device *dev)
+@@ -4535,10 +4598,11 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
@@ -48108,7 +48087,7 @@
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
-@@ -4539,8 +4603,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+@@ -4547,8 +4611,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -48121,7 +48100,7 @@
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
-@@ -4551,8 +4617,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+@@ -4559,8 +4625,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -48134,7 +48113,7 @@
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
-@@ -4565,8 +4633,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+@@ -4573,8 +4641,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -48147,7 +48126,7 @@
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
-@@ -4587,7 +4657,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
+@@ -4595,7 +4665,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -48157,7 +48136,7 @@
return;
enabled = ffs(enabled) - 1;
-@@ -4637,6 +4708,161 @@ static void sandybridge_update_wm(struct drm_device *dev)
+@@ -4645,6 +4716,161 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
@@ -48319,7 +48298,7 @@
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
-@@ -4677,6 +4903,16 @@ static void intel_update_watermarks(struct drm_device *dev)
+@@ -4685,6 +4911,16 @@ static void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
@@ -48336,7 +48315,7 @@
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
-@@ -4824,6 +5060,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+@@ -4832,6 +5068,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
@@ -48419,7 +48398,7 @@
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
-@@ -4837,7 +5149,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -4845,7 +5157,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
@@ -48428,7 +48407,7 @@
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
-@@ -4878,15 +5190,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -4886,15 +5198,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
@@ -48445,7 +48424,7 @@
/*
* Returns a set of divisors for the desired target clock with the given
-@@ -4894,7 +5198,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -4902,7 +5206,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
@@ -48455,7 +48434,7 @@
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
-@@ -4904,53 +5209,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -4912,53 +5217,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
@@ -48521,7 +48500,7 @@
dpll = DPLL_VGA_MODE_DIS;
-@@ -5024,8 +5300,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5032,8 +5308,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -48530,7 +48509,7 @@
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
-@@ -5060,7 +5334,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5068,7 +5342,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -48538,7 +48517,7 @@
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
-@@ -5147,33 +5420,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5155,33 +5428,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
@@ -48587,7 +48566,7 @@
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
-@@ -5290,7 +5562,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
+@@ -5298,7 +5570,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
temp |= DREF_SSC1_ENABLE;
@@ -48597,7 +48576,7 @@
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
-@@ -5439,7 +5712,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5447,7 +5720,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
@@ -48607,7 +48586,7 @@
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
-@@ -5449,21 +5723,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5457,21 +5731,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
@@ -48636,7 +48615,7 @@
}
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
-@@ -5758,17 +6028,19 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5766,17 +6036,19 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
@@ -48663,7 +48642,7 @@
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
-@@ -5811,12 +6083,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5819,12 +6091,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
@@ -48676,7 +48655,7 @@
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
-@@ -5843,14 +6109,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+@@ -5851,14 +6117,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
@@ -48724,7 +48703,7 @@
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
-@@ -5867,6 +6164,12 @@ static void g4x_write_eld(struct drm_connector *connector,
+@@ -5875,6 +6172,12 @@ static void g4x_write_eld(struct drm_connector *connector,
else
eldv = G4X_ELDV_DEVCTG;
@@ -48737,7 +48716,7 @@
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
-@@ -5894,22 +6197,26 @@ static void ironlake_write_eld(struct drm_connector *connector,
+@@ -5902,22 +6205,26 @@ static void ironlake_write_eld(struct drm_connector *connector,
uint32_t i;
int len;
int hdmiw_hdmiedid;
@@ -48770,7 +48749,7 @@
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
-@@ -5918,14 +6225,27 @@ static void ironlake_write_eld(struct drm_connector *connector,
+@@ -5926,14 +6233,27 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
@@ -48802,7 +48781,7 @@
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
-@@ -5933,13 +6253,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
+@@ -5941,13 +6261,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!eld[0])
return;
@@ -48817,7 +48796,7 @@
I915_WRITE(aud_cntl_st, i);
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
-@@ -6319,7 +6634,7 @@ static struct drm_display_mode load_detect_mode = {
+@@ -6327,7 +6642,7 @@ static struct drm_display_mode load_detect_mode = {
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
@@ -48826,7 +48805,7 @@
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
-@@ -6361,7 +6676,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
+@@ -6369,7 +6684,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
@@ -48835,7 +48814,7 @@
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
-@@ -6370,9 +6685,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
+@@ -6378,9 +6693,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
@@ -48848,7 +48827,7 @@
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
-@@ -6393,11 +6708,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
+@@ -6401,11 +6716,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
fb = &dev_priv->fbdev->ifb.base;
@@ -48863,7 +48842,7 @@
return NULL;
return fb;
-@@ -6729,9 +7044,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
+@@ -6737,9 +7052,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
@@ -48874,7 +48853,7 @@
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
-@@ -6740,9 +7053,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
+@@ -6748,9 +7061,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -48884,7 +48863,7 @@
}
/* Schedule downclock */
-@@ -6755,9 +7065,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+@@ -6763,9 +7073,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -48894,7 +48873,7 @@
if (HAS_PCH_SPLIT(dev))
return;
-@@ -6770,23 +7077,22 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+@@ -6778,23 +7085,22 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
@@ -48924,7 +48903,7 @@
}
/**
-@@ -6899,7 +7205,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
+@@ -6907,7 +7213,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
@@ -48933,7 +48912,7 @@
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
-@@ -6927,18 +7233,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+@@ -6935,18 +7241,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
@@ -48953,7 +48932,7 @@
intel_crtc->unpin_work = NULL;
if (work->event) {
-@@ -7010,25 +7309,16 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
+@@ -7018,25 +7317,16 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
@@ -48985,7 +48964,7 @@
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
-@@ -7045,7 +7335,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+@@ -7053,7 +7343,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
@@ -48994,7 +48973,7 @@
ret = BEGIN_LP_RING(6);
if (ret)
-@@ -7062,16 +7352,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+@@ -7070,16 +7360,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -49014,7 +48993,7 @@
err:
return ret;
}
-@@ -7092,7 +7380,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+@@ -7100,7 +7388,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
@@ -49023,7 +49002,7 @@
ret = BEGIN_LP_RING(6);
if (ret)
-@@ -7106,16 +7394,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+@@ -7114,16 +7402,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -49042,7 +49021,7 @@
err:
return ret;
}
-@@ -7144,7 +7431,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+@@ -7152,7 +7439,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -49051,7 +49030,7 @@
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
-@@ -7154,13 +7441,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+@@ -7162,13 +7449,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
@@ -49066,7 +49045,7 @@
err:
return ret;
}
-@@ -7185,19 +7470,23 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+@@ -7193,19 +7478,23 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -49095,7 +49074,7 @@
err:
return ret;
}
-@@ -7244,16 +7533,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+@@ -7252,16 +7541,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto err_unpin;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
@@ -49114,7 +49093,46 @@
err:
return ret;
}
-@@ -7551,10 +7838,9 @@ static void intel_setup_outputs(struct drm_device *dev)
+@@ -7280,8 +7567,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_framebuffer *old_fb = crtc->fb;
+- struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
++ struct intel_framebuffer *intel_fb;
++ struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+@@ -7293,7 +7580,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ work->event = event;
+ work->dev = crtc->dev;
+- work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
++ intel_fb = to_intel_framebuffer(crtc->fb);
++ work->old_fb_obj = intel_fb->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+@@ -7313,6 +7601,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
++ intel_fb = to_intel_framebuffer(fb);
++ obj = intel_fb->obj;
++
+ mutex_lock(&dev->struct_mutex);
+
+ /* Reference the objects for the scheduled work. */
+@@ -7343,7 +7634,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+- crtc->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+@@ -7556,10 +7846,9 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -49127,7 +49145,7 @@
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
-@@ -7683,7 +7969,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
+@@ -7688,7 +7977,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
@@ -49136,7 +49154,7 @@
struct drm_i915_gem_object *obj)
{
int ret;
-@@ -7691,21 +7977,27 @@ int intel_framebuffer_init(struct drm_device *dev,
+@@ -7696,21 +7985,27 @@ int intel_framebuffer_init(struct drm_device *dev,
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
@@ -49174,7 +49192,7 @@
return -EINVAL;
}
-@@ -7723,11 +8015,12 @@ int intel_framebuffer_init(struct drm_device *dev,
+@@ -7728,11 +8023,12 @@ int intel_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
@@ -49189,7 +49207,7 @@
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
-@@ -7996,7 +8289,7 @@ void intel_init_emon(struct drm_device *dev)
+@@ -8001,7 +8297,7 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
@@ -49198,7 +49216,7 @@
{
/*
* Respect the kernel parameter if it is set
-@@ -8014,11 +8307,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
+@@ -8019,11 +8315,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
* Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
@@ -49214,7 +49232,7 @@
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
-@@ -8026,7 +8319,9 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+@@ -8031,7 +8327,9 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
@@ -49224,7 +49242,7 @@
int i;
/* Here begins a magic sequence of register writes to enable
-@@ -8037,6 +8332,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+@@ -8042,6 +8340,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
@@ -49238,8 +49256,12 @@
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
-@@ -8057,9 +8359,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+@@ -8059,12 +8364,23 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
++ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (intel_enable_rc6(dev_priv->dev))
@@ -49262,7 +49284,7 @@
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
-@@ -8287,6 +8600,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+@@ -8292,6 +8608,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -49273,7 +49295,7 @@
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
-@@ -8365,6 +8682,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
+@@ -8370,6 +8690,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
@@ -49284,7 +49306,7 @@
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-@@ -8675,9 +8996,15 @@ static void intel_init_display(struct drm_device *dev)
+@@ -8680,9 +9004,15 @@ static void intel_init_display(struct drm_device *dev)
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -49301,7 +49323,7 @@
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
-@@ -8709,6 +9036,7 @@ static void intel_init_display(struct drm_device *dev)
+@@ -8714,6 +9044,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -49309,7 +49331,7 @@
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
-@@ -8722,6 +9050,7 @@ static void intel_init_display(struct drm_device *dev)
+@@ -8727,6 +9058,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -49317,7 +49339,7 @@
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
-@@ -8834,8 +9163,6 @@ struct intel_quirk {
+@@ -8839,8 +9171,6 @@ struct intel_quirk {
};
struct intel_quirk intel_quirks[] = {
@@ -49326,7 +49348,7 @@
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
-@@ -8902,33 +9229,19 @@ static void i915_disable_vga(struct drm_device *dev)
+@@ -8907,33 +9237,19 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
@@ -49364,7 +49386,7 @@
dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
-@@ -8952,6 +9265,9 @@ void intel_modeset_init(struct drm_device *dev)
+@@ -8957,6 +9273,9 @@ void intel_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
@@ -71861,7 +71883,7 @@
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index a25d08a..15594a3 100644
+index a25d08a..ebbfbd2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -231,6 +231,22 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
@@ -71887,19 +71909,16 @@
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
-@@ -242,8 +258,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
- radeon_crtc->enabled = true;
+@@ -243,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
-+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-+ atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
- if (ASIC_IS_DCE3(rdev))
+ if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
-@@ -255,10 +273,12 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+@@ -255,7 +271,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
@@ -71908,12 +71927,7 @@
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
-+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
- /* adjust pm to dpms changes AFTER disabling crtcs */
- radeon_pm_compute_clocks(rdev);
- break;
-@@ -355,15 +375,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+@@ -355,15 +371,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -71931,7 +71945,7 @@
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
-@@ -379,7 +396,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
+@@ -379,7 +392,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
@@ -71940,7 +71954,7 @@
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
-@@ -406,16 +423,31 @@ union atom_enable_ss {
+@@ -406,16 +419,31 @@ union atom_enable_ss {
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
@@ -71975,7 +71989,7 @@
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE5(rdev)) {
-@@ -441,7 +473,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+@@ -441,7 +469,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
return;
}
args.v3.ucEnable = enable;
@@ -71984,7 +71998,7 @@
args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
-@@ -479,7 +511,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+@@ -479,7 +507,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
@@ -71993,7 +72007,7 @@
return;
}
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
-@@ -491,7 +523,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+@@ -491,7 +519,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
@@ -72002,7 +72016,7 @@
return;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
-@@ -523,6 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+@@ -523,6 +551,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
int encoder_mode = 0;
u32 dp_clock = mode->clock;
int bpc = 8;
@@ -72010,7 +72024,7 @@
/* reset the pll flags */
pll->flags = 0;
-@@ -542,9 +575,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+@@ -542,9 +571,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -72021,7 +72035,7 @@
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
pll->flags |= RADEON_PLL_LEGACY;
-@@ -559,9 +590,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+@@ -559,9 +586,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
@@ -72034,7 +72048,7 @@
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
if (connector) {
-@@ -657,7 +689,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+@@ -657,7 +685,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -72043,7 +72057,7 @@
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
-@@ -707,11 +739,9 @@ union set_pixel_clock {
+@@ -707,11 +735,9 @@ union set_pixel_clock {
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
@@ -72056,7 +72070,7 @@
u8 frev, crev;
int index;
union set_pixel_clock args;
-@@ -739,7 +769,12 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+@@ -739,7 +765,12 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
@@ -72070,7 +72084,7 @@
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
-@@ -932,7 +967,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -932,7 +963,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
@@ -72081,7 +72095,7 @@
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
-@@ -1001,7 +1038,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -1001,7 +1034,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
@@ -72090,7 +72104,7 @@
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
-@@ -1024,7 +1061,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -1024,7 +1057,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
@@ -72099,7 +72113,7 @@
}
}
-@@ -1041,6 +1078,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1041,6 +1074,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
@@ -72107,7 +72121,7 @@
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
-@@ -1131,20 +1169,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1131,20 +1165,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
break;
}
@@ -72134,7 +72148,7 @@
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
-@@ -1189,7 +1220,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1189,7 +1216,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
@@ -72143,7 +72157,7 @@
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
-@@ -1358,7 +1389,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1358,7 +1385,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
@@ -72152,7 +72166,7 @@
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
-@@ -1460,7 +1491,36 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1460,7 +1487,36 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
struct drm_crtc *test_crtc;
uint32_t pll_in_use = 0;
@@ -72190,7 +72204,7 @@
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
-@@ -1475,6 +1535,8 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1475,6 +1531,8 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
@@ -72199,7 +72213,7 @@
else if (ASIC_IS_DCE5(rdev))
return ATOM_DCPLL;
}
-@@ -1501,6 +1563,26 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1501,6 +1559,26 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
@@ -72226,7 +72240,7 @@
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
-@@ -1522,19 +1604,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+@@ -1522,19 +1600,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
@@ -72246,7 +72260,7 @@
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
-@@ -1568,18 +1637,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+@@ -1568,18 +1633,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -72275,7 +72289,16 @@
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
-@@ -1611,6 +1690,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+@@ -1591,6 +1666,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ int i;
+
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
++ if (ASIC_IS_DCE6(rdev))
++ atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+@@ -1611,6 +1688,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
@@ -72957,7 +72980,7 @@
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
-index 0977849..c62132c 100644
+index 60d13fe..c62132c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -50,6 +50,39 @@ static const u32 crtc_offsets[6] =
@@ -73048,16 +73071,7 @@
blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
if ((blackout & BLACKOUT_MODE_MASK) != 1) {
-@@ -1137,6 +1183,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
- blackout &= ~BLACKOUT_MODE_MASK;
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-+ /* wait for the MC to settle */
-+ udelay(100);
- }
-
- void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
-@@ -1166,10 +1214,20 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+@@ -1168,10 +1214,20 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -73082,7 +73096,7 @@
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
for (j = 0; j < rdev->usec_timeout; j++) {
-@@ -1229,7 +1287,10 @@ void evergreen_mc_program(struct radeon_device *rdev)
+@@ -1231,7 +1287,10 @@ void evergreen_mc_program(struct radeon_device *rdev)
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
@@ -73094,7 +73108,7 @@
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
-@@ -1264,18 +1325,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
+@@ -1266,18 +1325,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
@@ -73121,7 +73135,7 @@
}
-@@ -1313,71 +1376,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+@@ -1315,71 +1376,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
@@ -73226,7 +73240,7 @@
u32 tmp;
u32 rb_bufsz;
int r;
-@@ -1395,13 +1460,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+@@ -1397,13 +1460,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
@@ -73243,7 +73257,7 @@
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
-@@ -1409,8 +1475,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+@@ -1411,8 +1475,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
@@ -73254,7 +73268,7 @@
/* set the wb address wether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
-@@ -1428,16 +1494,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+@@ -1430,16 +1494,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
@@ -73276,7 +73290,7 @@
return r;
}
return 0;
-@@ -1730,7 +1796,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+@@ -1732,7 +1796,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
@@ -73285,7 +73299,7 @@
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
-@@ -1819,7 +1885,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+@@ -1821,7 +1885,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
@@ -73294,7 +73308,7 @@
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
-@@ -1868,7 +1934,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+@@ -1870,7 +1934,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
@@ -73305,7 +73319,7 @@
mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
-@@ -2272,7 +2340,9 @@ int evergreen_mc_init(struct radeon_device *rdev)
+@@ -2274,7 +2340,9 @@ int evergreen_mc_init(struct radeon_device *rdev)
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -73316,7 +73330,7 @@
tmp = RREG32(FUS_MC_ARB_RAMCFG);
else
tmp = RREG32(MC_ARB_RAMCFG);
-@@ -2304,12 +2374,14 @@ int evergreen_mc_init(struct radeon_device *rdev)
+@@ -2306,12 +2374,14 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
@@ -73333,7 +73347,7 @@
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
-@@ -2320,7 +2392,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
+@@ -2322,7 +2392,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
@@ -73342,7 +73356,7 @@
{
u32 srbm_status;
u32 grbm_status;
-@@ -2333,19 +2405,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+@@ -2335,19 +2405,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
@@ -73369,7 +73383,7 @@
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
-@@ -2437,7 +2509,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+@@ -2439,7 +2509,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
@@ -73384,7 +73398,7 @@
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-@@ -2461,7 +2539,9 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+@@ -2463,7 +2539,9 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -73395,7 +73409,7 @@
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-@@ -2482,6 +2562,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+@@ -2484,6 +2562,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
@@ -73403,7 +73417,7 @@
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
-@@ -2506,11 +2587,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
+@@ -2508,11 +2587,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -73436,7 +73450,7 @@
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
-@@ -2570,7 +2668,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
+@@ -2572,7 +2668,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
@@ -73450,7 +73464,7 @@
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
-@@ -2985,11 +3088,24 @@ restart_ih:
+@@ -2987,11 +3088,24 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
@@ -73477,7 +73491,7 @@
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
-@@ -3019,6 +3135,7 @@ restart_ih:
+@@ -3021,6 +3135,7 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
@@ -73485,7 +73499,7 @@
int r;
/* enable pcie gen2 link */
-@@ -3064,7 +3181,7 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3066,7 +3181,7 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
@@ -73494,7 +73508,7 @@
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
-@@ -3073,6 +3190,12 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3075,6 +3190,12 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
@@ -73507,7 +73521,7 @@
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
-@@ -3082,7 +3205,9 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3084,7 +3205,9 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
@@ -73518,7 +73532,7 @@
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
-@@ -3092,6 +3217,23 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3094,6 +3217,23 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
@@ -73542,7 +73556,7 @@
return 0;
}
-@@ -3111,15 +3253,11 @@ int evergreen_resume(struct radeon_device *rdev)
+@@ -3113,15 +3253,11 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
@@ -73560,7 +73574,7 @@
return r;
}
-@@ -3129,13 +3267,17 @@ int evergreen_resume(struct radeon_device *rdev)
+@@ -3131,13 +3267,17 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
@@ -73580,7 +73594,7 @@
return 0;
}
-@@ -3210,8 +3352,8 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3212,8 +3352,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
@@ -73591,7 +73605,7 @@
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
-@@ -3220,29 +3362,24 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3222,29 +3362,24 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
@@ -73628,7 +73642,7 @@
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
-@@ -3260,15 +3397,17 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3262,15 +3397,17 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
@@ -76068,7 +76082,7 @@
+ return ret;
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
-index e022776..34a0e85 100644
+index e022776b..34a0e85 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,6 +35,14 @@
@@ -85730,10 +85744,10 @@
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
-index ec36dd9..a2470d9 100644
+index c32fd93..2b2c557 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
-@@ -1536,9 +1536,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+@@ -1545,9 +1545,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
@@ -85743,7 +85757,7 @@
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
-@@ -1564,11 +1561,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+@@ -1573,11 +1570,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
@@ -85755,7 +85769,7 @@
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
-@@ -2142,115 +2134,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+@@ -2151,115 +2143,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
@@ -85871,7 +85885,7 @@
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
-@@ -2970,7 +2853,7 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+@@ -2979,7 +2862,7 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
case 4:
val = RBIOS16(index);
index += 2;
@@ -85880,7 +85894,7 @@
break;
case 6:
slave_addr = id & 0xff;
-@@ -3169,7 +3052,7 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+@@ -3178,7 +3061,7 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
udelay(150);
break;
case 2:
@@ -85889,7 +85903,7 @@
break;
case 3:
while (tmp--) {
-@@ -3200,13 +3083,13 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+@@ -3209,13 +3092,13 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
/*mclk_cntl |= 0x00001111;*//* ??? */
WREG32_PLL(RADEON_MCLK_CNTL,
mclk_cntl);
@@ -101173,10 +101187,10 @@
+module_exit(udl_exit);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
new file mode 100644
-index 0000000..e760575
+index 0000000..2b8c4fd
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_drv.h
-@@ -0,0 +1,142 @@
+@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
@@ -101253,6 +101267,8 @@
+ struct drm_framebuffer base;
+ struct udl_gem_object *obj;
+ bool active_16; /* active on the 16-bit channel */
++ int x1, y1, x2, y2; /* dirty rect */
++ spinlock_t dirty_lock;
+};
+
+#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
@@ -101407,10 +101423,10 @@
+}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
new file mode 100644
-index 0000000..b9282cf
+index 0000000..f02d223
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_fb.c
-@@ -0,0 +1,613 @@
+@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
@@ -101435,9 +101451,9 @@
+
+#include "drm_fb_helper.h"
+
-+#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
++#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
+
-+static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
++static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
+static int fb_bpp = 16;
+
+module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
@@ -101566,6 +101582,9 @@
+ struct urb *urb;
+ int aligned_x;
+ int bpp = (fb->base.bits_per_pixel / 8);
++ int x2, y2;
++ bool store_for_later = false;
++ unsigned long flags;
+
+ if (!fb->active_16)
+ return 0;
@@ -101573,8 +101592,6 @@
+ if (!fb->obj->vmapping)
+ udl_gem_vmap(fb->obj);
+
-+ start_cycles = get_cycles();
-+
+ aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ x = aligned_x;
@@ -101584,19 +101601,53 @@
+ (y + height > fb->base.height))
+ return -EINVAL;
+
++ /* if we are in atomic just store the info
++ can't test inside spin lock */
++ if (in_atomic())
++ store_for_later = true;
++
++ x2 = x + width - 1;
++ y2 = y + height - 1;
++
++ spin_lock_irqsave(&fb->dirty_lock, flags);
++
++ if (fb->y1 < y)
++ y = fb->y1;
++ if (fb->y2 > y2)
++ y2 = fb->y2;
++ if (fb->x1 < x)
++ x = fb->x1;
++ if (fb->x2 > x2)
++ x2 = fb->x2;
++
++ if (store_for_later) {
++ fb->x1 = x;
++ fb->x2 = x2;
++ fb->y1 = y;
++ fb->y2 = y2;
++ spin_unlock_irqrestore(&fb->dirty_lock, flags);
++ return 0;
++ }
++
++ fb->x1 = fb->y1 = INT_MAX;
++ fb->x2 = fb->y2 = 0;
++
++ spin_unlock_irqrestore(&fb->dirty_lock, flags);
++ start_cycles = get_cycles();
++
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return 0;
+ cmd = urb->transfer_buffer;
+
-+ for (i = y; i < y + height ; i++) {
++ for (i = y; i <= y2 ; i++) {
+ const int line_offset = fb->base.pitches[0] * i;
+ const int byte_offset = line_offset + (x * bpp);
+ const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+ if (udl_render_hline(dev, bpp, &urb,
+ (char *) fb->obj->vmapping,
+ &cmd, byte_offset, dev_byte_offset,
-+ width * bpp,
++ (x2 - x + 1) * bpp,
+ &bytes_identical, &bytes_sent))
+ goto error;
+ }
@@ -101821,6 +101872,7 @@
+{
+ int ret;
+
++ spin_lock_init(&ufb->dirty_lock);
+ ufb->obj = obj;
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
+ drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0001-Revert-workqueue-skip-nr_running-sanity-check-in-wor.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0001-Revert-workqueue-skip-nr_running-sanity-check-in-wor.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0001-Revert-workqueue-skip-nr_running-sanity-check-in-wor.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 4080ba071043dedf518dbf5f9f48cda2edd748d8 Mon Sep 17 00:00:00 2001
+From da6898999c9081fc36822cad92dc573b4b21b18f Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt at redhat.com>
Date: Wed, 6 Jun 2012 17:07:34 -0400
-Subject: [PATCH 001/304] Revert "workqueue: skip nr_running sanity check in
+Subject: [PATCH 001/303] Revert "workqueue: skip nr_running sanity check in
worker_enter_idle() if trustee is active"
This reverts commit 5d79c6f64a904afc92a329f80abe693e3ae105fe.
@@ -14,10 +14,10 @@
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 7bf068a..c58e142 100644
+index 0ad2420..d2fce7c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -1215,13 +1215,8 @@ static void worker_enter_idle(struct worker *worker)
+@@ -1235,13 +1235,8 @@ static void worker_enter_idle(struct worker *worker)
} else
wake_up_all(&gcwq->trustee_wait);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0002-x86-Call-idle-notifier-after-irq_enter.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0002-x86-Call-idle-notifier-after-irq_enter.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0002-x86-Call-idle-notifier-after-irq_enter.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 04e888e86193098bba652b67b12361ce38032881 Mon Sep 17 00:00:00 2001
+From 27a303aaae0e4f04f8cd9765710cb0f35c545873 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker <fweisbec at gmail.com>
Date: Mon, 26 Sep 2011 12:19:11 +0200
-Subject: [PATCH 002/304] x86: Call idle notifier after irq_enter()
+Subject: [PATCH 002/303] x86: Call idle notifier after irq_enter()
Interrupts notify the idle exit state before calling irq_enter(). But
the notifier code calls rcu_read_lock() and this is not allowed while
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0003-slab-lockdep-Annotate-all-slab-caches.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0003-slab-lockdep-Annotate-all-slab-caches.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0003-slab-lockdep-Annotate-all-slab-caches.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 15ee71d300baed7e5a2bcd9dee6d6db4a55479ce Mon Sep 17 00:00:00 2001
+From 69497c0ef60725321289054d187814b981252c8b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Mon, 28 Nov 2011 19:51:51 +0100
-Subject: [PATCH 003/304] slab, lockdep: Annotate all slab caches
+Subject: [PATCH 003/303] slab, lockdep: Annotate all slab caches
Currently we only annotate the kmalloc caches, annotate all of them.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0004-x86-kprobes-Remove-remove-bogus-preempt_enable.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0004-x86-kprobes-Remove-remove-bogus-preempt_enable.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0004-x86-kprobes-Remove-remove-bogus-preempt_enable.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 85b24d758843cf9ffaca854e64c5cd2f98427a4d Mon Sep 17 00:00:00 2001
+From d3ca9a9d96b655d857b8bf0bc51a0883af190668 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 17 Mar 2011 11:02:15 +0100
-Subject: [PATCH 004/304] x86: kprobes: Remove remove bogus preempt_enable
+Subject: [PATCH 004/303] x86: kprobes: Remove remove bogus preempt_enable
The CONFIG_PREEMPT=n section of setup_singlestep() contains:
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0005-x86-hpet-Disable-MSI-on-Lenovo-W510.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0005-x86-hpet-Disable-MSI-on-Lenovo-W510.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0005-x86-hpet-Disable-MSI-on-Lenovo-W510.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 721f7f35c51196324b16e35a15e8483cab45315c Mon Sep 17 00:00:00 2001
+From 0161c0773627a7ad0637df34c65392dfaf3f0608 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 30 Sep 2011 20:03:37 +0200
-Subject: [PATCH 005/304] x86: hpet: Disable MSI on Lenovo W510
+Subject: [PATCH 005/303] x86: hpet: Disable MSI on Lenovo W510
MSI based per cpu timers lose interrupts when intel_idle() is enabled
- independent of the c-state. With idle=poll the problem cannot be
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0006-block-Shorten-interrupt-disabled-regions.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0006-block-Shorten-interrupt-disabled-regions.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0006-block-Shorten-interrupt-disabled-regions.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d6cb4a7e84ec7796720e3ba60594ee6a9de2f716 Mon Sep 17 00:00:00 2001
+From 7293618dd00ca6c13fac3d745c32a894687d62b4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
-Subject: [PATCH 006/304] block: Shorten interrupt disabled regions
+Subject: [PATCH 006/303] block: Shorten interrupt disabled regions
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0007-sched-Distangle-worker-accounting-from-rq-3Elock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0007-sched-Distangle-worker-accounting-from-rq-3Elock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0007-sched-Distangle-worker-accounting-from-rq-3Elock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From b8a07f6476c04d864ad002f1ac713abbda608d3d Mon Sep 17 00:00:00 2001
+From 3b9d4f10b3587307854796928088b43dab6664da Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 22 Jun 2011 19:47:03 +0200
-Subject: [PATCH 007/304] sched: Distangle worker accounting from rq-%3Elock
+Subject: [PATCH 007/303] sched: Distangle worker accounting from rq-%3Elock
The worker accounting for cpu bound workers is plugged into the core
scheduler code and the wakeup code. This is not a hard requirement and
@@ -141,10 +141,10 @@
EXPORT_SYMBOL(schedule);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index c58e142..1c16faf 100644
+index d2fce7c..205bdb0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -137,6 +137,7 @@ struct worker {
+@@ -138,6 +138,7 @@ struct worker {
unsigned int flags; /* X: flags */
int id; /* I: worker id */
struct work_struct rebind_work; /* L: rebind worker to cpu */
@@ -152,7 +152,7 @@
};
/*
-@@ -660,66 +661,58 @@ static void wake_up_worker(struct global_cwq *gcwq)
+@@ -661,66 +662,58 @@ static void wake_up_worker(struct global_cwq *gcwq)
}
/**
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0008-mips-enable-interrupts-in-signal.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0008-mips-enable-interrupts-in-signal.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0008-mips-enable-interrupts-in-signal.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 708c67b6892ce21a838b8bc6c6da937dea49b30b Mon Sep 17 00:00:00 2001
+From e9c54b053b6cb1bc1d19f391c30353c16a6505ba Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 21:32:10 +0200
-Subject: [PATCH 008/304] mips-enable-interrupts-in-signal.patch
+Subject: [PATCH 008/303] mips-enable-interrupts-in-signal.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0009-arm-enable-interrupts-in-signal-code.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0009-arm-enable-interrupts-in-signal-code.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0009-arm-enable-interrupts-in-signal-code.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 16f05de8665ad363ee70ff967cbc2af50c4c42fb Mon Sep 17 00:00:00 2001
+From 104f7707f01e0dddc4bfae2c2cb8ec9d2b7688ca Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 16:27:13 +0200
-Subject: [PATCH 009/304] arm-enable-interrupts-in-signal-code.patch
+Subject: [PATCH 009/303] arm-enable-interrupts-in-signal-code.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0010-powerpc-85xx-Mark-cascade-irq-IRQF_NO_THREAD.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0010-powerpc-85xx-Mark-cascade-irq-IRQF_NO_THREAD.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0010-powerpc-85xx-Mark-cascade-irq-IRQF_NO_THREAD.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 239e65cf297bd8c459dabff7b137eba58b949240 Mon Sep 17 00:00:00 2001
+From 356692e7eef08e27cee15b47c04430b28dc55778 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 12:09:54 +0200
-Subject: [PATCH 010/304] powerpc: 85xx: Mark cascade irq IRQF_NO_THREAD
+Subject: [PATCH 010/303] powerpc: 85xx: Mark cascade irq IRQF_NO_THREAD
Cascade interrupt must run in hard interrupt context.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0011-powerpc-wsp-Mark-opb-cascade-handler-IRQF_NO_THREAD.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0011-powerpc-wsp-Mark-opb-cascade-handler-IRQF_NO_THREAD.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0011-powerpc-wsp-Mark-opb-cascade-handler-IRQF_NO_THREAD.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e3a3d66e062a9e30a03cd2c4d8723475200c9f43 Mon Sep 17 00:00:00 2001
+From 72afade4d905a7e329637e8c3393dd72ec4d1b65 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 5 Oct 2011 14:11:24 +0200
-Subject: [PATCH 011/304] powerpc: wsp: Mark opb cascade handler
+Subject: [PATCH 011/303] powerpc: wsp: Mark opb cascade handler
IRQF_NO_THREAD
Cascade handlers must run in hard interrupt context.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0012-powerpc-Mark-IPI-interrupts-IRQF_NO_THREAD.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0012-powerpc-Mark-IPI-interrupts-IRQF_NO_THREAD.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0012-powerpc-Mark-IPI-interrupts-IRQF_NO_THREAD.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8a890ac5641bdc42156f29b4bf132e4405363ca1 Mon Sep 17 00:00:00 2001
+From 685ed6fac065aa4f10c2f123ef108cd979252730 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 5 Oct 2011 14:00:26 +0200
-Subject: [PATCH 012/304] powerpc: Mark IPI interrupts IRQF_NO_THREAD
+Subject: [PATCH 012/303] powerpc: Mark IPI interrupts IRQF_NO_THREAD
IPI handlers cannot be threaded. Remove the obsolete IRQF_DISABLED
flag (see commit e58aa3d2) while at it.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0013-powerpc-Allow-irq-threading.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0013-powerpc-Allow-irq-threading.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0013-powerpc-Allow-irq-threading.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d7835b602a6890bc13f052626f40d29675b4cab6 Mon Sep 17 00:00:00 2001
+From c3f5a3f5a3e3304dbbfd91684bd6a6b326e8fafd Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 13:16:24 +0200
-Subject: [PATCH 013/304] powerpc: Allow irq threading
+Subject: [PATCH 013/303] powerpc: Allow irq threading
All interrupts which must be non threaded are marked
IRQF_NO_THREAD. So it's safe to allow force threaded handlers.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0014-sched-Keep-period-timer-ticking-when-throttling-acti.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0014-sched-Keep-period-timer-ticking-when-throttling-acti.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0014-sched-Keep-period-timer-ticking-when-throttling-acti.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 953da6243e3f0efd29dc975590e4a5d7788cfce7 Mon Sep 17 00:00:00 2001
+From 70487eb7939a30ce5ee62424122b1600f9cb496a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Tue, 18 Oct 2011 22:03:48 +0200
-Subject: [PATCH 014/304] sched: Keep period timer ticking when throttling
+Subject: [PATCH 014/303] sched: Keep period timer ticking when throttling
active
When a runqueue is throttled we cannot disable the period timer
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0015-sched-Do-not-throttle-due-to-PI-boosting.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0015-sched-Do-not-throttle-due-to-PI-boosting.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0015-sched-Do-not-throttle-due-to-PI-boosting.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f0ae682e1fa8444b95e4fde76ab2389583b774fd Mon Sep 17 00:00:00 2001
+From a64d059ff8c803ddbaaaf1ab69a57ed09249f98f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Tue, 18 Oct 2011 22:03:48 +0200
-Subject: [PATCH 015/304] sched: Do not throttle due to PI boosting
+Subject: [PATCH 015/303] sched: Do not throttle due to PI boosting
When a runqueue has rt_runtime_us = 0 then the only way it can
accumulate rt_time is via PI boosting. Though that causes the runqueue
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0016-time-Remove-bogus-comments.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0016-time-Remove-bogus-comments.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0016-time-Remove-bogus-comments.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 3c033bdfe750cfc8a3c7f94820a97b16233276bc Mon Sep 17 00:00:00 2001
+From 4c2ea7285c24a305434e19ac3f64fab3eae5affc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Feb 2012 19:06:50 +0100
-Subject: [PATCH 016/304] time: Remove bogus comments
+Subject: [PATCH 016/303] time: Remove bogus comments
There is no global irq lock which makes a syscall magically SMP
safe. Remove the outdated comment concerning do_settimeofday() as
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0017-x86-vdso-Remove-bogus-locking-in-update_vsyscall_tz.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0017-x86-vdso-Remove-bogus-locking-in-update_vsyscall_tz.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0017-x86-vdso-Remove-bogus-locking-in-update_vsyscall_tz.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 1f067852069d9d3b511a50d7df1602a298d2b936 Mon Sep 17 00:00:00 2001
+From ed5b89b6d79159832fbe64a59ced3eea66e67120 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Feb 2012 19:10:46 +0100
-Subject: [PATCH 017/304] x86: vdso: Remove bogus locking in
+Subject: [PATCH 017/303] x86: vdso: Remove bogus locking in
update_vsyscall_tz()
Changing the sequence count in update_vsyscall_tz() is completely
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0018-x86-vdso-Use-seqcount-instead-of-seqlock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0018-x86-vdso-Use-seqcount-instead-of-seqlock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0018-x86-vdso-Use-seqcount-instead-of-seqlock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From a58bf9f435b1f3232a9352c901d3fcec4ef9aaec Mon Sep 17 00:00:00 2001
+From fb5137d75207b55e79a23e2b59f883443ff73280 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Feb 2012 18:24:07 +0100
-Subject: [PATCH 018/304] x86: vdso: Use seqcount instead of seqlock
+Subject: [PATCH 018/303] x86: vdso: Use seqcount instead of seqlock
The update of the vdso data happens under xtime_lock, so adding a
nested lock is pointless. Just use a seqcount to sync the readers.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0019-ia64-vsyscall-Use-seqcount-instead-of-seqlock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0019-ia64-vsyscall-Use-seqcount-instead-of-seqlock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0019-ia64-vsyscall-Use-seqcount-instead-of-seqlock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From fb7e566b34aeda0f8cccc09ab40f451384f7ccc6 Mon Sep 17 00:00:00 2001
+From bb08f4a939ac65b36354bd00e5bd62655eadf287 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Feb 2012 18:33:08 +0100
-Subject: [PATCH 019/304] ia64: vsyscall: Use seqcount instead of seqlock
+Subject: [PATCH 019/303] ia64: vsyscall: Use seqcount instead of seqlock
The update of the vdso data happens under xtime_lock, so adding a
nested lock is pointless. Just use a seqcount to sync the readers.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0020-seqlock-Remove-unused-functions.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0020-seqlock-Remove-unused-functions.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0020-seqlock-Remove-unused-functions.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 30ae17158a129813c5dfed6301b61c5e744e6ed5 Mon Sep 17 00:00:00 2001
+From 94835eaceeeae72b06cb757dcfd08144f443bf18 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 18:38:22 +0200
-Subject: [PATCH 020/304] seqlock: Remove unused functions
+Subject: [PATCH 020/303] seqlock: Remove unused functions
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0021-seqlock-Use-seqcount.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0021-seqlock-Use-seqcount.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0021-seqlock-Use-seqcount.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From dab1ad0a7d594d634d6253e08ba30e12c3f1b0dd Mon Sep 17 00:00:00 2001
+From fcd2a1f958bd73d7789a70e940811c83828004d9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 18:40:26 +0200
-Subject: [PATCH 021/304] seqlock: Use seqcount
+Subject: [PATCH 021/303] seqlock: Use seqcount
No point in having different implementations for the same thing.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0022-vfs-fs_struct-Move-code-out-of-seqcount-write-sectio.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0022-vfs-fs_struct-Move-code-out-of-seqcount-write-sectio.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0022-vfs-fs_struct-Move-code-out-of-seqcount-write-sectio.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 47a3a34ec251a566d97646774193e1789eb20220 Mon Sep 17 00:00:00 2001
+From 82b63a7422f7b625631b810d29c66115440c279a Mon Sep 17 00:00:00 2001
From: Al Viro <viro at ZenIV.linux.org.uk>
Date: Thu, 15 Mar 2012 18:39:40 +0000
-Subject: [PATCH 022/304] vfs: fs_struct: Move code out of seqcount write
+Subject: [PATCH 022/303] vfs: fs_struct: Move code out of seqcount write
sections
RT cannot disable preemption in the seqcount write sections due to
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0023-timekeeping-Split-xtime_lock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0023-timekeeping-Split-xtime_lock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0023-timekeeping-Split-xtime_lock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 651c5b6726811ee25051054e0c3a240f93a4e79d Mon Sep 17 00:00:00 2001
+From e8773392222019ccadcbc1fbe3fa68a153db90c3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 1 Mar 2012 15:14:06 +0100
-Subject: [PATCH 023/304] timekeeping: Split xtime_lock
+Subject: [PATCH 023/303] timekeeping: Split xtime_lock
xtime_lock is going to be split apart in mainline, so we can shorten
the seqcount protected regions and avoid updating seqcount in some
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0024-intel_idle-Convert-i7300_idle_lock-to-raw-spinlock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0024-intel_idle-Convert-i7300_idle_lock-to-raw-spinlock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0024-intel_idle-Convert-i7300_idle_lock-to-raw-spinlock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8808660b00a60ce093b5124280f23335745338af Mon Sep 17 00:00:00 2001
+From ee967d3a8950a3d9673751cb872b63c121d2c9da Mon Sep 17 00:00:00 2001
From: Mike Galbraith <efault at gmx.de>
Date: Wed, 7 Dec 2011 12:48:42 +0100
-Subject: [PATCH 024/304] intel_idle: Convert i7300_idle_lock to raw spinlock
+Subject: [PATCH 024/303] intel_idle: Convert i7300_idle_lock to raw spinlock
24 core Intel box's first exposure to 3.0.12-rt30-rc3 didn't go well.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0025-mm-memcg-shorten-preempt-disabled-section-around-eve.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0025-mm-memcg-shorten-preempt-disabled-section-around-eve.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0025-mm-memcg-shorten-preempt-disabled-section-around-eve.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 251d1e06396395531e508ac1b27a8521f6fa5a87 Mon Sep 17 00:00:00 2001
+From 94aff528835e6abb7c4e945aac307446f69a0c8d Mon Sep 17 00:00:00 2001
From: Johannes Weiner <hannes at cmpxchg.org>
Date: Thu, 17 Nov 2011 07:49:25 +0100
-Subject: [PATCH 025/304] mm: memcg: shorten preempt-disabled section around
+Subject: [PATCH 025/303] mm: memcg: shorten preempt-disabled section around
event checks
Only the ratelimit checks themselves have to run with preemption
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0026-tracing-Account-for-preempt-off-in-preempt_schedule.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0026-tracing-Account-for-preempt-off-in-preempt_schedule.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0026-tracing-Account-for-preempt-off-in-preempt_schedule.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d95e6628fd3943310c74412d5bfec78b29350c96 Mon Sep 17 00:00:00 2001
+From eca5f48be2a6cda39cad86840df4f594892b36a1 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt at goodmis.org>
Date: Thu, 29 Sep 2011 12:24:30 -0500
-Subject: [PATCH 026/304] tracing: Account for preempt off in
+Subject: [PATCH 026/303] tracing: Account for preempt off in
preempt_schedule()
The preempt_schedule() uses the preempt_disable_notrace() version
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0027-signal-revert-ptrace-preempt-magic.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0027-signal-revert-ptrace-preempt-magic.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0027-signal-revert-ptrace-preempt-magic.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 400e756cce250e3d3fa5080873286dfe3828eebc Mon Sep 17 00:00:00 2001
+From b9c63e317802e3eb05d089af763912d9e826da75 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
-Subject: [PATCH 027/304] signal-revert-ptrace-preempt-magic.patch
+Subject: [PATCH 027/303] signal-revert-ptrace-preempt-magic.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0028-arm-Mark-pmu-interupt-IRQF_NO_THREAD.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0028-arm-Mark-pmu-interupt-IRQF_NO_THREAD.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0028-arm-Mark-pmu-interupt-IRQF_NO_THREAD.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 6d14c31a815a39dfcbdbea599e1267744ff8b639 Mon Sep 17 00:00:00 2001
+From a6cb98db14795c7cca27f88a5cc87375437a371a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 16 Mar 2011 14:45:31 +0100
-Subject: [PATCH 028/304] arm: Mark pmu interupt IRQF_NO_THREAD
+Subject: [PATCH 028/303] arm: Mark pmu interupt IRQF_NO_THREAD
PMU interrupt must not be threaded. Remove IRQF_DISABLED while at it
as we run all handlers with interrupts disabled anyway.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0029-arm-Allow-forced-irq-threading.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0029-arm-Allow-forced-irq-threading.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0029-arm-Allow-forced-irq-threading.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c2978053947a93982770e496b026f092b91414d9 Mon Sep 17 00:00:00 2001
+From f651b42b4bc0577be4ef2893d38617db5620d576 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 13:15:20 +0200
-Subject: [PATCH 029/304] arm: Allow forced irq threading
+Subject: [PATCH 029/303] arm: Allow forced irq threading
All timer interrupts and the perf interrupt are marked NO_THREAD, so
its safe to allow forced interrupt threading.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0030-preempt-rt-Convert-arm-boot_lock-to-raw.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0030-preempt-rt-Convert-arm-boot_lock-to-raw.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0030-preempt-rt-Convert-arm-boot_lock-to-raw.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 373b5a215176a58339eb28f0dfc054869081466e Mon Sep 17 00:00:00 2001
+From bad9d34a1bdca41558e454f2d0fd1f18e5bfe906 Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand at am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
-Subject: [PATCH 030/304] preempt-rt: Convert arm boot_lock to raw
+Subject: [PATCH 030/303] preempt-rt: Convert arm boot_lock to raw
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0031-sched-Create-schedule_preempt_disabled.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0031-sched-Create-schedule_preempt_disabled.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0031-sched-Create-schedule_preempt_disabled.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 6b2821a7378751a201db2bb689e13ae561758854 Mon Sep 17 00:00:00 2001
+From 3615037604145cd836580924640874d72ec6333f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 21 Mar 2011 12:09:35 +0100
-Subject: [PATCH 031/304] sched: Create schedule_preempt_disabled()
+Subject: [PATCH 031/303] sched: Create schedule_preempt_disabled()
Get rid of the ever repeating:
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0032-sched-Use-schedule_preempt_disabled.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0032-sched-Use-schedule_preempt_disabled.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0032-sched-Use-schedule_preempt_disabled.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e6b84f28260590a7645ee75fdf2510647f54ff5a Mon Sep 17 00:00:00 2001
+From dafde92bde5c9bbef508269c0959403be9ad89e4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 21 Mar 2011 12:33:18 +0100
-Subject: [PATCH 032/304] sched: Use schedule_preempt_disabled()
+Subject: [PATCH 032/303] sched: Use schedule_preempt_disabled()
Coccinelle based conversion.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0033-signals-Do-not-wakeup-self.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0033-signals-Do-not-wakeup-self.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0033-signals-Do-not-wakeup-self.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 030f38fae76c1766401fca464a1c8a240be9b3fe Mon Sep 17 00:00:00 2001
+From 76ec433176eaa52d0ab8ff27fbaeb70744268430 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:44 -0500
-Subject: [PATCH 033/304] signals: Do not wakeup self
+Subject: [PATCH 033/303] signals: Do not wakeup self
Signals which are delivered by current to current can do without
waking up current :)
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0034-posix-timers-Prevent-broadcast-signals.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0034-posix-timers-Prevent-broadcast-signals.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0034-posix-timers-Prevent-broadcast-signals.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 6ebb8f5810a30ce5b3088fde1b8ae7ab012ea536 Mon Sep 17 00:00:00 2001
+From aa74f58dd38a14d1c82983c29096ece9784636ce Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:29:20 -0500
-Subject: [PATCH 034/304] posix-timers: Prevent broadcast signals
+Subject: [PATCH 034/303] posix-timers: Prevent broadcast signals
Posix timers should not send broadcast signals and kernel only
signals. Prevent it.
@@ -12,7 +12,7 @@
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 69185ae..7b73c34 100644
+index e885be1..4b7183c 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0035-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0035-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0035-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From ba43b4f4c87499a38d459f0cdc73922c5ffc266b Mon Sep 17 00:00:00 2001
+From 9c25950985cb50a5af826baf543d9740779caecc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:56 -0500
-Subject: [PATCH 035/304] signals: Allow rt tasks to cache one sigqueue struct
+Subject: [PATCH 035/303] signals: Allow rt tasks to cache one sigqueue struct
To avoid allocation allow rt tasks to cache one sigqueue struct in
task struct.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0036-signal-x86-Delay-calling-signals-in-atomic.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0036-signal-x86-Delay-calling-signals-in-atomic.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0036-signal-x86-Delay-calling-signals-in-atomic.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 95e7299a51d9415ce8e4f7ffb7568c83ab9fbc65 Mon Sep 17 00:00:00 2001
+From 7c6014e343b7fb67d41967c1a8f105894da237bc Mon Sep 17 00:00:00 2001
From: Oleg Nesterov <oleg at redhat.com>
Date: Tue, 10 Apr 2012 14:33:53 -0400
-Subject: [PATCH 036/304] signal/x86: Delay calling signals in atomic
+Subject: [PATCH 036/303] signal/x86: Delay calling signals in atomic
On x86_64 we must disable preemption before we enable interrupts
for stack faults, int3 and debugging, because the current task is using
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0037-generic-Use-raw-local-irq-variant-for-generic-cmpxch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0037-generic-Use-raw-local-irq-variant-for-generic-cmpxch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0037-generic-Use-raw-local-irq-variant-for-generic-cmpxch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From b1445fb4d3e88a1ae0ce07a774810d67665f7f5c Mon Sep 17 00:00:00 2001
+From cb8615f7be1b3ee6b0920dc084563ea2511b41fd Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
-Subject: [PATCH 037/304] generic: Use raw local irq variant for generic
+Subject: [PATCH 037/303] generic: Use raw local irq variant for generic
cmpxchg
No point in tracing those.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0038-drivers-random-Reduce-preempt-disabled-region.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0038-drivers-random-Reduce-preempt-disabled-region.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0038-drivers-random-Reduce-preempt-disabled-region.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 2ab3a7466cb200dd2b9f5429056897bfaaa0e926 Mon Sep 17 00:00:00 2001
+From a3d9487fb5cd51156bbac763c653e1f3f634a7dd Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
-Subject: [PATCH 038/304] drivers: random: Reduce preempt disabled region
+Subject: [PATCH 038/303] drivers: random: Reduce preempt disabled region
No need to keep preemption disabled across the whole function.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0039-ARM-AT91-PIT-Remove-irq-handler-when-clock-event-is-.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0039-ARM-AT91-PIT-Remove-irq-handler-when-clock-event-is-.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0039-ARM-AT91-PIT-Remove-irq-handler-when-clock-event-is-.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 2e274c61568f55e2e864014e2212d46b50563477 Mon Sep 17 00:00:00 2001
+From 2c8aefd0a91ec578f2350bb168dec508e9c9e3a2 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger at linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
-Subject: [PATCH 039/304] ARM: AT91: PIT: Remove irq handler when clock event
+Subject: [PATCH 039/303] ARM: AT91: PIT: Remove irq handler when clock event
is unused
Setup and remove the interrupt handler in clock event mode selection.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0040-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0040-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0040-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 182e525c72fbd5ff6736f5b8c469074386279bb9 Mon Sep 17 00:00:00 2001
+From bf0684cefd32d2e2dfd97eadc4931aa885c5f687 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger at linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
-Subject: [PATCH 040/304] clocksource: TCLIB: Allow higher clock rates for
+Subject: [PATCH 040/303] clocksource: TCLIB: Allow higher clock rates for
clock events
As default the TCLIB uses the 32KiHz base clock rate for clock events.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0041-drivers-net-tulip_remove_one-needs-to-call-pci_disab.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0041-drivers-net-tulip_remove_one-needs-to-call-pci_disab.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0041-drivers-net-tulip_remove_one-needs-to-call-pci_disab.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e662343f9b2b9d59ac2c91c1ac189f57a1e52fd9 Mon Sep 17 00:00:00 2001
+From ada49c3cdefbe72955748b6888f840694cf05878 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:18 -0500
-Subject: [PATCH 041/304] drivers/net: tulip_remove_one needs to call
+Subject: [PATCH 041/303] drivers/net: tulip_remove_one needs to call
pci_disable_device()
Otherwise the device is not completely shut down.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0042-drivers-net-Use-disable_irq_nosync-in-8139too.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0042-drivers-net-Use-disable_irq_nosync-in-8139too.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0042-drivers-net-Use-disable_irq_nosync-in-8139too.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 6736ba7b4f70b60a50e27793d32929edf12b0f4c Mon Sep 17 00:00:00 2001
+From 7d4f523ad93f72233a34155b4d31e408ce22eec9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
-Subject: [PATCH 042/304] drivers/net: Use disable_irq_nosync() in 8139too
+Subject: [PATCH 042/303] drivers/net: Use disable_irq_nosync() in 8139too
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0043-drivers-net-ehea-Make-rx-irq-handler-non-threaded-IR.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0043-drivers-net-ehea-Make-rx-irq-handler-non-threaded-IR.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0043-drivers-net-ehea-Make-rx-irq-handler-non-threaded-IR.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 833eb6940d94f7311fdffb6850b76af7a05c7895 Mon Sep 17 00:00:00 2001
+From 3cced7c03b2e86e2062c8ce3bfe67792f21c051b Mon Sep 17 00:00:00 2001
From: Darren Hart <dvhltc at us.ibm.com>
Date: Tue, 18 May 2010 14:33:07 -0700
-Subject: [PATCH 043/304] drivers: net: ehea: Make rx irq handler non-threaded
+Subject: [PATCH 043/303] drivers: net: ehea: Make rx irq handler non-threaded
(IRQF_NO_THREAD)
The underlying hardware is edge triggered but presented by XICS as level
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0044-drivers-net-at91_ether-Make-mdio-protection-rt-safe.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0044-drivers-net-at91_ether-Make-mdio-protection-rt-safe.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0044-drivers-net-at91_ether-Make-mdio-protection-rt-safe.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From cc15ea4ee68cabbef9b5468b6f3b7df5400975b4 Mon Sep 17 00:00:00 2001
+From e4cc48946055baa4c971b22b1e8d6670e52d2f5a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 17 Nov 2009 12:02:43 +0100
-Subject: [PATCH 044/304] drivers: net: at91_ether: Make mdio protection -rt
+Subject: [PATCH 044/303] drivers: net: at91_ether: Make mdio protection -rt
safe
Neither the phy interrupt nor the timer callback which updates the
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0045-preempt-mark-legitimated-no-resched-sites.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0045-preempt-mark-legitimated-no-resched-sites.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0045-preempt-mark-legitimated-no-resched-sites.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 266d052b682d69726b52c256f41b3cf9d5daa7b9 Mon Sep 17 00:00:00 2001
+From ef5a3ffd7a493e0bd304b8692c91da3444b05290 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 21 Mar 2011 13:32:17 +0100
-Subject: [PATCH 045/304] preempt-mark-legitimated-no-resched-sites.patch
+Subject: [PATCH 045/303] preempt-mark-legitimated-no-resched-sites.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0046-mm-Prepare-decoupling-the-page-fault-disabling-logic.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0046-mm-Prepare-decoupling-the-page-fault-disabling-logic.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0046-mm-Prepare-decoupling-the-page-fault-disabling-logic.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From fcfd59b2378ad30a96503e0c6ed42b6c2344ad66 Mon Sep 17 00:00:00 2001
+From 5824badf179c3ac4e502760f7a4f3511cf6c2255 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:37 -0500
-Subject: [PATCH 046/304] mm: Prepare decoupling the page fault disabling
+Subject: [PATCH 046/303] mm: Prepare decoupling the page fault disabling
logic
Add a pagefault_disabled variable to task_struct to allow decoupling
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0047-mm-Fixup-all-fault-handlers-to-check-current-pagefau.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0047-mm-Fixup-all-fault-handlers-to-check-current-pagefau.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0047-mm-Fixup-all-fault-handlers-to-check-current-pagefau.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f4f1fd8c52d8ec8137a13bc69cf25241c2fea7bd Mon Sep 17 00:00:00 2001
+From ac84ffba5033135b4597a16243d63adf6fcad07d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 17 Mar 2011 11:32:28 +0100
-Subject: [PATCH 047/304] mm: Fixup all fault handlers to check
+Subject: [PATCH 047/303] mm: Fixup all fault handlers to check
current->pagefault_disable
Necessary for decoupling pagefault disable from preempt count.
@@ -307,10 +307,10 @@
down_read(&mm->mmap_sem);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 5db0490..191015f 100644
+index 7b73c88..dd2643f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -1084,7 +1084,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1086,7 +1086,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0048-mm-pagefault_disabled.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0048-mm-pagefault_disabled.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0048-mm-pagefault_disabled.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 6bff7bda0c7e888fb60ea84d7a7d95131c3f624c Mon Sep 17 00:00:00 2001
+From 667efb47284c1435a968a193934cd477ff2f230f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Thu, 11 Aug 2011 15:31:31 +0200
-Subject: [PATCH 048/304] mm: pagefault_disabled()
+Subject: [PATCH 048/303] mm: pagefault_disabled()
Wrap the test for pagefault_disabled() into a helper, this allows us
to remove the need for current->pagefault_disabled on !-rt kernels.
@@ -310,10 +310,10 @@
down_read(&mm->mmap_sem);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 191015f..b567837 100644
+index dd2643f..27ff261 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -1084,7 +1084,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1086,7 +1086,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0049-mm-raw_pagefault_disable.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0049-mm-raw_pagefault_disable.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0049-mm-raw_pagefault_disable.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 07ad583dc129c7e19f046dff7bc9bfb5f8a22ab2 Mon Sep 17 00:00:00 2001
+From 7e3abc2e04e80a96c2c21fffa8fbdcea388992c8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Fri, 5 Aug 2011 17:16:58 +0200
-Subject: [PATCH 049/304] mm: raw_pagefault_disable
+Subject: [PATCH 049/303] mm: raw_pagefault_disable
Adding migrate_disable() to pagefault_disable() to preserve the
per-cpu thing for kmap_atomic might not have been the best of choices.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0050-filemap-fix-up.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0050-filemap-fix-up.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0050-filemap-fix-up.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 297bcc9ab84b792a784f6533a5d121b6e403b8a8 Mon Sep 17 00:00:00 2001
+From 54d2fca5b4d09d037839648146b51b5177062847 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Jun 2011 18:56:24 +0200
-Subject: [PATCH 050/304] filemap-fix-up.patch
+Subject: [PATCH 050/303] filemap-fix-up.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Wrecked-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0051-mm-Remove-preempt-count-from-pagefault-disable-enabl.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0051-mm-Remove-preempt-count-from-pagefault-disable-enabl.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0051-mm-Remove-preempt-count-from-pagefault-disable-enabl.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c6dbea047b2897ae9ef6fcc7ef6c59c890b8d2f3 Mon Sep 17 00:00:00 2001
+From 8e0cf3aa85f1ccf4d6de43d62b5a1ecc0736fb2a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 25 Jul 2009 22:06:27 +0200
-Subject: [PATCH 051/304] mm: Remove preempt count from pagefault
+Subject: [PATCH 051/303] mm: Remove preempt count from pagefault
disable/enable
Now that all users are cleaned up, we can remove the preemption count.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0052-x86-highmem-Replace-BUG_ON-by-WARN_ON.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0052-x86-highmem-Replace-BUG_ON-by-WARN_ON.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0052-x86-highmem-Replace-BUG_ON-by-WARN_ON.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 94957597220fea9ba8c4ecb73bc3c361f2fab722 Mon Sep 17 00:00:00 2001
+From df82c7004d2903832270ebca52899b7f9c8e9f9c Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:25 -0500
-Subject: [PATCH 052/304] x86: highmem: Replace BUG_ON by WARN_ON
+Subject: [PATCH 052/303] x86: highmem: Replace BUG_ON by WARN_ON
The machine might survive that problem and be at least in a state
which allows us to get more information about the problem.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0053-suspend-Prevent-might-sleep-splats.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0053-suspend-Prevent-might-sleep-splats.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0053-suspend-Prevent-might-sleep-splats.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 202426659e5d56754924cf56a4f5dbe919aa335a Mon Sep 17 00:00:00 2001
+From 1fa0af1722d746850b3a7c8290d25a0280c666ef Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 15 Jul 2010 10:29:00 +0200
-Subject: [PATCH 053/304] suspend: Prevent might sleep splats
+Subject: [PATCH 053/303] suspend: Prevent might sleep splats
timekeeping suspend/resume calls read_persistant_clock() which takes
rtc_lock. That results in might sleep warnings because at that point
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0054-OF-Fixup-resursive-locking-code-paths.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0054-OF-Fixup-resursive-locking-code-paths.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0054-OF-Fixup-resursive-locking-code-paths.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 845bfe99270374b1b86095514db2facd733bc8f7 Mon Sep 17 00:00:00 2001
+From 2d40f5d5f18888cabb8f25332713aaac308e175f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 13 Aug 2009 09:04:10 +0200
-Subject: [PATCH 054/304] OF: Fixup resursive locking code paths
+Subject: [PATCH 054/303] OF: Fixup resursive locking code paths
There is no real reason to use a rwlock for devtree_lock. It even
could be a mutex, but unfortunately it's locked from cpu hotplug
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0055-of-convert-devtree-lock.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0055-of-convert-devtree-lock.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0055-of-convert-devtree-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 372c31da91bca92021b6172271d4d2d745c94c9d Mon Sep 17 00:00:00 2001
+From 6e4051eef15d923fa09892330b95f95adab479f9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 21 Mar 2011 14:35:34 +0100
-Subject: [PATCH 055/304] of-convert-devtree-lock.patch
+Subject: [PATCH 055/303] of-convert-devtree-lock.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0056-list-add-list-last-entry.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0056-list-add-list-last-entry.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0056-list-add-list-last-entry.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From bd81fc28e6f279914af97142b0e28747a20406a6 Mon Sep 17 00:00:00 2001
+From ba6226c866fbef50a68906ea450766fe7d8fb90b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Tue, 21 Jun 2011 11:22:36 +0200
-Subject: [PATCH 056/304] list-add-list-last-entry.patch
+Subject: [PATCH 056/303] list-add-list-last-entry.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0057-mm-page-alloc-use-list-last-entry.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0057-mm-page-alloc-use-list-last-entry.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0057-mm-page-alloc-use-list-last-entry.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e29e54d40aabab8f37171b5b20dd837937146421 Mon Sep 17 00:00:00 2001
+From 738db86bf2af8ae353c5424b362d9d2700df25bd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Tue, 21 Jun 2011 11:24:35 +0200
-Subject: [PATCH 057/304] mm-page-alloc-use-list-last-entry.patch
+Subject: [PATCH 057/303] mm-page-alloc-use-list-last-entry.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -9,7 +9,7 @@
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 4d3a697..8bba0c4 100644
+index 5c028e2..c106723 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -625,7 +625,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0058-mm-slab-move-debug-out.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0058-mm-slab-move-debug-out.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0058-mm-slab-move-debug-out.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 53b76885dcc890e69111080ca8d518b589997e28 Mon Sep 17 00:00:00 2001
+From 8ede4d4aad479b14cf906c28a703ec9215661a58 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 20 Jun 2011 10:42:04 +0200
-Subject: [PATCH 058/304] mm-slab-move-debug-out.patch
+Subject: [PATCH 058/303] mm-slab-move-debug-out.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -9,7 +9,7 @@
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
-index 61dfda3..f807b35f 100644
+index 61dfda3..f807b35 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3857,10 +3857,10 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0059-rwsem-inlcude-fix.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0059-rwsem-inlcude-fix.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0059-rwsem-inlcude-fix.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 7b01859415951296b8544bec297d123bc729b06d Mon Sep 17 00:00:00 2001
+From b0028f07038cc529ebee11654c23d9c14e9553ab Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 15 Jul 2011 21:24:27 +0200
-Subject: [PATCH 059/304] rwsem-inlcude-fix.patch
+Subject: [PATCH 059/303] rwsem-inlcude-fix.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0060-sysctl-include-fix.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0060-sysctl-include-fix.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0060-sysctl-include-fix.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From be476ef118242c22509fbff94daeac7bb89dcb7a Mon Sep 17 00:00:00 2001
+From ec921563b7dc0454d71d8a74a011e9fd9163ba0c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 14 Nov 2011 10:52:34 +0100
-Subject: [PATCH 060/304] sysctl-include-fix.patch
+Subject: [PATCH 060/303] sysctl-include-fix.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0061-net-flip-lock-dep-thingy.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0061-net-flip-lock-dep-thingy.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0061-net-flip-lock-dep-thingy.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e08f997ba36e0d2f64e1a1a385e5140334a61a08 Mon Sep 17 00:00:00 2001
+From 27abfc9c65f79639abfed01cdfde8f6a29e9b539 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Jun 2011 10:59:58 +0200
-Subject: [PATCH 061/304] net-flip-lock-dep-thingy.patch
+Subject: [PATCH 061/303] net-flip-lock-dep-thingy.patch
=======================================================
[ INFO: possible circular locking dependency detected ]
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0062-softirq-thread-do-softirq.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0062-softirq-thread-do-softirq.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0062-softirq-thread-do-softirq.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9cc11141e83d799b5b6b25423f80f8310e1c9227 Mon Sep 17 00:00:00 2001
+From d1de55c45a61d8a73e1639a2fb605cab9a8f9daf Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Jun 2011 15:44:15 +0200
-Subject: [PATCH 062/304] softirq-thread-do-softirq.patch
+Subject: [PATCH 062/303] softirq-thread-do-softirq.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0063-softirq-split-out-code.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0063-softirq-split-out-code.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0063-softirq-split-out-code.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f053bf7d7baab5ffae7f0a8a1c12f669829fe3a5 Mon Sep 17 00:00:00 2001
+From 2b2e8afcab538e99da342205942e0aa3ab828359 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Jun 2011 15:46:49 +0200
-Subject: [PATCH 063/304] softirq-split-out-code.patch
+Subject: [PATCH 063/303] softirq-split-out-code.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0064-x86-Do-not-unmask-io_apic-when-interrupt-is-in-progr.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0064-x86-Do-not-unmask-io_apic-when-interrupt-is-in-progr.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0064-x86-Do-not-unmask-io_apic-when-interrupt-is-in-progr.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d403eeae9b01308ec5be39d10c63d2c1420f1335 Mon Sep 17 00:00:00 2001
+From 055da120e8fbfae70089b1ce4d38fceb15ec50ee Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:27 -0500
-Subject: [PATCH 064/304] x86: Do not unmask io_apic when interrupt is in
+Subject: [PATCH 064/303] x86: Do not unmask io_apic when interrupt is in
progress
With threaded interrupts we might see an interrupt in progress on
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0065-x86-32-fix-signal-crap.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0065-x86-32-fix-signal-crap.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0065-x86-32-fix-signal-crap.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 1f97ca7e31f94bda2bd3ed67aa50a1f2760ee44d Mon Sep 17 00:00:00 2001
+From 146c170c81e6d682886ae29d5152758890f36061 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 15:59:38 +0200
-Subject: [PATCH 065/304] x86-32-fix-signal-crap.patch
+Subject: [PATCH 065/303] x86-32-fix-signal-crap.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0066-x86-Do-not-disable-preemption-in-int3-on-32bit.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0066-x86-Do-not-disable-preemption-in-int3-on-32bit.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0066-x86-Do-not-disable-preemption-in-int3-on-32bit.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c7a7fa22f991f4eb1ffac46a226354929f13ecdc Mon Sep 17 00:00:00 2001
+From 84e9dc7dd191eb238ddd5e5455f4ea6d1d85b705 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt at goodmis.org>
Date: Tue, 10 Apr 2012 14:33:57 -0400
-Subject: [PATCH 066/304] x86: Do not disable preemption in int3 on 32bit
+Subject: [PATCH 066/303] x86: Do not disable preemption in int3 on 32bit
Preemption must be disabled before enabling interrupts in do_trap
on x86_64 because the stack in use for int3 and debug is a per CPU
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0067-rcu-Reduce-lock-section.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0067-rcu-Reduce-lock-section.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0067-rcu-Reduce-lock-section.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 4fc322e833f252d29c293fdd3685b79b7092d9c1 Mon Sep 17 00:00:00 2001
+From a3d2d66a806ebefdeb48a560afd416963514d97c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 24 Jun 2011 22:23:02 +0200
-Subject: [PATCH 067/304] rcu: Reduce lock section
+Subject: [PATCH 067/303] rcu: Reduce lock section
So the waitqueue wakeup is outside the raw locked section.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0068-locking-various-init-fixes.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0068-locking-various-init-fixes.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0068-locking-various-init-fixes.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 724a2ae6e20011b4bf1e2739b842e2052d3314d9 Mon Sep 17 00:00:00 2001
+From 47c1f8d4a24429f6b91e2e5339e5c993dc47e584 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:25:03 +0200
-Subject: [PATCH 068/304] locking-various-init-fixes.patch
+Subject: [PATCH 068/303] locking-various-init-fixes.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -70,7 +70,7 @@
/*
diff --git a/include/linux/idr.h b/include/linux/idr.h
-index 255491c..4eaacf0 100644
+index 52a9da2..dffed8f 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -136,7 +136,7 @@ struct ida {
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0069-wait-Provide-__wake_up_all_locked.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0069-wait-Provide-__wake_up_all_locked.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0069-wait-Provide-__wake_up_all_locked.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 468c961556aa22e44a74a0d741fcd7a70a748abe Mon Sep 17 00:00:00 2001
+From 6559618f5e3a816ee361564f702bdcdaa4b3a017 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 1 Dec 2011 00:04:00 +0100
-Subject: [PATCH 069/304] wait: Provide __wake_up_all_locked
+Subject: [PATCH 069/303] wait: Provide __wake_up_all_locked
For code which protects the waitqueue itself with another lock it
makes no sense to acquire the waitqueue lock for wakeup all. Provide
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0070-pci-Use-__wake_up_all_locked-pci_unblock_user_cfg_ac.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0070-pci-Use-__wake_up_all_locked-pci_unblock_user_cfg_ac.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0070-pci-Use-__wake_up_all_locked-pci_unblock_user_cfg_ac.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8d90e23f8cd2a5b3f7bfa3b357e0eb9224e2e0e3 Mon Sep 17 00:00:00 2001
+From 7a6fadabad1f189f88a627ef3fa6ab2c7ede9ade Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 1 Dec 2011 00:07:16 +0100
-Subject: [PATCH 070/304] pci: Use __wake_up_all_locked
+Subject: [PATCH 070/303] pci: Use __wake_up_all_locked
pci_unblock_user_cfg_access()
The waitqueue is protected by the pci_lock, so we can just avoid to
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0071-latency-hist.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0071-latency-hist.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0071-latency-hist.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From ebc864b47368c454552e3b4e11327cd51fd33a2d Mon Sep 17 00:00:00 2001
+From 3171c4504134a57e43781a9b51828fc146128e8c Mon Sep 17 00:00:00 2001
From: Carsten Emde <C.Emde at osadl.org>
Date: Tue, 19 Jul 2011 14:03:41 +0100
-Subject: [PATCH 071/304] latency-hist.patch
+Subject: [PATCH 071/303] latency-hist.patch
This patch provides a recording mechanism to store data of potential
sources of system latencies. The recordings separately determine the
@@ -349,7 +349,7 @@
+#endif /* _LATENCY_HIST_H */
+
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 6db7a5e..7eec82d 100644
+index cdd5607..3f7f39e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -49,6 +49,7 @@
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0072-hwlatdetect.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0072-hwlatdetect.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0072-hwlatdetect.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 2f9c2c62b122ae922168c6cc07889dc255e97888 Mon Sep 17 00:00:00 2001
+From caff98c2f4d6a5977aa6457e8a300a5a4bc8a3e7 Mon Sep 17 00:00:00 2001
From: Carsten Emde <C.Emde at osadl.org>
Date: Tue, 19 Jul 2011 13:53:12 +0100
-Subject: [PATCH 072/304] hwlatdetect.patch
+Subject: [PATCH 072/303] hwlatdetect.patch
Jon Masters developed this wonderful SMI detector. For details please
consult Documentation/hwlat_detector.txt. It could be ported to Linux
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0073-localversion.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0073-localversion.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0073-localversion.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 03dbaa1f746a07353688a71bfe59ff72f5bab48c Mon Sep 17 00:00:00 2001
+From 11749eeb34f9faae1f9dcc17ca4b0a50b92c29a2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 8 Jul 2011 20:25:16 +0200
-Subject: [PATCH 073/304] localversion.patch
+Subject: [PATCH 073/303] localversion.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0074-early-printk-consolidate.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0074-early-printk-consolidate.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0074-early-printk-consolidate.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d9ac8aa2deb235c5185e34bca1c3f822edd7a21b Mon Sep 17 00:00:00 2001
+From d99dc9e82fc1ab4e4b1263f4a27d5be72d8ae6a7 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 23 Jul 2011 11:04:08 +0200
-Subject: [PATCH 074/304] early-printk-consolidate.patch
+Subject: [PATCH 074/303] early-printk-consolidate.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -420,10 +420,10 @@
keep = (strstr(buf, "keep") != NULL);
diff --git a/include/linux/console.h b/include/linux/console.h
-index 7453cfd..e5b5dc0 100644
+index 6ae6a15..5695804 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -133,6 +133,7 @@ struct console {
+@@ -135,6 +135,7 @@ struct console {
for (con = console_drivers; con != NULL; con = con->next)
extern int console_set_on_cmdline;
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0075-printk-kill.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0075-printk-kill.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0075-printk-kill.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From cb2335f8f3dd17f4e21c3213cd2344a61c4799f4 Mon Sep 17 00:00:00 2001
+From f10a11b623ccf6c5102b184b834bb4c4b2ab58cc Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 22 Jul 2011 17:58:40 +0200
-Subject: [PATCH 075/304] printk-kill.patch
+Subject: [PATCH 075/303] printk-kill.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0076-printk-force_early_printk-boot-param-to-help-with-de.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0076-printk-force_early_printk-boot-param-to-help-with-de.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0076-printk-force_early_printk-boot-param-to-help-with-de.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d599e3727f16bb30b4d1d90bebcf63ad4f894680 Mon Sep 17 00:00:00 2001
+From 94060eb745bae0238efe8653aa1c23235d4ddbcd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Fri, 2 Sep 2011 14:29:33 +0200
-Subject: [PATCH 076/304] printk: 'force_early_printk' boot param to help with
+Subject: [PATCH 076/303] printk: 'force_early_printk' boot param to help with
debugging
Gives me an option to screw printk and actually see what the machine
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0077-rt-preempt-base-config.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0077-rt-preempt-base-config.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0077-rt-preempt-base-config.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From af76eb7ed94a35a5a55a740bbc448fc152525a78 Mon Sep 17 00:00:00 2001
+From aff9eeeba097e23902b6580288d01a5f6c890bba Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Jun 2011 12:39:57 +0200
-Subject: [PATCH 077/304] rt-preempt-base-config.patch
+Subject: [PATCH 077/303] rt-preempt-base-config.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0078-bug-BUG_ON-WARN_ON-variants-dependend-on-RT-RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0078-bug-BUG_ON-WARN_ON-variants-dependend-on-RT-RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0078-bug-BUG_ON-WARN_ON-variants-dependend-on-RT-RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From be6f0ef17eab2987d46a167f0a4ac81d027d4cf1 Mon Sep 17 00:00:00 2001
+From ab3d637df32862f12facb1d9952584adfdfdf37d Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:58 -0500
-Subject: [PATCH 078/304] bug: BUG_ON/WARN_ON variants dependend on RT/!RT
+Subject: [PATCH 078/303] bug: BUG_ON/WARN_ON variants dependend on RT/!RT
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0079-rt-local_irq_-variants-depending-on-RT-RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0079-rt-local_irq_-variants-depending-on-RT-RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0079-rt-local_irq_-variants-depending-on-RT-RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 0a67aca4ee90f40faa9131ad266d3a33e2a2f211 Mon Sep 17 00:00:00 2001
+From 7b661a6fcb15f3be11a306930b110ad2c2c52604 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 22:34:14 +0200
-Subject: [PATCH 079/304] rt: local_irq_* variants depending on RT/!RT
+Subject: [PATCH 079/303] rt: local_irq_* variants depending on RT/!RT
Add local_irq_*_(no)rt variant which are mainly used to break
interrupt disabled sections on PREEMPT_RT or to explicitely disable
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0080-preempt-Provide-preempt_-_-no-rt-variants.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0080-preempt-Provide-preempt_-_-no-rt-variants.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0080-preempt-Provide-preempt_-_-no-rt-variants.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 80e6c37e14dc1d47f031eb180a30ee8faaf08da1 Mon Sep 17 00:00:00 2001
+From ab71f08ff9d1b90312b81e81256ed9fbed434356 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 24 Jul 2009 12:38:56 +0200
-Subject: [PATCH 080/304] preempt: Provide preempt_*_(no)rt variants
+Subject: [PATCH 080/303] preempt: Provide preempt_*_(no)rt variants
RT needs a few preempt_disable/enable points which are not necessary
otherwise. Implement variants to avoid #ifdeffery.
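As a point of reference, the (no)rt variants referred to here typically reduce to definitions of the following kind; this is an illustrative C sketch, not text from this diff, and the exact macro set in the RT series may differ:

    /* Sketch: callers use one name, the config decides what it means. */
    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_disable_rt()     preempt_disable()   /* needed on RT only */
    # define preempt_enable_rt()      preempt_enable()
    # define preempt_disable_nort()   do { } while (0)    /* no-op on RT */
    # define preempt_enable_nort()    do { } while (0)
    #else
    # define preempt_disable_rt()     do { } while (0)
    # define preempt_enable_rt()      do { } while (0)
    # define preempt_disable_nort()   preempt_disable()   /* real thing on !RT */
    # define preempt_enable_nort()    preempt_enable()
    #endif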
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0081-ata-Do-not-disable-interrupts-in-ide-code-for-preemp.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0081-ata-Do-not-disable-interrupts-in-ide-code-for-preemp.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0081-ata-Do-not-disable-interrupts-in-ide-code-for-preemp.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 4a39ec6245923771db7402a39725d7768bc73e13 Mon Sep 17 00:00:00 2001
+From 82eb5398a74110a0ef6f4bdd67e4bac19366158b Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt at redhat.com>
Date: Fri, 3 Jul 2009 08:44:29 -0500
-Subject: [PATCH 081/304] ata: Do not disable interrupts in ide code for
+Subject: [PATCH 081/303] ata: Do not disable interrupts in ide code for
preempt-rt
Use the local_irq_*_nort variants.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0082-ide-Do-not-disable-interrupts-for-PREEMPT-RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0082-ide-Do-not-disable-interrupts-for-PREEMPT-RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0082-ide-Do-not-disable-interrupts-for-PREEMPT-RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 392e72c0d7bc6ee052c18fecfeb64a2073b80922 Mon Sep 17 00:00:00 2001
+From fb55ea20b724c68bb488d619800be08441771dc0 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
-Subject: [PATCH 082/304] ide: Do not disable interrupts for PREEMPT-RT
+Subject: [PATCH 082/303] ide: Do not disable interrupts for PREEMPT-RT
Use the local_irq_*_nort variants.
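For reference, the local_irq_*_nort() helpers mentioned above follow the same pattern; a minimal illustrative sketch (assumed, only approximately matching the RT series):

    /* Sketch: on RT the _nort variants leave interrupts enabled,
     * on !RT they behave like the plain local_irq_* calls. */
    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_disable_nort()      do { } while (0)
    # define local_irq_enable_nort()       do { } while (0)
    # define local_irq_save_nort(flags)    local_save_flags(flags)
    # define local_irq_restore_nort(flags) (void)(flags)
    #else
    # define local_irq_disable_nort()      local_irq_disable()
    # define local_irq_enable_nort()       local_irq_enable()
    # define local_irq_save_nort(flags)    local_irq_save(flags)
    # define local_irq_restore_nort(flags) local_irq_restore(flags)
    #endif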
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0083-infiniband-Mellanox-IB-driver-patch-use-_nort-primit.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0083-infiniband-Mellanox-IB-driver-patch-use-_nort-primit.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0083-infiniband-Mellanox-IB-driver-patch-use-_nort-primit.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9703a10e1e7db795109b9816fb079a96f77d30f1 Mon Sep 17 00:00:00 2001
+From c59d363c7a729ae2a0d17157cbf88270896664e9 Mon Sep 17 00:00:00 2001
From: Sven-Thorsten Dietrich <sdietrich at novell.com>
Date: Fri, 3 Jul 2009 08:30:35 -0500
-Subject: [PATCH 083/304] infiniband: Mellanox IB driver patch use _nort()
+Subject: [PATCH 083/303] infiniband: Mellanox IB driver patch use _nort()
primitives
Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0084-input-gameport-Do-not-disable-interrupts-on-PREEMPT_.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0084-input-gameport-Do-not-disable-interrupts-on-PREEMPT_.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0084-input-gameport-Do-not-disable-interrupts-on-PREEMPT_.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 7d6ae080fe95d4cb1c997b55b881f9c384239f04 Mon Sep 17 00:00:00 2001
+From 58ac6a1943ee3188d67506ff62086667be557063 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
-Subject: [PATCH 084/304] input: gameport: Do not disable interrupts on
+Subject: [PATCH 084/303] input: gameport: Do not disable interrupts on
PREEMPT_RT
Use the _nort() primitives.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0085-acpi-Do-not-disable-interrupts-on-PREEMPT_RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0085-acpi-Do-not-disable-interrupts-on-PREEMPT_RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0085-acpi-Do-not-disable-interrupts-on-PREEMPT_RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c37ed7c4130022ae9305796f3cba5798f4c6197d Mon Sep 17 00:00:00 2001
+From 71e8cc64f95e1df407623c47919e3b0224075c46 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 22:54:51 +0200
-Subject: [PATCH 085/304] acpi: Do not disable interrupts on PREEMPT_RT
+Subject: [PATCH 085/303] acpi: Do not disable interrupts on PREEMPT_RT
Use the local_irq_*_nort() variants.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0086-core-Do-not-disable-interrupts-on-RT-in-kernel-users.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0086-core-Do-not-disable-interrupts-on-RT-in-kernel-users.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0086-core-Do-not-disable-interrupts-on-RT-in-kernel-users.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 2d8925db60d4fc513681926749b704d652de40e0 Mon Sep 17 00:00:00 2001
+From 5b493bbf9d6dad51ec8ed0362eef80c088b0c230 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 23:06:05 +0200
-Subject: [PATCH 086/304] core: Do not disable interrupts on RT in
+Subject: [PATCH 086/303] core: Do not disable interrupts on RT in
kernel/users.c
Use the local_irq_*_nort variants to reduce latencies in RT. The code
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0087-core-Do-not-disable-interrupts-on-RT-in-res_counter..patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0087-core-Do-not-disable-interrupts-on-RT-in-res_counter..patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0087-core-Do-not-disable-interrupts-on-RT-in-res_counter..patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f8d9d67993bcf25317e56fc4a716b4fab0cf05c5 Mon Sep 17 00:00:00 2001
+From 3d361d88911f83457be7dd44f7d8f33fa5097c26 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:44:33 -0500
-Subject: [PATCH 087/304] core: Do not disable interrupts on RT in
+Subject: [PATCH 087/303] core: Do not disable interrupts on RT in
res_counter.c
Frederic Weisbecker reported this warning:
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0088-usb-Use-local_irq_-_nort-variants.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0088-usb-Use-local_irq_-_nort-variants.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0088-usb-Use-local_irq_-_nort-variants.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e22d2fce54fd4af7784b7adc19f427c759374e56 Mon Sep 17 00:00:00 2001
+From 235dc01600b9e96189896ad9b8186ca9c33b7856 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt at redhat.com>
Date: Fri, 3 Jul 2009 08:44:26 -0500
-Subject: [PATCH 088/304] usb: Use local_irq_*_nort() variants
+Subject: [PATCH 088/303] usb: Use local_irq_*_nort() variants
[ tglx: Now that irqf_disabled is dead we should kill that ]
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0089-tty-Do-not-disable-interrupts-in-put_ldisc-on-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0089-tty-Do-not-disable-interrupts-in-put_ldisc-on-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0089-tty-Do-not-disable-interrupts-in-put_ldisc-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 33faab2f1d8977f79fea7ffa54934b04870d2d23 Mon Sep 17 00:00:00 2001
+From 832ca4f7623527e5566737d81381e800e8edd778 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 17 Aug 2009 19:49:19 +0200
-Subject: [PATCH 089/304] tty: Do not disable interrupts in put_ldisc on -rt
+Subject: [PATCH 089/303] tty: Do not disable interrupts in put_ldisc on -rt
Fixes the following on PREEMPT_RT:
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0090-mm-scatterlist-dont-disable-irqs-on-RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0090-mm-scatterlist-dont-disable-irqs-on-RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0090-mm-scatterlist-dont-disable-irqs-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8a3fe8786194700aea791c3d4d2dd9c1f8aa371c Mon Sep 17 00:00:00 2001
+From db8b2236edf206a1d998cc955515b1cd3c643087 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
-Subject: [PATCH 090/304] mm: scatterlist dont disable irqs on RT
+Subject: [PATCH 090/303] mm: scatterlist dont disable irqs on RT
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0091-signal-fix-up-rcu-wreckage.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0091-signal-fix-up-rcu-wreckage.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0091-signal-fix-up-rcu-wreckage.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f2862ba8f2f66faf0511aa849a6e40101532afb9 Mon Sep 17 00:00:00 2001
+From 51c9393a85609bc8a97554c126807f785ae40b6b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 22 Jul 2011 08:07:08 +0200
-Subject: [PATCH 091/304] signal-fix-up-rcu-wreckage.patch
+Subject: [PATCH 091/303] signal-fix-up-rcu-wreckage.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0092-net-wireless-warn-nort.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0092-net-wireless-warn-nort.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0092-net-wireless-warn-nort.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 1bd9a8ede7e6f79e82cb86f504b63395c11f5e4c Mon Sep 17 00:00:00 2001
+From 2fb06ec26fdb630a0103197c6ef2daffcb1d2250 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 21 Jul 2011 21:05:33 +0200
-Subject: [PATCH 092/304] net-wireless-warn-nort.patch
+Subject: [PATCH 092/303] net-wireless-warn-nort.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0093-mm-Replace-cgroup_page-bit-spinlock.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0093-mm-Replace-cgroup_page-bit-spinlock.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0093-mm-Replace-cgroup_page-bit-spinlock.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 3e5e033c633a7a592d36a5a0fc1eaa286b0206ff Mon Sep 17 00:00:00 2001
+From ddcee770d5c706d499d547dca3a5df8974fd5b52 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 19 Aug 2009 09:56:42 +0200
-Subject: [PATCH 093/304] mm: Replace cgroup_page bit spinlock
+Subject: [PATCH 093/303] mm: Replace cgroup_page bit spinlock
Bit spinlocks are not working on RT. Replace them.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0094-buffer_head-Replace-bh_uptodate_lock-for-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0094-buffer_head-Replace-bh_uptodate_lock-for-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0094-buffer_head-Replace-bh_uptodate_lock-for-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 72822535e8bd5cb4a48f0072f39b55239f3ecd1f Mon Sep 17 00:00:00 2001
+From 52ad4a4c421b581ae60e4091cf3b46f4facde5d2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
-Subject: [PATCH 094/304] buffer_head: Replace bh_uptodate_lock for -rt
+Subject: [PATCH 094/303] buffer_head: Replace bh_uptodate_lock for -rt
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
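The wrapper described here usually looks roughly like the sketch below (helper and field names are assumed for illustration and may not match the actual patch, e.g. the spinlock member added to struct buffer_head):

    /* Sketch: one inline hides the locking scheme from fs code.
     * !RT keeps the bit spinlock; RT uses a real (rtmutex-based) spinlock
     * so the section stays preemptible. */
    static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
    {
            unsigned long flags;
    #ifndef CONFIG_PREEMPT_RT_BASE
            local_irq_save(flags);
            bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
    #else
            spin_lock_irqsave(&bh->b_uptodate_lock, flags); /* lock assumed added for RT */
    #endif
            return flags;
    }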
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0095-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0095-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0095-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 72820bad31dede5eefb81ae6549b3efc3e709109 Mon Sep 17 00:00:00 2001
+From a31a6204232e00ac5cee010869ee505682f5842f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
-Subject: [PATCH 095/304] fs: jbd/jbd2: Make state lock and journal head lock
+Subject: [PATCH 095/303] fs: jbd/jbd2: Make state lock and journal head lock
rt safe
bit_spin_locks break under RT.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0096-genirq-Disable-DEBUG_SHIRQ-for-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0096-genirq-Disable-DEBUG_SHIRQ-for-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0096-genirq-Disable-DEBUG_SHIRQ-for-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 77e4a14cd313dcc3d61918e9537d66d0429d5f7a Mon Sep 17 00:00:00 2001
+From 892ccc0b6f688d1b59d667ad4198068765a55dba Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 18 Mar 2011 10:22:04 +0100
-Subject: [PATCH 096/304] genirq: Disable DEBUG_SHIRQ for rt
+Subject: [PATCH 096/303] genirq: Disable DEBUG_SHIRQ for rt
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0097-genirq-Disable-random-call-on-preempt-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0097-genirq-Disable-random-call-on-preempt-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0097-genirq-Disable-random-call-on-preempt-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From b6f3c7f9e7ec80a9172a39cc9bed6571a4d36253 Mon Sep 17 00:00:00 2001
+From 606335d03f857c907d8d81b3accb102a4b680137 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 16:07:37 +0200
-Subject: [PATCH 097/304] genirq: Disable random call on preempt-rt
+Subject: [PATCH 097/303] genirq: Disable random call on preempt-rt
The random call introduces high latencies and is almost
unused. Disable it for -rt.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0098-genirq-disable-irqpoll-on-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0098-genirq-disable-irqpoll-on-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0098-genirq-disable-irqpoll-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 20890e1f59e28342c70d63e1b860990376e74f79 Mon Sep 17 00:00:00 2001
+From 46ff95f3d9cb02e822980ea68024a6ebbed5477c Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
-Subject: [PATCH 098/304] genirq: disable irqpoll on -rt
+Subject: [PATCH 098/303] genirq: disable irqpoll on -rt
Creates long latencies for no value
@@ -12,10 +12,10 @@
1 file changed, 10 insertions(+)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
-index dc813a9..d09e0f5 100644
+index 63633a3..e17e0f9 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
-@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+@@ -340,6 +340,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
@@ -27,7 +27,7 @@
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -353,6 +358,11 @@ module_param(irqfixup, int, 0644);
+@@ -352,6 +357,11 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0099-genirq-force-threading.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0099-genirq-force-threading.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0099-genirq-force-threading.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8a71391192108cc3a795b905bf342045a74a9f54 Mon Sep 17 00:00:00 2001
+From 62abbe90fd9e996f3b9ce9ab7d1dc20c372dac61 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 3 Apr 2011 11:57:29 +0200
-Subject: [PATCH 099/304] genirq-force-threading.patch
+Subject: [PATCH 099/303] genirq-force-threading.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0100-drivers-net-fix-livelock-issues.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0100-drivers-net-fix-livelock-issues.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0100-drivers-net-fix-livelock-issues.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9b3393581a748b4d1c01e63476542705947594c4 Mon Sep 17 00:00:00 2001
+From 62d8d6eaab1756cec4b2dcebfe841cbd01da4a71 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 20 Jun 2009 11:36:54 +0200
-Subject: [PATCH 100/304] drivers/net: fix livelock issues
+Subject: [PATCH 100/303] drivers/net: fix livelock issues
Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro
optimization. The reason is that the softirq thread is rescheduling
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0101-drivers-net-vortex-fix-locking-issues.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0101-drivers-net-vortex-fix-locking-issues.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0101-drivers-net-vortex-fix-locking-issues.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 0d4a0c910ace8d9f922830f192e9eed9c59000b3 Mon Sep 17 00:00:00 2001
+From 0d8d9b32c99aedc8467f7051884d2ed4a142bf9c Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt at goodmis.org>
Date: Fri, 3 Jul 2009 08:30:00 -0500
-Subject: [PATCH 101/304] drivers/net: vortex fix locking issues
+Subject: [PATCH 101/303] drivers/net: vortex fix locking issues
Argh, cut and paste wasn't enough...
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0102-drivers-net-gianfar-Make-RT-aware.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0102-drivers-net-gianfar-Make-RT-aware.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0102-drivers-net-gianfar-Make-RT-aware.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e2fe18c9fc444163324e349bb135babe2d587a77 Mon Sep 17 00:00:00 2001
+From 0233c1705624548cdc59ea9003e9a9883a53f4c2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 1 Apr 2010 20:20:57 +0200
-Subject: [PATCH 102/304] drivers: net: gianfar: Make RT aware
+Subject: [PATCH 102/303] drivers: net: gianfar: Make RT aware
The adjust_link() disables interrupts before taking the queue
locks. On RT those locks are converted to "sleeping" locks and
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0103-USB-Fix-the-mouse-problem-when-copying-large-amounts.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0103-USB-Fix-the-mouse-problem-when-copying-large-amounts.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0103-USB-Fix-the-mouse-problem-when-copying-large-amounts.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From d33649e480f04cc16bb9605a10247eb08c9ef3f7 Mon Sep 17 00:00:00 2001
+From 0d14f76b3dfa9330a5961faedf1d76661674a0f6 Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzj at lemote.com>
Date: Mon, 4 Jan 2010 11:33:02 +0800
-Subject: [PATCH 103/304] USB: Fix the mouse problem when copying large
+Subject: [PATCH 103/303] USB: Fix the mouse problem when copying large
amounts of data
When copying large amounts of data between the USB storage devices and
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0104-local-var.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0104-local-var.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0104-local-var.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c6f8725fddfe180682afe8c9be6ef29059fea1b8 Mon Sep 17 00:00:00 2001
+From 384f2aaf46e3e184d74828fc8d10f3ef39246884 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 24 Jun 2011 18:40:37 +0200
-Subject: [PATCH 104/304] local-var.patch
+Subject: [PATCH 104/303] local-var.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0105-rt-local-irq-lock.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0105-rt-local-irq-lock.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0105-rt-local-irq-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 0c8e8ec930651b02c09eddbf94975136ecb220ac Mon Sep 17 00:00:00 2001
+From 4071075696c73b2acef7be1c20facbaab1435587 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
-Subject: [PATCH 105/304] rt-local-irq-lock.patch
+Subject: [PATCH 105/303] rt-local-irq-lock.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0106-cpu-rt-variants.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0106-cpu-rt-variants.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0106-cpu-rt-variants.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From b3f0502c14b5d001a6a3f92a10137c9ce89553d8 Mon Sep 17 00:00:00 2001
+From 82cd472145d060150020299032a574594be467c4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Jun 2011 15:42:38 +0200
-Subject: [PATCH 106/304] cpu-rt-variants.patch
+Subject: [PATCH 106/303] cpu-rt-variants.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0107-mm-slab-wrap-functions.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0107-mm-slab-wrap-functions.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0107-mm-slab-wrap-functions.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e29c240dc3262b080dfdce48092193e03f7a129e Mon Sep 17 00:00:00 2001
+From 63e7b224a10c80e9e3100c29680d04900c6a8749 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 18 Jun 2011 19:44:43 +0200
-Subject: [PATCH 107/304] mm-slab-wrap-functions.patch
+Subject: [PATCH 107/303] mm-slab-wrap-functions.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -9,7 +9,7 @@
1 file changed, 104 insertions(+), 48 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
-index f807b35f..89b7b18 100644
+index f807b35..89b7b18 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0108-slab-Fix-__do_drain-to-use-the-right-array-cache.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0108-slab-Fix-__do_drain-to-use-the-right-array-cache.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0108-slab-Fix-__do_drain-to-use-the-right-array-cache.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 111925c08bed273b15d3230931dc1c5bcb5ff5f6 Mon Sep 17 00:00:00 2001
+From 7d7542f71228437e9ffc65c14ad462e7744ce513 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt at goodmis.org>
Date: Tue, 11 Oct 2011 23:56:23 -0400
-Subject: [PATCH 108/304] slab: Fix __do_drain to use the right array cache
+Subject: [PATCH 108/303] slab: Fix __do_drain to use the right array cache
The array cache in __do_drain() was using the cpu_cache_get() function
which uses smp_processor_id() to get the proper array. On mainline, this
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0109-mm-More-lock-breaks-in-slab.c.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0109-mm-More-lock-breaks-in-slab.c.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0109-mm-More-lock-breaks-in-slab.c.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8ce69e22d0518a7bf2c27d6862e259b6795a01dd Mon Sep 17 00:00:00 2001
+From 20eb7757986300fe5c4c90643d6250ea32b95a72 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Fri, 3 Jul 2009 08:44:43 -0500
-Subject: [PATCH 109/304] mm: More lock breaks in slab.c
+Subject: [PATCH 109/303] mm: More lock breaks in slab.c
Handle __free_pages outside of the locked regions. This reduces the
lock contention on the percpu slab locks in -rt significantly.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0110-mm-page_alloc-rt-friendly-per-cpu-pages.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0110-mm-page_alloc-rt-friendly-per-cpu-pages.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0110-mm-page_alloc-rt-friendly-per-cpu-pages.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9d03ed6f859390ea37231b72819d6d499ed27dab Mon Sep 17 00:00:00 2001
+From bde9ffe9974b013d333b74839903a7390ceb9d56 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
-Subject: [PATCH 110/304] mm: page_alloc: rt-friendly per-cpu pages
+Subject: [PATCH 110/303] mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
@@ -17,7 +17,7 @@
1 file changed, 39 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 8bba0c4..3b70f1e 100644
+index c106723..60050ce 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
@@ -186,7 +186,7 @@
}
return 0;
}
-@@ -5096,6 +5118,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -5099,6 +5121,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0111-mm-page_alloc-reduce-lock-sections-further.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0111-mm-page_alloc-reduce-lock-sections-further.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0111-mm-page_alloc-reduce-lock-sections-further.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 86d0172ab4150c6276e12b4682609b0c4f42a13f Mon Sep 17 00:00:00 2001
+From dd993b484da7b8ba70ee5557f29cde2cffce384f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Fri, 3 Jul 2009 08:44:37 -0500
-Subject: [PATCH 111/304] mm: page_alloc reduce lock sections further
+Subject: [PATCH 111/303] mm: page_alloc reduce lock sections further
Split out the pages which are to be freed into a separate list and
call free_pages_bulk() outside of the percpu page allocator locks.
@@ -13,7 +13,7 @@
1 file changed, 58 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 3b70f1e..e54fa76 100644
+index 60050ce..3a289f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -594,7 +594,7 @@ static inline int free_pages_check(struct page *page)
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0112-mm-page-alloc-fix.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0112-mm-page-alloc-fix.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0112-mm-page-alloc-fix.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 03d0cbe0303d2bec52554c85539e8eb2dabe5ca7 Mon Sep 17 00:00:00 2001
+From b0124e641e34030da55550126e1120c8fc606761 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 21 Jul 2011 16:47:49 +0200
-Subject: [PATCH 112/304] mm-page-alloc-fix.patch
+Subject: [PATCH 112/303] mm-page-alloc-fix.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -9,7 +9,7 @@
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index e54fa76..dce6a03 100644
+index 3a289f8..9849f08 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1966,8 +1966,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0113-mm-convert-swap-to-percpu-locked.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0113-mm-convert-swap-to-percpu-locked.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0113-mm-convert-swap-to-percpu-locked.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 2b63db3ff70fd56681147012305227e46ca8983d Mon Sep 17 00:00:00 2001
+From 883925128cfaf8270f7923965495d7dd776e3e8d Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
-Subject: [PATCH 113/304] mm: convert swap to percpu locked
+Subject: [PATCH 113/303] mm: convert swap to percpu locked
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0114-mm-vmstat-fix-the-irq-lock-asymetry.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0114-mm-vmstat-fix-the-irq-lock-asymetry.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0114-mm-vmstat-fix-the-irq-lock-asymetry.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e85aa5adf7e1d57407fb684d093af41d52b6ea3a Mon Sep 17 00:00:00 2001
+From 0ed4352f8366681af2f06237d3eee8c960baada9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 22 Jun 2011 20:47:08 +0200
-Subject: [PATCH 114/304] mm-vmstat-fix-the-irq-lock-asymetry.patch
+Subject: [PATCH 114/303] mm-vmstat-fix-the-irq-lock-asymetry.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0115-mm-make-vmstat-rt-aware.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0115-mm-make-vmstat-rt-aware.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0115-mm-make-vmstat-rt-aware.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 3adde7584f3b1162e3ebc1fe1de37905664838c9 Mon Sep 17 00:00:00 2001
+From 71630737dca83670eed16b2788a701b85c828ab9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
-Subject: [PATCH 115/304] mm: make vmstat -rt aware
+Subject: [PATCH 115/303] mm: make vmstat -rt aware
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0116-mm-shrink-the-page-frame-to-rt-size.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0116-mm-shrink-the-page-frame-to-rt-size.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0116-mm-shrink-the-page-frame-to-rt-size.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e5f793963b064395f81cd9d6eddc0dbe7cea806f Mon Sep 17 00:00:00 2001
+From 5df5947a4b05609f35e924f600cf61871ba05fc5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri, 3 Jul 2009 08:44:54 -0500
-Subject: [PATCH 116/304] mm: shrink the page frame to !-rt size
+Subject: [PATCH 116/303] mm: shrink the page frame to !-rt size
He below is a boot-tested hack to shrink the page frame size back to
normal.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0117-ARM-Initialize-ptl-lock-for-vector-page.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0117-ARM-Initialize-ptl-lock-for-vector-page.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0117-ARM-Initialize-ptl-lock-for-vector-page.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 69b2ff338338dcba2f4594f2dc7ed68bfe630b65 Mon Sep 17 00:00:00 2001
+From 52675d94816c0f95c19657fb14891083328f7bda Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand at am.sony.com>
Date: Sat, 1 Oct 2011 18:58:13 -0700
-Subject: [PATCH 117/304] ARM: Initialize ptl->lock for vector page
+Subject: [PATCH 117/303] ARM: Initialize ptl->lock for vector page
Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if
PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0118-mm-Allow-only-slab-on-RT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0118-mm-Allow-only-slab-on-RT.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0118-mm-Allow-only-slab-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 867056af0eacb11f69399aef9cc0484cb68d2b03 Mon Sep 17 00:00:00 2001
+From a6e11032e2f801e0ecb7505d11709cf7f4d0d023 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:44:03 -0500
-Subject: [PATCH 118/304] mm: Allow only slab on RT
+Subject: [PATCH 118/303] mm: Allow only slab on RT
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0119-radix-tree-rt-aware.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0119-radix-tree-rt-aware.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0119-radix-tree-rt-aware.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From f5d9e84095c6fcf5d9f75aaa84602d15cf1dfd59 Mon Sep 17 00:00:00 2001
+From 8d2162b374774fdad6b7a4c1f2d3f2e87f4454cd Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:33:18 +0200
-Subject: [PATCH 119/304] radix-tree-rt-aware.patch
+Subject: [PATCH 119/303] radix-tree-rt-aware.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0120-panic-disable-random-on-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0120-panic-disable-random-on-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0120-panic-disable-random-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 5e47429ca888f4ffaf050931fd90007e738f11b1 Mon Sep 17 00:00:00 2001
+From 7aeeacd571da4e14b8fb8e3662d5b32a07bbccdf Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 10 Apr 2012 14:34:04 -0400
-Subject: [PATCH 120/304] panic-disable-random-on-rt
+Subject: [PATCH 120/303] panic-disable-random-on-rt
---
kernel/panic.c | 2 ++
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0121-ipc-Make-the-ipc-code-rt-aware.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0121-ipc-Make-the-ipc-code-rt-aware.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0121-ipc-Make-the-ipc-code-rt-aware.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From ba76c613f5b71d749b072c4da8f08f86d690016a Mon Sep 17 00:00:00 2001
+From 291907ef028bb27e30bdbedd102d7623730a65a9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:12 -0500
-Subject: [PATCH 121/304] ipc: Make the ipc code -rt aware
+Subject: [PATCH 121/303] ipc: Make the ipc code -rt aware
RT serializes the code with the (rt)spinlock but keeps preemption
enabled. Some parts of the code need to be atomic nevertheless.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0122-ipc-mqueue-Add-a-critical-section-to-avoid-a-deadloc.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0122-ipc-mqueue-Add-a-critical-section-to-avoid-a-deadloc.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0122-ipc-mqueue-Add-a-critical-section-to-avoid-a-deadloc.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 44c107fb9ce047fb1f5407fa53766b9d729b02f5 Mon Sep 17 00:00:00 2001
+From f1fd0ed4cd5af831d6706629989b21f39962d6d5 Mon Sep 17 00:00:00 2001
From: KOBAYASHI Yoshitake <yoshitake.kobayashi at toshiba.co.jp>
Date: Sat, 23 Jul 2011 11:57:36 +0900
-Subject: [PATCH 122/304] ipc/mqueue: Add a critical section to avoid a
+Subject: [PATCH 122/303] ipc/mqueue: Add a critical section to avoid a
deadlock
(Repost for v3.0-rt1 and changed the distination addreses)
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0123-relay-fix-timer-madness.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0123-relay-fix-timer-madness.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0123-relay-fix-timer-madness.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9b5f4832e5d82952421d8e9024367a425d254564 Mon Sep 17 00:00:00 2001
+From 6a5e3febc6446d88a95622843b4202485469d639 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:44:07 -0500
-Subject: [PATCH 123/304] relay: fix timer madness
+Subject: [PATCH 123/303] relay: fix timer madness
remove timer calls (!!!) from deep within the tracing infrastructure.
This was totally bogus code that can cause lockups and worse. Poll
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0124-net-ipv4-route-use-locks-on-up-rt.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0124-net-ipv4-route-use-locks-on-up-rt.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0124-net-ipv4-route-use-locks-on-up-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From e82c6437cdee6d6743140cddff83788258742f31 Mon Sep 17 00:00:00 2001
+From 45b84f82282b235dc80decb99e81385a45a4525a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 15 Jul 2011 16:24:45 +0200
-Subject: [PATCH 124/304] net-ipv4-route-use-locks-on-up-rt.patch
+Subject: [PATCH 124/303] net-ipv4-route-use-locks-on-up-rt.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0125-workqueue-avoid-the-lock-in-cpu-dying.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0125-workqueue-avoid-the-lock-in-cpu-dying.patch.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0125-workqueue-avoid-the-lock-in-cpu-dying.patch.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From a1c0e31ccbfd39a8e298350d7803ee000cd2588c Mon Sep 17 00:00:00 2001
+From b32c1514de5a656a67caf2a060cce089f2086e27 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 24 Jun 2011 20:39:24 +0200
-Subject: [PATCH 125/304] workqueue-avoid-the-lock-in-cpu-dying.patch
+Subject: [PATCH 125/303] workqueue-avoid-the-lock-in-cpu-dying.patch
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -9,10 +9,10 @@
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 1c16faf..f46cc04 100644
+index 205bdb0..9e99e10 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -3535,6 +3535,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+@@ -3555,6 +3555,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
kthread_stop(new_trustee);
return NOTIFY_BAD;
}
@@ -38,7 +38,7 @@
}
/* some are called w/ irq disabled, don't disturb irq status */
-@@ -3554,16 +3573,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+@@ -3574,16 +3593,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
gcwq->first_idle = new_worker;
break;
@@ -55,7 +55,7 @@
case CPU_POST_DEAD:
gcwq->trustee_state = TRUSTEE_BUTCHER;
/* fall through */
-@@ -3597,6 +3606,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+@@ -3617,6 +3626,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
spin_unlock_irqrestore(&gcwq->lock, flags);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0126-timers-prepare-for-full-preemption.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0126-timers-prepare-for-full-preemption.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0126-timers-prepare-for-full-preemption.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 04f56cc10ab67d3528a58d662c2f20f9f45ba296 Mon Sep 17 00:00:00 2001
+From 9e0c0d2f2e9d43fa7aa830e423d04f3573ad4978 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
-Subject: [PATCH 126/304] timers: prepare for full preemption
+Subject: [PATCH 126/303] timers: prepare for full preemption
When softirqs can be preempted we need to make sure that cancelling
the timer from the active thread can not deadlock vs. a running timer
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0127-timers-preempt-rt-support.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0127-timers-preempt-rt-support.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0127-timers-preempt-rt-support.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 9cba86fbc4acd2f01050677f7f9ed3aed72ce535 Mon Sep 17 00:00:00 2001
+From 1f1ea0e4627d1b1b34e389b9624e80aa3c9c6ba8 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:20 -0500
-Subject: [PATCH 127/304] timers: preempt-rt support
+Subject: [PATCH 127/303] timers: preempt-rt support
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0128-timers-fix-timer-hotplug-on-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0128-timers-fix-timer-hotplug-on-rt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0128-timers-fix-timer-hotplug-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 8cc2531c4976d14a548ad20cb3c392b539fcd5e2 Mon Sep 17 00:00:00 2001
+From 355b32f11a8edb2ff5696cf3856a757b6a88ee83 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:32 -0500
-Subject: [PATCH 128/304] timers: fix timer hotplug on -rt
+Subject: [PATCH 128/303] timers: fix timer hotplug on -rt
Here we are in the CPU_DEAD notifier, and we must not sleep nor
enable interrupts.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0129-timers-mov-printk_tick-to-soft-interrupt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0129-timers-mov-printk_tick-to-soft-interrupt.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0129-timers-mov-printk_tick-to-soft-interrupt.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From ba8483b66ffca3da620a044cefa62c04b13d4ae2 Mon Sep 17 00:00:00 2001
+From 62174323b804b09034a1c49496bbb16e705a0323 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:30 -0500
-Subject: [PATCH 129/304] timers: mov printk_tick to soft interrupt
+Subject: [PATCH 129/303] timers: mov printk_tick to soft interrupt
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0130-timer-delay-waking-softirqs-from-the-jiffy-tick.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0130-timer-delay-waking-softirqs-from-the-jiffy-tick.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0130-timer-delay-waking-softirqs-from-the-jiffy-tick.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From dea575de6551da78f939c0b42815f60af606ac8d Mon Sep 17 00:00:00 2001
+From d850300925f69fc5a7fc750e4c5d51322528f363 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri, 21 Aug 2009 11:56:45 +0200
-Subject: [PATCH 130/304] timer: delay waking softirqs from the jiffy tick
+Subject: [PATCH 130/303] timer: delay waking softirqs from the jiffy tick
People were complaining about broken balancing with the recent -rt
series.
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0131-timers-Avoid-the-switch-timers-base-set-to-NULL-tric.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0131-timers-Avoid-the-switch-timers-base-set-to-NULL-tric.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0131-timers-Avoid-the-switch-timers-base-set-to-NULL-tric.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From ab2bd9a29df8bf32a5589d74b9734033a1481b93 Mon Sep 17 00:00:00 2001
+From d57c95eb571dd10b48922e8b500a749fd8f9f06f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 21 Jul 2011 15:23:39 +0200
-Subject: [PATCH 131/304] timers: Avoid the switch timers base set to NULL
+Subject: [PATCH 131/303] timers: Avoid the switch timers base set to NULL
trick on RT
On RT that code is preemptible, so we cannot assign NULL to timers
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0132-printk-Don-t-call-printk_tick-in-printk_needs_cpu-on.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0132-printk-Don-t-call-printk_tick-in-printk_needs_cpu-on.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0132-printk-Don-t-call-printk_tick-in-printk_needs_cpu-on.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 53e2f2d86ce87a2b8520341285353a8c427e49ce Mon Sep 17 00:00:00 2001
+From 4ed512e2e2824f6173c58fbe59abb0920aea01f9 Mon Sep 17 00:00:00 2001
From: Yong Zhang <yong.zhang0 at gmail.com>
Date: Sun, 16 Oct 2011 18:56:45 +0800
-Subject: [PATCH 132/304] printk: Don't call printk_tick in printk_needs_cpu()
+Subject: [PATCH 132/303] printk: Don't call printk_tick in printk_needs_cpu()
on RT
printk_tick() can't be called in atomic context when RT is enabled,
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0133-hrtimers-prepare-full-preemption.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0133-hrtimers-prepare-full-preemption.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0133-hrtimers-prepare-full-preemption.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 36bac70a3511ae455cba4d470100aeba8315da9b Mon Sep 17 00:00:00 2001
+From eec6a9d707c85881514cabdcbce3a53a19cf2473 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
-Subject: [PATCH 133/304] hrtimers: prepare full preemption
+Subject: [PATCH 133/303] hrtimers: prepare full preemption
Make cancellation of a running callback in softirq context safe
against preemption.
@@ -44,10 +44,10 @@
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 7eec82d..45b923b 100644
+index 3f7f39e..b5eaffa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -857,6 +857,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+@@ -844,6 +844,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -121,10 +121,10 @@
}
expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 7b73c34..6a74800 100644
+index 4b7183c..1d5e435 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
-@@ -766,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
+@@ -773,6 +773,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
return overrun;
}
@@ -145,7 +145,7 @@
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
-@@ -843,6 +857,7 @@ retry:
+@@ -850,6 +864,7 @@ retry:
if (!timr)
return -EINVAL;
@@ -153,7 +153,7 @@
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -851,9 +866,12 @@ retry:
+@@ -858,9 +873,12 @@ retry:
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -166,7 +166,7 @@
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -891,10 +909,15 @@ retry_delete:
+@@ -898,10 +916,15 @@ retry_delete:
if (!timer)
return -EINVAL;
@@ -182,7 +182,7 @@
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
-@@ -920,8 +943,18 @@ static void itimer_delete(struct k_itimer *timer)
+@@ -927,8 +950,18 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From c495d005449523772e27a22fb74814dc3cebff8e Mon Sep 17 00:00:00 2001
+From 3bcdff277e0f61b18b21457e6d963d6036783cfc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
-Subject: [PATCH 134/304] hrtimer: fixup hrtimer callback changes for
+Subject: [PATCH 134/303] hrtimer: fixup hrtimer callback changes for
preempt-rt
In preempt-rt we can not call the callbacks which take sleeping locks
@@ -14,11 +14,11 @@
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
include/linux/hrtimer.h | 3 +
- kernel/hrtimer.c | 194 +++++++++++++++++++++++++++++++++++++++++-----
+ kernel/hrtimer.c | 197 ++++++++++++++++++++++++++++++++++++++++------
kernel/sched.c | 2 +
kernel/time/tick-sched.c | 1 +
kernel/watchdog.c | 1 +
- 5 files changed, 181 insertions(+), 20 deletions(-)
+ 5 files changed, 182 insertions(+), 22 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 75b2545..26b008b 100644
@@ -42,7 +42,7 @@
ktime_t (*get_time)(void);
ktime_t softirq_time;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 45b923b..392fa07 100644
+index b5eaffa..a78d7c9 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
@@ -65,37 +65,7 @@
/*
* Initialize the high resolution related parts of cpu_base
*/
-@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base,
- int wakeup)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+again:
- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-+ /*
-+ * Move softirq based timers away from the rbtree in
-+ * case it expired already. Otherwise we would have a
-+ * stale base->first entry until the softirq runs.
-+ */
-+ if (!hrtimer_rt_defer(timer)) {
-+ ktime_t now = ktime_get();
-+
-+ __run_hrtimer(timer, &now);
-+ /*
-+ * __run_hrtimer might have requeued timer and
-+ * it could be base->first again.
-+ */
-+ if (&timer->node == base->active.next)
-+ goto again;
-+ return 1;
-+ }
-+#else
-+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-+#endif
- if (wakeup) {
- raw_spin_unlock(&base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-@@ -743,6 +767,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+@@ -730,6 +732,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
@@ -107,7 +77,7 @@
#endif /* CONFIG_HIGH_RES_TIMERS */
-@@ -874,9 +903,9 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer)
+@@ -861,9 +868,9 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
@@ -119,7 +89,7 @@
}
#else
-@@ -926,6 +955,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
+@@ -913,6 +920,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@@ -131,7 +101,39 @@
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
-@@ -1188,6 +1222,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+@@ -1009,8 +1021,29 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+- && hrtimer_enqueue_reprogram(timer, new_base)) {
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
++#ifdef CONFIG_PREEMPT_RT_BASE
++again:
++ if (hrtimer_enqueue_reprogram(timer, new_base)) {
++ /*
++ * Move softirq based timers away from the rbtree in
++ * case it expired already. Otherwise we would have a
++ * stale base->first entry until the softirq runs.
++ */
++ if (!hrtimer_rt_defer(timer)) {
++ ktime_t now = ktime_get();
++
++ __run_hrtimer(timer, &now);
++ /*
++ * __run_hrtimer might have requeued timer and
++ * it could be base->first again.
++ */
++ if (&timer->node == base->active.next)
++ goto again;
++ }
++#else
++ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
++#endif
+ if (wakeup) {
+ /*
+ * We need to drop cpu_base->lock to avoid a
+@@ -1188,6 +1221,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -139,7 +141,7 @@
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1271,10 +1306,118 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+@@ -1271,10 +1305,118 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -260,7 +262,7 @@
/*
* High resolution timer interrupt
* Called with interrupts disabled
-@@ -1283,7 +1426,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+@@ -1283,7 +1425,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@@ -269,7 +271,7 @@
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
-@@ -1349,7 +1492,10 @@ retry:
+@@ -1349,7 +1491,10 @@ retry:
break;
}
@@ -281,7 +283,7 @@
}
}
-@@ -1364,6 +1510,10 @@ retry:
+@@ -1364,6 +1509,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -292,7 +294,7 @@
return;
}
-@@ -1444,6 +1594,12 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1444,6 +1593,12 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
@@ -305,7 +307,7 @@
static void run_hrtimer_softirq(struct softirq_action *h)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-@@ -1453,15 +1609,9 @@ static void run_hrtimer_softirq(struct softirq_action *h)
+@@ -1453,15 +1608,9 @@ static void run_hrtimer_softirq(struct softirq_action *h)
clock_was_set();
}
@@ -322,7 +324,7 @@
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
-@@ -1494,7 +1644,7 @@ void hrtimer_run_queues(void)
+@@ -1494,7 +1643,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
@@ -331,7 +333,7 @@
if (hrtimer_hres_active())
return;
-@@ -1519,12 +1669,16 @@ void hrtimer_run_queues(void)
+@@ -1519,12 +1668,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@@ -350,7 +352,7 @@
}
/*
-@@ -1546,6 +1700,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
+@@ -1546,6 +1699,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -358,7 +360,7 @@
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1684,6 +1839,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
+@@ -1684,6 +1838,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -366,7 +368,7 @@
}
hrtimer_init_hres(cpu_base);
-@@ -1802,9 +1958,7 @@ void __init hrtimers_init(void)
+@@ -1802,9 +1957,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -1,7 +1,7 @@
-From 80cc960e628509c72f63a7327a4dc22707a02b81 Mon Sep 17 00:00:00 2001
+From 1beeecbf0eb9cdc72533030ef331d6494ccec737 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Fri, 12 Aug 2011 17:39:54 +0200
-Subject: [PATCH 135/304] hrtimer: Don't call the timer handler from
+Subject: [PATCH 135/303] hrtimer: Don't call the timer handler from
hrtimer_start
[<ffffffff812de4a9>] __delay+0xf/0x11
@@ -29,80 +29,73 @@
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
---
- kernel/hrtimer.c | 48 +++++++++++++++++++++++-------------------------
- 1 file changed, 23 insertions(+), 25 deletions(-)
+ kernel/hrtimer.c | 46 ++++++++++++++++++++++------------------------
+ 1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 392fa07..27a3192 100644
+index a78d7c9..59d2463 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -646,37 +646,24 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base,
- int wakeup)
- {
--#ifdef CONFIG_PREEMPT_RT_BASE
--again:
- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-+ if (!wakeup)
-+ return -ETIME;
+@@ -1021,30 +1021,19 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++ && hrtimer_enqueue_reprogram(timer, new_base)) {
+
-+#ifdef CONFIG_PREEMPT_RT_BASE
- /*
- * Move softirq based timers away from the rbtree in
- * case it expired already. Otherwise we would have a
- * stale base->first entry until the softirq runs.
- */
-- if (!hrtimer_rt_defer(timer)) {
-- ktime_t now = ktime_get();
--
-- __run_hrtimer(timer, &now);
++ if (wakeup
+ #ifdef CONFIG_PREEMPT_RT_BASE
+-again:
+- if (hrtimer_enqueue_reprogram(timer, new_base)) {
- /*
-- * __run_hrtimer might have requeued timer and
-- * it could be base->first again.
+- * Move softirq based timers away from the rbtree in
+- * case it expired already. Otherwise we would have a
+- * stale base->first entry until the softirq runs.
- */
-- if (&timer->node == base->active.next)
-- goto again;
-- return 1;
-- }
+- if (!hrtimer_rt_defer(timer)) {
+- ktime_t now = ktime_get();
+-
+- __run_hrtimer(timer, &now);
+- /*
+- * __run_hrtimer might have requeued timer and
+- * it could be base->first again.
+- */
+- if (&timer->node == base->active.next)
+- goto again;
+- }
-#else
- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-+ if (!hrtimer_rt_defer(timer))
-+ return -ETIME;
++ /*
++ * Move softirq based timers away from the rbtree in
++ * case it expired already. Otherwise we would have a
++ * stale base->first entry until the softirq runs.
++ */
++ && hrtimer_rt_defer(timer)
#endif
- if (wakeup) {
-- raw_spin_unlock(&base->cpu_base->lock);
-- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-- raw_spin_lock(&base->cpu_base->lock);
-- } else
++ ) {
+ /*
+ * We need to drop cpu_base->lock to avoid a
+ * lock ordering issue vs. rq->lock.
+@@ -1053,9 +1042,18 @@ again:
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ local_irq_restore(flags);
+ return ret;
+- } else {
- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ raw_spin_unlock(&base->cpu_base->lock);
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ raw_spin_lock(&base->cpu_base->lock);
-
-- return 1;
-+ return 0;
+ }
++
++ /*
++ * In case we failed to reprogram the timer (mostly
++ * because out current timer is already elapsed),
++ * remove it again and report a failure. This avoids
++ * stale base->first entries.
++ */
++ debug_deactivate(timer);
++ __remove_hrtimer(timer, new_base,
++ timer->state & HRTIMER_STATE_CALLBACK, 0);
++ ret = -ETIME;
}
- return 0;
-@@ -1056,8 +1043,19 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- *
- * XXX send_remote_softirq() ?
- */
-- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
-+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
-+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
-+ if (ret) {
-+ /*
-+ * In case we failed to reprogram the timer (mostly
-+ * because out current timer is already elapsed),
-+ * remove it again and report a failure. This avoids
-+ * stale base->first entries.
-+ */
-+ __remove_hrtimer(timer, new_base,
-+ timer->state & HRTIMER_STATE_CALLBACK, 0);
-+ }
-+ }
-
unlock_hrtimer_base(timer, &flags);
-
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch)
@@ -0,0 +1,43 @@
+From 8f1327ae3bdca06bb6568fa3384de01cd7525426 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 14 Sep 2011 14:48:43 +0200
+Subject: [PATCH 136/303] hrtimer-fix-reprogram-madness.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/hrtimer.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 59d2463..fdfe1bb 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1325,7 +1325,11 @@ static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ if (!enqueue_hrtimer(timer, base))
+ return;
+
+- if (hrtimer_reprogram(timer, base))
++#ifndef CONFIG_HIGH_RES_TIMERS
++ }
++#else
++ if (base->cpu_base->hres_active &&
++ hrtimer_reprogram(timer, base))
+ goto requeue;
+
+ } else if (hrtimer_active(timer)) {
+@@ -1334,6 +1338,7 @@ static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ * the event device.
+ */
+ if (&timer->node == base->active.next &&
++ base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+ }
+@@ -1346,6 +1351,7 @@ requeue:
+ */
+ __remove_hrtimer(timer, base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &base->expired);
++#endif
+ }
+
+ /*
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0137-timer-fd-Prevent-live-lock.patch)
@@ -0,0 +1,30 @@
+From ed814c72df388e0d21dbd12bef30d66bea9e55d5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 25 Jan 2012 11:08:40 +0100
+Subject: [PATCH 137/303] timer-fd: Prevent live lock
+
+If hrtimer_try_to_cancel() requires a retry, then depending on the
+priority setting the retry loop might prevent timer callback completion
+on RT. Prevent that by waiting for completion on RT; there is no change
+for a non-RT kernel.
+
+Reported-by: Sankara Muthukrishnan <sankara.m at gmail.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ fs/timerfd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/timerfd.c b/fs/timerfd.c
+index dffeb37..57f0e4e 100644
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -313,7 +313,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
+ if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
+ break;
+ spin_unlock_irq(&ctx->wqh.lock);
+- cpu_relax();
++ hrtimer_wait_for_timer(&ctx->tmr);
+ }
+
+ /*
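The live lock described in the patch above needs a priority inversion to trigger: on an RT kernel the expired timer's callback is deferred to the hrtimer softirq thread (see the earlier hrtimer patches in this series), so a higher-priority SCHED_FIFO task spinning on hrtimer_try_to_cancel() with cpu_relax() keeps that thread off the CPU and the cancel never succeeds. A rough, hypothetical userspace sketch of the triggering pattern (illustrative only, not part of the commit; assumes a single-CPU RT kernel without this fix):

	/* Re-arm a timerfd from a high-priority RT task; each
	 * timerfd_settime() may have to cancel the previous, already
	 * expiring timer, which is where the kernel-side loop spins. */
	#include <sys/timerfd.h>
	#include <sched.h>
	#include <time.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 90 };
		struct itimerspec its = {
			.it_value    = { .tv_sec = 0, .tv_nsec = 1 },
			.it_interval = { 0, 0 },
		};
		int fd = timerfd_create(CLOCK_MONOTONIC, 0);

		if (fd < 0 || sched_setscheduler(0, SCHED_FIFO, &sp) < 0) {
			perror("setup");
			return 1;
		}
		for (;;)
			if (timerfd_settime(fd, 0, &its, NULL) < 0) {
				perror("timerfd_settime");
				return 1;
			}
	}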
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch)
@@ -0,0 +1,313 @@
+From a486145b5e2abcdb2b805b74a83716132f3ae057 Mon Sep 17 00:00:00 2001
+From: John Stultz <johnstul at us.ibm.com>
+Date: Fri, 3 Jul 2009 08:29:58 -0500
+Subject: [PATCH 138/303] posix-timers: thread posix-cpu-timers on -rt
+
+posix-cpu-timer code takes non-rt-safe locks in hard irq
+context. Move it to a thread.
+
+[ 3.0 fixes from Peter Zijlstra <peterz at infradead.org> ]
+
+Signed-off-by: John Stultz <johnstul at us.ibm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/init_task.h | 7 ++
+ include/linux/sched.h | 3 +
+ init/main.c | 1 +
+ kernel/fork.c | 3 +
+ kernel/posix-cpu-timers.c | 182 +++++++++++++++++++++++++++++++++++++++++++--
+ 5 files changed, 190 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index cdde2b3..3202e80 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -142,6 +142,12 @@ extern struct task_group root_task_group;
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ #define INIT_TASK_COMM "swapper"
+
+ /*
+@@ -197,6 +203,7 @@ extern struct task_group root_task_group;
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index a7b7888..0e301c9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1359,6 +1359,9 @@ struct task_struct {
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *posix_timer_list;
++#endif
+
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+diff --git a/init/main.c b/init/main.c
+index feda146..f2936db 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/posix-timers.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 5bc7283..f56f289 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1031,6 +1031,9 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
++#endif
+ tsk->cputime_expires.prof_exp = cputime_zero;
+ tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.sched_exp = 0;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 962c291..cff1757 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -701,7 +701,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -1223,7 +1223,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
+
+@@ -1290,10 +1290,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
+
+- raw_spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- raw_spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1307,13 +1308,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1371,6 +1372,175 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ }
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
++
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++ /* it's possible the list is empty, just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++ }
++ /* XXX signal the thread somehow */
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posix_cpu_timers/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task,cpu),
++ any_online_cpu(cpu_online_map));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_cpu_mask(cpu, cpu_possible_map)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
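One detail of the queueing code above that is easy to miss: ->posix_timer_list doubles as the "already queued" flag, so NULL cannot serve as the list terminator; the final entry points to itself instead, which is why the drain loop stops on "next == tsk". A minimal standalone sketch of that convention (illustrative only, with a hypothetical struct, not the kernel code):

	#include <stdio.h>

	struct task {
		const char *name;
		struct task *next;	/* NULL = not queued, self = last entry */
	};

	static struct task *head;	/* stands in for the per-CPU list head */

	static void enqueue(struct task *t)
	{
		if (t->next)			/* non-NULL: already queued */
			return;
		t->next = head ? head : t;	/* empty list: point to self */
		head = t;
	}

	static void drain(void)
	{
		struct task *t = head, *next;

		head = NULL;
		while (t) {
			next = t->next;
			printf("run timers for %s\n", t->name);
			t->next = NULL;		/* no longer queued */
			if (next == t)		/* self-pointer terminates */
				break;
			t = next;
		}
	}

	int main(void)
	{
		struct task a = { "a", NULL }, b = { "b", NULL };

		enqueue(&a);
		enqueue(&b);
		drain();
		return 0;
	}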
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch)
@@ -0,0 +1,29 @@
+From 82a85130ca216e43ca0117ea55f3d705022398eb Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme at redhat.com>
+Date: Fri, 3 Jul 2009 08:30:00 -0500
+Subject: [PATCH 139/303] posix-timers: Shorten posix_cpu_timers/<CPU> kernel
+ thread names
+
+Shorten the softirq kernel thread names because they always overflow the
+limited comm length, appearing as "posix_cpu_timer" CPU# times.
+
+Signed-off-by: Arnaldo Carvalho de Melo <acme at redhat.com>
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/posix-cpu-timers.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index cff1757..851f93d 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1479,7 +1479,7 @@ static int posix_cpu_thread_call(struct notifier_block *nfb,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
+- "posix_cpu_timers/%d",cpu);
++ "posixcputmr/%d",cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
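The rename above comes down to TASK_COMM_LEN, which is 16 bytes including the terminating NUL in the kernels this series targets: "posix_cpu_timers/<n>" is at least 18 characters, so every such thread showed up as the same truncated "posix_cpu_timer", while "posixcputmr/<n>" fits even for three-digit CPU numbers. A quick userspace sketch of the arithmetic (illustrative only, not part of the commit):

	#include <stdio.h>

	#define TASK_COMM_LEN 16	/* as in include/linux/sched.h */

	int main(void)
	{
		char comm[TASK_COMM_LEN];
		int cpus[] = { 0, 7, 15, 127 };
		unsigned int i;

		for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
			snprintf(comm, sizeof(comm), "posix_cpu_timers/%d", cpus[i]);
			printf("old: %-15s  ", comm);	/* truncated to "posix_cpu_timer" */
			snprintf(comm, sizeof(comm), "posixcputmr/%d", cpus[i]);
			printf("new: %s\n", comm);	/* CPU number preserved */
		}
		return 0;
	}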
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch)
@@ -0,0 +1,60 @@
+From d90735cc46b406d08a8be9d94dfd0dbc5f85aca6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:44 -0500
+Subject: [PATCH 140/303] posix-timers: Avoid wakeups when no timers are
+ active
+
+Waking the thread even when no timers are scheduled is useless.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/posix-cpu-timers.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 851f93d..61e7344 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1436,6 +1436,21 @@ wait_to_die:
+ return 0;
+ }
+
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
+ void run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ unsigned long cpu = smp_processor_id();
+@@ -1448,7 +1463,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+- if (!tsk->posix_timer_list) {
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+@@ -1460,9 +1475,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+- /* XXX signal the thread somehow */
+- wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+
+ /*
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0141-sched-delay-put-task.patch.patch)
@@ -0,0 +1,72 @@
+From 93c6a673327ba9069d11959a181af20aa6e2c10f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 31 May 2011 16:59:16 +0200
+Subject: [PATCH 141/303] sched-delay-put-task.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 13 +++++++++++++
+ kernel/fork.c | 11 +++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 0e301c9..678cdef 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1595,6 +1595,9 @@ struct task_struct {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_t ptrace_bp_refcnt;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++#endif
+ };
+
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+@@ -1779,6 +1782,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1786,6 +1798,7 @@ static inline void put_task_struct(struct task_struct *t)
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
++#endif
+
+ extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index f56f289..2b985c7 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -198,7 +198,18 @@ void __put_task_struct(struct task_struct *tsk)
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+
+ /*
+ * macro override instead of weak attribute alias, to workaround
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0142-sched-limit-nr-migrate.patch.patch)
@@ -0,0 +1,26 @@
+From 1df09dda96cd8e47d9285a95e1e6845c1ac6c194 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 6 Jun 2011 12:12:51 +0200
+Subject: [PATCH 142/303] sched-limit-nr-migrate.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index a877974..2fd0f7c 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -939,7 +939,11 @@ late_initcall(sched_init_debug);
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
+
+ /*
+ * period over which we average the RT time consumption, measured
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0143-sched-mmdrop-delayed.patch.patch)
@@ -0,0 +1,154 @@
+From f265cea1818c56478f6791b95937655dea4992a1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 6 Jun 2011 12:20:33 +0200
+Subject: [PATCH 143/303] sched-mmdrop-delayed.patch
+
+Needs thread context (pgd_lock) -> ifdeffed. Workqueues won't work with
+RT.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/mm_types.h | 4 ++++
+ include/linux/sched.h | 12 ++++++++++++
+ kernel/fork.c | 15 ++++++++++++++-
+ kernel/sched.c | 21 +++++++++++++++++++--
+ 4 files changed, 49 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 1ec126f..c303a27 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -12,6 +12,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/rcupdate.h>
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+
+@@ -393,6 +394,9 @@ struct mm_struct {
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ struct cpumask cpumask_allocation;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head delayed_drop;
++#endif
+ };
+
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 678cdef..cb87a0c 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2280,12 +2280,24 @@ extern struct mm_struct * mm_alloc(void);
+
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 2b985c7..e2d8055 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
+ #else
+ void __put_task_struct_cb(struct rcu_head *rhp)
+ {
+- struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+ __put_task_struct(tsk);
+
+@@ -555,6 +555,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
+ /*
+ * Decrement the use count and release all resources for an mm.
+ */
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 2fd0f7c..188c3b6bf 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3171,8 +3171,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+ finish_lock_switch(rq, prev);
+
+ fire_sched_in_preempt_notifiers(current);
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm)
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ /*
+ * Remove function-return probe instances associated with this
+@@ -6496,6 +6500,8 @@ static int migration_cpu_stop(void *data)
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
++
+ /*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+@@ -6508,7 +6514,12 @@ void idle_task_exit(void)
+
+ if (mm != &init_mm)
+ switch_mm(mm, &init_mm, current);
+- mmdrop(mm);
++
++ /*
++ * Defer the cleanup to an alive cpu. On RT we can neither
++ * call mmdrop() nor mmdrop_delayed() from here.
++ */
++ per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ }
+
+ /*
+@@ -6853,6 +6864,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ migrate_nr_uninterruptible(rq);
+ calc_global_load_remove(rq);
+ break;
++ case CPU_DEAD:
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
++ }
++ break;
+ #endif
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch)
@@ -0,0 +1,89 @@
+From ec0eae77ead1e3c9fc9923a2f918ddec360b440b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sat, 25 Jun 2011 09:21:04 +0200
+Subject: [PATCH 144/303] sched-rt-mutex-wakeup.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched.c | 31 ++++++++++++++++++++++++++++++-
+ 2 files changed, 33 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index cb87a0c..5dc840b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1073,6 +1073,7 @@ struct sched_domain;
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x04 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+
+ #define ENQUEUE_WAKEUP 1
+ #define ENQUEUE_HEAD 2
+@@ -1222,6 +1223,7 @@ enum perf_event_task_context {
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -2190,6 +2192,7 @@ extern void xtime_update(unsigned long ticks);
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 188c3b6bf..3bfd00a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2823,8 +2823,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+
+ smp_wmb();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state)
++ p->saved_state = TASK_RUNNING;
++ }
+ goto out;
++ }
++
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+@@ -2897,6 +2914,18 @@ int wake_up_process(struct task_struct *p)
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0145-sched-prevent-idle-boost.patch.patch)
@@ -0,0 +1,52 @@
+From c37cc693e3d902d5d80e7f218a75ad2f1cdddab0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 6 Jun 2011 20:07:38 +0200
+Subject: [PATCH 145/303] sched-prevent-idle-boost.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 3bfd00a..0a2a1b2a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5230,6 +5230,24 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+
+ rq = __task_rq_lock(p);
+
++ /*
++ * Idle task boosting is a nono in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
+ trace_sched_pi_setprio(p, prio);
+ oldprio = p->prio;
+ prev_class = p->sched_class;
+@@ -5253,11 +5271,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+ enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
+
+ check_class_changed(rq, p, prev_class, oldprio);
++out_unlock:
+ __task_rq_unlock(rq);
+ }
+-
+ #endif
+-
+ void set_user_nice(struct task_struct *p, long nice)
+ {
+ int old_prio, delta, on_rq;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch)
@@ -0,0 +1,50 @@
+From f170601cfb11341cdc9fa8a1cd8d03c6312ad84a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 7 Jun 2011 09:19:06 +0200
+Subject: [PATCH 146/303] sched-might-sleep-do-not-account-rcu-depth.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rcupdate.h | 7 +++++++
+ kernel/sched.c | 3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 2cf4226..a0082e2 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -147,6 +147,11 @@ void synchronize_rcu(void);
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+@@ -170,6 +175,8 @@ static inline int rcu_preempt_depth(void)
+ return 0;
+ }
+
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+ /* Internal to kernel */
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 0a2a1b2a..0b54ef6 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -8661,7 +8661,8 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
++ sched_rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch)
@@ -0,0 +1,50 @@
+From 9b66b10f56b2f24f4ae16fa46dcb48ca0cf72dfa Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Tue, 16 Mar 2010 14:31:44 -0700
+Subject: [PATCH 147/303] sched: Break out from load_balancing on rq_lock
+ contention
+
+Also limit NEW_IDLE pull
+
+Signed-off-by: Peter Zijlstra <peterz at infradead.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched_fair.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 66e4576..4303d82 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -2899,6 +2899,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ */
+ if (idle == CPU_NEWLY_IDLE)
+ break;
++
++ if (raw_spin_is_contended(&this_rq->lock) ||
++ raw_spin_is_contended(&busiest->lock))
++ break;
+ #endif
+
+ /*
+@@ -3039,6 +3043,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ rem_load_move -= moved_load;
+ if (rem_load_move < 0)
+ break;
++
++#ifdef CONFIG_PREEMPT
++ /*
++ * NEWIDLE balancing is a source of latency, so preemptible
++ * kernels will stop after the first task is pulled to minimize
++ * the critical section.
++ */
++ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
++ break;
++
++ if (raw_spin_is_contended(&this_rq->lock) ||
++ raw_spin_is_contended(&busiest->lock))
++ break;
++#endif
+ }
+ rcu_read_unlock();
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0148-sched-cond-resched.patch.patch)
@@ -0,0 +1,35 @@
+From eff65f5c6bdbbe6d8ccb8ac66475cd772b494027 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 7 Jun 2011 11:25:03 +0200
+Subject: [PATCH 148/303] sched-cond-resched.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 0b54ef6..12a38a8 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5968,9 +5968,17 @@ static inline int should_resched(void)
+
+ static void __cond_resched(void)
+ {
+- add_preempt_count(PREEMPT_ACTIVE);
+- __schedule();
+- sub_preempt_count(PREEMPT_ACTIVE);
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ __schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++ /*
++ * Check again in case we missed a preemption
++ * opportunity between schedule and now.
++ */
++ barrier();
++
++ } while (need_resched());
+ }
+
+ int __sched _cond_resched(void)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0149-cond-resched-softirq-fix.patch.patch)
@@ -0,0 +1,52 @@
+From bb57a850575a16f57dc7150c93fb4682817d12a1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 14 Jul 2011 09:56:44 +0200
+Subject: [PATCH 149/303] cond-resched-softirq-fix.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 4 ++++
+ kernel/sched.c | 2 ++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5dc840b..72bfb2d 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2614,12 +2614,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
+ __cond_resched_lock(lock); \
+ })
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ /*
+ * Does a critical section need to be broken due to another
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 12a38a8..1a9ff1a2 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6019,6 +6019,7 @@ int __cond_resched_lock(spinlock_t *lock)
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+@@ -6032,6 +6033,7 @@ int __sched __cond_resched_softirq(void)
+ return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
+
+ /**
+ * yield - yield the current processor to other threads.
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch)
@@ -0,0 +1,59 @@
+From 0980e2a38cf2f1d8b0da49369362135456ca4940 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 20:46:52 +0200
+Subject: [PATCH 150/303] sched-no-work-when-pi-blocked.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 8 ++++++++
+ kernel/sched.c | 5 ++++-
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 72bfb2d..367e219 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2107,12 +2107,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++ return tsk->pi_blocked_on != NULL;
++}
+ #else
+ static inline int rt_mutex_getprio(struct task_struct *p)
+ {
+ return p->normal_prio;
+ }
+ # define rt_mutex_adjust_pi(p) do { } while (0)
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++ return false;
++}
+ #endif
+
+ extern bool yield_to(struct task_struct *p, bool preempt);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 1a9ff1a2..c63fd07 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4654,7 +4654,7 @@ need_resched:
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state)
++ if (!tsk->state || tsk_is_pi_blocked(tsk))
+ return;
+
+ /*
+@@ -4674,6 +4674,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
+
+ static inline void sched_update_worker(struct task_struct *tsk)
+ {
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch)
@@ -0,0 +1,23 @@
+From b85397351e2b5df84b23d7b26627c9417dc1d73d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 22:51:33 +0200
+Subject: [PATCH 151/303] cond-resched-lock-rt-tweak.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 367e219..e946538 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2611,7 +2611,7 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0152-sched-disable-ttwu-queue.patch.patch)
@@ -0,0 +1,30 @@
+From 90ca0139bf3b402b9d1d7534096127998f2abc7a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 13 Sep 2011 16:42:35 +0200
+Subject: [PATCH 152/303] sched-disable-ttwu-queue.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched_features.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/sched_features.h b/kernel/sched_features.h
+index 8480224..0007001 100644
+--- a/kernel/sched_features.h
++++ b/kernel/sched_features.h
+@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, 1)
+ */
+ SCHED_FEAT(NONTASK_POWER, 1)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, 1)
++#else
++SCHED_FEAT(TTWU_QUEUE, 0)
++#endif
+
+ SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+ SCHED_FEAT(RT_RUNTIME_SHARE, 1)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch)
@@ -0,0 +1,31 @@
+From d2fc33c21315288f26169f18a4c3961e216fa8cd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 18 Jul 2011 17:03:52 +0200
+Subject: [PATCH 153/303] sched: Disable CONFIG_RT_GROUP_SCHED on RT
+
+Carsten reported problems when running:
+
+ taskset 01 chrt -f 1 sleep 1
+
+from within rc.local on a F15 machine. The task stays running and
+never gets on the run queue because some of the run queues have
+rt_throttled=1 which does not go away. Works nicely from an ssh login
+shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index dbc82d0..720c182 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -731,6 +731,7 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch)
@@ -0,0 +1,38 @@
+From 29c4bd7451e8162f9f04a320c4c4a2f6ee64f226 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 13 Dec 2011 21:42:19 +0100
+Subject: [PATCH 154/303] sched: ttwu: Return success when only changing the
+ saved_state value
+
+When a task blocks on a rt lock, it saves the current state in
+p->saved_state, so a lock related wake up will not destroy the
+original state.
+
+When a real wakeup happens, while the task is running due to a lock
+wakeup already, we update p->saved_state to TASK_RUNNING, but we do
+not return success, which might cause another wakeup in the waitqueue
+code and the task remains in the waitqueue list. Return success in
+that case as well.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ kernel/sched.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index c63fd07..49b0d1d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2830,8 +2830,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+- if (p->saved_state & state)
++ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
+ }
+ goto out;
+ }
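
The hunk above covers the RT-specific p->saved_state handling: a lock-related wakeup (WF_LOCK_SLEEPER) must not clobber the state a task saved before blocking on an rt lock, while a real wakeup that only updates saved_state must still be reported as a successful wakeup. As a rough stand-alone distillation of that decision (illustrative only, not part of the patch; WF_LOCK_SLEEPER and p->saved_state come from earlier patches in this series, and the sketch assumes normal kernel context with <linux/sched.h>):

    /* Sketch: what try_to_wake_up() should report when the task is already
     * running because of a lock wakeup and only saved_state is updated. */
    static int saved_state_wakeup(struct task_struct *p, unsigned int state,
                                  int wake_flags)
    {
            int success = 0;

            if (!(wake_flags & WF_LOCK_SLEEPER)) {
                    if (p->saved_state & state) {
                            p->saved_state = TASK_RUNNING;
                            success = 1;    /* wakeup consumed, tell the caller */
                    }
            }
            return success;
    }
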
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch)
@@ -0,0 +1,65 @@
+From 6c898cec31a042364915cc1a58c6a6ec0f8c98f9 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Fri, 3 Jul 2009 08:30:27 -0500
+Subject: [PATCH 155/303] stop_machine: convert stop_machine_run() to
+ PREEMPT_RT
+
+Instead of playing with non-preemption, introduce explicit
+startup serialization. This is more robust and cleaner as
+well.
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/stop_machine.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 2f194e9..61779f8 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -135,6 +135,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+
+ /* static data for stop_cpus */
+ static DEFINE_MUTEX(stop_cpus_mutex);
++static DEFINE_MUTEX(stopper_lock);
+ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+@@ -153,15 +154,14 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ }
+
+ /*
+- * Disable preemption while queueing to avoid getting
+- * preempted by a stopper which might wait for other stoppers
+- * to enter @fn which can lead to deadlock.
++ * Make sure that all work is queued on all cpus before
++ * any of the cpus can execute it.
+ */
+- preempt_disable();
++ mutex_lock(&stopper_lock);
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ &per_cpu(stop_cpus_work, cpu));
+- preempt_enable();
++ mutex_unlock(&stopper_lock);
+ }
+
+ static int __stop_cpus(const struct cpumask *cpumask,
+@@ -275,6 +275,16 @@ repeat:
+
+ __set_current_state(TASK_RUNNING);
+
++ /*
++ * Wait until the stopper finished scheduling on all
++ * cpus
++ */
++ mutex_lock(&stopper_lock);
++ /*
++ * Let other cpu threads continue as well
++ */
++ mutex_unlock(&stopper_lock);
++
+ /* cpu stop callbacks are not allowed to sleep */
+ preempt_disable();
+
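
The serialization introduced above boils down to a queue-then-release handshake on stopper_lock: the queueing side holds the mutex while it queues work for every cpu, and each stopper thread takes and immediately drops the same mutex before invoking its callback, so no callback starts until all cpus have work queued. A minimal sketch of that pattern (illustrative only, not the patch's actual code; assumes kernel context with <linux/mutex.h>):

    static DEFINE_MUTEX(stopper_lock);

    static void queueing_side(void)
    {
            mutex_lock(&stopper_lock);
            /* ... queue one cpu_stop_work per target cpu ... */
            mutex_unlock(&stopper_lock);
    }

    static void stopper_thread_side(void)
    {
            /* Blocks until the queueing side has queued work everywhere. */
            mutex_lock(&stopper_lock);
            mutex_unlock(&stopper_lock);
            /* ... now safe to run the stop callback ... */
    }
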
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch)
@@ -0,0 +1,35 @@
+From 3b1d491388bd2e965f2d1344dc2ea1852673d196 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 19:53:19 +0200
+Subject: [PATCH 156/303] stomp-machine-mark-stomper-thread.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ kernel/stop_machine.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e946538..be35c44 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1826,6 +1826,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
+ #define PF_FROZEN 0x00010000 /* frozen for system suspend */
+ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+ #define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 61779f8..484a335 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -327,6 +327,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+ if (IS_ERR(p))
+ return notifier_from_errno(PTR_ERR(p));
+ get_task_struct(p);
++ p->flags |= PF_STOMPER;
+ kthread_bind(p, cpu);
+ sched_set_stop_task(cpu, p);
+ stopper->thread = p;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0157-stomp-machine-raw-lock.patch.patch)
@@ -0,0 +1,177 @@
+From adc388e84e25afa5ecb3f8175c494b3473697d54 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 11:01:51 +0200
+Subject: [PATCH 157/303] stomp-machine-raw-lock.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/stop_machine.c | 58 ++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 41 insertions(+), 17 deletions(-)
+
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 484a335..561ba3a 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -29,12 +29,12 @@ struct cpu_stop_done {
+ atomic_t nr_todo; /* nr left to execute */
+ bool executed; /* actually executed? */
+ int ret; /* collected return value */
+- struct completion completion; /* fired if nr_todo reaches 0 */
++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
+ };
+
+ /* the actual stopper, one per every possible cpu, enabled on online cpus */
+ struct cpu_stopper {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ bool enabled; /* is this stopper enabled? */
+ struct list_head works; /* list of pending works */
+ struct task_struct *thread; /* stopper thread */
+@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
+ {
+ memset(done, 0, sizeof(*done));
+ atomic_set(&done->nr_todo, nr_todo);
+- init_completion(&done->completion);
++ done->waiter = current;
+ }
+
+ /* signal completion unless @done is NULL */
+@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
+ if (done) {
+ if (executed)
+ done->executed = true;
+- if (atomic_dec_and_test(&done->nr_todo))
+- complete(&done->completion);
++ if (atomic_dec_and_test(&done->nr_todo)) {
++ wake_up_process(done->waiter);
++ done->waiter = NULL;
++ }
+ }
+ }
+
+@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+
+ if (stopper->enabled) {
+ list_add_tail(&work->list, &stopper->works);
+@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
+ } else
+ cpu_stop_signal_done(work->done, false);
+
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
++}
++
++static void wait_for_stop_done(struct cpu_stop_done *done)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (atomic_read(&done->nr_todo)) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ /*
++ * We need to wait until cpu_stop_signal_done() has cleared
++ * done->waiter.
++ */
++ while (done->waiter)
++ cpu_relax();
++ set_current_state(TASK_RUNNING);
+ }
+
+ /**
+@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+
+ cpu_stop_init_done(&done, 1);
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+ queue_stop_cpus_work(cpumask, fn, arg, &done);
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -259,13 +277,13 @@ repeat:
+ }
+
+ work = NULL;
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ if (!list_empty(&stopper->works)) {
+ work = list_first_entry(&stopper->works,
+ struct cpu_stop_work, list);
+ list_del_init(&work->list);
+ }
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+
+ if (work) {
+ cpu_stop_fn_t fn = work->fn;
+@@ -299,7 +317,13 @@ repeat:
+ kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ ksym_buf), arg);
+
++ /*
++ * Make sure that the wakeup and setting done->waiter
++ * to NULL is atomic.
++ */
++ local_irq_disable();
+ cpu_stop_signal_done(done, true);
++ local_irq_enable();
+ } else
+ schedule();
+
+@@ -337,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+ /* strictly unnecessary, as first user will wake it */
+ wake_up_process(stopper->thread);
+ /* mark enabled */
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ stopper->enabled = true;
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -352,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+ /* kill the stopper */
+ kthread_stop(stopper->thread);
+ /* drain remaining works */
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ list_for_each_entry(work, &stopper->works, list)
+ cpu_stop_signal_done(work->done, false);
+ stopper->enabled = false;
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+ /* release the stopper */
+ put_task_struct(stopper->thread);
+ stopper->thread = NULL;
+@@ -387,7 +411,7 @@ static int __init cpu_stop_init(void)
+ for_each_possible_cpu(cpu) {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+- spin_lock_init(&stopper->lock);
++ raw_spin_lock_init(&stopper->lock);
+ INIT_LIST_HEAD(&stopper->works);
+ }
+
+@@ -581,7 +605,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+ ret = stop_machine_cpu_stop(&smdata);
+
+ /* Busy wait for completion. */
+- while (!completion_done(&done.completion))
++ while (atomic_read(&done.nr_todo))
+ cpu_relax();
+
+ mutex_unlock(&stop_cpus_mutex);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch)
@@ -0,0 +1,213 @@
+From 5b43b403bdfc25d5e1264177959fcbd78d54dc22 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 15 Jun 2011 12:36:06 +0200
+Subject: [PATCH 158/303] hotplug: Lightweight get online cpus
+
+get_online_cpus() is a heavy weight function which involves a global
+mutex. migrate_disable() wants a simpler construct which only prevents
+a CPU from going down while a task is in a migrate disabled section.
+
+Implement a per cpu lockless mechanism, which serializes only in the
+real unplug case on a global mutex. That serialization affects only
+tasks on the cpu which should be brought down.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/cpu.h | 4 ++
+ kernel/cpu.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 128 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index c692acc..59d28d1 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -168,6 +168,8 @@ extern struct sysdev_class cpu_sysdev_class;
+
+ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
++extern void pin_current_cpu(void);
++extern void unpin_current_cpu(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+ #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+@@ -190,6 +192,8 @@ static inline void cpu_hotplug_driver_unlock(void)
+
+ #define get_online_cpus() do { } while (0)
+ #define put_online_cpus() do { } while (0)
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 563f136..df0a2fc 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -58,6 +58,102 @@ static struct {
+ .refcount = 0,
+ };
+
++struct hotplug_pcp {
++ struct task_struct *unplug;
++ int refcount;
++ struct completion synced;
++};
++
++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
++
++/**
++ * pin_current_cpu - Prevent the current cpu from being unplugged
++ *
++ * Lightweight version of get_online_cpus() to prevent cpu from being
++ * unplugged when code runs in a migration disabled region.
++ *
++ * Must be called with preemption disabled (preempt_count = 1)!
++ */
++void pin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++retry:
++ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
++ hp->unplug == current || (current->flags & PF_STOMPER)) {
++ hp->refcount++;
++ return;
++ }
++ preempt_enable();
++ mutex_lock(&cpu_hotplug.lock);
++ mutex_unlock(&cpu_hotplug.lock);
++ preempt_disable();
++ goto retry;
++}
++
++/**
++ * unpin_current_cpu - Allow unplug of current cpu
++ *
++ * Must be called with preemption or interrupts disabled!
++ */
++void unpin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++ WARN_ON(hp->refcount <= 0);
++
++ /* This is safe. sync_unplug_thread is pinned to this cpu */
++ if (!--hp->refcount && hp->unplug && hp->unplug != current &&
++ !(current->flags & PF_STOMPER))
++ wake_up_process(hp->unplug);
++}
++
++/*
++ * FIXME: Is this really correct under all circumstances ?
++ */
++static int sync_unplug_thread(void *data)
++{
++ struct hotplug_pcp *hp = data;
++
++ preempt_disable();
++ hp->unplug = current;
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ set_current_state(TASK_RUNNING);
++ preempt_enable();
++ complete(&hp->synced);
++ return 0;
++}
++
++/*
++ * Start the sync_unplug_thread on the target cpu and wait for it to
++ * complete.
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++ struct task_struct *tsk;
++
++ init_completion(&hp->synced);
++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
++ if (IS_ERR(tsk))
++ return (PTR_ERR(tsk));
++ kthread_bind(tsk, cpu);
++ wake_up_process(tsk);
++ wait_for_completion(&hp->synced);
++ return 0;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ hp->unplug = NULL;
++}
++
+ void get_online_cpus(void)
+ {
+ might_sleep();
+@@ -211,13 +307,14 @@ static int __ref take_cpu_down(void *_param)
+ /* Requires cpu_add_remove_lock to be held */
+ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ {
+- int err, nr_calls = 0;
++ int mycpu, err, nr_calls = 0;
+ void *hcpu = (void *)(long)cpu;
+ unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ struct take_cpu_down_param tcd_param = {
+ .mod = mod,
+ .hcpu = hcpu,
+ };
++ cpumask_var_t cpumask;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -225,7 +322,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ if (!cpu_online(cpu))
+ return -EINVAL;
+
+- cpu_hotplug_begin();
++ /* Move the downtaker off the unplug cpu */
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++ set_cpus_allowed_ptr(current, cpumask);
++ free_cpumask_var(cpumask);
++ preempt_disable();
++ mycpu = smp_processor_id();
++ if (mycpu == cpu) {
++ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
++ preempt_enable();
++ return -EBUSY;
++ }
++ preempt_enable();
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -233,7 +343,16 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+ printk("%s: attempt to take down CPU %u failed\n",
+ __func__, cpu);
+- goto out_release;
++ goto out_cancel;
++ }
++
++ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
++ if (err) {
++ nr_calls--;
++ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
++ goto out_cancel;
+ }
+
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+@@ -264,6 +383,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ check_for_tasks(cpu);
+
+ out_release:
++ cpu_unplug_done(cpu);
++out_cancel:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
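
pin_current_cpu()/unpin_current_cpu() above are the lightweight counterpart to get_online_cpus() for migrate-disabled regions. A hypothetical caller (illustrative only; later patches in this series wire this into migrate_disable()) would look roughly like this, observing the documented requirement that the pin is taken with preemption already disabled:

    static void pinned_section_sketch(void)
    {
            preempt_disable();
            pin_current_cpu();      /* this cpu cannot be unplugged now */

            /* ... work that must stay on this cpu ... */

            unpin_current_cpu();    /* may wake a waiting sync_unplug thread */
            preempt_enable();
    }
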
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0159-hotplug-sync_unplug-No.patch)
@@ -0,0 +1,27 @@
+From d190ab762a8d679a1f8b58da26c8b67179220d09 Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang0 at gmail.com>
+Date: Sun, 16 Oct 2011 18:56:43 +0800
+Subject: [PATCH 159/303] hotplug: sync_unplug: No " " in task name
+
+Otherwise the output will look a little odd.
+
+Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
+Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/cpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index df0a2fc..171cb6c 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -138,7 +138,7 @@ static int cpu_unplug_begin(unsigned int cpu)
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch)
@@ -0,0 +1,38 @@
+From 84d97fa6fb2cdfaae059a9c5d8ce5ddf11f998b1 Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang0 at gmail.com>
+Date: Thu, 28 Jul 2011 11:16:00 +0800
+Subject: [PATCH 160/303] hotplug: Reread hotplug_pcp on pin_current_cpu()
+ retry
+
+When retry happens, it's likely that the task has been migrated to
+another cpu (unless the unplug failed), but it still dereferences the
+original hotplug_pcp per cpu data.
+
+Update the pointer to hotplug_pcp in the retry path, so it points to
+the current cpu.
+
+Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/cpu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 171cb6c..80c72da 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -76,9 +76,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+ */
+ void pin_current_cpu(void)
+ {
+- struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++ struct hotplug_pcp *hp;
+
+ retry:
++ hp = &__get_cpu_var(hotplug_pcp);
++
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ hp->unplug == current || (current->flags & PF_STOMPER)) {
+ hp->refcount++;
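
The rule the fix follows is general: a per-cpu pointer taken before a retry path that can sleep must be re-read inside the loop, because the task may come back on a different cpu. A short illustrative sketch (not part of the patch; it assumes the kernel/cpu.c context where hotplug_pcp is defined, and cpu_is_pinnable() is a hypothetical stand-in for the checks pin_current_cpu() actually performs):

    static void percpu_retry_sketch(void)
    {
            struct hotplug_pcp *hp;

    retry:
            hp = &__get_cpu_var(hotplug_pcp);       /* re-read on every pass */
            if (!cpu_is_pinnable(hp)) {             /* hypothetical check */
                    /* blocks here, may wake up on another cpu */
                    goto retry;
            }
            hp->refcount++;
    }
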
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0161-sched-migrate-disable.patch.patch)
@@ -0,0 +1,214 @@
+From d69b72b1fcf3b7aca64f2d94a5c8d01243bb3c64 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 16 Jun 2011 13:26:08 +0200
+Subject: [PATCH 161/303] sched-migrate-disable.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/preempt.h | 8 +++++
+ include/linux/sched.h | 13 +++++--
+ include/linux/smp.h | 1 -
+ kernel/sched.c | 88 ++++++++++++++++++++++++++++++++++++++++++++---
+ lib/smp_processor_id.c | 6 ++--
+ 5 files changed, 104 insertions(+), 12 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 29db25f..363e5e2 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -108,6 +108,14 @@ do { \
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
++#ifdef CONFIG_SMP
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++#else
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ # define preempt_disable_rt() preempt_disable()
+ # define preempt_enable_rt() preempt_enable()
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index be35c44..1750a3c 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1263,6 +1263,7 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++ int migrate_disable;
+ cpumask_t cpus_allowed;
+
+ #ifdef CONFIG_PREEMPT_RCU
+@@ -1602,9 +1603,6 @@ struct task_struct {
+ #endif
+ };
+
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+-
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
+ #else
+@@ -2704,6 +2702,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+
+ #endif /* CONFIG_SMP */
+
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++
++ return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index e6c58d8..94c8430 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -80,7 +80,6 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
+
+ int smp_call_function_any(const struct cpumask *mask,
+ smp_call_func_t func, void *info, int wait);
+-
+ /*
+ * Generic and arch helpers
+ */
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 49b0d1d..026cde6 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6417,11 +6417,12 @@ static inline void sched_init_granularity(void)
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (p->sched_class && p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, new_mask);
+-
++ if (!p->migrate_disable) {
++ if (p->sched_class && p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
++ }
+ cpumask_copy(&p->cpus_allowed, new_mask);
+- p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+ /*
+@@ -6472,7 +6473,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -6491,6 +6492,83 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ preempt_disable();
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ preempt_enable();
++ return;
++ }
++
++ pin_current_cpu();
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 1;
++ preempt_enable();
++ return;
++ }
++ rq = task_rq_lock(p, &flags);
++ p->migrate_disable = 1;
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++ }
++ task_rq_unlock(rq, p, &flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ preempt_disable();
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ preempt_enable();
++ return;
++ }
++
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 0;
++ unpin_current_cpu();
++ preempt_enable();
++ return;
++ }
++
++ rq = task_rq_lock(p, &flags);
++ p->migrate_disable = 0;
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++ }
++
++ task_rq_unlock(rq, p, &flags);
++ unpin_current_cpu();
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
+index 503f087..60a7569 100644
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
+ if (!printk_ratelimit())
+ goto out_enable;
+
+- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+- "code: %s/%d\n",
+- preempt_count() - 1, current->comm, current->pid);
++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
++ "code: %s/%d\n", preempt_count() - 1,
++ current->migrate_disable, current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0162-hotplug-use-migrate-disable.patch.patch)
@@ -0,0 +1,39 @@
+From 6e8336bd2574b336ec8bc501fbdbd6e132b49804 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 19:35:29 +0200
+Subject: [PATCH 162/303] hotplug-use-migrate-disable.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/cpu.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 80c72da..2179062 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -330,14 +330,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+- preempt_disable();
++ migrate_disable();
+ mycpu = smp_processor_id();
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
+- preempt_enable();
++ migrate_enable();
+ return -EBUSY;
+ }
+- preempt_enable();
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -387,6 +386,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ out_release:
+ cpu_unplug_done(cpu);
+ out_cancel:
++ migrate_enable();
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch)
@@ -0,0 +1,62 @@
+From 8b85d54b97ee14ff2937c9e8b04f08ee2c2ba1eb Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang0 at gmail.com>
+Date: Sun, 16 Oct 2011 18:56:44 +0800
+Subject: [PATCH 163/303] hotplug: Call cpu_unplug_begin() before DOWN_PREPARE
+
+cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because
+at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is
+rebuilt. Otherwise the 'sync_unplug' thread will be running on the cpu
+on which it's created and not bound on the cpu which is about to go
+down.
+
+I found that by an incorrect warning on smp_processor_id() called by
+sync_unplug/1, and trace shows below:
+(echo 1 > /sys/device/system/cpu/cpu1/online)
+ bash-1664 [000] 83.136620: _cpu_down: Bind sync_unplug to cpu 1
+ bash-1664 [000] 83.136623: sched_wait_task: comm=sync_unplug/1 pid=1724 prio=120
+ bash-1664 [000] 83.136624: _cpu_down: Wake sync_unplug
+ bash-1664 [000] 83.136629: sched_wakeup: comm=sync_unplug/1 pid=1724 prio=120 success=1 target_cpu=000
+
+Wants to be folded back....
+
+Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
+Link: http://lkml.kernel.org/r/1318762607-2261-3-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/cpu.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 2179062..fa40834 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -338,22 +338,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ return -EBUSY;
+ }
+
+- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
++ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
+ if (err) {
+- nr_calls--;
+- __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+- printk("%s: attempt to take down CPU %u failed\n",
+- __func__, cpu);
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
+
+- cpu_hotplug_begin();
+- err = cpu_unplug_begin(cpu);
++ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+ nr_calls--;
+ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+- printk("cpu_unplug_begin(%d) failed\n", cpu);
+- goto out_cancel;
++ printk("%s: attempt to take down CPU %u failed\n",
++ __func__, cpu);
++ goto out_release;
+ }
+
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
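
The point of the reordering is that kthread_bind() in cpu_unplug_begin() only lands the sync_unplug thread on the dying cpu while that cpu is still active; once CPU_DOWN_PREPARE has cleared cpu_active and rebuilt the sched domains, the thread can end up elsewhere. The resulting call order in _cpu_down(), sketched (illustrative only, simplified from the hunk above and assuming the kernel/cpu.c context):

    static void cpu_down_order_sketch(unsigned int cpu)
    {
            cpu_hotplug_begin();
            cpu_unplug_begin(cpu);  /* cpu still active: kthread_bind() lands on it */

            /* only afterwards run the CPU_DOWN_PREPARE notifiers, which
             * clear cpu_active and rebuild the sched domains */
    }
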
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch)
@@ -0,0 +1,82 @@
+From f207794f8ff9ebe88165da1c24ae8f759bd8fb0f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 21:56:42 +0200
+Subject: [PATCH 164/303] ftrace-migrate-disable-tracing.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/ftrace_event.h | 3 ++-
+ kernel/trace/trace.c | 9 ++++++---
+ kernel/trace/trace_events.c | 1 +
+ kernel/trace/trace_output.c | 5 +++++
+ 4 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index c3da42d..7c5e176 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -49,7 +49,8 @@ struct trace_entry {
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+- int padding;
++ unsigned short migrate_disable;
++ unsigned short padding;
+ };
+
+ #define FTRACE_MAX_EVENT \
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5638104..1ea52d8 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1123,6 +1123,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
++
++ entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+@@ -1854,9 +1856,10 @@ static void print_lat_help_header(struct seq_file *m)
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / delay \n");
+- seq_puts(m, "# cmd pid ||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# |||| / _--=> migrate-disable\n");
++ seq_puts(m, "# ||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||| time | caller \n");
++ seq_puts(m, "# \\ / ||||| \\ | / \n");
+ }
+
+ static void print_func_help_header(struct seq_file *m)
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index c212a7f..aca63cc 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned short, migrate_disable);
+ __common_field(int, padding);
+
+ return ret;
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 1dcf253..bb9a58d 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -591,6 +591,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ else
+ ret = trace_seq_putc(s, '.');
+
++ if (entry->migrate_disable)
++ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ ret = trace_seq_putc(s, '.');
++
+ return ret;
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch)
@@ -0,0 +1,47 @@
+From 901be5f254ed6fe0c359222daa6ad4c1d3488ebf Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Wed, 16 Nov 2011 13:19:35 -0500
+Subject: [PATCH 165/303] tracing: Show padding as unsigned short
+
+RT added two bytes of migrate disable counting to the trace events
+and used two bytes of the padding to make the change. The structures and
+all were updated correctly, but the display in the event formats was
+not:
+
+cat /debug/tracing/events/sched/sched_switch/format
+
+name: sched_switch
+ID: 51
+format:
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:unsigned short common_migrate_disable; offset:8; size:2; signed:0;
+ field:int common_padding; offset:10; size:2; signed:0;
+
+The field for common_padding has the correct size and offset, but the
+use of "int" might confuse some parsers (and people that are reading
+it). This needs to be changed to "unsigned short".
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/1321467575.4181.36.camel@frodo
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/trace/trace_events.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index aca63cc..69cc908 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -117,7 +117,7 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
+ __common_field(unsigned short, migrate_disable);
+- __common_field(int, padding);
++ __common_field(unsigned short, padding);
+
+ return ret;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0166-migrate-disable-rt-variant.patch.patch)
@@ -0,0 +1,30 @@
+From c2e953cfecc7f7879332724d2e100b748c09d957 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 19:48:20 +0200
+Subject: [PATCH 166/303] migrate-disable-rt-variant.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/preempt.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 363e5e2..5aa7916 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -121,11 +121,15 @@ extern void migrate_enable(void);
+ # define preempt_enable_rt() preempt_enable()
+ # define preempt_disable_nort() do { } while (0)
+ # define preempt_enable_nort() do { } while (0)
++# define migrate_disable_rt() migrate_disable()
++# define migrate_enable_rt() migrate_enable()
+ #else
+ # define preempt_disable_rt() do { } while (0)
+ # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_nort() preempt_disable()
+ # define preempt_enable_nort() preempt_enable()
++# define migrate_disable_rt() do { } while (0)
++# define migrate_enable_rt() do { } while (0)
+ #endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0167-sched-Optimize-migrate_disable.patch)
@@ -0,0 +1,70 @@
+From 71adcda458eaa8d36e2950b55e7b6890ddbd5899 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Thu, 11 Aug 2011 15:03:35 +0200
+Subject: [PATCH 167/303] sched: Optimize migrate_disable
+
+Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few
+atomic ops. See comment on why it should be safe.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
+---
+ kernel/sched.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 026cde6..0c6556d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6512,7 +6512,19 @@ void migrate_disable(void)
+ preempt_enable();
+ return;
+ }
+- rq = task_rq_lock(p, &flags);
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Taking rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
+ p->migrate_disable = 1;
+ mask = tsk_cpus_allowed(p);
+
+@@ -6523,7 +6535,7 @@ void migrate_disable(void)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ }
+- task_rq_unlock(rq, p, &flags);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_disable);
+@@ -6551,7 +6563,11 @@ void migrate_enable(void)
+ return;
+ }
+
+- rq = task_rq_lock(p, &flags);
++ /*
++ * See comment in migrate_disable().
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
+
+@@ -6563,7 +6579,7 @@ void migrate_enable(void)
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ }
+
+- task_rq_unlock(rq, p, &flags);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ unpin_current_cpu();
+ preempt_enable();
+ }
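
For reference, the two locking shapes being traded, distilled from the hunks above (a sketch only, not additional patch code; it assumes the kernel/sched.c context where struct rq, this_rq() and task_rq_lock() live): task_rq_lock() takes p->pi_lock and then rq->lock, whereas for current, which cannot concurrently be woken, the rq lock alone is argued to be enough against set_cpus_allowed_ptr().

    /* before: two locks, extra atomics on the pi_lock */
    static void update_current_via_task_rq_lock(struct task_struct *p)
    {
            unsigned long flags;
            struct rq *rq = task_rq_lock(p, &flags);

            /* ... modify p's affinity state ... */
            task_rq_unlock(rq, p, &flags);
    }

    /* after: rq->lock only, valid because p == current here */
    static void update_current_via_rq_lock(void)
    {
            unsigned long flags;
            struct rq *rq = this_rq();

            raw_spin_lock_irqsave(&rq->lock, flags);
            /* ... modify current's affinity state ... */
            raw_spin_unlock_irqrestore(&rq->lock, flags);
    }
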
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0168-sched-Generic-migrate_disable.patch)
@@ -0,0 +1,187 @@
+From 2b73a05e76c1efcb608addffda1b55d5d4b6fed4 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Thu, 11 Aug 2011 15:14:58 +0200
+Subject: [PATCH 168/303] sched: Generic migrate_disable
+
+Make migrate_disable() be a preempt_disable() for !rt kernels. This
+allows generic code to use it but still enforces that these code
+sections stay relatively small.
+
+A preemptible migrate_disable() accessible for general use would allow
+people to grow arbitrary per-cpu crap instead of cleaning these things
+up.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
+---
+ include/linux/preempt.h | 21 +++++++++------------
+ include/linux/sched.h | 13 +++++++++++++
+ include/linux/smp.h | 9 ++-------
+ kernel/sched.c | 6 ++++--
+ kernel/trace/trace.c | 2 +-
+ lib/smp_processor_id.c | 2 +-
+ 6 files changed, 30 insertions(+), 23 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 5aa7916..6450c01 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -108,28 +108,25 @@ do { \
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+-#ifdef CONFIG_SMP
+-extern void migrate_disable(void);
+-extern void migrate_enable(void);
+-#else
+-# define migrate_disable() do { } while (0)
+-# define migrate_enable() do { } while (0)
+-#endif
+-
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ # define preempt_disable_rt() preempt_disable()
+ # define preempt_enable_rt() preempt_enable()
+ # define preempt_disable_nort() do { } while (0)
+ # define preempt_enable_nort() do { } while (0)
+-# define migrate_disable_rt() migrate_disable()
+-# define migrate_enable_rt() migrate_enable()
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++# endif /* CONFIG_SMP */
+ #else
+ # define preempt_disable_rt() do { } while (0)
+ # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_nort() preempt_disable()
+ # define preempt_enable_nort() preempt_enable()
+-# define migrate_disable_rt() do { } while (0)
+-# define migrate_enable_rt() do { } while (0)
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
+ #endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1750a3c..6cf7ed9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1263,7 +1263,9 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
++#endif
+ cpumask_t cpus_allowed;
+
+ #ifdef CONFIG_PREEMPT_RCU
+@@ -2702,11 +2704,22 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+
+ #endif /* CONFIG_SMP */
+
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ if (p->migrate_disable)
+ return cpumask_of(task_cpu(p));
++#endif
+
+ return &p->cpus_allowed;
+ }
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 94c8430..78fd0a2 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -172,13 +172,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
+-# define get_cpu_light() get_cpu()
+-# define put_cpu_light() put_cpu()
+-#else
+-# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+-# define put_cpu_light() migrate_enable()
+-#endif
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
+
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 0c6556d..d0f0f9f 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6417,7 +6417,7 @@ static inline void sched_init_granularity(void)
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (!p->migrate_disable) {
++ if (!__migrate_disabled(p)) {
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+@@ -6473,7 +6473,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -6492,6 +6492,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+@@ -6584,6 +6585,7 @@ void migrate_enable(void)
+ preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_enable);
++#endif /* CONFIG_PREEMPT_RT_FULL */
+
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1ea52d8..8d7ee39 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1124,7 +1124,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+
+- entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
+index 60a7569..b5e9241 100644
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor_id(void)
+
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+ "code: %s/%d\n", preempt_count() - 1,
+- current->migrate_disable, current->comm, current->pid);
++ __migrate_disabled(current), current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
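As an aside for readers following this series: the get_cpu_light()/put_cpu_light() pairing above relies on migrate_disable()/migrate_enable() nesting like a reference count, with only the outermost level pinning and unpinning the task. A minimal userspace sketch of that behaviour, with invented names and no kernel interfaces, might look like this:

    /* Illustrative model only: "struct task" and its counter stand in for
     * current->migrate_disable; this is not the kernel implementation. */
    #include <assert.h>
    #include <stdio.h>

    struct task { int migrate_disable; };
    static struct task current_task;

    static void migrate_disable(void)
    {
        /* Only the first, outermost call actually pins the task. */
        if (current_task.migrate_disable++ == 0)
            printf("pinned to the current CPU\n");
    }

    static void migrate_enable(void)
    {
        assert(current_task.migrate_disable > 0);
        /* Only the matching outermost enable unpins it again. */
        if (--current_task.migrate_disable == 0)
            printf("unpinned, migration allowed again\n");
    }

    int main(void)
    {
        migrate_disable();      /* e.g. get_cpu_light() */
        migrate_disable();      /* nested user: counter goes to 2, no extra work */
        migrate_enable();       /* still pinned */
        migrate_enable();       /* e.g. put_cpu_light(): now unpinned */
        return 0;
    }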
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch)
@@ -0,0 +1,67 @@
+From c4d595b578f7687cb82bb44dd5ab10c06ed33ef1 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <efault at gmx.de>
+Date: Tue, 23 Aug 2011 16:12:43 +0200
+Subject: [PATCH 169/303] sched, rt: Fix migrate_enable() thinko
+
+Assigning mask = tsk_cpus_allowed(p) after p->migrate_disable = 0 ensures
+that we won't see a mask change: no push/pull, so we stack tasks on one CPU.
+
+Also add a couple of fields to sched_debug for the next guy.
+
+[ Build fix from Stratos Psomadakis <psomas at gentoo.org> ]
+
+Signed-off-by: Mike Galbraith <efault at gmx.de>
+Cc: Paul E. McKenney <paulmck at us.ibm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/1314108763.6689.4.camel@marge.simson.net
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 4 +++-
+ kernel/sched_debug.c | 7 +++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d0f0f9f..ea3b7ce 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6569,12 +6569,14 @@ void migrate_enable(void)
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+- p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
++ p->migrate_disable = 0;
+
+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ /* Get the mask now that migration is enabled */
++ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
+index a6710a1..528032b 100644
+--- a/kernel/sched_debug.c
++++ b/kernel/sched_debug.c
+@@ -235,6 +235,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
+ P(rt_throttled);
+ PN(rt_time);
+ PN(rt_runtime);
++#ifdef CONFIG_SMP
++ P(rt_nr_migratory);
++#endif
+
+ #undef PN
+ #undef P
+@@ -484,6 +487,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ P(se.load.weight);
+ P(policy);
+ P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ P(migrate_disable);
++#endif
++ P(rt.nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
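The ordering the fix restores can be seen in a small standalone model: while migrate_disable is set, the effective mask is the current CPU only, so reading it before clearing the flag is what lets migrate_enable() notice that the affinity has to be fixed up afterwards. This is a sketch with invented helpers, not the scheduler code:

    #include <stdio.h>

    struct task {
        int migrate_disable;
        unsigned long cpus_allowed;     /* the task's full affinity mask */
    };

    /* Model of tsk_cpus_allowed(): while migration is disabled the task is
     * reported as affine to its current CPU only. */
    static unsigned long effective_mask(const struct task *p, unsigned long cur_cpu)
    {
        return p->migrate_disable ? cur_cpu : p->cpus_allowed;
    }

    int main(void)
    {
        struct task p = { .migrate_disable = 1, .cpus_allowed = 0xf };
        unsigned long cur_cpu = 0x1;

        /* Clearing the flag first would make both reads return the full
         * mask, so the narrowing done in migrate_disable() would never be
         * undone: no push/pull, tasks stay stacked on one CPU.
         *
         * Fixed order: read the restricted mask first ... */
        unsigned long before = effective_mask(&p, cur_cpu);
        p.migrate_disable = 0;
        /* ... then read again once migration is enabled. */
        unsigned long after = effective_mask(&p, cur_cpu);

        if (before != after)
            printf("affinity widened from %#lx to %#lx, fix up nr_cpus_allowed\n",
                   before, after);
        return 0;
    }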
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch)
@@ -0,0 +1,89 @@
+From 0703d587d1933cadf80bf4f4bad41e5f75fc8cad Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Fri, 2 Sep 2011 14:29:27 +0200
+Subject: [PATCH 170/303] sched: teach migrate_disable about atomic contexts
+
+ <NMI> [<ffffffff812dafd8>] spin_bug+0x94/0xa8
+ [<ffffffff812db07f>] do_raw_spin_lock+0x43/0xea
+ [<ffffffff814fa9be>] _raw_spin_lock_irqsave+0x6b/0x85
+ [<ffffffff8106ff9e>] ? migrate_disable+0x75/0x12d
+ [<ffffffff81078aaf>] ? pin_current_cpu+0x36/0xb0
+ [<ffffffff8106ff9e>] migrate_disable+0x75/0x12d
+ [<ffffffff81115b9d>] pagefault_disable+0xe/0x1f
+ [<ffffffff81047027>] copy_from_user_nmi+0x74/0xe6
+ [<ffffffff810489d7>] perf_callchain_user+0xf3/0x135
+
+Now clearly we can't go around taking locks from NMI context; cure
+this by short-circuiting migrate_disable() when we're already in an
+atomic context.
+
+Add some extra debugging to avoid things like:
+
+ preempt_disable()
+ migrate_disable();
+
+ preempt_enable();
+ migrate_enable();
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched.c | 21 +++++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 6cf7ed9..c60cfde 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1265,6 +1265,9 @@ struct task_struct {
+ unsigned int policy;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
++#ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++#endif
+ #endif
+ cpumask_t cpus_allowed;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index ea3b7ce..c6429a5 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6500,6 +6500,17 @@ void migrate_disable(void)
+ unsigned long flags;
+ struct rq *rq;
+
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
+ preempt_disable();
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+@@ -6548,6 +6559,16 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ preempt_disable();
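The change above is essentially bookkeeping: when called from an already-atomic context (the NMI backtrace in the changelog), migrate_disable()/migrate_enable() skip all locking and only account the call so that an imbalance can be warned about later. A rough userspace model of that accounting, with assert() standing in for WARN_ON_ONCE() and invented names throughout, could be:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool atomic_context;            /* stand-in for in_atomic() */
    static int migrate_disable_cnt;        /* models p->migrate_disable */
    static int migrate_disable_atomic;     /* models the debug counter */

    static void migrate_disable(void)
    {
        if (atomic_context) {
            /* No locks may be taken here; just record the call. */
            migrate_disable_atomic++;
            return;
        }
        /* A disable from normal context must not be paired with an enable
         * issued from atomic context, and vice versa. */
        assert(migrate_disable_atomic == 0);
        migrate_disable_cnt++;
    }

    static void migrate_enable(void)
    {
        if (atomic_context) {
            migrate_disable_atomic--;
            return;
        }
        assert(migrate_disable_atomic == 0);
        assert(migrate_disable_cnt > 0);
        migrate_disable_cnt--;
    }

    int main(void)
    {
        atomic_context = true;      /* e.g. inside an NMI handler */
        migrate_disable();          /* short-circuited, no locking */
        migrate_enable();
        atomic_context = false;
        migrate_disable();          /* normal, counted path */
        migrate_enable();
        printf("balanced: %d/%d\n", migrate_disable_cnt, migrate_disable_atomic);
        return 0;
    }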
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch)
@@ -0,0 +1,307 @@
+From bb4523e2787dc7f6bf47c6866f14a8883eb891d3 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Tue, 27 Sep 2011 08:40:23 -0400
+Subject: [PATCH 171/303] sched: Postpone actual migration disable to schedule
+
+The migrate_disable() call can add a bit of overhead to the RT kernel,
+as changing the affinity is expensive to do at every lock encountered.
+As a running task cannot migrate, the actual disabling of migration
+does not need to occur until the task is about to schedule out.
+
+In most cases, a task that disables migration will enable it again
+before it schedules out, so this change improves performance tremendously.
+
+[ Frank Rowand: UP compile fix ]
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <williams at redhat.com>
+Link: http://lkml.kernel.org/r/20110927124422.779693167@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 251 +++++++++++++++++++++++++++++---------------------------
+ 1 file changed, 132 insertions(+), 119 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index c6429a5..ae75aa5 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4550,6 +4550,135 @@ static inline void schedule_debug(struct task_struct *prev)
+ schedstat_inc(this_rq(), sched_count);
+ }
+
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
++
++static inline void update_migrate_disable(struct task_struct *p)
++{
++ const struct cpumask *mask;
++
++ if (likely(!p->migrate_disable))
++ return;
++
++ /* Did we already update affinity? */
++ if (unlikely(migrate_disabled_updated(p)))
++ return;
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Having rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
++ */
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++
++ /* Let migrate_enable know to fix things back up */
++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
++ }
++}
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ preempt_disable();
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ preempt_enable();
++ return;
++ }
++
++ pin_current_cpu();
++ p->migrate_disable = 1;
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ preempt_disable();
++ if (migrate_disable_count(p) > 1) {
++ p->migrate_disable--;
++ preempt_enable();
++ return;
++ }
++
++ if (unlikely(migrate_disabled_updated(p))) {
++ /*
++ * See comment in update_migrate_disable() about locking.
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ mask = tsk_cpus_allowed(p);
++ /*
++ * Clearing migrate_disable causes tsk_cpus_allowed to
++ * show the tasks original cpu affinity.
++ */
++ p->migrate_disable = 0;
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
++ /* Get the mask now that migration is enabled */
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++ }
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ } else
++ p->migrate_disable = 0;
++
++ unpin_current_cpu();
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++#else
++static inline void update_migrate_disable(struct task_struct *p) { }
++#define migrate_disabled_updated(p) 0
++#endif
++
+ static void put_prev_task(struct rq *rq, struct task_struct *prev)
+ {
+ if (prev->on_rq || rq->skip_clock_update < 0)
+@@ -4609,6 +4738,8 @@ need_resched:
+
+ raw_spin_lock_irq(&rq->lock);
+
++ update_migrate_disable(prev);
++
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ if (unlikely(signal_pending_state(prev->state, prev))) {
+@@ -6417,7 +6548,7 @@ static inline void sched_init_granularity(void)
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (!__migrate_disabled(p)) {
++ if (!migrate_disabled_updated(p)) {
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+@@ -6492,124 +6623,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-void migrate_disable(void)
+-{
+- struct task_struct *p = current;
+- const struct cpumask *mask;
+- unsigned long flags;
+- struct rq *rq;
+-
+- if (in_atomic()) {
+-#ifdef CONFIG_SCHED_DEBUG
+- p->migrate_disable_atomic++;
+-#endif
+- return;
+- }
+-
+-#ifdef CONFIG_SCHED_DEBUG
+- WARN_ON_ONCE(p->migrate_disable_atomic);
+-#endif
+-
+- preempt_disable();
+- if (p->migrate_disable) {
+- p->migrate_disable++;
+- preempt_enable();
+- return;
+- }
+-
+- pin_current_cpu();
+- if (unlikely(!scheduler_running)) {
+- p->migrate_disable = 1;
+- preempt_enable();
+- return;
+- }
+-
+- /*
+- * Since this is always current we can get away with only locking
+- * rq->lock, the ->cpus_allowed value can normally only be changed
+- * while holding both p->pi_lock and rq->lock, but seeing that this
+- * it current, we cannot actually be waking up, so all code that
+- * relies on serialization against p->pi_lock is out of scope.
+- *
+- * Taking rq->lock serializes us against things like
+- * set_cpus_allowed_ptr() that can still happen concurrently.
+- */
+- rq = this_rq();
+- raw_spin_lock_irqsave(&rq->lock, flags);
+- p->migrate_disable = 1;
+- mask = tsk_cpus_allowed(p);
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->rt.nr_cpus_allowed = cpumask_weight(mask);
+- }
+- raw_spin_unlock_irqrestore(&rq->lock, flags);
+- preempt_enable();
+-}
+-EXPORT_SYMBOL_GPL(migrate_disable);
+-
+-void migrate_enable(void)
+-{
+- struct task_struct *p = current;
+- const struct cpumask *mask;
+- unsigned long flags;
+- struct rq *rq;
+-
+- if (in_atomic()) {
+-#ifdef CONFIG_SCHED_DEBUG
+- p->migrate_disable_atomic--;
+-#endif
+- return;
+- }
+-
+-#ifdef CONFIG_SCHED_DEBUG
+- WARN_ON_ONCE(p->migrate_disable_atomic);
+-#endif
+- WARN_ON_ONCE(p->migrate_disable <= 0);
+-
+- preempt_disable();
+- if (p->migrate_disable > 1) {
+- p->migrate_disable--;
+- preempt_enable();
+- return;
+- }
+-
+- if (unlikely(!scheduler_running)) {
+- p->migrate_disable = 0;
+- unpin_current_cpu();
+- preempt_enable();
+- return;
+- }
+-
+- /*
+- * See comment in migrate_disable().
+- */
+- rq = this_rq();
+- raw_spin_lock_irqsave(&rq->lock, flags);
+- mask = tsk_cpus_allowed(p);
+- p->migrate_disable = 0;
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- /* Get the mask now that migration is enabled */
+- mask = tsk_cpus_allowed(p);
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->rt.nr_cpus_allowed = cpumask_weight(mask);
+- }
+-
+- raw_spin_unlock_irqrestore(&rq->lock, flags);
+- unpin_current_cpu();
+- preempt_enable();
+-}
+-EXPORT_SYMBOL_GPL(migrate_enable);
+-#endif /* CONFIG_PREEMPT_RT_FULL */
+-
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
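The scheme the changelog describes — count migrate_disable() cheaply, and only touch the affinity when the task really schedules out while migration is disabled — can be sketched in plain C roughly as below. The flag bit and helpers mirror the patch in spirit only; this is not the kernel code:

    #include <stdio.h>

    #define MIGRATE_DISABLE_SET_AFFIN  (1 << 30)
    #define DISABLED_UPDATED(c)        ((c) & MIGRATE_DISABLE_SET_AFFIN)
    #define DISABLE_COUNT(c)           ((c) & ~MIGRATE_DISABLE_SET_AFFIN)

    static int migrate_disable_state;   /* models p->migrate_disable */

    static void migrate_disable(void)
    {
        /* Cheap path: just count, no affinity change here. */
        migrate_disable_state++;
    }

    /* Called only from the schedule path, i.e. when the task actually
     * schedules out while migration is disabled. */
    static void update_migrate_disable(void)
    {
        if (!DISABLE_COUNT(migrate_disable_state) ||
            DISABLED_UPDATED(migrate_disable_state))
            return;
        printf("restricting affinity to the current CPU\n");
        migrate_disable_state |= MIGRATE_DISABLE_SET_AFFIN;
    }

    static void migrate_enable(void)
    {
        if (DISABLE_COUNT(migrate_disable_state) > 1) {
            migrate_disable_state--;
            return;
        }
        if (DISABLED_UPDATED(migrate_disable_state))
            printf("restoring the original affinity\n");
        migrate_disable_state = 0;
    }

    int main(void)
    {
        migrate_disable();
        migrate_enable();            /* common case: never scheduled, no mask work */

        migrate_disable();
        update_migrate_disable();    /* task scheduled out: pay the cost now */
        migrate_enable();            /* and undo it */
        return 0;
    }

In the common case called out in the changelog, the task never reaches update_migrate_disable() between the two calls, so the expensive mask update is skipped entirely.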
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch)
@@ -0,0 +1,40 @@
+From 43ad06ff635e839a74ab8c579467fcffbe202822 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Tue, 27 Sep 2011 08:40:24 -0400
+Subject: [PATCH 172/303] sched: Do not compare cpu masks in scheduler
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <williams at redhat.com>
+Link: http://lkml.kernel.org/r/20110927124423.128129033@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index ae75aa5..e0f9c6a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4578,16 +4578,12 @@ static inline void update_migrate_disable(struct task_struct *p)
+ */
+ mask = tsk_cpus_allowed(p);
+
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->rt.nr_cpus_allowed = cpumask_weight(mask);
+-
+- /* Let migrate_enable know to fix things back up */
+- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+- }
++ /* Let migrate_enable know to fix things back up */
++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+ }
+
+ void migrate_disable(void)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch)
@@ -0,0 +1,70 @@
+From 83b2831b73d49f9f9a435640d8cdf87c155db856 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Tue, 27 Sep 2011 08:40:25 -0400
+Subject: [PATCH 173/303] sched: Have migrate_disable ignore bounded threads
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <williams at redhat.com>
+Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index e0f9c6a..65b9691 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4590,7 +4590,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic()) {
++ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -4621,7 +4621,7 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
+- if (in_atomic()) {
++ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
+@@ -4642,26 +4642,21 @@ void migrate_enable(void)
+
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+- * See comment in update_migrate_disable() about locking.
++ * Undo whatever update_migrate_disable() did, also see there
++ * about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+- mask = tsk_cpus_allowed(p);
++
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
+ * show the tasks original cpu affinity.
+ */
+ p->migrate_disable = 0;
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
+- /* Get the mask now that migration is enabled */
+- mask = tsk_cpus_allowed(p);
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->rt.nr_cpus_allowed = cpumask_weight(mask);
+- }
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ p->migrate_disable = 0;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch)
@@ -0,0 +1,28 @@
+From a6dd839952ef2c8ec45fee787f19039622870592 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 4 Nov 2011 20:48:36 +0100
+Subject: [PATCH 174/303] sched-clear-pf-thread-bound-on-fallback-rq.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 65b9691..364a863 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2566,7 +2566,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
+ printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+-
++ /*
++ * Clear PF_THREAD_BOUND, otherwise we wreckage
++ * migrate_disable/enable. See optimization for
++ * PF_THREAD_BOUND tasks there.
++ */
++ p->flags &= ~PF_THREAD_BOUND;
+ return dest_cpu;
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0175-ftrace-crap.patch.patch)
@@ -0,0 +1,93 @@
+From d3375b5a7e763bbd138b9c09f904abed24a6bf21 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 9 Sep 2011 16:55:53 +0200
+Subject: [PATCH 175/303] ftrace-crap.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/trace/trace.c | 26 ++++++++++++++++++++++++--
+ kernel/trace/trace.h | 1 -
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8d7ee39..dcf0df9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -359,11 +359,13 @@ static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+ */
+ void trace_wake_up(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const unsigned long delay = msecs_to_jiffies(2);
+
+ if (trace_flags & TRACE_ITER_BLOCK)
+ return;
+ schedule_delayed_work(&wakeup_work, delay);
++#endif
+ }
+
+ static int __init set_buf_size(char *str)
+@@ -719,6 +721,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
++#else
++#define default_wait_pipe poll_wait_pipe
++#endif
++
+ /**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+@@ -3198,6 +3206,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
+ return 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3219,8 +3228,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ }
+ }
+
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -3231,6 +3239,20 @@ void default_wait_pipe(struct trace_iterator *iter)
+
+ finish_wait(&trace_wait, &wait);
+ }
++#else
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++ struct trace_iterator *iter = filp->private_data;
++
++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ poll_wait_pipe(iter);
++ if (!trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++#endif
+
+ /*
+ * This is a make-shift waitqueue.
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 092e1f8..69b8700 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -345,7 +345,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
+
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
+-void default_wait_pipe(struct trace_iterator *iter);
+ void poll_wait_pipe(struct trace_iterator *iter);
+
+ void ftrace(struct trace_array *tr,
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch)
@@ -0,0 +1,434 @@
+From fb83c2278aa1e20644b534cd0e5ea5e2ce424e50 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Tue, 27 Sep 2011 13:56:50 -0400
+Subject: [PATCH 176/303] ring-buffer: Convert reader_lock from raw_spin_lock
+ into spin_lock
+
+The reader_lock is mostly taken in normal context with interrupts enabled.
+But because ftrace_dump() can happen anywhere, it is used as a spin lock,
+and in some cases a check of in_nmi() is performed to determine whether the
+ftrace_dump() was initiated from an NMI; if it was, the lock is not taken.
+
+But having the lock as a raw_spin_lock() causes issues with the real-time
+kernel, as the lock is held during allocation and freeing of the buffer.
+As memory locks convert into mutexes on RT, keeping the reader_lock as a
+raw spinlock causes problems.
+
+Converting the reader_lock is not straightforward, as we must still deal
+with ftrace_dump() happening not only from an NMI but also from
+true interrupt context in PREEMPT_RT.
+
+Two wrapper functions are created to take and release the reader lock:
+
+ int read_buffer_lock(cpu_buffer, unsigned long *flags)
+ void read_buffer_unlock(cpu_buffer, unsigned long flags, int locked)
+
+The read_buffer_lock() returns 1 if it actually took the lock, having
+disabled interrupts and updated the flags in the process. The only time
+it returns 0 is when an ftrace_dump() happens in an unsafe context.
+
+The read_buffer_unlock() checks the return of locked and will simply
+unlock the spin lock if it was successfully taken.
+
+Instead of just having this in specific cases that the NMI might call
+into, all instances of the reader_lock are converted to the wrapper
+functions to make this a bit simpler to read and less error-prone.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark at redhat.com>
+Link: http://lkml.kernel.org/r/1317146210.26514.33.camel@gandalf.stny.rr.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/trace/ring_buffer.c | 151 ++++++++++++++++++++++++--------------------
+ 1 file changed, 81 insertions(+), 70 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6fdc629..70112f3 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
+ int cpu;
+ atomic_t record_disabled;
+ struct ring_buffer *buffer;
+- raw_spinlock_t reader_lock; /* serialize readers */
++ spinlock_t reader_lock; /* serialize readers */
+ arch_spinlock_t lock;
+ struct lock_class_key lock_key;
+ struct list_head *pages;
+@@ -1049,6 +1049,44 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ return -ENOMEM;
+ }
+
++static inline int ok_to_lock(void)
++{
++ if (in_nmi())
++ return 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (in_atomic())
++ return 0;
++#endif
++ return 1;
++}
++
++static int
++read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
++ unsigned long *flags)
++{
++ /*
++ * If an NMI die dumps out the content of the ring buffer
++ * do not grab locks. We also permanently disable the ring
++ * buffer too. A one time deal is all you get from reading
++ * the ring buffer from an NMI.
++ */
++ if (!ok_to_lock()) {
++ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
++ return 1;
++ tracing_off_permanent();
++ return 0;
++ }
++ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
++ return 1;
++}
++
++static void
++read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
++ unsigned long flags, int locked)
++{
++ if (locked)
++ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++}
+ static struct ring_buffer_per_cpu *
+ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+ {
+@@ -1064,7 +1102,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+
+ cpu_buffer->cpu = cpu;
+ cpu_buffer->buffer = buffer;
+- raw_spin_lock_init(&cpu_buffer->reader_lock);
++ spin_lock_init(&cpu_buffer->reader_lock);
+ lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+@@ -1259,9 +1297,11 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
+ {
+ struct buffer_page *bpage;
+ struct list_head *p;
++ unsigned long flags;
+ unsigned i;
++ int locked;
+
+- raw_spin_lock_irq(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_head_page_deactivate(cpu_buffer);
+
+ for (i = 0; i < nr_pages; i++) {
+@@ -1279,7 +1319,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
+ rb_check_pages(cpu_buffer);
+
+ out:
+- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+
+ static void
+@@ -1288,9 +1328,11 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ {
+ struct buffer_page *bpage;
+ struct list_head *p;
++ unsigned long flags;
+ unsigned i;
++ int locked;
+
+- raw_spin_lock_irq(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_head_page_deactivate(cpu_buffer);
+
+ for (i = 0; i < nr_pages; i++) {
+@@ -1305,7 +1347,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ rb_check_pages(cpu_buffer);
+
+ out:
+- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+
+ /**
+@@ -2689,7 +2731,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ /*
+ * if the tail is on reader_page, oldest time stamp is on the reader
+ * page
+@@ -2700,7 +2742,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ bpage = rb_set_head_page(cpu_buffer);
+ if (bpage)
+ ret = bpage->page->time_stamp;
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return ret;
+ }
+@@ -2864,15 +2906,16 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
++ int locked;
+
+ if (!iter)
+ return;
+
+ cpu_buffer = iter->cpu_buffer;
+
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ rb_iter_reset(iter);
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+
+@@ -3292,21 +3335,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+
+-static inline int rb_ok_to_lock(void)
+-{
+- /*
+- * If an NMI die dumps out the content of the ring buffer
+- * do not grab locks. We also permanently disable the ring
+- * buffer too. A one time deal is all you get from reading
+- * the ring buffer from an NMI.
+- */
+- if (likely(!in_nmi()))
+- return 1;
+-
+- tracing_off_permanent();
+- return 0;
+-}
+-
+ /**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+@@ -3324,22 +3352,17 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_event *event;
+ unsigned long flags;
+- int dolock;
++ int locked;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return NULL;
+
+- dolock = rb_ok_to_lock();
+ again:
+- local_irq_save(flags);
+- if (dolock)
+- raw_spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ rb_advance_reader(cpu_buffer);
+- if (dolock)
+- raw_spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+@@ -3361,11 +3384,12 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ struct ring_buffer_event *event;
+ unsigned long flags;
++ int locked;
+
+ again:
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ event = rb_iter_peek(iter, ts);
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+@@ -3391,9 +3415,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_event *event = NULL;
+ unsigned long flags;
+- int dolock;
+-
+- dolock = rb_ok_to_lock();
++ int locked;
+
+ again:
+ /* might be called in atomic */
+@@ -3403,9 +3425,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ goto out;
+
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- raw_spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event) {
+@@ -3413,9 +3433,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ rb_advance_reader(cpu_buffer);
+ }
+
+- if (dolock)
+- raw_spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
++
+
+ out:
+ preempt_enable();
+@@ -3500,17 +3519,18 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
++ int locked;
+
+ if (!iter)
+ return;
+
+ cpu_buffer = iter->cpu_buffer;
+
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ arch_spin_lock(&cpu_buffer->lock);
+ rb_iter_reset(iter);
+ arch_spin_unlock(&cpu_buffer->lock);
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_read_start);
+
+@@ -3544,8 +3564,9 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+ struct ring_buffer_event *event;
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
++ int locked;
+
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ again:
+ event = rb_iter_peek(iter, ts);
+ if (!event)
+@@ -3556,7 +3577,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+
+ rb_advance_iter(iter);
+ out:
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ return event;
+ }
+@@ -3621,13 +3642,14 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ unsigned long flags;
++ int locked;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return;
+
+ atomic_inc(&cpu_buffer->record_disabled);
+
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+
+ if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ goto out;
+@@ -3639,7 +3661,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+ arch_spin_unlock(&cpu_buffer->lock);
+
+ out:
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ }
+@@ -3666,22 +3688,16 @@ int ring_buffer_empty(struct ring_buffer *buffer)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+- int dolock;
++ int locked;
+ int cpu;
+ int ret;
+
+- dolock = rb_ok_to_lock();
+-
+ /* yes this is racy, but if you don't like the race, lock the buffer */
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- raw_spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ ret = rb_per_cpu_empty(cpu_buffer);
+- if (dolock)
+- raw_spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ if (!ret)
+ return 0;
+@@ -3700,22 +3716,16 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+- int dolock;
++ int locked;
+ int ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 1;
+
+- dolock = rb_ok_to_lock();
+-
+ cpu_buffer = buffer->buffers[cpu];
+- local_irq_save(flags);
+- if (dolock)
+- raw_spin_lock(&cpu_buffer->reader_lock);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+ ret = rb_per_cpu_empty(cpu_buffer);
+- if (dolock)
+- raw_spin_unlock(&cpu_buffer->reader_lock);
+- local_irq_restore(flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ return ret;
+ }
+@@ -3890,6 +3900,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ unsigned int commit;
+ unsigned int read;
+ u64 save_timestamp;
++ int locked;
+ int ret = -1;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+@@ -3911,7 +3922,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ if (!bpage)
+ goto out;
+
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ locked = read_buffer_lock(cpu_buffer, &flags);
+
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
+@@ -4035,7 +4046,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+
+ out_unlock:
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ read_buffer_unlock(cpu_buffer, flags, locked);
+
+ out:
+ return ret;
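The wrapper pattern the changelog introduces — a lock helper that reports whether it really took reader_lock, and an unlock helper that honours that report — can be modelled in userspace roughly as follows. A pthread mutex stands in for the reader_lock and a flag stands in for the in_nmi()/in_atomic() checks; everything here is illustrative, not the ring buffer code (compile with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool unsafe_context;         /* stand-in for in_nmi()/in_atomic() */

    static int read_buffer_lock(void)
    {
        if (unsafe_context) {
            /* Best effort only: take the lock if it happens to be free,
             * otherwise report that the caller runs unlocked. */
            return pthread_mutex_trylock(&reader_lock) == 0;
        }
        pthread_mutex_lock(&reader_lock);
        return 1;
    }

    static void read_buffer_unlock(int locked)
    {
        /* Only drop the lock if read_buffer_lock() actually took it. */
        if (locked)
            pthread_mutex_unlock(&reader_lock);
    }

    int main(void)
    {
        int locked = read_buffer_lock();
        /* ... peek at or consume an event ... */
        read_buffer_unlock(locked);
        printf("done, locked=%d\n", locked);
        return 0;
    }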
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch)
@@ -0,0 +1,28 @@
+From d2ee1302b22f19086fe9be14ec935282dba70ab8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 16:29:27 +0200
+Subject: [PATCH 177/303] net-netif_rx_ni-migrate-disable.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ net/core/dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2ae608c..e04372e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3030,11 +3030,11 @@ int netif_rx_ni(struct sk_buff *skb)
+ {
+ int err;
+
+- preempt_disable();
++ migrate_disable();
+ err = netif_rx(skb);
+ if (local_softirq_pending())
+ thread_do_softirq();
+- preempt_enable();
++ migrate_enable();
+
+ return err;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch)
@@ -0,0 +1,116 @@
+From 201ec9d43a45682d6017731d492446a591192bef Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 3 Jul 2009 13:16:38 -0500
+Subject: [PATCH 178/303] softirq: Sanitize softirq pending for NOHZ/RT
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/interrupt.h | 2 ++
+ kernel/softirq.c | 61 +++++++++++++++++++++++++++++++++++++++++++++
+ kernel/time/tick-sched.c | 8 +-----
+ 3 files changed, 64 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b9162dc..74e28d9 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -471,6 +471,8 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
+
++extern void softirq_check_pending_idle(void);
++
+ /* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_sendirq() adds work to these lists, and
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index c6c5824..8332622 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ
++# ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * On preempt-rt a softirq might be blocked on a lock. There might be
++ * no other runnable task on this CPU because the lock owner runs on
++ * some other CPU. So we have to go into idle with the pending bit
++ * set. Therefor we need to check this otherwise we warn about false
++ * positives which confuses users and defeats the whole purpose of
++ * this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ u32 warnpending = 0, pending = local_softirq_pending();
++
++ if (rate_limit >= 10)
++ return;
++
++ if (pending) {
++ struct task_struct *tsk;
++
++ tsk = __get_cpu_var(ksoftirqd);
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++
++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
++ warnpending = 1;
++
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ pending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++#endif
++
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 548d311..c901f33 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -336,13 +336,7 @@ void tick_nohz_stop_sched_tick(int inidle)
+ goto end;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+- static int ratelimit;
+-
+- if (ratelimit < 10) {
+- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ goto end;
+ }
+
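The helper added above amounts to a rate-limited warning that, on RT, stays silent when the pending bit is expected because ksoftirqd is merely blocked on a lock whose owner runs on another CPU. A simplified, self-contained model of that logic, with an invented parameter replacing the pi_blocked_on/state checks, might be:

    #include <stdbool.h>
    #include <stdio.h>

    static int rate_limit;

    static void softirq_check_pending_idle(unsigned int pending,
                                           bool softirqd_blocked_on_lock)
    {
        if (rate_limit >= 10 || !pending)
            return;
        /* On RT a pending bit is legitimate if ksoftirqd is blocked on a
         * lock whose owner runs elsewhere, so do not warn in that case. */
        if (softirqd_blocked_on_lock)
            return;
        fprintf(stderr, "NOHZ: local_softirq_pending %02x\n", pending);
        rate_limit++;
    }

    int main(void)
    {
        softirq_check_pending_idle(0x08, true);   /* silent: expected on RT */
        softirq_check_pending_idle(0x08, false);  /* warns, rate limited */
        return 0;
    }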
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0179-lockdep-rt.patch.patch)
@@ -0,0 +1,61 @@
+From 0ba766e193a34a4e307f4fdcbd98e39bb512ed59 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 18:51:23 +0200
+Subject: [PATCH 179/303] lockdep-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/irqflags.h | 10 +++++++---
+ kernel/lockdep.c | 2 ++
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 37b13c4..a52b35d 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -25,8 +25,6 @@
+ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+ # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+ # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+ # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+ #else
+ # define trace_hardirqs_on() do { } while (0)
+@@ -39,9 +37,15 @@
+ # define trace_softirqs_enabled(p) 0
+ # define trace_hardirq_enter() do { } while (0)
+ # define trace_hardirq_exit() do { } while (0)
++# define INIT_TRACE_IRQFLAGS
++#endif
++
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
++#else
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+-# define INIT_TRACE_IRQFLAGS
+ #endif
+
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+diff --git a/kernel/lockdep.c b/kernel/lockdep.c
+index b2e08c9..334d3c0 100644
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -3494,6 +3494,7 @@ static void check_flags(unsigned long flags)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3508,6 +3509,7 @@ static void check_flags(unsigned long flags)
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch)
@@ -0,0 +1,20 @@
+From df6946d69b735058d8d1c25a75fc98825e0ff3b0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 21:51:45 +0200
+Subject: [PATCH 180/303] mutex-no-spin-on-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/Kconfig.locks | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index 5068e2a..7bd0598 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+ config MUTEX_SPIN_ON_OWNER
+- def_bool SMP && !DEBUG_MUTEXES
++ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0181-softirq-local-lock.patch.patch)
@@ -0,0 +1,340 @@
+From 56d9106c21d871c1ad3c5ac6f0a35e7e8a908c1a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Jun 2011 15:57:18 +0200
+Subject: [PATCH 181/303] softirq-local-lock.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/hardirq.h | 16 ++++-
+ include/linux/interrupt.h | 11 +++
+ include/linux/sched.h | 1 +
+ init/main.c | 1 +
+ kernel/softirq.c | 170 ++++++++++++++++++++++++++++++++++++++++++++-
+ 5 files changed, 194 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
+index f743883..2f5d318 100644
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -60,7 +60,11 @@
+ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET (1UL << NMI_SHIFT)
+
+-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+
+ #ifndef PREEMPT_ACTIVE
+ #define PREEMPT_ACTIVE_BITS 1
+@@ -73,10 +77,17 @@
+ #endif
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count() (0U)
++extern int in_serving_softirq(void);
++#endif
++
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+@@ -86,7 +97,6 @@
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+ /*
+ * Are we in NMI context?
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 74e28d9..20d8dcc 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -458,7 +458,12 @@ struct softirq_action
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void thread_do_softirq(void) { do_softirq(); }
++#else
++extern void thread_do_softirq(void);
++#endif
+
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+@@ -650,6 +655,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+ tasklet_kill(&ttimer->tasklet);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ /*
+ * Autoprobing for irqs:
+ *
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c60cfde..6d920a0 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1605,6 +1605,7 @@ struct task_struct {
+ #endif
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
++ int softirq_nestcnt;
+ #endif
+ };
+
+diff --git a/init/main.c b/init/main.c
+index f2936db..b00c71b 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -490,6 +490,7 @@ asmlinkage void __init start_kernel(void)
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
++ softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 8332622..2c10a79 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -165,6 +166,7 @@ static void handle_pending_softirqs(u32 pending, int cpu)
+ local_irq_disable();
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -368,6 +370,162 @@ asmlinkage void do_softirq(void)
+
+ #endif
+
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock
++ */
++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
++
++static void __do_softirq(void);
++
++void __init softirq_early_init(void)
++{
++ local_irq_lock_init(local_softirq_lock);
++}
++
++void local_bh_disable(void)
++{
++ migrate_disable();
++ current->softirq_nestcnt++;
++}
++EXPORT_SYMBOL(local_bh_disable);
++
++void local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ if ((current->softirq_nestcnt == 1) &&
++ local_softirq_pending() &&
++ local_trylock(local_softirq_lock)) {
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_irq_enable();
++ local_unlock(local_softirq_lock);
++ WARN_ON(current->softirq_nestcnt != 1);
++ }
++ current->softirq_nestcnt--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(local_bh_enable);
++
++void local_bh_enable_ip(unsigned long ip)
++{
++ local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
++
++/* For tracing */
++int notrace __in_softirq(void)
++{
++ if (__get_cpu_var(local_softirq_lock).owner == current)
++ return __get_cpu_var(local_softirq_lock).nestcnt;
++ return 0;
++}
++
++int in_serving_softirq(void)
++{
++ int res;
++
++ preempt_disable();
++ res = __get_cpu_var(local_softirq_runner) == current;
++ preempt_enable();
++ return res;
++}
++
++/*
++ * Called with bh and local interrupts disabled. For full RT cpu must
++ * be pinned.
++ */
++static void __do_softirq(void)
++{
++ u32 pending = local_softirq_pending();
++ int cpu = smp_processor_id();
++
++ current->softirq_nestcnt++;
++
++ /* Reset the pending bitmask before enabling irqs */
++ set_softirq_pending(0);
++
++ __get_cpu_var(local_softirq_runner) = current;
++
++ lockdep_softirq_enter();
++
++ handle_pending_softirqs(pending, cpu);
++
++ pending = local_softirq_pending();
++ if (pending)
++ wakeup_softirqd();
++
++ lockdep_softirq_exit();
++ __get_cpu_var(local_softirq_runner) = NULL;
++
++ current->softirq_nestcnt--;
++}
++
++static int __thread_do_softirq(int cpu)
++{
++ /*
++ * Prevent the current cpu from going offline.
++ * pin_current_cpu() can reenable preemption and block on the
++ * hotplug mutex. When it returns, the current cpu is
++ * pinned. It might be the wrong one, but the offline check
++ * below catches that.
++ */
++ pin_current_cpu();
++ /*
++ * If called from ksoftirqd (cpu >= 0) we need to check
++ * whether we are on the wrong cpu due to cpu offlining. If
++ * called via thread_do_softirq() no action required.
++ */
++ if (cpu >= 0 && cpu_is_offline(cpu)) {
++ unpin_current_cpu();
++ return -1;
++ }
++ preempt_enable();
++ local_lock(local_softirq_lock);
++ local_irq_disable();
++ /*
++ * We cannot switch stacks on RT as we want to be able to
++ * schedule!
++ */
++ if (local_softirq_pending())
++ __do_softirq();
++ local_unlock(local_softirq_lock);
++ unpin_current_cpu();
++ preempt_disable();
++ local_irq_enable();
++ return 0;
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq()) {
++ preempt_disable();
++ __thread_do_softirq(-1);
++ preempt_enable();
++ }
++}
++
++static int ksoftirqd_do_softirq(int cpu)
++{
++ return __thread_do_softirq(cpu);
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
++#endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+ */
+@@ -381,9 +539,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_check_idle(cpu);
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -392,6 +550,7 @@ void irq_enter(void)
+ #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ __do_softirq();
+ else {
+@@ -400,10 +559,14 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
+ }
+ #else
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ do_softirq();
+ else {
+@@ -412,6 +575,9 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
+ }
+ #endif
+
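
As an illustration of the caller-visible behaviour of the RT local_bh_disable()/local_bh_enable() code above, here is a minimal, hypothetical sketch (the function and data are invented; only the bh API comes from the patch). Sections still nest via the per-task softirq_nestcnt, and pending softirqs only run when the outermost local_bh_enable() drops the count back to zero:

  #include <linux/interrupt.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(stats_lock);
  static unsigned long stats_count;

  /* Hypothetical caller: protect data shared with softirq context. */
  static void stats_update(void)
  {
          local_bh_disable();        /* softirq_nestcnt 0 -> 1, migration disabled */
          spin_lock(&stats_lock);    /* a sleeping lock on RT, allowed here */
          stats_count++;
          spin_unlock(&stats_lock);
          local_bh_enable();         /* nestcnt back to 0: pending softirqs run now */
  }
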
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0182-softirq-Export-in_serving_softirq.patch)
@@ -0,0 +1,30 @@
+From 9122bcd72024e8705c24cd7504cd349d4bbe8994 Mon Sep 17 00:00:00 2001
+From: John Kacur <jkacur at redhat.com>
+Date: Mon, 14 Nov 2011 02:44:43 +0100
+Subject: [PATCH 182/303] softirq: Export in_serving_softirq()
+
+ERROR: "in_serving_softirq" [net/sched/cls_cgroup.ko] undefined!
+
+The above can be fixed by exporting in_serving_softirq
+
+Signed-off-by: John Kacur <jkacur at redhat.com>
+Cc: Paul McKenney <paulmck at linux.vnet.ibm.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/1321235083-21756-2-git-send-email-jkacur@redhat.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/softirq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 2c10a79..f107c07 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -439,6 +439,7 @@ int in_serving_softirq(void)
+ preempt_enable();
+ return res;
+ }
++EXPORT_SYMBOL(in_serving_softirq);
+
+ /*
+ * Called with bh and local interrupts disabled. For full RT cpu must
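
The one-line export above is needed because, with the RT definition, in_serving_softirq() is an out-of-line function, so any modular user (such as the cls_cgroup classifier in the quoted build error) needs the symbol. A purely illustrative modular caller, assuming nothing beyond the exported function:

  #include <linux/module.h>
  #include <linux/hardirq.h>

  /* Hypothetical module code: take a different path when in softirq context. */
  static bool example_in_bh(void)
  {
          return in_serving_softirq();
  }

  MODULE_LICENSE("GPL");
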
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch)
@@ -0,0 +1,42 @@
+From f2bcf8cb9d94f9dd91c3708ae1da4a68ee4d59c9 Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang0 at gmail.com>
+Date: Thu, 13 Oct 2011 17:19:09 +0800
+Subject: [PATCH 183/303] hardirq.h: Define softirq_count() as OUL to kill
+ build warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+kernel/lockdep.c: In function ‘print_bad_irq_dependency’:
+kernel/lockdep.c:1476:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’
+kernel/lockdep.c: In function ‘print_usage_bug’:
+kernel/lockdep.c:2193:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’
+
+kernel/lockdep.i show this:
+ printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+ curr->comm, task_pid_nr(curr),
+ curr->hardirq_context, ((current_thread_info()->preempt_count) & (((1UL << (10))-1) << ((0 + 8) + 8))) >> ((0 + 8) + 8),
+ curr->softirq_context, (0U) >> (0 + 8),
+ curr->hardirqs_enabled,
+ curr->softirqs_enabled);
+
+Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
+Link: http://lkml.kernel.org/r/20111013091909.GA32739@zhy
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/hardirq.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
+index 2f5d318..7059ce2 100644
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -84,7 +84,7 @@
+ # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ # define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+ #else
+-# define softirq_count() (0U)
++# define softirq_count() (0UL)
+ extern int in_serving_softirq(void);
+ #endif
+
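
The type change above only matters because of printk format checking: lockdep prints the value with %lu, and the RT stub had type unsigned int. A small stand-alone illustration of the warning (ordinary user-space C, not kernel code; the macro names are invented):

  #include <stdio.h>

  #define SOFTIRQ_COUNT_OLD (0U)   /* illustrative: unsigned int, -Wformat warns with %lu */
  #define SOFTIRQ_COUNT_NEW (0UL)  /* illustrative: unsigned long, matches %lu */

  int main(void)
  {
          printf("SC[%lu]\n", SOFTIRQ_COUNT_OLD); /* warning: %lu expects long unsigned int */
          printf("SC[%lu]\n", SOFTIRQ_COUNT_NEW); /* clean */
          return 0;
  }
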
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0184-softirq-Fix-unplug-deadlock.patch)
@@ -0,0 +1,65 @@
+From d7da86bb442de4d086c80ebcd7e10b6164a0955a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Fri, 30 Sep 2011 15:52:14 +0200
+Subject: [PATCH 184/303] softirq: Fix unplug deadlock
+
+If ksoftirqd gets woken during hot-unplug, __thread_do_softirq() will
+call pin_current_cpu() which will block on the held cpu_hotplug.lock.
+Moving the offline check in __thread_do_softirq() before the
+pin_current_cpu() call doesn't work, since the wakeup can happen
+before we mark the cpu offline.
+
+So here we have the ksoftirq thread stuck until hotplug finishes, but
+then the ksoftirq CPU_DOWN notifier issues kthread_stop() which will
+wait for the ksoftirq thread to go away -- while holding the hotplug
+lock.
+
+Sort this by delaying the kthread_stop() until CPU_POST_DEAD, which is
+outside of the cpu_hotplug.lock, but still serialized by the
+cpu_add_remove_lock.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: rostedt <rostedt at goodmis.org>
+Cc: Clark Williams <williams at redhat.com>
+Link: http://lkml.kernel.org/r/1317391156.12973.3.camel@twins
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/softirq.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index f107c07..56de566 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -1086,9 +1086,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+ int hotcpu = (unsigned long)hcpu;
+ struct task_struct *p;
+
+- switch (action) {
++ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+- case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create_on_node(run_ksoftirqd,
+ hcpu,
+ cpu_to_node(hotcpu),
+@@ -1101,19 +1100,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+ per_cpu(ksoftirqd, hotcpu) = p;
+ break;
+ case CPU_ONLINE:
+- case CPU_ONLINE_FROZEN:
+ wake_up_process(per_cpu(ksoftirqd, hotcpu));
+ break;
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+- case CPU_UP_CANCELED_FROZEN:
+ if (!per_cpu(ksoftirqd, hotcpu))
+ break;
+ /* Unbind so it can run. Fall thru. */
+ kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ cpumask_any(cpu_online_mask));
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN: {
++ case CPU_POST_DEAD: {
+ static const struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO-1
+ };
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch)
@@ -0,0 +1,193 @@
+From 05b9496a5e8b34d645674029adb0db2d4c402ab3 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 18 Jul 2011 13:59:17 +0200
+Subject: [PATCH 185/303] softirq-disable-softirq-stacks-for-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/powerpc/kernel/irq.c | 3 ++-
+ arch/powerpc/kernel/misc_32.S | 2 ++
+ arch/powerpc/kernel/misc_64.S | 2 ++
+ arch/sh/kernel/irq.c | 2 ++
+ arch/sparc/kernel/irq_64.c | 2 ++
+ arch/x86/kernel/entry_64.S | 2 ++
+ arch/x86/kernel/irq_32.c | 2 ++
+ arch/x86/kernel/irq_64.c | 3 ++-
+ include/linux/interrupt.h | 3 +--
+ 9 files changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 745c1e7..e0ee531 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -440,6 +440,7 @@ void irq_ctx_init(void)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void do_softirq_onstack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -476,7 +477,7 @@ void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
+-
++#endif
+
+ /*
+ * IRQ controller and virtual interrupts
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 7cd07b4..46c6073 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -36,6 +36,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_handle_irq)
+ mflr r0
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 616921e..2961d75 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -29,6 +29,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq)
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_handle_irq)
+ ld r8,0(r6)
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index a3ee919..9127bc0 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index d45b710..c3a3737 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -699,6 +699,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -724,6 +725,7 @@ void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 6274f5f..80e83d0 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1192,6 +1192,7 @@ ENTRY(kernel_execve)
+ CFI_ENDPROC
+ END(kernel_execve)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+ CFI_STARTPROC
+@@ -1211,6 +1212,7 @@ ENTRY(call_softirq)
+ ret
+ CFI_ENDPROC
+ END(call_softirq)
++#endif
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 7209070..84417a2 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu)
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ bool handle_irq(unsigned irq, struct pt_regs *regs)
+ {
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 69bca46..3fbc07d 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -65,7 +65,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
+ return true;
+ }
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void call_softirq(void);
+
+ asmlinkage void do_softirq(void)
+@@ -85,3 +85,4 @@ asmlinkage void do_softirq(void)
+ }
+ local_irq_restore(flags);
+ }
++#endif
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 20d8dcc..1a74cf7 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -456,10 +456,9 @@ struct softirq_action
+ void (*action)(struct softirq_action *);
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void thread_do_softirq(void) { do_softirq(); }
+ #else
+ extern void thread_do_softirq(void);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0186-softirq-make-fifo.patch.patch)
@@ -0,0 +1,61 @@
+From b43a59463ec24ac8d7005d75b5264eed529f33c6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 21 Jul 2011 21:06:43 +0200
+Subject: [PATCH 186/303] softirq-make-fifo.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/softirq.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 56de566..dd80cb4 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -372,6 +372,8 @@ asmlinkage void do_softirq(void)
+
+ static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+ static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static inline void ksoftirqd_set_sched_params(void) { }
++static inline void ksoftirqd_clr_sched_params(void) { }
+
+ #else /* !PREEMPT_RT_FULL */
+
+@@ -526,6 +528,20 @@ static int ksoftirqd_do_softirq(int cpu)
+ static inline void local_bh_disable_nort(void) { }
+ static inline void _local_bh_enable_nort(void) { }
+
++static inline void ksoftirqd_set_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, &param);
++}
++
++static inline void ksoftirqd_clr_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
+ #endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+@@ -985,6 +1001,8 @@ void __init softirq_init(void)
+
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
++ ksoftirqd_set_sched_params();
++
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!kthread_should_stop()) {
+@@ -1010,6 +1028,7 @@ static int run_ksoftirqd(void * __bind_cpu)
+
+ wait_to_die:
+ preempt_enable();
++ ksoftirqd_clr_sched_params();
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch)
@@ -0,0 +1,407 @@
+From 6ec4e0da3a5033fa4e3b77797c44e798f7d10c86 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Tue, 29 Nov 2011 20:18:22 -0500
+Subject: [PATCH 187/303] tasklet: Prevent tasklets from going into infinite
+ spin in RT
+
+When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
+and spinlocks turn into mutexes. But this can cause issues with
+tasks disabling tasklets. A tasklet runs under ksoftirqd, and
+if a tasklet is disabled with tasklet_disable(), the tasklet
+count is increased. When a tasklet runs, it checks this counter
+and if it is set, it adds itself back on the softirq queue and
+returns.
+
+The problem arises in RT because ksoftirq will see that a softirq
+is ready to run (the tasklet softirq just re-armed itself), and will
+not sleep, but instead run the softirqs again. The tasklet softirq
+will still see that the count is non-zero, will not execute
+the tasklet, and will requeue itself on the softirq again, which will
+cause ksoftirqd to run it again and again and again.
+
+It gets worse because ksoftirqd runs as a real-time thread.
+If it preempted the task that disabled tasklets, and that task
+has migration disabled, or can't run for other reasons, the tasklet
+softirq will never run because the count will never be zero, and
+ksoftirqd will go into an infinite loop. As ksoftirqd is an RT task,
+this becomes a big problem.
+
+This is a hack solution to have tasklet_disable() stop tasklets, and
+when a disabled tasklet runs, instead of requeueing it on the softirq
+it is delayed. When tasklet_enable() is called and tasklets are
+waiting, tasklet_enable() will kick the tasklets to continue.
+This prevents the lock up from ksoftirq going into an infinite loop.
+
+[ rostedt at goodmis.org: ported to 3.0-rt ]
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/interrupt.h | 39 ++++-----
+ kernel/softirq.c | 208 ++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 170 insertions(+), 77 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 1a74cf7..bb4b441 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -517,8 +517,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its execution is still not
+ started, it will be executed only once.
+- * If this tasklet is already running on another CPU (or schedule is called
+- from tasklet itself), it is rescheduled for later.
++ * If this tasklet is already running on another CPU, it is rescheduled
++ for later.
++ * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * Tasklet is strictly serialized wrt itself, but not
+ wrt another tasklets. If client needs some intertask synchronization,
+ he makes it with spinlocks.
+@@ -543,27 +544,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+ enum
+ {
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
++ TASKLET_STATE_PENDING /* Tasklet is pending */
+ };
+
+-#ifdef CONFIG_SMP
++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
++
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
++static inline int tasklet_tryunlock(struct tasklet_struct *t)
++{
++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
++}
++
+ static inline void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_clear_bit();
+ clear_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+-}
++extern void tasklet_unlock_wait(struct tasklet_struct *t);
++
+ #else
+ #define tasklet_trylock(t) 1
++#define tasklet_tryunlock(t) 1
+ #define tasklet_unlock_wait(t) do { } while (0)
+ #define tasklet_unlock(t) do { } while (0)
+ #endif
+@@ -612,17 +622,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
+ smp_mb();
+ }
+
+-static inline void tasklet_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic_dec();
+- atomic_dec(&t->count);
+-}
+-
+-static inline void tasklet_hi_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic_dec();
+- atomic_dec(&t->count);
+-}
++extern void tasklet_enable(struct tasklet_struct *t);
++extern void tasklet_hi_enable(struct tasklet_struct *t);
+
+ extern void tasklet_kill(struct tasklet_struct *t);
+ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index dd80cb4..92b4ca3 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -21,6 +21,7 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
+@@ -664,15 +665,45 @@ struct tasklet_head
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+
++static void inline
++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
++{
++ if (tasklet_trylock(t)) {
++again:
++ /* We may have been preempted before tasklet_trylock
++ * and __tasklet_action may have already run.
++ * So double check the sched bit while the tasklet
++ * is locked before adding it to the list.
++ */
++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++ t->next = NULL;
++ *head->tail = t;
++ head->tail = &(t->next);
++ raise_softirq_irqoff(nr);
++ tasklet_unlock(t);
++ } else {
++ /* This is subtle. If we hit the corner case above
++ * It is possible that we get preempted right here,
++ * and another task has successfully called
++ * tasklet_schedule(), then this function, and
++ * failed on the trylock. Thus we must be sure
++ * before releasing the tasklet lock, that the
++ * SCHED_BIT is clear. Otherwise the tasklet
++ * may get its SCHED_BIT set, but not added to the
++ * list
++ */
++ if (!tasklet_tryunlock(t))
++ goto again;
++ }
++ }
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- raise_softirq_irqoff(TASKLET_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+
+@@ -683,10 +714,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+
+@@ -694,50 +722,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ {
+- BUG_ON(!irqs_disabled());
+-
+- t->next = __this_cpu_read(tasklet_hi_vec.head);
+- __this_cpu_write(tasklet_hi_vec.head, t);
+- __raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_hi_schedule(t);
+ }
+
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++void tasklet_enable(struct tasklet_struct *t)
+ {
+- struct tasklet_struct *list;
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_schedule(t);
++}
+
+- local_irq_disable();
+- list = __this_cpu_read(tasklet_vec.head);
+- __this_cpu_write(tasklet_vec.head, NULL);
+- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+- local_irq_enable();
++EXPORT_SYMBOL(tasklet_enable);
++
++void tasklet_hi_enable(struct tasklet_struct *t)
++{
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_hi_schedule(t);
++}
++
++EXPORT_SYMBOL(tasklet_hi_enable);
++
++static void
++__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
++{
++ int loops = 1000000;
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
++ /*
++ * Should always succeed - after a tasklet got on the
++ * list (after getting the SCHED bit set from 0 to 1),
++ * nothing but the tasklet softirq it got queued to can
++ * lock it:
++ */
++ if (!tasklet_trylock(t)) {
++ WARN_ON(1);
++ continue;
+ }
+
+- local_irq_disable();
+ t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+- local_irq_enable();
++
++ /*
++ * If we cannot handle the tasklet because it's disabled,
++ * mark it as pending. tasklet_enable() will later
++ * re-schedule the tasklet.
++ */
++ if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++ /* implicit unlock: */
++ wmb();
++ t->state = TASKLET_STATEF_PENDING;
++ continue;
++ }
++
++ /*
++ * After this point on the tasklet might be rescheduled
++ * on another CPU, but it can only be added to another
++ * CPU's tasklet list if we unlock the tasklet (which we
++ * dont do yet).
++ */
++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ WARN_ON(1);
++
++again:
++ t->func(t->data);
++
++ /*
++ * Try to unlock the tasklet. We must use cmpxchg, because
++ * another CPU might have scheduled or disabled the tasklet.
++ * We only allow the STATE_RUN -> 0 transition here.
++ */
++ while (!tasklet_tryunlock(t)) {
++ /*
++ * If it got disabled meanwhile, bail out:
++ */
++ if (atomic_read(&t->count))
++ goto out_disabled;
++ /*
++ * If it got scheduled meanwhile, re-execute
++ * the tasklet function:
++ */
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ goto again;
++ if (!--loops) {
++ printk("hm, tasklet state: %08lx\n", t->state);
++ WARN_ON(1);
++ tasklet_unlock(t);
++ break;
++ }
++ }
+ }
+ }
+
++static void tasklet_action(struct softirq_action *a)
++{
++ struct tasklet_struct *list;
++
++ local_irq_disable();
++ list = __get_cpu_var(tasklet_vec).head;
++ __get_cpu_var(tasklet_vec).head = NULL;
++ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
++ local_irq_enable();
++
++ __tasklet_action(a, list);
++}
++
+ static void tasklet_hi_action(struct softirq_action *a)
+ {
+ struct tasklet_struct *list;
+@@ -748,29 +845,7 @@ static void tasklet_hi_action(struct softirq_action *a)
+ __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+ local_irq_enable();
+
+- while (list) {
+- struct tasklet_struct *t = list;
+-
+- list = list->next;
+-
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
+- }
+-
+- local_irq_disable();
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- __raise_softirq_irqoff(HI_SOFTIRQ);
+- local_irq_enable();
+- }
++ __tasklet_action(a, list);
+ }
+
+
+@@ -793,7 +868,7 @@ void tasklet_kill(struct tasklet_struct *t)
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ msleep(1);
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+@@ -999,6 +1074,23 @@ void __init softirq_init(void)
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ /*
++ * Hack for now to avoid this busy-loop:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ msleep(1);
++#else
++ barrier();
++#endif
++ }
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
+ ksoftirqd_set_sched_params();
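
The commit message of the patch above describes why, on RT, a disabled tasklet must be parked in TASKLET_STATE_PENDING instead of being endlessly requeued, and why tasklet_enable() now reschedules it. A hedged sketch of the driver-side pattern this protects, using only the standard tasklet API the patch touches (the device logic is invented):

  #include <linux/interrupt.h>

  static void my_tasklet_fn(unsigned long data)
  {
          /* bottom-half work for the (hypothetical) device */
  }
  static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

  static irqreturn_t my_irq_handler(int irq, void *dev_id)
  {
          tasklet_schedule(&my_tasklet);
          return IRQ_HANDLED;
  }

  static void my_reconfigure(void)
  {
          tasklet_disable(&my_tasklet);  /* count > 0: a run now gets marked PENDING on RT */
          /* ... touch state the tasklet also uses ... */
          tasklet_enable(&my_tasklet);   /* on RT this re-kicks a PENDING tasklet */
  }
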
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch)
@@ -0,0 +1,156 @@
+From 632ed80276282907f6ec67a1df32d39de31849ab Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 31 Jan 2012 13:01:27 +0100
+Subject: [PATCH 188/303] genirq: Allow disabling of softirq processing in irq
+ thread context
+
+The processing of softirqs in irq thread context is a performance gain
+for the non-rt workloads of a system, but it's counterproductive for
+interrupts which are explicitly related to the realtime
+workload. Allow such interrupts to prevent softirq processing in their
+thread context.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/interrupt.h | 2 ++
+ include/linux/irq.h | 5 ++++-
+ kernel/irq/manage.c | 13 ++++++++++++-
+ kernel/irq/settings.h | 12 ++++++++++++
+ kernel/softirq.c | 7 +++++++
+ 5 files changed, 37 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index bb4b441..f70a65b 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -61,6 +61,7 @@
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ */
+ #define IRQF_DISABLED 0x00000020
+ #define IRQF_SAMPLE_RANDOM 0x00000040
+@@ -75,6 +76,7 @@
+ #define IRQF_FORCE_RESUME 0x00008000
+ #define IRQF_NO_THREAD 0x00010000
+ #define IRQF_EARLY_RESUME 0x00020000
++#define IRQF_NO_SOFTIRQ_CALL 0x00040000
+
+ #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
+
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index bff29c5..3838b53 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -67,6 +67,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+ * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
+ * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ */
+ enum {
+ IRQ_TYPE_NONE = 0x00000000,
+@@ -90,12 +91,14 @@ enum {
+ IRQ_NESTED_THREAD = (1 << 15),
+ IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 18),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
++ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
++ IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 35d5ac4..f52263a 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -751,7 +751,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action, false);
+- local_bh_enable();
++ /*
++ * Interrupts which have real time requirements can be set up
++ * to avoid softirq processing in the thread handler. This is
++ * safe as these interrupts do not raise soft interrupts.
++ */
++ if (irq_settings_no_softirq_call(desc))
++ _local_bh_enable();
++ else
++ local_bh_enable();
+ return ret;
+ }
+
+@@ -1091,6 +1099,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
++ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
++ irq_settings_set_no_softirq_call(desc);
++
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(irq, desc, mask);
+
+diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
+index 1162f10..0d2c381 100644
+--- a/kernel/irq/settings.h
++++ b/kernel/irq/settings.h
+@@ -14,6 +14,7 @@ enum {
+ _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
+ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ };
+
+@@ -26,6 +27,7 @@ enum {
+ #define IRQ_NOAUTOEN GOT_YOU_MORON
+ #define IRQ_NESTED_THREAD GOT_YOU_MORON
+ #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
+ #undef IRQF_MODIFY_MASK
+ #define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+ }
+
++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
++{
++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
++}
++
++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
++{
++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
++}
++
+ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+ {
+ return desc->status_use_accessors & _IRQ_PER_CPU;
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 92b4ca3..bef08f3 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -425,6 +425,13 @@ void local_bh_enable_ip(unsigned long ip)
+ }
+ EXPORT_SYMBOL(local_bh_enable_ip);
+
++void _local_bh_enable(void)
++{
++ current->softirq_nestcnt--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(_local_bh_enable);
++
+ /* For tracing */
+ int notrace __in_softirq(void)
+ {
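
The new flag above is requested by the driver when it sets up its interrupt. A hedged sketch of how a latency-critical interrupt might opt out of softirq processing in its thread context (the device name and handler bodies are invented; request_threaded_irq() is the stock API and IRQF_NO_SOFTIRQ_CALL is the flag added by this patch; per the commit message the thread handler must not raise softirqs itself):

  #include <linux/interrupt.h>

  static irqreturn_t rtdev_hardirq(int irq, void *dev_id)   /* hypothetical device */
  {
          return IRQ_WAKE_THREAD;
  }

  static irqreturn_t rtdev_thread(int irq, void *dev_id)
  {
          /* latency-critical handling; does not raise softirqs */
          return IRQ_HANDLED;
  }

  static int rtdev_setup_irq(unsigned int irq, void *dev_id)
  {
          return request_threaded_irq(irq, rtdev_hardirq, rtdev_thread,
                                      IRQF_NO_SOFTIRQ_CALL, "rtdev", dev_id);
  }
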
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0189-local-vars-migrate-disable.patch.patch)
@@ -0,0 +1,49 @@
+From 24772ee0bcb6a6a328d9aa35dd70383812c28fc5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Jun 2011 20:42:16 +0200
+Subject: [PATCH 189/303] local-vars-migrate-disable.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/percpu.h | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index 3941ea4..58163cd 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -48,10 +48,30 @@
+ preempt_enable(); \
+ } while (0)
+
+-#define get_local_var(var) get_cpu_var(var)
+-#define put_local_var(var) put_cpu_var(var)
+-#define get_local_ptr(var) get_cpu_ptr(var)
+-#define put_local_ptr(var) put_cpu_ptr(var)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define get_local_var(var) get_cpu_var(var)
++# define put_local_var(var) put_cpu_var(var)
++# define get_local_ptr(var) get_cpu_ptr(var)
++# define put_local_ptr(var) put_cpu_ptr(var)
++#else
++# define get_local_var(var) (*({ \
++ migrate_disable(); \
++ &__get_cpu_var(var); }))
++
++# define put_local_var(var) do { \
++ (void)&(var); \
++ migrate_enable(); \
++} while (0)
++
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do { \
++ (void)(var); \
++ migrate_enable(); \
++} while (0)
++#endif
+
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
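
The RT variants above keep the get_cpu_var()/put_cpu_var() calling convention but use migrate_disable() instead of disabling preemption, so the section stays on one CPU while remaining preemptible. A minimal usage sketch (the per-CPU counter is invented; the helpers are the ones defined in the patch):

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(unsigned long, pkt_count);  /* illustrative per-CPU counter */

  static void count_packet(void)
  {
          get_local_var(pkt_count)++;    /* pinned to this CPU, still preemptible on RT */
          put_local_var(pkt_count);      /* migration allowed again */
  }
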
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch)
@@ -0,0 +1,65 @@
+From aa01a71c1e1cfaf749cd0a7e6d74b84f7aaa5f15 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 6 Apr 2010 16:51:31 +0200
+Subject: [PATCH 190/303] md: raid5: Make raid5_percpu handling RT aware
+
+__raid_run_ops() disables preemption with get_cpu() around the access
+to the raid5_percpu variables. That causes scheduling while atomic
+spews on RT.
+
+Serialize the access to the percpu data with a lock and keep the code
+preemptible.
+
+Reported-by: Udo van den Heuvel <udovdh at xs4all.nl>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
+---
+ drivers/md/raid5.c | 7 +++++--
+ drivers/md/raid5.h | 1 +
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 26ef63a..17f5c52 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1253,8 +1253,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
++ spin_lock(&percpu->lock);
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+@@ -1306,7 +1307,8 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+- put_cpu();
++ spin_unlock(&percpu->lock);
++ put_cpu_light();
+ }
+
+ #ifdef CONFIG_MULTICORE_RAID456
+@@ -4549,6 +4551,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ break;
+ }
+ per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ }
+ #ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index e10c553..010a969 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -405,6 +405,7 @@ struct r5conf {
+ int recovery_disabled;
+ /* per cpu variables */
+ struct raid5_percpu {
++ spinlock_t lock; /* Protection for -RT */
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
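
The raid5 change above is one instance of a recurring RT pattern: replace get_cpu()/put_cpu(), which disable preemption, with get_cpu_light()/put_cpu_light() plus an explicit per-CPU lock, so per-CPU data stays serialized while the code remains preemptible. A hedged sketch of the pattern outside raid5 (the structure is invented; get_cpu_light()/put_cpu_light() come from the RT series):

  #include <linux/percpu.h>
  #include <linux/spinlock.h>

  struct my_percpu {
          spinlock_t lock;        /* protection for -RT, as in raid5_percpu */
          unsigned long scratch;
  };

  /* each CPU's ->lock must be spin_lock_init()'d at init time (not shown),
   * as raid5_alloc_percpu() does in the patch above */
  static DEFINE_PER_CPU(struct my_percpu, my_pcpu);

  static void my_run_ops(void)
  {
          struct my_percpu *p;
          int cpu;

          cpu = get_cpu_light();          /* pin the CPU without disabling preemption */
          p = per_cpu_ptr(&my_pcpu, cpu);
          spin_lock(&p->lock);            /* serialize against other users of this CPU's data */
          p->scratch++;
          spin_unlock(&p->lock);
          put_cpu_light();
  }
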
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0191-rtmutex-lock-killable.patch.patch)
@@ -0,0 +1,85 @@
+From bdd86c479a8843986d0fab4a5c96cefe4d536c55 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 9 Jun 2011 11:43:52 +0200
+Subject: [PATCH 191/303] rtmutex-lock-killable.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rtmutex.h | 1 +
+ kernel/rtmutex.c | 33 +++++++++++++++++++++++++++------
+ 2 files changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index de17134..3561eb2 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -90,6 +90,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock);
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index f9d8482..723fd3a 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -799,12 +799,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ /**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+ *
+- * @lock: the rt_mutex to be locked
++ * @lock: the rt_mutex to be locked
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
++ * 0 on success
++ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+@@ -818,17 +818,38 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ * @detect_deadlock: deadlock detection on/off
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
++ int detect_deadlock)
++{
++ might_sleep();
++
++ return rt_mutex_fastlock(lock, TASK_KILLABLE,
++ detect_deadlock, rt_mutex_slowlock);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
+ * by the caller
+ *
+- * @lock: the rt_mutex to be locked
++ * @lock: the rt_mutex to be locked
+ * @timeout: timeout structure or NULL (no timeout)
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
++ * 0 on success
++ * -EINTR when interrupted by a signal
+ * -ETIMEDOUT when the timeout expired
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
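
rt_mutex_lock_killable(), added above, mirrors mutex_lock_killable(): the sleep can only be interrupted by a fatal signal, and the caller has to handle the -EINTR return. A minimal, hypothetical caller (the lock and the work are invented; the API is the one declared in the patch):

  #include <linux/rtmutex.h>

  static DEFINE_RT_MUTEX(example_lock);   /* illustrative lock */

  static int do_locked_work(void)
  {
          int ret;

          ret = rt_mutex_lock_killable(&example_lock, 0); /* 0: no deadlock detection */
          if (ret)
                  return ret;     /* -EINTR: task was killed while sleeping */

          /* ... critical section (illustrative) ... */

          rt_mutex_unlock(&example_lock);
          return 0;
  }
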
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch)
@@ -0,0 +1,222 @@
+From 7f4d5aa0db1deaa6cfce79180890d1ee0c78916a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 10 Jun 2011 11:04:15 +0200
+Subject: [PATCH 192/303] rtmutex-futex-prepare-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/futex.c | 77 ++++++++++++++++++++++++++++++++++++++---------
+ kernel/rtmutex.c | 31 ++++++++++++++++---
+ kernel/rtmutex_common.h | 2 ++
+ 3 files changed, 91 insertions(+), 19 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 77bccfc..ea742ba 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1442,6 +1442,16 @@ retry_private:
+ requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ free_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /* -EDEADLK */
+ this->pi_state = NULL;
+@@ -2286,7 +2296,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2333,20 +2343,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out_put_keys;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out_put_keys;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -2355,9 +2400,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
+ }
+ } else {
+ /*
+@@ -2370,7 +2416,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 723fd3a..13b3c92 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -67,6 +67,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ clear_rt_mutex_waiters(lock);
+ }
+
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++ return waiter && waiter != PI_WAKEUP_INPROGRESS;
++}
++
+ /*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+@@ -196,7 +201,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+- if (!waiter)
++ if (!rt_mutex_real_waiter(waiter))
+ goto out_unlock_pi;
+
+ /*
+@@ -399,6 +404,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ int chain_walk = 0, res;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
++
++ /*
++ * In the case of futex requeue PI, this will be a proxy
++ * lock. The task will wake unaware that it is enqueued on
++ * this lock. Avoid blocking on two locks and corrupting
++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++ * flag. futex_wait_requeue_pi() sets this when it wakes up
++ * before requeue (due to a signal or timeout). Do not enqueue
++ * the task if PI_WAKEUP_INPROGRESS is set.
++ */
++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ return -EAGAIN;
++ }
++
++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+@@ -423,7 +445,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ __rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+@@ -517,7 +539,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ }
+ __rt_mutex_adjust_prio(owner);
+
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+@@ -551,7 +573,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || waiter->list_entry.prio == task->prio) {
++ if (!rt_mutex_real_waiter(waiter) ||
++ waiter->list_entry.prio == task->prio) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
+index 53a66c8..b43d832 100644
+--- a/kernel/rtmutex_common.h
++++ b/kernel/rtmutex_common.h
+@@ -103,6 +103,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch)
@@ -0,0 +1,116 @@
+From 51cb133ad218fc40922fd50d38c5acda32d367a3 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Tue, 10 Apr 2012 14:34:13 -0400
+Subject: [PATCH 193/303] futex: Fix bug on when a requeued RT task times out
+
+Requeue with timeout causes a bug with PREEMPT_RT_FULL.
+
+The bug comes from a timed out condition.
+
+ TASK 1 TASK 2
+ ------ ------
+ futex_wait_requeue_pi()
+ futex_wait_queue_me()
+ <timed out>
+
+ double_lock_hb();
+
+ raw_spin_lock(pi_lock);
+ if (current->pi_blocked_on) {
+ } else {
+ current->pi_blocked_on = PI_WAKE_INPROGRESS;
+	    raw_spin_unlock(pi_lock);
+ spin_lock(hb->lock); <-- blocked!
+
+ plist_for_each_entry_safe(this) {
+ rt_mutex_start_proxy_lock();
+ task_blocks_on_rt_mutex();
+ BUG_ON(task->pi_blocked_on)!!!!
+
+The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the
+problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to
+grab the hb->lock, which it fails to do. As the hb->lock is a mutex,
+it will block and set the "pi_blocked_on" to the hb->lock.
+
+When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGRESS fails
+because TASK 1's pi_blocked_on is no longer set to that, but is instead
+set to the hb->lock.
+
+The fix:
+
+When calling rt_mutex_start_proxy_lock(), a check is made to see
+if the proxy task's pi_blocked_on is set. If so, exit early.
+Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
+the proxy task that it is being requeued, and will handle things
+appropriately.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
+ kernel/rtmutex_common.h | 1 +
+ 2 files changed, 32 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 13b3c92..f8dcb7b 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+
+ static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+ {
+- return waiter && waiter != PI_WAKEUP_INPROGRESS;
++ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
++ waiter != PI_REQUEUE_INPROGRESS;
+ }
+
+ /*
+@@ -1010,6 +1011,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * In PREEMPT_RT there's an added race.
++ * If the task, that we are about to requeue, times out,
++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock(&lock->wait_lock);
++ return -EAGAIN;
++ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock(&task->pi_lock);
++#endif
++
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+
+ if (ret && !rt_mutex_owner(lock)) {
+diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
+index b43d832..47290ec 100644
+--- a/kernel/rtmutex_common.h
++++ b/kernel/rtmutex_common.h
+@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+ * PI-futex support (proxy locking functions, etc.):
+ */
+ #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
+
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
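
The -EAGAIN return introduced above is meant to be consumed by the requeue loop
in futex_requeue(). A minimal sketch of how such a caller might treat it; the
names pi_mutex, waiter and waiter_task are placeholders, not the actual
futex.c variables:

	/*
	 * Hypothetical requeue-loop fragment: -EAGAIN means the waiter woke
	 * up concurrently (signal or timeout), so it is skipped instead of
	 * being enqueued on the PI mutex.
	 */
	ret = rt_mutex_start_proxy_lock(pi_mutex, waiter, waiter_task, 0);
	if (ret == 1) {
		/* Lock was acquired on the waiter's behalf: wake it now. */
		wake_up_process(waiter_task);
	} else if (ret == -EAGAIN) {
		/* Waiter is already waking up on its own: leave it alone. */
		continue;
	} else if (ret < 0) {
		/* Deadlock detected or other error: stop requeueing. */
		break;
	}
	/* ret == 0: waiter_task is now blocked on pi_mutex via the proxy. */
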
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch)
@@ -0,0 +1,622 @@
+From 3cdcb118ba287097b259178fb427741fdefd9cad Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 10 Jun 2011 11:21:25 +0200
+Subject: [PATCH 194/303] rt-mutex-add-sleeping-spinlocks-support.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rtmutex.h | 35 ++++-
+ kernel/futex.c | 3 +-
+ kernel/rtmutex.c | 382 ++++++++++++++++++++++++++++++++++++++++++++---
+ kernel/rtmutex_common.h | 9 ++
+ 4 files changed, 403 insertions(+), 26 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 3561eb2..928d93e 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -29,9 +29,10 @@ struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct plist_head wait_list;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
+- const char *name, *file;
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++ const char *file;
++ const char *name;
+ int line;
+ void *magic;
+ #endif
+@@ -56,19 +57,39 @@ struct hrtimer_sleeper;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
++
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
++
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
++
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+diff --git a/kernel/futex.c b/kernel/futex.c
+index ea742ba..f15f0e4 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2321,8 +2321,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- debug_rt_mutex_init_waiter(&rt_waiter);
+- rt_waiter.task = NULL;
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index f8dcb7b..a7723d2 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -8,6 +8,12 @@
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
+ *
++ * Adaptive Spinlocks:
++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ * and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt at redhat.com>
++ *
+ * See Documentation/rt-mutex-design.txt for details.
+ */
+ #include <linux/spinlock.h>
+@@ -96,6 +102,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ }
+ #endif
+
++static inline void init_lists(struct rt_mutex *lock)
++{
++ if (unlikely(!lock->wait_list.node_list.prev))
++ plist_head_init(&lock->wait_list);
++}
++
+ /*
+ * Calculate task priority from the waiter list priority
+ *
+@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++ if (waiter->savestate)
++ wake_up_lock_sleeper(waiter->task);
++ else
++ wake_up_process(waiter->task);
++}
++
+ /*
+ * Max number of times we'll walk the boosting chain:
+ */
+@@ -253,13 +273,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ /* Release the task */
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!rt_mutex_owner(lock)) {
++ struct rt_mutex_waiter *lock_top_waiter;
++
+ /*
+ * If the requeue above changed the top waiter, then we need
+ * to wake the new top waiter up to try to get the lock.
+ */
+-
+- if (top_waiter != rt_mutex_top_waiter(lock))
+- wake_up_process(rt_mutex_top_waiter(lock)->task);
++ lock_top_waiter = rt_mutex_top_waiter(lock);
++ if (top_waiter != lock_top_waiter)
++ rt_mutex_wake_waiter(lock_top_waiter);
+ raw_spin_unlock(&lock->wait_lock);
+ goto out_put_task;
+ }
+@@ -304,6 +326,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ return ret;
+ }
+
++
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++/*
++ * Note that RT tasks are excluded from lateral-steals to prevent the
++ * introduction of an unbounded latency
++ */
++static inline int lock_is_stealable(struct task_struct *task,
++ struct task_struct *pendowner, int mode)
++{
++ if (mode == STEAL_NORMAL || rt_task(task)) {
++ if (task->prio >= pendowner->prio)
++ return 0;
++ } else if (task->prio > pendowner->prio)
++ return 0;
++ return 1;
++}
++
+ /*
+ * Try to take an rt-mutex
+ *
+@@ -313,8 +354,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * @task: the task which wants to acquire the lock
+ * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+- struct rt_mutex_waiter *waiter)
++static int
++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter, int mode)
+ {
+ /*
+ * We have to be careful here if the atomic speedups are
+@@ -347,12 +389,14 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ * 3) it is top waiter
+ */
+ if (rt_mutex_has_waiters(lock)) {
+- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+- if (!waiter || waiter != rt_mutex_top_waiter(lock))
+- return 0;
+- }
++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
++
++ if (task != pown && !lock_is_stealable(task, pown, mode))
++ return 0;
+ }
+
++ /* We got the lock. */
++
+ if (waiter || rt_mutex_has_waiters(lock)) {
+ unsigned long flags;
+ struct rt_mutex_waiter *top;
+@@ -377,7 +421,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+
+- /* We got the lock. */
+ debug_rt_mutex_lock(lock);
+
+ rt_mutex_set_owner(lock, task);
+@@ -387,6 +430,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ return 1;
+ }
+
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter)
++{
++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+ * Task blocks on lock.
+ *
+@@ -501,7 +551,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
+
+ 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+- wake_up_process(waiter->task);
++ rt_mutex_wake_waiter(waiter);
+ }
+
+ /*
+@@ -580,18 +630,315 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ return;
+ }
+
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+-
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ might_sleep();
++
++ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
++ rt_mutex_deadlock_account_lock(lock, current);
++ else
++ slowfn(lock);
++}
++
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
++ rt_mutex_deadlock_account_unlock(current);
++ else
++ slowfn(lock);
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *owner)
++{
++ int res = 0;
++
++ rcu_read_lock();
++ for (;;) {
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = 1;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *orig_owner)
++{
++ return 1;
++}
++#endif
++
++# define pi_lock(lock) raw_spin_lock_irq(lock)
++# define pi_unlock(lock) raw_spin_unlock_irq(lock)
++
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++ struct task_struct *lock_owner, *self = current;
++ struct rt_mutex_waiter waiter, *top_waiter;
++ int ret;
++
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
++
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ BUG_ON(rt_mutex_owner(lock) == self);
++
++ /*
++ * We save whatever state the task is in and we'll restore it
++ * after acquiring the lock taking real wakeups into account
++ * as well. We are serialized via pi_lock against wakeups. See
++ * try_to_wake_up().
++ */
++ pi_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++
++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++ BUG_ON(ret);
++
++ for (;;) {
++ /* Try to acquire the lock again. */
++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++ break;
++
++ top_waiter = rt_mutex_top_waiter(lock);
++ lock_owner = rt_mutex_owner(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_print_deadlock(&waiter);
++
++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++ schedule_rt_mutex(lock);
++
++ raw_spin_lock(&lock->wait_lock);
++
++ pi_lock(&self->pi_lock);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++ }
++
++ /*
++ * Restore the task state to current->saved_state. We set it
++ * to the original state above and the try_to_wake_up() code
++ * has possibly updated it when a real (non-rtmutex) wakeup
++ * happened while we were blocked. Clear saved_state so
++ * try_to_wake_up() does not get confused.
++ */
++ pi_lock(&self->pi_lock);
++ __set_current_state(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ pi_unlock(&self->pi_lock);
++
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit
++ * unconditionally. We might have to fix that up:
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!plist_node_empty(&waiter.list_entry));
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ raw_spin_lock(&lock->wait_lock);
++
++ debug_rt_mutex_unlock(lock);
++
++ rt_mutex_deadlock_account_unlock(current);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ wakeup_next_waiter(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++	/* Undo pi boosting when necessary */
++ rt_mutex_adjust_prio(current);
++}
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++#endif
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
++{
++ spin_lock(lock);
++ spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_unlock_wait);
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++ int ret;
++
++ local_bh_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ } else
++ local_bh_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_irqsave);
++
++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
++{
++ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
++ if (atomic_add_unless(atomic, -1, 1))
++ return 0;
++ migrate_disable();
++ rt_spin_lock(lock);
++ if (atomic_dec_and_test(atomic))
++ return 1;
++ rt_spin_unlock(lock);
++ migrate_enable();
++ return 0;
++}
++EXPORT_SYMBOL(atomic_dec_and_spin_lock);
++
++void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT_FULL */
++
+ /**
+ * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * @lock: the rt_mutex to take
+ * @state: the state the task should block in (TASK_INTERRUPTIBLE
+- * or TASK_UNINTERRUPTIBLE)
++ * or TASK_UNINTERRUPTIBLE)
+ * @timeout: the pre-initialized and started timer, or NULL for none
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+@@ -655,9 +1002,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+- debug_rt_mutex_init_waiter(&waiter);
++ rt_mutex_init_waiter(&waiter, false);
+
+ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+@@ -710,6 +1058,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
+ int ret = 0;
+
+ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
+
+ if (likely(rt_mutex_owner(lock) != current)) {
+
+@@ -942,7 +1291,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+ lock->owner = NULL;
+- raw_spin_lock_init(&lock->wait_lock);
+ plist_head_init(&lock->wait_list);
+
+ debug_rt_mutex_init(lock, name);
+@@ -962,7 +1310,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+ {
+- __rt_mutex_init(lock, NULL);
++ rt_mutex_init(lock);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
+index 47290ec..6ec3dc1 100644
+--- a/kernel/rtmutex_common.h
++++ b/kernel/rtmutex_common.h
+@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
+ struct plist_node pi_list_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
++ bool savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ struct pid *deadlock_task_pid;
+@@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ # include "rtmutex.h"
+ #endif
+
++static inline void
++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++ debug_rt_mutex_init_waiter(waiter);
++ waiter->task = NULL;
++ waiter->savestate = savestate;
++}
++
+ #endif
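
atomic_dec_and_spin_lock() added above follows the usual atomic_dec_and_lock()
contract: it returns non-zero with the lock held (and migration disabled) only
when the decrement hits zero. A short usage sketch; struct obj, obj_put() and
obj_list_lock are invented for illustration:

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		atomic_t refcount;
		struct list_head node;
	};

	static DEFINE_SPINLOCK(obj_list_lock);

	static void obj_put(struct obj *o)
	{
		/* Take obj_list_lock only when this put drops the last reference. */
		if (atomic_dec_and_spin_lock(&o->refcount, &obj_list_lock)) {
			list_del(&o->node);
			/* spin_unlock() pairs with the rt_spin_lock()/migrate_disable()
			 * taken inside atomic_dec_and_spin_lock(). */
			spin_unlock(&obj_list_lock);
			kfree(o);
		}
	}
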
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0195-spinlock-types-separate-raw.patch.patch)
@@ -0,0 +1,217 @@
+From 195e7e4a295c49821bd1925775095ae05faf6590 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 19:34:01 +0200
+Subject: [PATCH 195/303] spinlock-types-separate-raw.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rwlock_types.h | 4 ++
+ include/linux/spinlock_types.h | 74 +----------------------------------
+ include/linux/spinlock_types_nort.h | 33 ++++++++++++++++
+ include/linux/spinlock_types_raw.h | 56 ++++++++++++++++++++++++++
+ 4 files changed, 95 insertions(+), 72 deletions(-)
+ create mode 100644 include/linux/spinlock_types_nort.h
+ create mode 100644 include/linux/spinlock_types_raw.h
+
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index cc0072e..5317cd9 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 73548eb..5c8664d 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -9,79 +9,9 @@
+ * Released under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
++#include <linux/spinlock_types_raw.h>
+
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+- unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
+-
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
+-#else
+-# define SPIN_DEBUG_INIT(lockname)
+-#endif
+-
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++#include <linux/spinlock_types_nort.h>
+
+ #include <linux/rwlock_types.h>
+
+diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
+new file mode 100644
+index 0000000..f1dac1f
+--- /dev/null
++++ b/include/linux/spinlock_types_nort.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
+new file mode 100644
+index 0000000..edffc4d
+--- /dev/null
++++ b/include/linux/spinlock_types_raw.h
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep.h>
++
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++ unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC 0xdead4ead
++
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++ { \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
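
The point of carving out spinlock_types_raw.h is that raw_spinlock_t keeps its
spinning, IRQ-safe semantics even once later patches in this series turn
spinlock_t into a sleeping lock. A minimal sketch of the resulting split;
hw_state_lock and data_lock are hypothetical declarations, not part of the
patch:

	#include <linux/spinlock.h>

	/* Must remain a real spinning lock on PREEMPT_RT_FULL,
	 * e.g. because it is taken from hard-IRQ context. */
	static DEFINE_RAW_SPINLOCK(hw_state_lock);

	/* Ordinary data-structure lock: becomes an rtmutex-backed
	 * sleeping lock once the RT substitution is in place. */
	static DEFINE_SPINLOCK(data_lock);
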
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch)
@@ -0,0 +1,23 @@
+From 936b6c11aaf43cd5ab0eba5757fd0369611d4a50 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 20:06:39 +0200
+Subject: [PATCH 196/303] rtmutex-avoid-include-hell.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rtmutex.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 928d93e..5ebd0bb 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -14,7 +14,7 @@
+
+ #include <linux/linkage.h>
+ #include <linux/plist.h>
+-#include <linux/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
+
+ extern int max_lock_depth; /* for sysctl */
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0197-rt-add-rt-spinlocks.patch.patch)
@@ -0,0 +1,129 @@
+From 12553d8a036b4c25b3823cba7dd6c1198b0c9f06 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 19:43:35 +0200
+Subject: [PATCH 197/303] rt-add-rt-spinlocks.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rwlock_types_rt.h | 33 +++++++++++++++++++++++++
+ include/linux/spinlock_types.h | 11 ++++++---
+ include/linux/spinlock_types_rt.h | 49 +++++++++++++++++++++++++++++++++++++
+ 3 files changed, 90 insertions(+), 3 deletions(-)
+ create mode 100644 include/linux/rwlock_types_rt.h
+ create mode 100644 include/linux/spinlock_types_rt.h
+
+diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
+new file mode 100644
+index 0000000..b138321
+--- /dev/null
++++ b/include/linux/rwlock_types_rt.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * rwlocks - rtmutex which allows single reader recursion
++ */
++typedef struct {
++ struct rt_mutex lock;
++ int read_depth;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} rwlock_t;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#define __RW_LOCK_UNLOCKED(name) \
++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
++
++#endif
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 5c8664d..10bac71 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -11,8 +11,13 @@
+
+ #include <linux/spinlock_types_raw.h>
+
+-#include <linux/spinlock_types_nort.h>
+-
+-#include <linux/rwlock_types.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
++#else
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
+new file mode 100644
+index 0000000..1fe8fc0
+--- /dev/null
++++ b/include/linux/spinlock_types_rt.h
+@@ -0,0 +1,49 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
++
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ .file = __FILE__, \
++ .line = __LINE__ , \
++ }
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++#endif
++
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
++
++#define __DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch)
@@ -0,0 +1,148 @@
+From 6e11d143fe8226f1c1ecaf7c906e73c7bbdc04e6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 20:56:22 +0200
+Subject: [PATCH 198/303] rt-add-rt-to-mutex-headers.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/mutex.h | 21 ++++++++----
+ include/linux/mutex_rt.h | 84 ++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 98 insertions(+), 7 deletions(-)
+ create mode 100644 include/linux/mutex_rt.h
+
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 9121595..bdf1da2 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -17,6 +17,17 @@
+
+ #include <linux/atomic.h>
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { .name = #lockname }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -95,13 +106,6 @@ do { \
+ static inline void mutex_destroy(struct mutex *lock) {}
+ #endif
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { .name = #lockname }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
+ */
+ extern int mutex_trylock(struct mutex *lock);
+ extern void mutex_unlock(struct mutex *lock);
++
++#endif /* !PREEMPT_RT_FULL */
++
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+ #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
+new file mode 100644
+index 0000000..c38a44b
+--- /dev/null
++++ b/include/linux/mutex_rt.h
+@@ -0,0 +1,84 @@
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname) \
++ { \
++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
++ }
++
++#define DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++#endif
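
Because the RT variant keeps the mutex_init()/mutex_lock()/mutex_unlock()
names, existing users compile unchanged against either implementation. A small
sketch; the cfg_mutex/set_cfg() snippet is hypothetical driver-style code, not
taken from the patch:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(cfg_mutex);
	static int cfg_value;

	static void set_cfg(int v)
	{
		/* Sleeping lock in both configurations; on PREEMPT_RT_FULL it is
		 * backed by an rt_mutex and therefore priority inheriting. */
		mutex_lock(&cfg_mutex);
		cfg_value = v;
		mutex_unlock(&cfg_mutex);
	}
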
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0199-rwsem-add-rt-variant.patch.patch)
@@ -0,0 +1,162 @@
+From e0de6adc8ec6bbb7fa0960cf2fd00ea97d7d4055 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 21:02:53 +0200
+Subject: [PATCH 199/303] rwsem-add-rt-variant.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rwsem.h | 6 +++
+ include/linux/rwsem_rt.h | 105 ++++++++++++++++++++++++++++++++++++++++++++++
+ lib/Makefile | 3 ++
+ 3 files changed, 114 insertions(+)
+ create mode 100644 include/linux/rwsem_rt.h
+
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 63d4065..209be4b 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -17,6 +17,10 @@
+ #include <asm/system.h>
+ #include <linux/atomic.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ struct rw_semaphore;
+
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+@@ -131,4 +135,6 @@ extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+ # define down_write_nested(sem, subclass) down_write(sem)
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
+diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
+new file mode 100644
+index 0000000..802c690
+--- /dev/null
++++ b/include/linux/rwsem_rt.h
+@@ -0,0 +1,105 @@
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++/*
++ * RW-semaphores are a spinlock plus a reader-depth count.
++ *
++ * Note that the semantics are different from the usual
++ * Linux rw-sems, in PREEMPT_RT mode we do not allow
++ * multiple readers to hold the lock at once, we only allow
++ * a read-lock owner to read-lock recursively. This is
++ * better for latency, makes the implementation inherently
++ * fair and makes it simpler as well.
++ */
++
++#include <linux/rtmutex.h>
++
++struct rw_semaphore {
++ struct rt_mutex lock;
++ int read_depth;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(lockname) \
++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++ struct lock_class_key *key);
++
++# define rt_init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(sem)->lock); \
++ __rt_rwsem_init((sem), #sem, &__key); \
++} while (0)
++
++extern void rt_down_write(struct rw_semaphore *rwsem);
++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_read(struct rw_semaphore *rwsem);
++extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
++extern void rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_write(struct rw_semaphore *rwsem);
++extern void rt_downgrade_write(struct rw_semaphore *rwsem);
++
++#define init_rwsem(sem) rt_init_rwsem(sem)
++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ rt_down_read(sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_read_trylock(sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ rt_down_write(sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_write_trylock(sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ rt_up_read(sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ rt_up_write(sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ rt_downgrade_write(sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return rt_down_read_nested(sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ rt_down_write_nested(sem, subclass);
++}
++
++#endif
+diff --git a/lib/Makefile b/lib/Makefile
+index a4da283..5026c91 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -37,8 +37,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
+ obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
+ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
++endif
+
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
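
The header comment above states the RT rw-semaphore rule: there is never more
than one reader, but the task that already holds the read side may take it
again. A short sketch of the allowed owner recursion; cache_sem and the lookup
helpers are invented for illustration:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(cache_sem);

	static int cache_lookup_locked(int key)
	{
		/* Recursive read-lock by the owning task: read_depth goes to 2. */
		down_read(&cache_sem);
		/* ... look the key up ... */
		up_read(&cache_sem);
		return 0;
	}

	static int cache_lookup(int key)
	{
		int ret;

		down_read(&cache_sem);
		ret = cache_lookup_locked(key);
		up_read(&cache_sem);
		return ret;
	}

A second task calling down_read() while cache_sem is held blocks until
read_depth drops back to zero, unlike the mainline rwsem where independent
readers run in parallel.
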
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch)
@@ -0,0 +1,914 @@
+From b2187b6419c4acb45623619125a24e7318af1d38 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 26 Jul 2009 19:39:56 +0200
+Subject: [PATCH 200/303] rt: Add the preempt-rt lock replacement APIs
+
+Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
+based locking functions for preempt-rt.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rwlock_rt.h | 123 +++++++++++
+ include/linux/spinlock.h | 12 +-
+ include/linux/spinlock_api_smp.h | 4 +-
+ include/linux/spinlock_rt.h | 156 ++++++++++++++
+ kernel/Makefile | 9 +-
+ kernel/rt.c | 442 ++++++++++++++++++++++++++++++++++++++
+ kernel/spinlock.c | 7 +
+ lib/spinlock_debug.c | 5 +
+ 8 files changed, 754 insertions(+), 4 deletions(-)
+ create mode 100644 include/linux/rwlock_rt.h
+ create mode 100644 include/linux/spinlock_rt.h
+ create mode 100644 kernel/rt.c
+
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+new file mode 100644
+index 0000000..853ee36
+--- /dev/null
++++ b/include/linux/rwlock_rt.h
+@@ -0,0 +1,123 @@
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(rwl)->lock); \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++
++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++
++#define read_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ migrate_disable(); \
++ flags = rt_read_lock_irqsave(lock); \
++ } while (0)
++
++#define write_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ migrate_disable(); \
++ flags = rt_write_lock_irqsave(lock); \
++ } while (0)
++
++#define read_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_irq(lock) read_lock(lock)
++
++#define write_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_irq(lock) write_lock(lock)
++
++#define read_unlock(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define read_unlock_bh(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define read_unlock_irq(lock) read_unlock(lock)
++
++#define write_unlock(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define write_unlock_bh(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define write_unlock_irq(lock) write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#endif
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 7df6c17..5fe7e40 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -397,4 +405,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* __LINUX_SPINLOCK_H */
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index e253ccd..2a5ba05 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+new file mode 100644
+index 0000000..205ca95
+--- /dev/null
++++ b/include/linux/spinlock_rt.h
+@@ -0,0 +1,156 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key); \
++} while (0)
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock:
++ * (for trylock they can use rt_mutex_trylock() directly.
++ */
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++
++#define spin_lock_local(lock) rt_spin_lock(lock)
++#define spin_unlock_local(lock) rt_spin_unlock(lock)
++
++#define spin_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_irq(lock) spin_lock(lock)
++
++#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++#else
++# define spin_lock_nested(lock, subclass) spin_lock(lock)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++
++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
++{
++ unsigned long flags = 0;
++#ifdef CONFIG_TRACE_IRQFLAGS
++ flags = rt_spin_lock_trace_flags(lock);
++#else
++ spin_lock(lock); /* lock_local */
++#endif
++ return flags;
++}
++
++/* FIXME: we need rt_spin_lock_nest_lock */
++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
++
++#define spin_unlock(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define spin_unlock_bh(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define spin_unlock_irq(lock) spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ spin_unlock(lock); \
++ } while (0)
++
++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock) spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags) \
++ rt_spin_trylock_irqsave(lock, &(flags))
++
++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock) ((lock)->break_lock)
++#else
++# define spin_is_contended(lock) (((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++ return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++ return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++ BUG_ON(!spin_is_locked(lock));
++}
++
++#define atomic_dec_and_lock(atomic, lock) \
++ atomic_dec_and_spin_lock(atomic, lock)
++
++#endif
+diff --git a/kernel/Makefile b/kernel/Makefile
+index e898c5b..c961d3a 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -7,8 +7,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
+ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+ signal.o sys.o kmod.o workqueue.o pid.o \
+ rcupdate.o extable.o params.o posix-timers.o \
+- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
++ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
++ hrtimer.o nsproxy.o srcu.o semaphore.o \
+ notifier.o ksysfs.o sched_clock.o cred.o \
+ async.o range.o
+ obj-y += groups.o
+@@ -29,7 +29,11 @@ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += time/
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -41,6 +45,7 @@ endif
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
+ obj-$(CONFIG_SMP) += smp.o
+ ifneq ($(CONFIG_SMP),y)
+diff --git a/kernel/rt.c b/kernel/rt.c
+new file mode 100644
+index 0000000..092d6b3
+--- /dev/null
++++ b/kernel/rt.c
+@@ -0,0 +1,442 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo at redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx at timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells at redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea at suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx at timesys.com>
++ * - major rework based on Esben Nielsens initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt at goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++ lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++ mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++ int ret = rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/*
++ * rwlock_t functions
++ */
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret = rt_mutex_trylock(&rwlock->lock);
++
++ migrate_disable();
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ migrate_disable();
++ ret = rt_write_trylock(rwlock);
++ if (!ret)
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock_irqsave);
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the lock,
++ * but not when read_depth == 0 which means that the lock is
++ * write locked.
++ */
++ migrate_disable();
++ if (rt_mutex_owner(lock) != current)
++ ret = rt_mutex_trylock(lock);
++ else if (!rwlock->read_depth)
++ ret = 0;
++
++ if (ret) {
++ rwlock->read_depth++;
++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++ } else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++
++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
++
++ /*
++ * recursive read locks succeed when current owns the lock
++ */
++ if (rt_mutex_owner(lock) != current)
++ __rt_spin_lock(lock);
++ rwlock->read_depth++;
++}
++
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++
++ /* Release the lock only when read_depth is down to 0 */
++ if (--rwlock->read_depth == 0)
++ __rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_write_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_write_lock_irqsave);
++
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_read_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_read_lock_irqsave);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
++ rwlock->lock.save_state = 1;
++ rwlock->read_depth = 0;
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++/*
++ * rw_semaphores
++ */
++
++void rt_up_write(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_write);
++
++void rt_up_read(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ if (--rwsem->read_depth == 0)
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_read);
++
++/*
++ * downgrade a write lock into a read lock
++ * - just wake up any readers at the front of the queue
++ */
++void rt_downgrade_write(struct rw_semaphore *rwsem)
++{
++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
++ rwsem->read_depth = 1;
++}
++EXPORT_SYMBOL(rt_downgrade_write);
++
++int rt_down_write_trylock(struct rw_semaphore *rwsem)
++{
++ int ret = rt_mutex_trylock(&rwsem->lock);
++
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_trylock);
++
++void rt_down_write(struct rw_semaphore *rwsem)
++{
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write);
++
++void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested);
++
++int rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the rwsem,
++ * but not when read_depth == 0 which means that the rwsem is
++ * write locked.
++ */
++ if (rt_mutex_owner(lock) != current)
++ ret = rt_mutex_trylock(&rwsem->lock);
++ else if (!rwsem->read_depth)
++ ret = 0;
++
++ if (ret) {
++ rwsem->read_depth++;
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_read_trylock);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++
++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
++
++ if (rt_mutex_owner(lock) != current)
++ rt_mutex_lock(&rwsem->lock);
++ rwsem->read_depth++;
++}
++
++void rt_down_read(struct rw_semaphore *rwsem)
++{
++ __rt_down_read(rwsem, 0);
++}
++EXPORT_SYMBOL(rt_down_read);
++
++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ __rt_down_read(rwsem, subclass);
++}
++EXPORT_SYMBOL(rt_down_read_nested);
++
++void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
++ lockdep_init_map(&rwsem->dep_map, name, key, 0);
++#endif
++ rwsem->read_depth = 0;
++ rwsem->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__rt_rwsem_init);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++ /* dec if we can't possibly hit 0 */
++ if (atomic_add_unless(cnt, -1, 1))
++ return 0;
++ /* we might hit 0, so take the lock */
++ mutex_lock(lock);
++ if (!atomic_dec_and_test(cnt)) {
++ /* when we actually did the dec, we didn't hit 0 */
++ mutex_unlock(lock);
++ return 0;
++ }
++ /* we hit 0, and we hold the lock */
++ return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+diff --git a/kernel/spinlock.c b/kernel/spinlock.c
+index 84c7d96..47fd3cf 100644
+--- a/kernel/spinlock.c
++++ b/kernel/spinlock.c
+@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
+index 5f3eacd..f824704 100644
+--- a/lib/spinlock_debug.c
++++ b/lib/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
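To make the substitution concrete: with these macros a PREEMPT_RT_FULL spinlock section no longer disables interrupts or preemption; it pins the task to its CPU and takes a priority-inheriting rt_mutex, so the section may sleep. A minimal sketch of what a typical caller expands to (illustration only, not part of the patch; the lock name is hypothetical):

    unsigned long flags;

    spin_lock_irqsave(&some_lock, flags);
            /* expands to: flags = 0; migrate_disable(); rt_spin_lock(&some_lock);
             * hard interrupts stay enabled and the task may block here */
    /* ... critical section ... */
    spin_unlock_irqrestore(&some_lock, flags);
            /* expands to: rt_spin_unlock(&some_lock); migrate_enable();
             * flags is only type-checked and otherwise ignored */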
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0201-rwlocks-Fix-section-mismatch.patch)
@@ -0,0 +1,65 @@
+From 602427c6084befa761cc698eb86ee906a0af16b2 Mon Sep 17 00:00:00 2001
+From: John Kacur <jkacur at redhat.com>
+Date: Mon, 19 Sep 2011 11:09:27 +0200
+Subject: [PATCH 201/303] rwlocks: Fix section mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes the following build error for the preempt-rt kernel.
+
+make kernel/fork.o
+ CC kernel/fork.o
+kernel/fork.c:90: error: section of ‘tasklist_lock’ conflicts with previous declaration
+make[2]: *** [kernel/fork.o] Error 1
+make[1]: *** [kernel/fork.o] Error 2
+
+The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default.
+The non-rt kernels explicitly cache align only the tasklist_lock in
+kernel/fork.c.
+That can create a build conflict. This fixes the build problem by making the
+non-rt kernels cache align RWLOCKs by default. The side effect is that
+the other RWLOCKs are also cache aligned for non-rt.
+
+This is a short term solution for rt only.
+The longer term solution would be to push the cache aligned DEFINE_RWLOCK
+to mainline. If there are objections, then we could create a
+DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature.
+
+Comments? Objections?
+
+Signed-off-by: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rwlock_types.h | 3 ++-
+ kernel/fork.c | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index 5317cd9..d0da966 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
+@@ -47,6 +47,7 @@ typedef struct {
+ RW_DEP_MAP_INIT(lockname) }
+ #endif
+
+-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+ #endif /* __LINUX_RWLOCK_TYPES_H */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index e2d8055..acc6477 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -88,7 +88,7 @@ int max_threads; /* tunable limit on nr_threads */
+
+ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
+
+-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
++DEFINE_RWLOCK(tasklist_lock); /* outer */
+
+ #ifdef CONFIG_PROVE_RCU
+ int lockdep_tasklist_lock_is_held(void)
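The net effect for any user of the macro, shown with a hypothetical lock name:

    DEFINE_RWLOCK(my_rwlock);
    /* now emits the same cache-aligned definition the rt series already uses:
     *   rwlock_t my_rwlock __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(my_rwlock);
     * so fork.c must not add its own __cacheline_aligned annotation on
     * tasklist_lock, which is what produced the section conflict above. */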
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch)
@@ -0,0 +1,78 @@
+From bfdde541ef1d11253fc686fb3b463adbcfe9ec7f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 22:08:38 +0200
+Subject: [PATCH 202/303] timer-handle-idle-trylock-in-get-next-timer-irq.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/spinlock_rt.h | 12 +++++++++++-
+ kernel/rtmutex.c | 7 +------
+ kernel/timer.c | 7 ++++---
+ 3 files changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+index 205ca95..3b555b4 100644
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -51,7 +51,17 @@ extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+ #define spin_lock_irq(lock) spin_lock(lock)
+
+-#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock) \
++({ \
++ int __locked; \
++ migrate_disable(); \
++ __locked = spin_do_trylock(lock); \
++ if (!__locked) \
++ migrate_enable(); \
++ __locked; \
++})
+
+ #ifdef CONFIG_LOCKDEP
+ # define spin_lock_nested(lock, subclass) \
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index a7723d2..9850dc0 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -861,15 +861,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ {
+- int ret;
++ int ret = rt_mutex_trylock(&lock->lock);
+
+- migrate_disable();
+- ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+- else
+- migrate_enable();
+-
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_spin_trylock);
+diff --git a/kernel/timer.c b/kernel/timer.c
+index fbcbd30..7f954e7 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1325,13 +1325,14 @@ unsigned long get_next_timer_interrupt(unsigned long now)
+ /*
+ * On PREEMPT_RT we cannot sleep here. If the trylock does not
+ * succeed then we return the worst-case 'expires in 1 tick'
+- * value:
++ * value. We use the rt functions here directly to avoid a
++ * migrate_disable() call.
+ */
+- if (spin_trylock(&base->lock)) {
++ if (spin_do_trylock(&base->lock)) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+- spin_unlock(&base->lock);
++ rt_spin_unlock(&base->lock);
+ } else {
+ expires = now + 1;
+ }
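The split gives callers that manage CPU pinning themselves a raw trylock without the migrate_disable()/migrate_enable() pair, while ordinary callers keep the pinning semantics. A short sketch of the two paths (illustration only; some_lock is hypothetical, the second form mirrors the timer code above):

    /* ordinary path: pin the task, unpin again if the lock was not taken */
    if (spin_trylock(&some_lock)) {
            /* ... */
            spin_unlock(&some_lock);
    }

    /* raw path: no migrate_disable(), so the unlock must also bypass the wrapper */
    if (spin_do_trylock(&base->lock)) {
            /* ... */
            rt_spin_unlock(&base->lock);
    }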
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch)
@@ -0,0 +1,29 @@
+From 8d46e711747bdb19dfab15dc4a0e5139ebea0d2f Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Fri, 3 Jul 2009 08:30:30 -0500
+Subject: [PATCH 203/303] RCU: Force PREEMPT_RCU for PREEMPT-RT
+
+PREEMPT_RT relies on PREEMPT_RCU - only allow RCU to be configured
+interactively in the !PREEMPT_RT case.
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Link: http://lkml.kernel.org/n/tip-j1y0phicu6s6pu8guku2vca0@git.kernel.org
+---
+ init/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 720c182..dbc82d0 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -731,7 +731,6 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
+- depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0204-rcu-Frob-softirq-test.patch)
@@ -0,0 +1,169 @@
+From 17fdf208ef473a2d13fb9e997aaf781557ced355 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Sat, 13 Aug 2011 00:23:17 +0200
+Subject: [PATCH 204/303] rcu: Frob softirq test
+
+With RT_FULL we get the below wreckage:
+
+[ 126.060484] =======================================================
+[ 126.060486] [ INFO: possible circular locking dependency detected ]
+[ 126.060489] 3.0.1-rt10+ #30
+[ 126.060490] -------------------------------------------------------
+[ 126.060492] irq/24-eth0/1235 is trying to acquire lock:
+[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060503]
+[ 126.060504] but task is already holding lock:
+[ 126.060506] (&p->pi_lock){-...-.}, at: [<ffffffff81074fdc>] try_to_wake_up+0x35/0x429
+[ 126.060511]
+[ 126.060511] which lock already depends on the new lock.
+[ 126.060513]
+[ 126.060514]
+[ 126.060514] the existing dependency chain (in reverse order) is:
+[ 126.060516]
+[ 126.060516] -> #1 (&p->pi_lock){-...-.}:
+[ 126.060519] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060524] [<ffffffff8150291e>] _raw_spin_lock_irqsave+0x4b/0x85
+[ 126.060527] [<ffffffff810b5aa4>] task_blocks_on_rt_mutex+0x36/0x20f
+[ 126.060531] [<ffffffff815019bb>] rt_mutex_slowlock+0xd1/0x15a
+[ 126.060534] [<ffffffff81501ae3>] rt_mutex_lock+0x2d/0x2f
+[ 126.060537] [<ffffffff810d9020>] rcu_boost+0xad/0xde
+[ 126.060541] [<ffffffff810d90ce>] rcu_boost_kthread+0x7d/0x9b
+[ 126.060544] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060547] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060551]
+[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}:
+[ 126.060555] [<ffffffff810af1b8>] __lock_acquire+0x1157/0x1816
+[ 126.060558] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060561] [<ffffffff8150279e>] _raw_spin_lock+0x40/0x73
+[ 126.060564] [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060566] [<ffffffff81501ce7>] rt_mutex_unlock+0x27/0x29
+[ 126.060569] [<ffffffff810d9f86>] rcu_read_unlock_special+0x17e/0x1c4
+[ 126.060573] [<ffffffff810da014>] __rcu_read_unlock+0x48/0x89
+[ 126.060576] [<ffffffff8106847a>] select_task_rq_rt+0xc7/0xd5
+[ 126.060580] [<ffffffff8107511c>] try_to_wake_up+0x175/0x429
+[ 126.060583] [<ffffffff81075425>] wake_up_process+0x15/0x17
+[ 126.060585] [<ffffffff81080a51>] wakeup_softirqd+0x24/0x26
+[ 126.060590] [<ffffffff81081df9>] irq_exit+0x49/0x55
+[ 126.060593] [<ffffffff8150a3bd>] smp_apic_timer_interrupt+0x8a/0x98
+[ 126.060597] [<ffffffff81509793>] apic_timer_interrupt+0x13/0x20
+[ 126.060600] [<ffffffff810d5952>] irq_forced_thread_fn+0x1b/0x44
+[ 126.060603] [<ffffffff810d582c>] irq_thread+0xde/0x1af
+[ 126.060606] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060608] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060611]
+[ 126.060612] other info that might help us debug this:
+[ 126.060614]
+[ 126.060615] Possible unsafe locking scenario:
+[ 126.060616]
+[ 126.060617] CPU0 CPU1
+[ 126.060619] ---- ----
+[ 126.060620] lock(&p->pi_lock);
+[ 126.060623] lock(&(lock)->wait_lock);
+[ 126.060625] lock(&p->pi_lock);
+[ 126.060627] lock(&(lock)->wait_lock);
+[ 126.060629]
+[ 126.060629] *** DEADLOCK ***
+[ 126.060630]
+[ 126.060632] 1 lock held by irq/24-eth0/1235:
+[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [<ffffffff81074fdc>] try_to_wake_up+0x35/0x429
+[ 126.060638]
+[ 126.060638] stack backtrace:
+[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30
+[ 126.060643] Call Trace:
+[ 126.060644] <IRQ> [<ffffffff810acbde>] print_circular_bug+0x289/0x29a
+[ 126.060651] [<ffffffff810af1b8>] __lock_acquire+0x1157/0x1816
+[ 126.060655] [<ffffffff810ab3aa>] ? trace_hardirqs_off_caller+0x1f/0x99
+[ 126.060658] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060661] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060664] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060668] [<ffffffff8150279e>] _raw_spin_lock+0x40/0x73
+[ 126.060671] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060674] [<ffffffff810d9655>] ? rcu_report_qs_rsp+0x87/0x8c
+[ 126.060677] [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060680] [<ffffffff810d9ea3>] ? rcu_read_unlock_special+0x9b/0x1c4
+[ 126.060683] [<ffffffff81501ce7>] rt_mutex_unlock+0x27/0x29
+[ 126.060687] [<ffffffff810d9f86>] rcu_read_unlock_special+0x17e/0x1c4
+[ 126.060690] [<ffffffff810da014>] __rcu_read_unlock+0x48/0x89
+[ 126.060693] [<ffffffff8106847a>] select_task_rq_rt+0xc7/0xd5
+[ 126.060696] [<ffffffff810683da>] ? select_task_rq_rt+0x27/0xd5
+[ 126.060701] [<ffffffff810a852a>] ? clockevents_program_event+0x8e/0x90
+[ 126.060704] [<ffffffff8107511c>] try_to_wake_up+0x175/0x429
+[ 126.060708] [<ffffffff810a95dc>] ? tick_program_event+0x1f/0x21
+[ 126.060711] [<ffffffff81075425>] wake_up_process+0x15/0x17
+[ 126.060715] [<ffffffff81080a51>] wakeup_softirqd+0x24/0x26
+[ 126.060718] [<ffffffff81081df9>] irq_exit+0x49/0x55
+[ 126.060721] [<ffffffff8150a3bd>] smp_apic_timer_interrupt+0x8a/0x98
+[ 126.060724] [<ffffffff81509793>] apic_timer_interrupt+0x13/0x20
+[ 126.060726] <EOI> [<ffffffff81072855>] ? migrate_disable+0x75/0x12d
+[ 126.060733] [<ffffffff81080a61>] ? local_bh_disable+0xe/0x1f
+[ 126.060736] [<ffffffff81080a70>] ? local_bh_disable+0x1d/0x1f
+[ 126.060739] [<ffffffff810d5952>] irq_forced_thread_fn+0x1b/0x44
+[ 126.060742] [<ffffffff81502ac0>] ? _raw_spin_unlock_irq+0x3b/0x59
+[ 126.060745] [<ffffffff810d582c>] irq_thread+0xde/0x1af
+[ 126.060748] [<ffffffff810d5937>] ? irq_thread_fn+0x3a/0x3a
+[ 126.060751] [<ffffffff810d574e>] ? irq_finalize_oneshot+0xd1/0xd1
+[ 126.060754] [<ffffffff810d574e>] ? irq_finalize_oneshot+0xd1/0xd1
+[ 126.060757] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060761] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060764] [<ffffffff81069ed7>] ? finish_task_switch+0x87/0x10a
+[ 126.060768] [<ffffffff81502ec4>] ? retint_restore_args+0xe/0xe
+[ 126.060771] [<ffffffff8109a6c7>] ? __init_kthread_worker+0x8c/0x8c
+[ 126.060774] [<ffffffff81509b10>] ? gs_change+0xb/0xb
+
+Because irq_exit() does:
+
+void irq_exit(void)
+{
+ account_system_vtime(current);
+ trace_hardirq_exit();
+ sub_preempt_count(IRQ_EXIT_OFFSET);
+ if (!in_interrupt() && local_softirq_pending())
+ invoke_softirq();
+
+ ...
+}
+
+Which triggers a wakeup, which uses RCU, now if the interrupted task has
+t->rcu_read_unlock_special set, the rcu usage from the wakeup will end
+up in rcu_read_unlock_special(). rcu_read_unlock_special() will test
+for in_irq(), which will fail as we just decremented preempt_count
+with IRQ_EXIT_OFFSET, and in_serving_softirq(), which for
+PREEMPT_RT_FULL reads:
+
+int in_serving_softirq(void)
+{
+ int res;
+
+ preempt_disable();
+ res = __get_cpu_var(local_softirq_runner) == current;
+ preempt_enable();
+ return res;
+}
+
+Which will thus also fail, resulting in the above wreckage.
+
+The 'somewhat' ugly solution is to open-code the preempt_count() test
+in rcu_read_unlock_special().
+
+Also, we're not at all sure how ->rcu_read_unlock_special gets set
+here... so this is very likely a bandaid and more thought is required.
+
+Cc: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+---
+ kernel/rcutree_plugin.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 73cab33..2e63942 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -336,7 +336,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
+ }
+
+ /* Hardware IRQ handlers cannot block. */
+- if (in_irq() || in_serving_softirq()) {
++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ local_irq_restore(flags);
+ return;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch)
@@ -0,0 +1,261 @@
+From a9e8a2f0883f3b7390d57862d016db3a1d76035e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 5 Oct 2011 11:59:38 -0700
+Subject: [PATCH 205/303] rcu: Merge RCU-bh into RCU-preempt
+
+The Linux kernel has long RCU-bh read-side critical sections that
+intolerably increase scheduling latency under mainline's RCU-bh rules,
+which include RCU-bh read-side critical sections being non-preemptible.
+This patch therefore arranges for RCU-bh to be implemented in terms of
+RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.
+
+This has the downside of defeating the purpose of RCU-bh, namely,
+handling the case where the system is subjected to a network-based
+denial-of-service attack that keeps at least one CPU doing full-time
+softirq processing. This issue will be fixed by a later commit.
+
+The current commit will need some work to make it appropriate for
+mainline use, for example, it needs to be extended to cover Tiny RCU.
+
+[ paulmck: Added a useful changelog ]
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rcupdate.h | 25 +++++++++++++++++++++++++
+ include/linux/rcutree.h | 18 ++++++++++++++++--
+ kernel/rcupdate.c | 2 ++
+ kernel/rcutree.c | 10 ++++++++++
+ 4 files changed, 53 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index a0082e2..7c31d86 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -92,6 +92,9 @@ extern void call_rcu(struct rcu_head *head,
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh call_rcu
++#else
+ /**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+@@ -112,6 +115,7 @@ extern void call_rcu(struct rcu_head *head,
+ */
+ extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
++#endif
+
+ /**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+@@ -181,7 +185,13 @@ static inline int rcu_preempt_depth(void)
+
+ /* Internal to kernel */
+ extern void rcu_sched_qs(int cpu);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void rcu_bh_qs(int cpu);
++#else
++static inline void rcu_bh_qs(int cpu) { }
++#endif
++
+ extern void rcu_check_callbacks(int cpu, int user);
+ struct notifier_block;
+
+@@ -281,7 +291,14 @@ static inline int rcu_read_lock_held(void)
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++ return rcu_read_lock_held();
++}
++#else
+ extern int rcu_read_lock_bh_held(void);
++#endif
+
+ /**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -684,8 +701,12 @@ static inline void rcu_read_unlock(void)
+ static inline void rcu_read_lock_bh(void)
+ {
+ local_bh_disable();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_lock();
++#else
+ __acquire(RCU_BH);
+ rcu_read_acquire_bh();
++#endif
+ }
+
+ /*
+@@ -695,8 +716,12 @@ static inline void rcu_read_lock_bh(void)
+ */
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_unlock();
++#else
+ rcu_read_release_bh();
+ __release(RCU_BH);
++#endif
+ local_bh_enable();
+ }
+
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index 6745846..800b840 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
+
+ #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void synchronize_rcu_bh(void);
++#else
++# define synchronize_rcu_bh() synchronize_rcu()
++#endif
+ extern void synchronize_sched_expedited(void);
+ extern void synchronize_rcu_expedited(void);
+
+@@ -67,19 +71,29 @@ static inline void synchronize_rcu_bh_expedited(void)
+ }
+
+ extern void rcu_barrier(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh rcu_barrier
++#else
+ extern void rcu_barrier_bh(void);
++#endif
+ extern void rcu_barrier_sched(void);
+
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ extern long rcu_batches_completed(void);
+-extern long rcu_batches_completed_bh(void);
+ extern long rcu_batches_completed_sched(void);
+
+ extern void rcu_force_quiescent_state(void);
+-extern void rcu_bh_force_quiescent_state(void);
+ extern void rcu_sched_force_quiescent_state(void);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++extern void rcu_bh_force_quiescent_state(void);
++extern long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
++# define rcu_batches_completed_bh rcu_batches_completed
++#endif
++
+ /* A context switch is a grace period for RCU-sched and RCU-bh. */
+ static inline int rcu_blocking_is_gp(void)
+ {
+diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
+index c5b98e5..24dcc71 100644
+--- a/kernel/rcupdate.c
++++ b/kernel/rcupdate.c
+@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+ *
+@@ -96,6 +97,7 @@ int rcu_read_lock_bh_held(void)
+ return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index d0e5491..f794cdf 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -170,6 +170,7 @@ void rcu_sched_qs(int cpu)
+ rdp->passed_quiesce = 1;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void rcu_bh_qs(int cpu)
+ {
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+@@ -180,6 +181,7 @@ void rcu_bh_qs(int cpu)
+ trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ rdp->passed_quiesce = 1;
+ }
++#endif
+
+ /*
+ * Note a context switch. This is a quiescent state for RCU-sched,
+@@ -225,6 +227,7 @@ long rcu_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+@@ -242,6 +245,7 @@ void rcu_bh_force_quiescent_state(void)
+ force_quiescent_state(&rcu_bh_state, 0);
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
++#endif
+
+ /*
+ * Record the number of times rcutorture tests have been initiated and
+@@ -1669,6 +1673,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue an RCU for invocation after a quicker grace period.
+ */
+@@ -1677,6 +1682,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+ __call_rcu(head, func, &rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ /**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+@@ -1709,6 +1715,7 @@ void synchronize_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+@@ -1725,6 +1732,7 @@ void synchronize_rcu_bh(void)
+ wait_rcu_gp(call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+
+ /*
+ * Check to see if there is any immediate RCU-related work to be done
+@@ -1879,6 +1887,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
+ mutex_unlock(&rcu_barrier_mutex);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+@@ -1887,6 +1896,7 @@ void rcu_barrier_bh(void)
+ _rcu_barrier(&rcu_bh_state, call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+
+ /**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
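In practice the merge means an RCU-bh read-side section on PREEMPT_RT_FULL is an RCU-preempt section with bottom halves disabled around it, and the bh-flavoured update-side primitives collapse onto their preempt-flavoured counterparts. A condensed view of the mappings introduced above (illustration only):

    rcu_read_lock_bh();      /* -> local_bh_disable(); rcu_read_lock();   */
    /* ... reader ... */
    rcu_read_unlock_bh();    /* -> rcu_read_unlock(); local_bh_enable();  */

    call_rcu_bh(head, func);         /* -> call_rcu(head, func)           */
    synchronize_rcu_bh();            /* -> synchronize_rcu()              */
    rcu_barrier_bh();                /* -> rcu_barrier()                  */
    rcu_bh_force_quiescent_state();  /* -> rcu_force_quiescent_state()    */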
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch)
@@ -0,0 +1,40 @@
+From 34d800cf7fbf7b079dc1ae4b5c9f530699c8ea54 Mon Sep 17 00:00:00 2001
+From: John Kacur <jkacur at redhat.com>
+Date: Mon, 14 Nov 2011 02:44:42 +0100
+Subject: [PATCH 206/303] rcu: Fix macro substitution for synchronize_rcu_bh()
+ on RT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+kernel/rcutorture.c:492: error: ‘synchronize_rcu_bh’ undeclared here (not in a function)
+
+synchronize_rcu_bh() is not just called as a normal function, but can
+also be referenced as a function pointer. When CONFIG_PREEMPT_RT_FULL
+is enabled, synchronize_rcu_bh() is defined as synchronize_rcu(), but
+needs to be defined without the parenthesis because the compiler will
+complain when synchronize_rcu_bh is referenced as a function pointer
+and not a function.
+
+Signed-off-by: John Kacur <jkacur at redhat.com>
+Cc: Paul McKenney <paulmck at linux.vnet.ibm.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/1321235083-21756-1-git-send-email-jkacur@redhat.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rcutree.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index 800b840..6e503a3 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -60,7 +60,7 @@ static inline void exit_rcu(void)
+ #ifndef CONFIG_PREEMPT_RT_FULL
+ extern void synchronize_rcu_bh(void);
+ #else
+-# define synchronize_rcu_bh() synchronize_rcu()
++# define synchronize_rcu_bh synchronize_rcu
+ #endif
+ extern void synchronize_sched_expedited(void);
+ extern void synchronize_rcu_expedited(void);
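The distinction matters because a function-like macro is only expanded when the identifier is followed by a parenthesis. When rcutorture stores the symbol in a function pointer there is no call syntax, the macro does not expand, and the name (which has no declaration in the RT configuration) reaches the compiler unresolved. Reduced to its essentials (sync_fn is a made-up variable):

    #define synchronize_rcu_bh() synchronize_rcu()
    void (*sync_fn)(void) = synchronize_rcu_bh;  /* no '(' follows: macro not
                                                    expanded, identifier undeclared */

    #define synchronize_rcu_bh synchronize_rcu
    void (*sync_fn)(void) = synchronize_rcu_bh;  /* object-like macro: substituted
                                                    in every context, including here */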
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0207-rcu-more-fallout.patch.patch)
@@ -0,0 +1,27 @@
+From 0982640f89071e6b7cc56292fcf8a2806bc0d723 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 14 Nov 2011 10:57:54 +0100
+Subject: [PATCH 207/303] rcu-more-fallout.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/rcutiny.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
+index 636af6d..6689097 100644
+--- a/kernel/rcutiny.c
++++ b/kernel/rcutiny.c
+@@ -243,6 +243,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Post an RCU bottom-half callback to be invoked after any subsequent
+ * quiescent state.
+@@ -252,3 +253,4 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+ __call_rcu(head, func, &rcu_bh_ctrlblk);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch)
@@ -0,0 +1,166 @@
+From a66cce5380f0af0c5b394f1613602f67481cf7ae Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
+Date: Wed, 5 Oct 2011 11:45:18 -0700
+Subject: [PATCH 208/303] rcu: Make ksoftirqd do RCU quiescent states
+
+Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
+to network-based denial-of-service attacks. This patch therefore
+makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq()
+is running in ksoftirqd context. A wrapper layer is interposed so that
+other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying
+function __do_softirq_common() does the actual work.
+
+The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is
+that there might be a local_bh_enable() inside an RCU-preempt read-side
+critical section. This local_bh_enable() can invoke __do_softirq()
+directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just
+calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be
+an illegal RCU-preempt quiescent state in the middle of an RCU-preempt
+read-side critical section. Therefore, quiescent states can only happen
+in cases where __do_softirq() is invoked directly from ksoftirqd.
+
+Signed-off-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/rcupdate.h | 6 ------
+ kernel/rcutree.c | 7 ++++++-
+ kernel/rcutree.h | 1 +
+ kernel/rcutree_plugin.h | 2 +-
+ kernel/softirq.c | 20 +++++++++++++-------
+ 5 files changed, 21 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 7c31d86..0e6fb5c 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -185,13 +185,7 @@ static inline int rcu_preempt_depth(void)
+
+ /* Internal to kernel */
+ extern void rcu_sched_qs(int cpu);
+-
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void rcu_bh_qs(int cpu);
+-#else
+-static inline void rcu_bh_qs(int cpu) { }
+-#endif
+-
+ extern void rcu_check_callbacks(int cpu, int user);
+ struct notifier_block;
+
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index f794cdf..5ebbd13 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -170,7 +170,12 @@ void rcu_sched_qs(int cpu)
+ rdp->passed_quiesce = 1;
+ }
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_qs(int cpu)
++{
++ rcu_preempt_qs(cpu);
++}
++#else
+ void rcu_bh_qs(int cpu)
+ {
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index dca495d..b522273 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -430,6 +430,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
++static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 2e63942..936441d 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -1933,7 +1933,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+ #endif /* #else #ifndef CONFIG_SMP */
+
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if 1 /* !defined(CONFIG_RCU_FAST_NO_HZ) */
+
+ /*
+ * Check to see if any future RCU-related work will need to be done
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index bef08f3..ca00a68 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -139,7 +139,7 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
+-static void handle_pending_softirqs(u32 pending, int cpu)
++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
+ {
+ struct softirq_action *h = softirq_vec;
+ unsigned int prev_count = preempt_count();
+@@ -162,7 +162,8 @@ static void handle_pending_softirqs(u32 pending, int cpu)
+ prev_count, (unsigned int) preempt_count());
+ preempt_count() = prev_count;
+ }
+- rcu_bh_qs(cpu);
++ if (need_rcu_bh_qs)
++ rcu_bh_qs(cpu);
+ }
+ local_irq_disable();
+ }
+@@ -314,7 +315,7 @@ restart:
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- handle_pending_softirqs(pending, cpu);
++ handle_pending_softirqs(pending, cpu, 1);
+
+ pending = local_softirq_pending();
+ if (pending && --max_restart)
+@@ -384,7 +385,12 @@ static inline void ksoftirqd_clr_sched_params(void) { }
+ static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+ static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+-static void __do_softirq(void);
++static void __do_softirq_common(int need_rcu_bh_qs);
++
++void __do_softirq(void)
++{
++ __do_softirq_common(0);
++}
+
+ void __init softirq_early_init(void)
+ {
+@@ -455,7 +461,7 @@ EXPORT_SYMBOL(in_serving_softirq);
+ * Called with bh and local interrupts disabled. For full RT cpu must
+ * be pinned.
+ */
+-static void __do_softirq(void)
++static void __do_softirq_common(int need_rcu_bh_qs)
+ {
+ u32 pending = local_softirq_pending();
+ int cpu = smp_processor_id();
+@@ -469,7 +475,7 @@ static void __do_softirq(void)
+
+ lockdep_softirq_enter();
+
+- handle_pending_softirqs(pending, cpu);
++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
+
+ pending = local_softirq_pending();
+ if (pending)
+@@ -508,7 +514,7 @@ static int __thread_do_softirq(int cpu)
+ * schedule!
+ */
+ if (local_softirq_pending())
+- __do_softirq();
++ __do_softirq_common(cpu >= 0);
+ local_unlock(local_softirq_lock);
+ unpin_current_cpu();
+ preempt_disable();
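The hazard the need_rcu_bh_qs flag guards against can be summarised as a call chain; everything below follows from the commit message and the PREEMPT_RT_FULL definition of rcu_bh_qs() earlier in this series:

    rcu_read_lock();                /* RCU-preempt read-side critical section   */
        local_bh_disable();
        /* ... */
        local_bh_enable();
            -> __do_softirq()       /* invoked directly, not from ksoftirqd     */
                -> rcu_bh_qs(cpu)   /* == rcu_preempt_qs() on PREEMPT_RT_FULL,
                                       i.e. a quiescent state inside the
                                       read-side section, which is illegal      */
    rcu_read_unlock();

    /* hence __do_softirq() now calls __do_softirq_common(0), and only the
       ksoftirqd path passes need_rcu_bh_qs = 1 */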
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch)
@@ -0,0 +1,47 @@
+From 95e6111c12172b3e5e6748bef1fe2dc522ad128a Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Wed, 14 Dec 2011 12:51:28 +0100
+Subject: [PATCH 209/303] rt/rcutree: Move misplaced prototype
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix this warning on x86 defconfig:
+
+ kernel/rcutree.h:433:13: warning: ‘rcu_preempt_qs’ declared ‘static’ but never defined [-Wunused-function]
+
+The #ifdefs and prototypes here are a maze, move it closer to the
+usage site that needs it.
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/rcutree.c | 2 ++
+ kernel/rcutree.h | 1 -
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 5ebbd13..82c2224 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -171,6 +171,8 @@ void rcu_sched_qs(int cpu)
+ }
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
++static void rcu_preempt_qs(int cpu);
++
+ void rcu_bh_qs(int cpu)
+ {
+ rcu_preempt_qs(cpu);
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index b522273..dca495d 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -430,7 +430,6 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
+-static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0210-lglocks-rt.patch.patch)
@@ -0,0 +1,125 @@
+From b9bf25e20d6a1c9c75d1eee5a414a028d10d5594 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 15 Jun 2011 11:02:21 +0200
+Subject: [PATCH 210/303] lglocks-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/lglock.h | 99 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 99 insertions(+)
+
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index 87f402c..52b289f 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -71,6 +71,8 @@
+ extern void name##_global_lock_online(void); \
+ extern void name##_global_unlock_online(void); \
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+@@ -197,4 +199,101 @@
+ preempt_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
++
++#else /* !PREEMPT_RT_FULL */
++#define DEFINE_LGLOCK(name) \
++ \
++ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
++ DEFINE_LGLOCK_LOCKDEP(name); \
++ \
++ void name##_lock_init(void) { \
++ int i; \
++ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
++ for_each_possible_cpu(i) { \
++ struct rt_mutex *lock; \
++ lock = &per_cpu(name##_lock, i); \
++ rt_mutex_init(lock); \
++ } \
++ } \
++ EXPORT_SYMBOL(name##_lock_init); \
++ \
++ void name##_local_lock(void) { \
++ struct rt_mutex *lock; \
++ migrate_disable(); \
++ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
++ lock = &__get_cpu_var(name##_lock); \
++ __rt_spin_lock(lock); \
++ } \
++ EXPORT_SYMBOL(name##_local_lock); \
++ \
++ void name##_local_unlock(void) { \
++ struct rt_mutex *lock; \
++ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
++ lock = &__get_cpu_var(name##_lock); \
++ __rt_spin_unlock(lock); \
++ migrate_enable(); \
++ } \
++ EXPORT_SYMBOL(name##_local_unlock); \
++ \
++ void name##_local_lock_cpu(int cpu) { \
++ struct rt_mutex *lock; \
++ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
++ lock = &per_cpu(name##_lock, cpu); \
++ __rt_spin_lock(lock); \
++ } \
++ EXPORT_SYMBOL(name##_local_lock_cpu); \
++ \
++ void name##_local_unlock_cpu(int cpu) { \
++ struct rt_mutex *lock; \
++ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
++ lock = &per_cpu(name##_lock, cpu); \
++ __rt_spin_unlock(lock); \
++ } \
++ EXPORT_SYMBOL(name##_local_unlock_cpu); \
++ \
++ void name##_global_lock_online(void) { \
++ int i; \
++ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
++ for_each_online_cpu(i) { \
++ struct rt_mutex *lock; \
++ lock = &per_cpu(name##_lock, i); \
++ __rt_spin_lock(lock); \
++ } \
++ } \
++ EXPORT_SYMBOL(name##_global_lock_online); \
++ \
++ void name##_global_unlock_online(void) { \
++ int i; \
++ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
++ for_each_online_cpu(i) { \
++ struct rt_mutex *lock; \
++ lock = &per_cpu(name##_lock, i); \
++ __rt_spin_unlock(lock); \
++ } \
++ } \
++ EXPORT_SYMBOL(name##_global_unlock_online); \
++ \
++ void name##_global_lock(void) { \
++ int i; \
++ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
++ for_each_possible_cpu(i) { \
++ struct rt_mutex *lock; \
++ lock = &per_cpu(name##_lock, i); \
++ __rt_spin_lock(lock); \
++ } \
++ } \
++ EXPORT_SYMBOL(name##_global_lock); \
++ \
++ void name##_global_unlock(void) { \
++ int i; \
++ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
++ for_each_possible_cpu(i) { \
++ struct rt_mutex *lock; \
++ lock = &per_cpu(name##_lock, i); \
++ __rt_spin_unlock(lock); \
++ } \
++ } \
++ EXPORT_SYMBOL(name##_global_unlock);
++#endif /* PREEMPT_RT_FULL */
++
+ #endif
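On PREEMPT_RT_FULL the per-CPU spinlocks behind an lglock become per-CPU rt_mutexes, so the local side sleeps and pins the task instead of disabling preemption, while the caller-visible entry points keep their names. A usage sketch with a hypothetical lglock (the generated functions correspond to the macro above):

    DEFINE_LGLOCK(example_lglock);

    example_lglock_lock_init();     /* rt_mutex_init() on every possible CPU    */

    example_lglock_local_lock();    /* migrate_disable() + __rt_spin_lock() on
                                       this CPU's rt_mutex                       */
    /* ... per-cpu critical section ... */
    example_lglock_local_unlock();  /* __rt_spin_unlock() + migrate_enable()     */

    example_lglock_global_lock();   /* takes every possible CPU's rt_mutex       */
    example_lglock_global_unlock();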
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch)
@@ -0,0 +1,44 @@
+From 5c7f6d11db6179312e8351cce27200daf57d8579 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Fri, 3 Jul 2009 08:30:01 -0500
+Subject: [PATCH 211/303] serial: 8250: Clean up the locking for -rt
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/tty/serial/8250.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 6748568..b1695d1 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -2858,14 +2858,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
+
+ touch_nmi_watchdog();
+
+- local_irq_save(flags);
+- if (up->port.sysrq) {
+- /* serial8250_handle_port() already took the lock */
+- locked = 0;
+- } else if (oops_in_progress) {
+- locked = spin_trylock(&up->port.lock);
+- } else
+- spin_lock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
++ else
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -2897,8 +2893,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
+ check_modem_status(up);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init serial8250_console_setup(struct console *co, char *options)
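
The change above collapses the sysrq/oops special cases into one rule: take the port lock with interrupts saved, but only try-lock when the lock may already be held by the crashing path, and only unlock what was actually taken. A small userspace sketch of that pattern follows; a pthread mutex stands in for the port spinlock and a single flag for the sysrq/oops condition, so this is illustrative only.

/*
 * Console-write locking pattern: lock normally, try-lock when an
 * oops may already hold the lock, and remember whether to unlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int oops_in_progress;

static void console_write(const char *s)
{
    int locked = 1;

    if (oops_in_progress)
        locked = (pthread_mutex_trylock(&port_lock) == 0);
    else
        pthread_mutex_lock(&port_lock);

    fputs(s, stdout);            /* emit the message even if unlocked */

    if (locked)
        pthread_mutex_unlock(&port_lock);
}

int main(void)
{
    console_write("normal path\n");
    oops_in_progress = 1;
    console_write("oops path\n");
    return 0;
}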
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch)
@@ -0,0 +1,50 @@
+From ec65eaf85034ac0c56116757144ad98133cd2a95 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Fri, 3 Jul 2009 08:30:01 -0500
+Subject: [PATCH 212/303] serial: 8250: Call flush_to_ldisc when the irq is
+ threaded
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+---
+ drivers/tty/serial/8250.c | 2 ++
+ drivers/tty/tty_buffer.c | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index b1695d1..d4d0c34 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -1642,12 +1642,14 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
+
+ l = l->next;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ /* If we hit this, we're dead. */
+ printk_ratelimited(KERN_ERR
+ "serial8250: too much work for irq%d\n", irq);
+ break;
+ }
++#endif
+ } while (l != end);
+
+ spin_unlock(&i->lock);
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 6c9b7cd..a56c223 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -493,10 +493,14 @@ void tty_flip_buffer_push(struct tty_struct *tty)
+ tty->buf.tail->commit = tty->buf.tail->used;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (tty->low_latency)
+ flush_to_ldisc(&tty->buf.work);
+ else
+ schedule_work(&tty->buf.work);
++#else
++ flush_to_ldisc(&tty->buf.work);
++#endif
+ }
+ EXPORT_SYMBOL(tty_flip_buffer_push);
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch)
@@ -0,0 +1,40 @@
+From 05be65d4a6f4b94ad2cfaf14c451e38dc6cb63f4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 28 Jul 2011 13:32:57 +0200
+Subject: [PATCH 213/303] drivers-tty-fix-omap-lock-crap.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/tty/serial/omap-serial.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 5e713d3..93cdb92 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -946,13 +946,12 @@ serial_omap_console_write(struct console *co, const char *s,
+ unsigned int ier;
+ int locked = 1;
+
+- local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -979,8 +978,7 @@ serial_omap_console_write(struct console *co, const char *s,
+ check_modem_status(up);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch)
@@ -0,0 +1,58 @@
+From 4d059948ac201424338cfeac72e3c16189a54cbf Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo at elte.hu>
+Date: Wed, 14 Dec 2011 13:05:54 +0100
+Subject: [PATCH 214/303] rt: Improve the serial console PASS_LIMIT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Beyond the warning:
+
+ drivers/tty/serial/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable]
+
+the solution of just looping infinitely was ugly - up it to 1 million to
+give it a chance to continue in some really ugly situation.
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/tty/serial/8250.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index d4d0c34..0101b2c 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -81,7 +81,16 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
+ #define DEBUG_INTR(fmt...) do { } while (0)
+ #endif
+
+-#define PASS_LIMIT 512
++/*
++ * On -rt we can have more delays, and legitimately
++ * so - so don't drop work spuriously and spam the
++ * syslog:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define PASS_LIMIT 1000000
++#else
++# define PASS_LIMIT 512
++#endif
+
+ #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+@@ -1642,14 +1651,12 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
+
+ l = l->next;
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ /* If we hit this, we're dead. */
+ printk_ratelimited(KERN_ERR
+ "serial8250: too much work for irq%d\n", irq);
+ break;
+ }
+-#endif
+ } while (l != end);
+
+ spin_unlock(&i->lock);
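
With the limit restored, the interrupt handler keeps its bounded service loop on -rt too, only with a much larger bound. A trivial userspace sketch of that loop shape is below; PREEMPT_RT_FULL here is just a compile-time switch for the example, not the kernel config machinery.

/*
 * Bounded interrupt service loop: keep working while there is work,
 * but bail out (and complain) after PASS_LIMIT passes.
 */
#include <stdio.h>

#ifdef PREEMPT_RT_FULL
# define PASS_LIMIT 1000000
#else
# define PASS_LIMIT 512
#endif

int main(void)
{
    unsigned int pass_counter = 0;
    int work_pending = 1;

    while (work_pending) {
        /* ... service one port ... */
        work_pending = 0;                /* pretend the work drained */
        if (pass_counter++ > PASS_LIMIT) {
            fprintf(stderr, "too much work for irq\n");
            break;
        }
    }
    printf("passes: %u (limit %d)\n", pass_counter, PASS_LIMIT);
    return 0;
}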
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0215-fs-namespace-preemption-fix.patch)
@@ -0,0 +1,45 @@
+From 2cde1ef5d1837799cc45692959e06b583094a523 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 19 Jul 2009 08:44:27 -0500
+Subject: [PATCH 215/303] fs: namespace preemption fix
+
+On RT we cannot loop with preemption disabled here as
+mnt_make_readonly() might have been preempted. We can safely enable
+preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
+as well.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ fs/namespace.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index ca4913a..644dbde 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -341,8 +341,14 @@ int mnt_want_write(struct vfsmount *mnt)
+ * incremented count after it has set MNT_WRITE_HOLD.
+ */
+ smp_mb();
+- while (mnt->mnt_flags & MNT_WRITE_HOLD)
++ /*
++ * No need to keep preemption disabled across the spin loop.
++ */
++ while (mnt->mnt_flags & MNT_WRITE_HOLD) {
++ preempt_enable();
+ cpu_relax();
++ preempt_disable();
++ }
+ /*
+ * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ * be set to match its requirements. So we must not load that until
+@@ -352,9 +358,7 @@ int mnt_want_write(struct vfsmount *mnt)
+ if (__mnt_is_readonly(mnt)) {
+ mnt_dec_writers(mnt);
+ ret = -EROFS;
+- goto out;
+ }
+-out:
+ preempt_enable();
+ return ret;
+ }
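
The fix keeps the wait loop but stops monopolising the CPU while spinning: preemption is re-enabled around each cpu_relax() so the holder of MNT_WRITE_HOLD can actually run and clear it. A userspace analog of that loop shape follows, with sched_yield() standing in for the preempt_enable()/cpu_relax()/preempt_disable() sequence; illustrative only.

/*
 * Spin on a flag, but yield on every iteration instead of spinning
 * with the CPU monopolised, so the flag's owner can make progress.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int write_hold = 1;

static void wait_for_write_hold(void)
{
    while (atomic_load(&write_hold))
        sched_yield();   /* analogous to preempt_enable(); cpu_relax(); preempt_disable() */
}

int main(void)
{
    atomic_store(&write_hold, 0);        /* pretend the holder cleared it */
    wait_for_write_hold();
    puts("MNT_WRITE_HOLD-style wait loop finished");
    return 0;
}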
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch)
@@ -0,0 +1,48 @@
+From 9d329920ed85838cb410be28b8d20150ddcdf0ff Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 4 Jul 2011 09:48:40 +0200
+Subject: [PATCH 216/303] mm-protect-activate-switch-mm.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ fs/exec.c | 2 ++
+ mm/mmu_context.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 312e297..1e38ffa 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -837,10 +837,12 @@ static int exec_mmap(struct mm_struct *mm)
+ }
+ }
+ task_lock(tsk);
++ local_irq_disable_rt();
+ active_mm = tsk->active_mm;
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
++ local_irq_enable_rt();
+ task_unlock(tsk);
+ arch_pick_mmap_layout(mm);
+ if (old_mm) {
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index cf332bc..64ce279 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ local_irq_disable_rt();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+@@ -33,6 +34,7 @@ void use_mm(struct mm_struct *mm)
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
++ local_irq_enable_rt();
+ task_unlock(tsk);
+
+ if (active_mm != mm)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0217-fs-block-rt-support.patch.patch)
@@ -0,0 +1,45 @@
+From ef76a57ce302406b0f66e64099b4126c536ec607 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 14 Jun 2011 17:05:09 +0200
+Subject: [PATCH 217/303] fs-block-rt-support.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ block/blk-core.c | 2 +-
+ fs/file.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 33bf42c..25e47e8 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+diff --git a/fs/file.c b/fs/file.c
+index 375472d..fd03258 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *rcu)
+ kfree(fdt->open_fds);
+ kfree(fdt);
+ } else {
+- fddef = &get_cpu_var(fdtable_defer_list);
++ fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
+ spin_lock(&fddef->lock);
+ fdt->next = fddef->next;
+ fddef->next = fdt;
+ /* vmallocs are handled from the workqueue context */
+ schedule_work(&fddef->wq);
+ spin_unlock(&fddef->lock);
+- put_cpu_var(fdtable_defer_list);
++ put_cpu_light();
+ }
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch)
@@ -0,0 +1,61 @@
+From aca41c3c8376db3bff4fca840e03b8c01ad87098 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <efault at gmx.de>
+Date: Fri, 3 Jul 2009 08:44:12 -0500
+Subject: [PATCH 218/303] fs: ntfs: disable interrupt only on !RT
+
+On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
+> * Nick Piggin <nickpiggin at yahoo.com.au> wrote:
+>
+> > > [10138.175796] [<c0105de3>] show_trace+0x12/0x14
+> > > [10138.180291] [<c0105dfb>] dump_stack+0x16/0x18
+> > > [10138.184769] [<c011609f>] native_smp_call_function_mask+0x138/0x13d
+> > > [10138.191117] [<c0117606>] smp_call_function+0x1e/0x24
+> > > [10138.196210] [<c012f85c>] on_each_cpu+0x25/0x50
+> > > [10138.200807] [<c0115c74>] flush_tlb_all+0x1e/0x20
+> > > [10138.205553] [<c016caaf>] kmap_high+0x1b6/0x417
+> > > [10138.210118] [<c011ec88>] kmap+0x4d/0x4f
+> > > [10138.214102] [<c026a9d8>] ntfs_end_buffer_async_read+0x228/0x2f9
+> > > [10138.220163] [<c01a0e9e>] end_bio_bh_io_sync+0x26/0x3f
+> > > [10138.225352] [<c01a2b09>] bio_endio+0x42/0x6d
+> > > [10138.229769] [<c02c2a08>] __end_that_request_first+0x115/0x4ac
+> > > [10138.235682] [<c02c2da7>] end_that_request_chunk+0x8/0xa
+> > > [10138.241052] [<c0365943>] ide_end_request+0x55/0x10a
+> > > [10138.246058] [<c036dae3>] ide_dma_intr+0x6f/0xac
+> > > [10138.250727] [<c0366d83>] ide_intr+0x93/0x1e0
+> > > [10138.255125] [<c015afb4>] handle_IRQ_event+0x5c/0xc9
+> >
+> > Looks like ntfs is kmap()ing from interrupt context. Should be using
+> > kmap_atomic instead, I think.
+>
+> it's not atomic interrupt context but irq thread context - and -rt
+> remaps kmap_atomic() to kmap() internally.
+
+Hm. Looking at the change to mm/bounce.c, perhaps I should do this
+instead?
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ fs/ntfs/aops.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
+index 7fb7f1b..4c8095c 100644
+--- a/fs/ntfs/aops.c
++++ b/fs/ntfs/aops.c
+@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ recs = PAGE_CACHE_SIZE / rec_size;
+ /* Should have been verified before we got here... */
+ BUG_ON(!recs);
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ for (i = 0; i < recs; i++)
+ post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+ i * rec_size), rec_size);
+ kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ flush_dcache_page(page);
+ if (likely(page_uptodate && !PageError(page)))
+ SetPageUptodate(page);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch)
@@ -0,0 +1,147 @@
+From 07c851f2a998eae7d91572ad52c3a9ebb07588d0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 13 Dec 2010 16:33:39 +0100
+Subject: [PATCH 219/303] x86: Convert mce timer to hrtimer
+
+mce_timer is started in atomic contexts of cpu bringup. This results
+in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
+avoid this.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 49 ++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 26 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 3b67877..7e4f230 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -38,6 +38,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+@@ -1112,17 +1113,14 @@ void mce_log_therm_throt_event(__u64 status)
+ * poller finds an MCE, poll 2x faster. When the poller finds no more
+ * errors, poll 2x slower (up to check_interval seconds).
+ */
+-static int check_interval = 5 * 60; /* 5 minutes */
++static unsigned long check_interval = 5 * 60; /* 5 minutes */
+
+-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+-static void mce_start_timer(unsigned long data)
++static enum hrtimer_restart mce_start_timer(struct hrtimer *timer)
+ {
+- struct timer_list *t = &per_cpu(mce_timer, data);
+- int *n;
+-
+- WARN_ON(smp_processor_id() != data);
++ unsigned long *n;
+
+ if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ machine_check_poll(MCP_TIMESTAMP,
+@@ -1135,21 +1133,22 @@ static void mce_start_timer(unsigned long data)
+ */
+ n = &__get_cpu_var(mce_next_interval);
+ if (mce_notify_irq())
+- *n = max(*n/2, HZ/100);
++ *n = max(*n/2, HZ/100UL);
+ else
+- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
++ *n = min(*n*2, round_jiffies_relative(check_interval*HZ));
+
+- t->expires = jiffies + *n;
+- add_timer_on(t, smp_processor_id());
++ hrtimer_forward(timer, timer->base->get_time(),
++ ns_to_ktime(jiffies_to_usecs(*n) * 1000));
++ return HRTIMER_RESTART;
+ }
+
+-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+ static void mce_timer_delete_all(void)
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1381,10 +1380,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = &__get_cpu_var(mce_timer);
+- int *n = &__get_cpu_var(mce_next_interval);
++ struct hrtimer *t = &__get_cpu_var(mce_timer);
++ unsigned long *n = &__get_cpu_var(mce_next_interval);
+
+- setup_timer(t, mce_start_timer, smp_processor_id());
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_start_timer;
+
+ if (mce_ignore_ce)
+ return;
+@@ -1392,8 +1392,9 @@ static void __mcheck_cpu_init_timer(void)
+ *n = check_interval * HZ;
+ if (!*n)
+ return;
+- t->expires = round_jiffies(jiffies + *n);
+- add_timer_on(t, smp_processor_id());
++
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000),
++ 0 , HRTIMER_MODE_REL_PINNED);
+ }
+
+ /* Handle unconfigured int18 (should never happen) */
+@@ -2029,6 +2030,8 @@ static void __cpuinit mce_disable_cpu(void *h)
+ if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(&__get_cpu_var(mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+ for (i = 0; i < banks; i++) {
+@@ -2055,6 +2058,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
+ if (b->init)
+ wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2062,7 +2066,6 @@ static int __cpuinit
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action) {
+ case CPU_ONLINE:
+@@ -2079,16 +2082,10 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+- del_timer_sync(t);
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+- if (!mce_ignore_ce && check_interval) {
+- t->expires = round_jiffies(jiffies +
+- __get_cpu_var(mce_next_interval));
+- add_timer_on(t, cpu);
+- }
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+ break;
+ case CPU_POST_DEAD:
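
Independent of the switch from timer_list to hrtimer, the polling rate logic stays the same: poll twice as fast after the poller finds an MCE, back off towards check_interval when it finds nothing. A small sketch of that interval adaptation in plain C is below; HZ and the bounds are illustrative values, not the kernel's.

/*
 * Adaptive polling interval: halve on work found (floored at ~10 ms),
 * double when idle (capped at check_interval).
 */
#include <stdio.h>

#define HZ 250UL
static unsigned long check_interval = 5 * 60;    /* seconds */

static unsigned long next_interval(unsigned long n, int found_work)
{
    if (found_work)
        return n / 2 > HZ / 100 ? n / 2 : HZ / 100;        /* floor */
    n *= 2;
    return n < check_interval * HZ ? n : check_interval * HZ; /* ceiling */
}

int main(void)
{
    unsigned long n = check_interval * HZ;

    n = next_interval(n, 1);    /* error seen: poll faster */
    printf("after work:  %lu jiffies\n", n);
    n = next_interval(n, 0);    /* quiet: back off */
    printf("after quiet: %lu jiffies\n", n);
    return 0;
}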
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch)
@@ -0,0 +1,49 @@
+From 1073c58ac3c97834e46ca50ec5e5f6d377a0b95b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 16 Dec 2010 14:25:18 +0100
+Subject: [PATCH 220/303] x86: stackprotector: Avoid random pool on rt
+
+CPU bringup calls into the random pool to initialize the stack
+canary. During boot that works nicely even on RT as the might sleep
+checks are disabled. During CPU hotplug the might sleep checks
+trigger. Making the locks in random raw is a major PITA, so avoiding the
+call on RT is the only sensible solution. This is basically the same
+randomness which we get during boot where the random pool has no
+entropy and we rely on the TSC randomness.
+
+Reported-by: Carsten Emde <carsten.emde at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/include/asm/stackprotector.h | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 1575177..ac0703b 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -58,7 +58,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -69,8 +69,16 @@ static __always_inline void boot_init_stack_canary(void)
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ *
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of
++ * it.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = __native_read_tsc();
+ canary += tsc + (tsc << 32UL);
+
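
On RT the canary therefore ends up seeded only by the cycle counter. The sketch below shows what that weakened initialisation amounts to in userspace terms; clock_gettime() stands in for __native_read_tsc(), and skipping the random-pool step mirrors the #ifndef above. This is not the kernel implementation.

/*
 * Weakened canary initialisation: no random-pool input, only a
 * timestamp folded in, as on PREEMPT_RT_FULL.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t boot_init_stack_canary_rt(void)
{
    struct timespec ts;
    uint64_t canary = 0;    /* kernel leaves this uninitialised; 0 here for a defined result */
    uint64_t tsc;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    tsc = (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    canary += tsc + (tsc << 32);    /* same mixing step the patch keeps */
    return canary;
}

int main(void)
{
    printf("canary: %#llx\n",
           (unsigned long long)boot_init_stack_canary_rt());
    return 0;
}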
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch)
@@ -0,0 +1,30 @@
+From 578e8dc5b208e5838b8f92fc977068abecbdb272 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 26 Jul 2009 02:21:32 +0200
+Subject: [PATCH 221/303] x86: Use generic rwsem_spinlocks on -rt
+
+Simplifies the separation of anon_rw_semaphores and rw_semaphores for
+-rt.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9a42703..0941e6b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -161,10 +161,10 @@ config ARCH_MAY_HAVE_PC_FDC
+ def_bool ISA_DMA_API
+
+ config RWSEM_GENERIC_SPINLOCK
+- def_bool !X86_XADD
++ def_bool !X86_XADD || PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool X86_XADD
++ def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config ARCH_HAS_CPU_IDLE_WAIT
+ def_bool y
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch)
@@ -0,0 +1,110 @@
+From 9d6eba60a4198861c4c957f884aa3e95d0bc983f Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak at suse.de>
+Date: Fri, 3 Jul 2009 08:44:10 -0500
+Subject: [PATCH 222/303] x86: Disable IST stacks for debug/int 3/stack fault
+ for PREEMPT_RT
+
+Normally the x86-64 trap handlers for debug/int 3/stack fault run
+on a special interrupt stack to make them more robust
+when dealing with kernel code.
+
+The PREEMPT_RT kernel can sleep in locks even while allocating
+GFP_ATOMIC memory. When one of these trap handlers needs to send
+real time signals for ptrace it allocates memory and could then
+try to schedule. But it is not allowed to schedule on an
+IST stack. This can cause warnings and hangs.
+
+This patch disables the IST stacks for these handlers for PREEMPT_RT
+kernel. Instead let them run on the normal process stack.
+
+The kernel only really needs the ISTs here to make kernel debuggers more
+robust in case someone sets a break point somewhere where the stack is
+invalid. But there are no kernel debuggers in the standard kernel
+that do this.
+
+It also means kprobes cannot be set in situations with invalid stack;
+but that sounds like a reasonable restriction.
+
+The stack fault change could minimally impact oops quality, but not very
+much because stack faults are fairly rare.
+
+A better solution would be to use similar logic as the NMI "paranoid"
+path: check if signal is for user space, if yes go back to entry.S, switch stack,
+call sync_regs, then do the signal sending etc.
+
+But this patch is much simpler and should work too with minimal impact.
+
+Signed-off-by: Andi Kleen <ak at suse.de>
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/include/asm/page_64_types.h | 21 +++++++++++++++------
+ arch/x86/kernel/cpu/common.c | 2 ++
+ arch/x86/kernel/dumpstack_64.c | 4 ++++
+ 3 files changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 7639dbf..0883ecd 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,21 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define STACKFAULT_STACK 0
++# define DOUBLEFAULT_STACK 1
++# define NMI_STACK 2
++# define DEBUG_STACK 0
++# define MCE_STACK 3
++# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */
++#else
++# define STACKFAULT_STACK 1
++# define DOUBLEFAULT_STACK 2
++# define NMI_STACK 3
++# define DEBUG_STACK 4
++# define MCE_STACK 5
++# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++#endif
+
+ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ca93cc7..edc013e 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1050,7 +1050,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+ */
+ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
++#if DEBUG_STACK > 0
+ [DEBUG_STACK - 1] = DEBUG_STKSZ
++#endif
+ };
+
+ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 6d728d9..352beb7 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -21,10 +21,14 @@
+ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
+
+ static char x86_stack_ids[][8] = {
++#if DEBUG_STACK > 0
+ [ DEBUG_STACK-1 ] = "#DB",
++#endif
+ [ NMI_STACK-1 ] = "NMI",
+ [ DOUBLEFAULT_STACK-1 ] = "#DF",
++#if STACKFAULT_STACK > 0
+ [ STACKFAULT_STACK-1 ] = "#SS",
++#endif
+ [ MCE_STACK-1 ] = "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+ [ N_EXCEPTION_STACKS ...
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch)
@@ -0,0 +1,25 @@
+From fd84b2870acea680b989a3ddf982b46afb01b10e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 21:42:26 +0200
+Subject: [PATCH 223/303] workqueue-use-get-cpu-light.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/workqueue.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9e99e10..e9c062e 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1083,8 +1083,8 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+ {
+ int ret;
+
+- ret = queue_work_on(get_cpu(), wq, work);
+- put_cpu();
++ ret = queue_work_on(get_cpu_light(), wq, work);
++ put_cpu_light();
+
+ return ret;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0224-epoll.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0224-epoll.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0224-epoll.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0224-epoll.patch.patch)
@@ -0,0 +1,29 @@
+From c2ba79a8203fcca4318a0debc2d1a55879be9e22 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 8 Jul 2011 16:35:35 +0200
+Subject: [PATCH 224/303] epoll.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ fs/eventpoll.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 451b9b8..3ed4aca 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -464,12 +464,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
+ */
+ static void ep_poll_safewake(wait_queue_head_t *wq)
+ {
+- int this_cpu = get_cpu();
++ int this_cpu = get_cpu_light();
+
+ ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
+ ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0225-mm-vmalloc.patch.patch)
@@ -0,0 +1,67 @@
+From d736217e938f29f79463128e4438913715874c8c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 12 Jul 2011 11:39:36 +0200
+Subject: [PATCH 225/303] mm-vmalloc.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ mm/vmalloc.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index eeba3bb..66df815 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+
+ node = numa_node_id();
+
+@@ -821,12 +821,13 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+ BUG_ON(err);
+ radix_tree_preload_end();
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ vb->vbq = vbq;
+ spin_lock(&vbq->lock);
+ list_add_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vb;
+ }
+@@ -900,7 +901,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ unsigned long addr = 0;
+ unsigned int order;
+- int purge = 0;
++ int purge = 0, cpu;
+
+ BUG_ON(size & ~PAGE_MASK);
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -908,7 +909,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+
+ again:
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ int i;
+
+@@ -945,7 +947,7 @@ next:
+ if (purge)
+ purge_fragmented_blocks_thiscpu();
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ if (!addr) {
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0226-debugobjects-rt.patch.patch)
@@ -0,0 +1,37 @@
+From dcfa273215e90d4bd0e196c3066353ddce27c50a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 21:41:35 +0200
+Subject: [PATCH 226/303] debugobjects-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ lib/debugobjects.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index a78b7c6..9b622c9 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -306,7 +306,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+ struct debug_obj *obj;
+ unsigned long flags;
+
+- fill_pool();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (preempt_count() == 0 && !irqs_disabled())
++#endif
++ fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+@@ -1015,9 +1018,9 @@ static int __init debug_objects_replace_static_objects(void)
+ }
+ }
+
++ local_irq_enable();
+ printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+ obj_pool_used);
+- local_irq_enable();
+ return 0;
+ free:
+ hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0227-jump-label-rt.patch.patch)
@@ -0,0 +1,23 @@
+From 84d1ee580122b45b6564bf63a459738866e80c0a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 13 Jul 2011 11:03:16 +0200
+Subject: [PATCH 227/303] jump-label-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/jump_label.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index 388b0d4..9cc8ed9 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -4,7 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && !defined(CONFIG_PREEMPT_BASE)
+
+ struct jump_label_key {
+ atomic_t enabled;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0228-skbufhead-raw-lock.patch.patch)
@@ -0,0 +1,134 @@
+From 77a549b52c9cd6b561637e6d7f25d9ee6aa623ab Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 12 Jul 2011 15:38:34 +0200
+Subject: [PATCH 228/303] skbufhead-raw-lock.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/netdevice.h | 1 +
+ include/linux/skbuff.h | 7 +++++++
+ net/core/dev.c | 26 ++++++++++++++++++++------
+ 3 files changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 00ca32b..9ec24b3 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1755,6 +1755,7 @@ struct softnet_data {
+ unsigned dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
++ struct sk_buff_head tofree_queue;
+ };
+
+ static inline void input_queue_head_incr(struct softnet_data *sd)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 53dc7e7..5db4d1e 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -124,6 +124,7 @@ struct sk_buff_head {
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -923,6 +924,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index e04372e..1943426 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -222,14 +222,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -3406,7 +3406,7 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
+@@ -3415,10 +3415,13 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
++
++ if (!skb_queue_empty(&sd->tofree_queue))
++ raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+
+ static int napi_gro_complete(struct sk_buff *skb)
+@@ -3902,10 +3905,17 @@ static void net_rx_action(struct softirq_action *h)
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ unsigned long time_limit = jiffies + 2;
+ int budget = netdev_budget;
++ struct sk_buff *skb;
+ void *have;
+
+ local_irq_disable();
+
++ while ((skb = __skb_dequeue(&sd->tofree_queue))) {
++ local_irq_enable();
++ kfree_skb(skb);
++ local_irq_disable();
++ }
++
+ while (!list_empty(&sd->poll_list)) {
+ struct napi_struct *n;
+ int work, weight;
+@@ -6342,6 +6352,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+ netif_rx(skb);
+ input_queue_head_incr(oldsd);
+ }
++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++ kfree_skb(skb);
++ }
+
+ return NOTIFY_OK;
+ }
+@@ -6609,8 +6622,9 @@ static int __init net_dev_init(void)
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
+
+ memset(sd, 0, sizeof(*sd));
+- skb_queue_head_init(&sd->input_pkt_queue);
+- skb_queue_head_init(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->tofree_queue);
+ sd->completion_queue = NULL;
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue = NULL;
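
The structural change here is that flush_backlog() no longer frees skbs under the (now raw) queue lock; it only unlinks them onto a per-CPU tofree_queue, and net_rx_action() frees them later with interrupts enabled. A userspace sketch of that unlink-now/free-later pattern follows, using a plain singly linked list and malloc/free in place of sk_buffs; illustrative only.

/*
 * Deferred-free pattern: unlink matching items under the "lock",
 * move them to a tofree list, and free them later outside the
 * locked/irq-off region.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
    struct item *next;
    int id;
};

static struct item *input_queue;    /* would be protected by a raw lock */
static struct item *tofree_queue;

static void flush_matching(int id)
{
    struct item **pp = &input_queue;

    while (*pp) {
        struct item *it = *pp;
        if (it->id == id) {
            *pp = it->next;              /* unlink under the "lock"... */
            it->next = tofree_queue;     /* ...but defer the free */
            tofree_queue = it;
        } else {
            pp = &it->next;
        }
    }
}

static void drain_tofree(void)           /* later, in a preemptible context */
{
    while (tofree_queue) {
        struct item *it = tofree_queue;
        tofree_queue = it->next;
        free(it);
    }
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        struct item *it = malloc(sizeof(*it));
        it->id = i % 2;
        it->next = input_queue;
        input_queue = it;
    }
    flush_matching(1);
    drain_tofree();
    printf("deferred-free drain done\n");
    return 0;
}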
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch)
@@ -0,0 +1,68 @@
+From e0c27cffe6a6fad09fce1c068eff0895c69d1801 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 13 Jul 2011 14:05:05 +0200
+Subject: [PATCH 229/303] x86-no-perf-irq-work-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/kernel/irq_work.c | 2 ++
+ kernel/irq_work.c | 2 ++
+ kernel/timer.c | 6 +++++-
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
+index ca8f703..129b8bb 100644
+--- a/arch/x86/kernel/irq_work.c
++++ b/arch/x86/kernel/irq_work.c
+@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_regs *regs)
+ irq_exit();
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -28,3 +29,4 @@ void arch_irq_work_raise(void)
+ apic_wait_icr_idle();
+ #endif
+ }
++#endif
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index c3c46c7..727ba59 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -105,8 +105,10 @@ void irq_work_run(void)
+ if (llist_empty(this_list))
+ return;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUG_ON(!in_irq());
+ BUG_ON(!irqs_disabled());
++#endif
+
+ llnode = llist_del_all(this_list);
+ while (llnode != NULL) {
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 7f954e7..985772e 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1364,7 +1364,7 @@ void update_process_times(int user_tick)
+ scheduler_tick();
+ run_local_timers();
+ rcu_check_callbacks(cpu, user_tick);
+-#ifdef CONFIG_IRQ_WORK
++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+ if (in_irq())
+ irq_work_run();
+ #endif
+@@ -1378,6 +1378,10 @@ static void run_timer_softirq(struct softirq_action *h)
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++ irq_work_run();
++#endif
++
+ printk_tick();
+ hrtimer_run_pending();
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0230-console-make-rt-friendly.patch.patch)
@@ -0,0 +1,85 @@
+From 3ae8b3941753b6bcc11c09d719a9a95d683a7870 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 17 Jul 2011 22:43:07 +0200
+Subject: [PATCH 230/303] console-make-rt-friendly.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/printk.c | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/printk.c b/kernel/printk.c
+index c07a0e2..23b4eb5 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -504,6 +504,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
+ {
+ struct console *con;
+
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -512,6 +513,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
+ (con->flags & CON_ANYTIME)))
+ con->write(con, &LOG_BUF(start), end - start);
+ }
++ migrate_enable();
+ }
+
+ #ifdef CONFIG_EARLY_PRINTK
+@@ -838,12 +840,18 @@ static inline int can_use_console(unsigned int cpu)
+ * interrupts disabled. It should return with 'lockbuf_lock'
+ * released but interrupts still disabled.
+ */
+-static int console_trylock_for_printk(unsigned int cpu)
++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ __releases(&logbuf_lock)
+ {
+ int retval = 0, wake = 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ !preempt_count();
++#else
++ int lock = 1;
++#endif
+
+- if (console_trylock()) {
++ if (lock && console_trylock()) {
+ retval = 1;
+
+ /*
+@@ -1021,8 +1029,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+ * will release 'logbuf_lock' regardless of whether it
+ * actually gets the semaphore or not.
+ */
+- if (console_trylock_for_printk(this_cpu))
++ if (console_trylock_for_printk(this_cpu, flags)) {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ console_unlock();
++#else
++ raw_local_irq_restore(flags);
++ console_unlock();
++ raw_local_irq_save(flags);
++#endif
++ }
+
+ lockdep_on();
+ out_restore_irqs:
+@@ -1332,11 +1347,16 @@ again:
+ _con_start = con_start;
+ _log_end = log_end;
+ con_start = log_end; /* Flush */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(_con_start, _log_end);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(_con_start, _log_end);
++#endif
+ }
+ console_locked = 0;
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch)
@@ -0,0 +1,61 @@
+From 6b0f113de1f2bd0579095331994fd835fb3a49a7 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <rw at linutronix.de>
+Date: Mon, 12 Dec 2011 14:35:56 +0100
+Subject: [PATCH 231/303] printk: Disable migration instead of preemption
+
+There is no need to disable preemption in vprintk(), migrate_disable()
+is sufficient. This fixes the following bug in -rt:
+
+[ 14.759233] BUG: sleeping function called from invalid context
+at /home/rw/linux-rt/kernel/rtmutex.c:645
+[ 14.759235] in_atomic(): 1, irqs_disabled(): 0, pid: 547, name: bash
+[ 14.759244] Pid: 547, comm: bash Not tainted 3.0.12-rt29+ #3
+[ 14.759246] Call Trace:
+[ 14.759301] [<ffffffff8106fade>] __might_sleep+0xeb/0xf0
+[ 14.759318] [<ffffffff810ad784>] rt_spin_lock_fastlock.constprop.9+0x21/0x43
+[ 14.759336] [<ffffffff8161fef0>] rt_spin_lock+0xe/0x10
+[ 14.759354] [<ffffffff81347ad1>] serial8250_console_write+0x81/0x121
+[ 14.759366] [<ffffffff8107ecd3>] __call_console_drivers+0x7c/0x93
+[ 14.759369] [<ffffffff8107ef31>] _call_console_drivers+0x5c/0x60
+[ 14.759372] [<ffffffff8107f7e5>] console_unlock+0x147/0x1a2
+[ 14.759374] [<ffffffff8107fd33>] vprintk+0x3ea/0x462
+[ 14.759383] [<ffffffff816160e0>] printk+0x51/0x53
+[ 14.759399] [<ffffffff811974e4>] ? proc_reg_poll+0x9a/0x9a
+[ 14.759403] [<ffffffff81335b42>] __handle_sysrq+0x50/0x14d
+[ 14.759406] [<ffffffff81335c8a>] write_sysrq_trigger+0x4b/0x53
+[ 14.759408] [<ffffffff81335c3f>] ? __handle_sysrq+0x14d/0x14d
+[ 14.759410] [<ffffffff81197583>] proc_reg_write+0x9f/0xbe
+[ 14.759426] [<ffffffff811497ec>] vfs_write+0xac/0xf3
+[ 14.759429] [<ffffffff8114a9b3>] ? fget_light+0x3a/0x9b
+[ 14.759431] [<ffffffff811499db>] sys_write+0x4a/0x6e
+[ 14.759438] [<ffffffff81625d52>] system_call_fastpath+0x16/0x1b
+
+Signed-off-by: Richard Weinberger <rw at linutronix.de>
+Link: http://lkml.kernel.org/r/1323696956-11445-1-git-send-email-rw@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/printk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 23b4eb5..8f677b4 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -912,7 +912,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+ boot_delay_msec();
+ printk_delay();
+
+- preempt_disable();
++ migrate_disable();
+ /* This stops the holder of console_sem just where we want him */
+ raw_local_irq_save(flags);
+ this_cpu = smp_processor_id();
+@@ -1043,7 +1043,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+ out_restore_irqs:
+ raw_local_irq_restore(flags);
+
+- preempt_enable();
++ migrate_enable();
+ return printed_len;
+ }
+ EXPORT_SYMBOL(printk);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0232-power-use-generic-rwsem-on-rt.patch)
@@ -0,0 +1,26 @@
+From 58de22ea84ab355618312c7f746bcef02d2720b8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 10 Apr 2012 14:34:18 -0400
+Subject: [PATCH 232/303] power-use-generic-rwsem-on-rt
+
+---
+ arch/powerpc/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 7c93c7e..c504625 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0233-power-disable-highmem-on-rt.patch.patch)
@@ -0,0 +1,23 @@
+From b5c9cbb3cdfed35595bc5c776f8411ac50201404 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 18 Jul 2011 17:08:34 +0200
+Subject: [PATCH 233/303] power-disable-highmem-on-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/powerpc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index c504625..d01baf8 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -276,7 +276,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/time/Kconfig
+ source kernel/Kconfig.hz
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch)
@@ -0,0 +1,23 @@
+From e1fb1d28d0db8f8995e70bccb555af12e8d6406e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 18 Jul 2011 17:09:28 +0200
+Subject: [PATCH 234/303] arm-disable-highmem-on-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/arm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 9fd387d..ad3a202 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1678,7 +1678,7 @@ config HAVE_ARCH_PFN_VALID
+
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on MMU
++ depends on MMU && !PREEMPT_RT_FULL
+ help
+ The address space of ARM processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch)
@@ -0,0 +1,34 @@
+From e48e315d8623ac642f5b140d982ec6e89a400fec Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sat, 1 May 2010 18:29:35 +0200
+Subject: [PATCH 235/303] ARM: at91: tclib: Default to tclib timer for RT
+
+RT is not too happy about the shared timer interrupt in AT91
+devices. Default to tclib timer for RT.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/misc/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 1cb530c..951ae6c 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -82,6 +82,7 @@ config AB8500_PWM
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -114,7 +115,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+- default y
++ default y if !PREEMPT_RT_FULL
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch)
@@ -0,0 +1,23 @@
+From cdad9574d56c555d184af525672979b60e7085f7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 18 Jul 2011 17:10:12 +0200
+Subject: [PATCH 236/303] mips-disable-highmem-on-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/mips/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index d46f1da..9f02e8b 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2040,7 +2040,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch)
@@ -0,0 +1,94 @@
+From 16d70371c870b1cc0d86b3b5639baa88a2eeeb8d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Thu, 6 Oct 2011 10:48:39 -0400
+Subject: [PATCH 237/303] net: Avoid livelock in net_tx_action() on RT
+
+qdisc_lock is taken w/o disabling interrupts or bottom halves. So code
+holding a qdisc_lock() can be interrupted and softirqs can run on the
+return of interrupt in !RT.
+
+The spin_trylock() in net_tx_action() makes sure that the softirq
+does not deadlock. When the lock can't be acquired q is requeued and
+the NET_TX softirq is raised. That causes the softirq to run over and
+over.
+
+That works in mainline as do_softirq() has a retry loop limit and
+leaves the softirq processing in the interrupt return path and
+schedules ksoftirqd. The task which holds qdisc_lock cannot be
+preempted, so the lock is released and either ksoftirqd or the next
+softirq in the return from interrupt path can proceed. Though it's a
+bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it
+decides to bail out even if it's clear in the first iteration :)
+
+On RT all softirq processing is done in a FIFO thread and we don't
+have a loop limit, so ksoftirqd preempts the lock holder forever and
+unqueues and requeues until the reset button is hit.
+
+Due to the forced threading of ksoftirqd on RT we actually cannot
+deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to
+replace the spin_trylock() with a spin_lock(). When contended,
+ksoftirqd is scheduled out and the lock holder can proceed.
+
+[ tglx: Massaged changelog and code comments ]
+
+Solved-by: Thomas Gleixner <tglx at linuxtronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Tested-by: Carsten Emde <cbe at osadl.org>
+Cc: Clark Williams <williams at redhat.com>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Luis Claudio R. Goncalves <lclaudio at redhat.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ net/core/dev.c | 32 +++++++++++++++++++++++++++++++-
+ 1 file changed, 31 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1943426..c3b7e06 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3040,6 +3040,36 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT runs ksoftirqd as a real time thread and the root_lock is a
++ * "sleeping spinlock". If the trylock fails then we can go into an
++ * infinite loop when ksoftirqd preempted the task which actually
++ * holds the lock, because we requeue q and raise NET_TX softirq
++ * causing ksoftirqd to loop forever.
++ *
++ * It's safe to use spin_lock on RT here as softirqs run in thread
++ * context and cannot deadlock against the thread which is holding
++ * root_lock.
++ *
++ * On !RT the trylock might fail, but there we bail out from the
++ * softirq loop after 10 attempts which we can't do on RT. And the
++ * task holding root_lock cannot be preempted, so the only downside of
++ * that trylock is that we need 10 loops to decide that we should have
++ * given up in the first one :)
++ */
++static inline int take_root_lock(spinlock_t *lock)
++{
++ spin_lock(lock);
++ return 1;
++}
++#else
++static inline int take_root_lock(spinlock_t *lock)
++{
++ return spin_trylock(lock);
++}
++#endif
++
+ static void net_tx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+@@ -3078,7 +3108,7 @@ static void net_tx_action(struct softirq_action *h)
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+- if (spin_trylock(root_lock)) {
++ if (take_root_lock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
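
Note: the trylock-versus-block trade-off described in the changelog can be sketched in plain user-space C. The snippet below is only an illustration, with a pthread mutex standing in for the qdisc root_lock; take_root_lock_trylock() mirrors the mainline requeue-and-retry behaviour, while take_root_lock_blocking() mirrors the RT variant, where blocking is safe because the "softirq" runs in thread context and the lock holder can be scheduled.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;

/* mainline/!RT flavour: if the lock is contended, the caller requeues the
 * qdisc and re-raises NET_TX_SOFTIRQ, relying on the softirq retry limit
 * (MAX_SOFTIRQ_RESTART) to eventually give up and defer to ksoftirqd */
static int take_root_lock_trylock(pthread_mutex_t *lock)
{
        return pthread_mutex_trylock(lock) == 0;
}

/* RT flavour: softirqs run in thread context and the lock can sleep, so
 * simply blocking is safe and lets the (possibly boosted) holder finish */
static int take_root_lock_blocking(pthread_mutex_t *lock)
{
        pthread_mutex_lock(lock);
        return 1;
}

int main(void)
{
        if (take_root_lock_trylock(&root_lock)) {
                puts("trylock variant got the lock");
                pthread_mutex_unlock(&root_lock);
        }
        if (take_root_lock_blocking(&root_lock)) {
                puts("blocking variant got the lock");
                pthread_mutex_unlock(&root_lock);
        }
        return 0;
}
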
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0238-ping-sysrq.patch.patch)
@@ -0,0 +1,129 @@
+From 6d77afa8ae71917e74649b882e553244237c0b2b Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde at osadl.org>
+Date: Tue, 19 Jul 2011 13:51:17 +0100
+Subject: [PATCH 238/303] ping-sysrq.patch
+
+There are (probably rare) situations when a system has crashed and the system
+console becomes unresponsive but the network ICMP layer is still alive.
+Wouldn't it be wonderful if we could then submit a SysRq command via ping?
+
+This patch provides this facility. Please consult the updated documentation
+Documentation/sysrq.txt for details.
+
+Signed-off-by: Carsten Emde <C.Emde at osadl.org>
+---
+ Documentation/sysrq.txt | 11 +++++++++--
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++
+ net/ipv4/sysctl_net_ipv4.c | 7 +++++++
+ 4 files changed, 47 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
+index 312e375..9981f30 100644
+--- a/Documentation/sysrq.txt
++++ b/Documentation/sysrq.txt
+@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
+ On other - If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ echo t > /proc/sysrq-trigger
+
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example:
++ # ping -c1 -s57 -p0102030468
++ will trigger the SysRq-H (help) command.
++
++
+ * What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b' - Will immediately reboot the system without syncing or unmounting
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index d786b4f..8cef1d1 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -47,6 +47,7 @@ struct netns_ipv4 {
+
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
++ int sysctl_icmp_echo_sysrq;
+ int sysctl_icmp_ignore_bogus_error_responses;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index ab188ae..028eb47 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -67,6 +67,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/sysrq.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -801,6 +802,30 @@ out_err:
+ }
+
+ /*
++ * 32bit and 64bit have different timestamp length, so we check for
++ * the cookie at offset 20 and verify it is repeated at offset 50
++ */
++#define CO_POS0 20
++#define CO_POS1 50
++#define CO_SIZE sizeof(int)
++#define ICMP_SYSRQ_SIZE 57
++
++/*
++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
++ * pattern and if it matches send the next byte as a trigger to sysrq.
++ */
++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
++{
++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
++ char *p = skb->data;
++
++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
++ handle_sysrq(p[CO_POS0 + CO_SIZE]);
++}
++
++/*
+ * Handle ICMP_ECHO ("ping") requests.
+ *
+ * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+@@ -827,6 +852,11 @@ static void icmp_echo(struct sk_buff *skb)
+ icmp_param.data_len = skb->len;
+ icmp_param.head_len = sizeof(struct icmphdr);
+ icmp_reply(&icmp_param, skb);
++
++ if (skb->len == ICMP_SYSRQ_SIZE &&
++ net->ipv4.sysctl_icmp_echo_sysrq) {
++ icmp_check_sysrq(net, skb);
++ }
+ }
+ }
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 5485077..969c6ef 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -687,6 +687,13 @@ static struct ctl_table ipv4_net_table[] = {
+ .proc_handler = proc_dointvec
+ },
+ {
++ .procname = "icmp_echo_sysrq",
++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
+ .procname = "icmp_ignore_bogus_error_responses",
+ .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
+ .maxlen = sizeof(int),
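
To make the cookie layout concrete: the documentation hunk above uses ping -c1 -s57 -p0102030468, i.e. the 4-byte cookie 0x01020304 followed by 0x68 ('h'), repeated across the 57-byte payload. The stand-alone sketch below rebuilds that payload in user space and applies the same offset check as icmp_check_sysrq() above; it is purely illustrative and does not send any packet.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

#define CO_POS0         20
#define CO_POS1         50
#define CO_SIZE         sizeof(int)
#define ICMP_SYSRQ_SIZE 57

int main(void)
{
        unsigned char payload[ICMP_SYSRQ_SIZE];
        unsigned char pattern[5] = { 0x01, 0x02, 0x03, 0x04, 'h' }; /* cookie + SysRq key */
        int cookie = htonl(0x01020304);  /* value written to icmp_echo_sysrq */
        size_t i;

        /* ping(8) repeats the -p pattern across the whole data area */
        for (i = 0; i < sizeof(payload); i++)
                payload[i] = pattern[i % sizeof(pattern)];

        /* same check the kernel performs on skb->data */
        if (!memcmp(&cookie, payload + CO_POS0, CO_SIZE) &&
            !memcmp(&cookie, payload + CO_POS1, CO_SIZE) &&
            payload[CO_POS0 + CO_SIZE] == payload[CO_POS1 + CO_SIZE])
                printf("would trigger SysRq-%c\n", payload[CO_POS0 + CO_SIZE]);

        return 0;
}
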
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0239-kgdb-serial-Short-term-workaround.patch)
@@ -0,0 +1,116 @@
+From cea29bd0a9ec2bb5a5f5f78aba65ba51f4e16dc2 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel at windriver.com>
+Date: Thu, 28 Jul 2011 12:42:23 -0500
+Subject: [PATCH 239/303] kgdb/serial: Short term workaround
+
+On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
+> - KGDB (not yet disabled) is reportedly unusable on -rt right now due
+> to missing hacks in the console locking which I dropped on purpose.
+>
+
+To work around this in the short term you can use this patch, in
+addition to the clocksource watchdog patch that Thomas brewed up.
+
+Comments are welcome of course. Ultimately the right solution is to
+change the separation between the console and the HW to have a polled mode
++ work queue so as not to introduce any kind of latency.
+
+Thanks,
+Jason.
+---
+ drivers/tty/serial/8250.c | 13 +++++++++----
+ include/linux/kdb.h | 2 ++
+ kernel/debug/kdb/kdb_io.c | 6 ++----
+ 3 files changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 0101b2c..2b411117 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -38,6 +38,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -2867,10 +2868,14 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
+
+ touch_nmi_watchdog();
+
+- if (up->port.sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&up->port.lock, flags);
+- else
+- spin_lock_irqsave(&up->port.lock, flags);
++ if (unlikely(in_kdb_printk())) {
++ locked = 0;
++ } else {
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
++ else
++ spin_lock_irqsave(&up->port.lock, flags);
++ }
+
+ /*
+ * First save the IER then disable the interrupts
+diff --git a/include/linux/kdb.h b/include/linux/kdb.h
+index 0647258..0d1ebfc 100644
+--- a/include/linux/kdb.h
++++ b/include/linux/kdb.h
+@@ -150,12 +150,14 @@ extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ short, kdb_repeat_t);
+ extern int kdb_unregister(char *);
++#define in_kdb_printk() (kdb_trap_printk)
+ #else /* ! CONFIG_KGDB_KDB */
+ #define kdb_printf(...)
+ #define kdb_init(x)
+ #define kdb_register(...)
+ #define kdb_register_repeat(...)
+ #define kdb_uregister(x)
++#define in_kdb_printk() (0)
+ #endif /* CONFIG_KGDB_KDB */
+ enum {
+ KDB_NOT_INITIALIZED,
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 4802eb5..5b7455f 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -553,7 +553,6 @@ int vkdb_printf(const char *fmt, va_list ap)
+ int diag;
+ int linecount;
+ int logging, saved_loglevel = 0;
+- int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+@@ -564,8 +563,6 @@ int vkdb_printf(const char *fmt, va_list ap)
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+- saved_trap_printk = kdb_trap_printk;
+- kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+@@ -821,7 +818,6 @@ kdb_print_out:
+ } else {
+ __release(kdb_printf_lock);
+ }
+- kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+ }
+@@ -831,9 +827,11 @@ int kdb_printf(const char *fmt, ...)
+ va_list ap;
+ int r;
+
++ kdb_trap_printk++;
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
++ kdb_trap_printk--;
+
+ return r;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0240-add-sys-kernel-realtime-entry.patch)
@@ -0,0 +1,50 @@
+From 9be2fe449639927bf6665792fc1c6a86ccbcb9e5 Mon Sep 17 00:00:00 2001
+From: Clark Williams <williams at redhat.com>
+Date: Sat, 30 Jul 2011 21:55:53 -0500
+Subject: [PATCH 240/303] add /sys/kernel/realtime entry
+
+Add a /sys/kernel entry to indicate that the kernel is a
+realtime kernel.
+
+Clark says that he needs this for udev rules: udev needs to evaluate
+whether it's a PREEMPT_RT kernel a few thousand times, and parsing uname
+output is too slow for that.
+
+Are there better solutions? Should it exist and return 0 on !-rt?
+
+Signed-off-by: Clark Williams <williams at redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+---
+ kernel/ksysfs.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 4e316e1..a546d33 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+
+ #endif /* CONFIG_KEXEC */
+
++#if defined(CONFIG_PREEMPT_RT_FULL)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[] = {
+ &kexec_crash_size_attr.attr,
+ &vmcoreinfo_attr.attr,
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ &realtime_attr.attr,
++#endif
+ NULL
+ };
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch)
@@ -0,0 +1,120 @@
+From 5171cde6815a4c1c42453fb376c772bf763512eb Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 28 Jul 2011 10:43:51 +0200
+Subject: [PATCH 241/303] mm, rt: kmap_atomic scheduling
+
+In fact, with migrate_disable() existing one could play games with
+kmap_atomic. You could save/restore the kmap_atomic slots on context
+switch (if there are any in use, of course); this should be especially easy now
+that we have a kmap_atomic stack.
+
+Something like the below: it wants replacing all the preempt_disable()
+stuff with pagefault_disable() && migrate_disable() of course, but then
+you can flip kmaps around like below.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+[dvhart at linux.intel.com: build fix]
+Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
+---
+ arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++
+ include/linux/sched.h | 5 +++++
+ mm/memory.c | 2 ++
+ 3 files changed, 43 insertions(+)
+
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index ada175e..20f1573 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -39,6 +39,7 @@
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
+ #include <linux/cpuidle.h>
++#include <linux/highmem.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -339,6 +340,41 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ /*
++ * Save @prev's kmap_atomic stack
++ */
++ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
++ if (unlikely(prev_p->kmap_idx)) {
++ int i;
++
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ pte_t *ptep = kmap_pte - idx;
++ prev_p->kmap_pte[i] = *ptep;
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++
++ __this_cpu_write(__kmap_atomic_idx, 0);
++ }
++
++ /*
++ * Restore @next_p's kmap_atomic stack
++ */
++ if (unlikely(next_p->kmap_idx)) {
++ int i;
++
++ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
++
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++ }
++#endif
++
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 6d920a0..1d9c8a1 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
+
++#include <asm/kmap_types.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -1607,6 +1608,10 @@ struct task_struct {
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ #endif
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++#endif
+ };
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
+diff --git a/mm/memory.c b/mm/memory.c
+index 77288df..21304ee 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3455,6 +3455,7 @@ unlock:
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ void pagefault_disable(void)
+ {
++ migrate_disable();
+ current->pagefault_disabled++;
+ /*
+ * make sure to have issued the store before a pagefault
+@@ -3472,6 +3473,7 @@ void pagefault_enable(void)
+ */
+ barrier();
+ current->pagefault_disabled--;
++ migrate_enable();
+ }
+ EXPORT_SYMBOL_GPL(pagefault_enable);
+ #endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch)
@@ -0,0 +1,72 @@
+From 2e4b47010625b4c27c2035295fd8565bbf928622 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date: Tue, 13 Sep 2011 15:09:40 +0200
+Subject: [PATCH 242/303] ipc/sem: Rework semaphore wakeups
+
+Current sysv sems have a weird ass wakeup scheme that involves keeping
+preemption disabled over a potential O(n^2) loop and busy waiting on
+that on other CPUs.
+
+Kill this and simply wake the task directly from under the sem_lock.
+
+This was discovered by a migrate_disable() debug feature that
+disallows:
+
+ spin_lock();
+ preempt_disable();
+ spin_unlock()
+ preempt_enable();
+
+Cc: Manfred Spraul <manfred at colorfullife.com>
+Suggested-by: Thomas Gleixner <tglx at linutronix.de>
+Reported-by: Mike Galbraith <efault at gmx.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: Manfred Spraul <manfred at colorfullife.com>
+Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ ipc/sem.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 5215a81..5eaf684 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -461,6 +461,13 @@ undo:
+ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ struct sem_queue *q, int error)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *p = q->sleeper;
++ get_task_struct(p);
++ q->status = error;
++ wake_up_process(p);
++ put_task_struct(p);
++#else
+ if (list_empty(pt)) {
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+@@ -472,6 +479,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ q->pid = error;
+
+ list_add_tail(&q->simple_list, pt);
++#endif
+ }
+
+ /**
+@@ -485,6 +493,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ */
+ static void wake_up_sem_queue_do(struct list_head *pt)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ struct sem_queue *q, *t;
+ int did_something;
+
+@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
+ }
+ if (did_something)
+ preempt_enable();
++#endif
+ }
+
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch)
@@ -0,0 +1,166 @@
+From 2cd68f3755493613c873e8429ccacaf545f3244a Mon Sep 17 00:00:00 2001
+From: Frank Rowand <frank.rowand at am.sony.com>
+Date: Fri, 23 Sep 2011 13:43:12 -0700
+Subject: [PATCH 243/303] sysrq: Allow immediate Magic SysRq output for
+ PREEMPT_RT_FULL
+
+Add a CONFIG option to allow the output from Magic SysRq to be output
+immediately, even if this causes large latencies.
+
+If PREEMPT_RT_FULL, printk() will not try to acquire the console lock
+when interrupts or preemption are disabled. If the console lock is
+not acquired the printk() output will be buffered, but will not be
+output immediately. Some drivers call into the Magic SysRq code
+with interrupts or preemption disabled, so the output of Magic SysRq
+will be buffered instead of printing immediately if this option is
+not selected.
+
+Even with this option selected, Magic SysRq output will be delayed
+if the attempt to acquire the console lock fails.
+
+Signed-off-by: Frank Rowand <frank.rowand at am.sony.com>
+Link: http://lkml.kernel.org/r/4E7CEF60.5020508@am.sony.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+[ukleinek: make apply on top of debian/sysrq-mask.patch]
+
+ drivers/tty/serial/cpm_uart/cpm_uart_core.c | 2 +-
+ drivers/tty/sysrq.c | 23 +++++++++++++++++++++++
+ include/linux/sysrq.h | 5 +++++
+ kernel/printk.c | 5 +++--
+ lib/Kconfig.debug | 22 ++++++++++++++++++++++
+ 5 files changed, 54 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+index b418947..a8b0559 100644
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -1226,7 +1226,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
+ {
+ struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ unsigned long flags;
+- int nolock = oops_in_progress;
++ int nolock = oops_in_progress || sysrq_in_progress;
+
+ if (unlikely(nolock)) {
+ local_irq_save(flags);
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 43db715..5219738 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -492,6 +492,23 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
+ sysrq_key_table[i] = op_p;
+ }
+
++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
++
++int sysrq_in_progress;
++
++static void set_sysrq_in_progress(int value)
++{
++ sysrq_in_progress = value;
++}
++
++#else
++
++static void set_sysrq_in_progress(int value)
++{
++}
++
++#endif
++
+ void __handle_sysrq(int key, bool check_mask)
+ {
+ struct sysrq_key_op *op_p;
+@@ -500,6 +517,9 @@ void __handle_sysrq(int key, bool check_mask)
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
++
++ set_sysrq_in_progress(1);
++
+ /*
+ * Raise the apparent loglevel to maximum so that the sysrq header
+ * is shown to provide the user with positive feedback. We do not
+@@ -541,6 +561,9 @@ void __handle_sysrq(int key, bool check_mask)
+ printk("\n");
+ console_loglevel = orig_log_level;
+ }
++
++ set_sysrq_in_progress(0);
++
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+ }
+
+diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
+index 7faf933..d224c0b 100644
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -38,6 +38,11 @@ struct sysrq_key_op {
+ int enable_mask;
+ };
+
++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
++extern int sysrq_in_progress;
++#else
++#define sysrq_in_progress 0
++#endif
+ #ifdef CONFIG_MAGIC_SYSRQ
+
+ /* Generic SysRq interface -- you may call it from any device driver, supplying
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 8f677b4..b5e609e 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -21,6 +21,7 @@
+ #include <linux/tty.h>
+ #include <linux/tty_driver.h>
+ #include <linux/console.h>
++#include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
+ #include <linux/nmi.h>
+@@ -845,8 +846,8 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ {
+ int retval = 0, wake = 0;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+- int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+- !preempt_count();
++ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ !preempt_count()) || sysrq_in_progress;
+ #else
+ int lock = 1;
+ #endif
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index c347db3..13a937b 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -62,6 +62,28 @@ config MAGIC_SYSRQ
+ Specifies the default mask for the allowed SysRq keys. This can be
+ used to disable several sensitive keys by default.
+
++config MAGIC_SYSRQ_FORCE_PRINTK
++ bool "Force printk from Magic SysRq"
++ depends on MAGIC_SYSRQ && PREEMPT_RT_FULL
++ default n
++ help
++ Allow the output from Magic SysRq to be output immediately, even if
++ this causes large latencies. This can cause performance problems
++ for real-time processes.
++
++ If PREEMPT_RT_FULL, printk() will not try to acquire the console lock
++ when interrupts or preemption are disabled. If the console lock is
++ not acquired the printk() output will be buffered, but will not be
++ output immediately. Some drivers call into the Magic SysRq code
++ with interrupts or preemption disabled, so the output of Magic SysRq
++ will be buffered instead of printing immediately if this option is
++ not selected.
++
++ Even with this option selected, Magic SysRq output will be delayed
++ if the attempt to acquire the console lock fails.
++
++ Don't say Y unless you really know what this hack does.
++
+ config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch)
@@ -0,0 +1,28 @@
+From 4abc365bdb577a08114d6fb36655678cf9c12e02 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 6 Nov 2011 12:26:18 +0100
+Subject: [PATCH 244/303] x86-kvm-require-const-tsc-for-rt.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/kvm/x86.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f4063fd..97f1819 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5213,6 +5213,13 @@ int kvm_arch_init(void *opaque)
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch)
@@ -0,0 +1,113 @@
+From e2463bfee94d35cf0ce0b9af25fe93909cae6066 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sat, 12 Nov 2011 14:00:48 +0100
+Subject: [PATCH 245/303] scsi-fcoe-rt-aware.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/scsi/fcoe/fcoe.c | 16 ++++++++--------
+ drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++--
+ drivers/scsi/libfc/fc_exch.c | 4 ++--
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 8d67467..4085187 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1156,7 +1156,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
+ struct sk_buff *skb;
+ #ifdef CONFIG_SMP
+ struct fcoe_percpu_s *p0;
+- unsigned targ_cpu = get_cpu();
++ unsigned targ_cpu = get_cpu_light();
+ #endif /* CONFIG_SMP */
+
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+@@ -1212,7 +1212,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ }
+- put_cpu();
++ put_cpu_light();
+ #else
+ /*
+ * This a non-SMP scenario where the singular Rx thread is
+@@ -1435,11 +1435,11 @@ err2:
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ struct fcoe_percpu_s *fps;
+- int rc;
++ int rc, cpu = get_cpu_light();
+
+- fps = &get_cpu_var(fcoe_percpu);
++ fps = &per_cpu(fcoe_percpu, cpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+- put_cpu_var(fcoe_percpu);
++ put_cpu_light();
+
+ return rc;
+ }
+@@ -1680,7 +1680,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
++ stats = per_cpu_ptr(lport->dev_stats, get_cpu_light());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1712,13 +1712,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+- put_cpu();
++ put_cpu_light();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+ drop:
+ stats->ErrorFrames++;
+- put_cpu();
++ put_cpu_light();
+ kfree_skb(skb);
+ }
+
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index e7522dc..bfb83c0 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -719,7 +719,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+ unsigned long sel_time = 0;
+ struct fcoe_dev_stats *stats;
+
+- stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
++ stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu_light());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -752,7 +752,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+ sel_time = fcf->time;
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+ if (sel_time && !fip->sel_fcf && !fip->sel_time) {
+ sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ fip->sel_time = sel_time;
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 9de9db2..340998f 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -724,10 +724,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ }
+ memset(ep, 0, sizeof(*ep));
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+- put_cpu();
++ put_cpu_light();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch)
@@ -0,0 +1,115 @@
+From 4ac93b76906a9e31c113bbda794d217a10153208 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Mon, 14 Nov 2011 18:19:27 +0100
+Subject: [PATCH 246/303] x86: crypto: Reduce preempt disabled regions
+
+Restrict the preempt disabled regions to the actual floating point
+operations and enable preemption for the administrative actions.
+
+This is necessary on RT to prevent kfree and other operations from being
+called with preemption disabled.
+
+Reported-and-tested-by: Carsten Emde <cbe at osadl.org>
+Signed-off-by: Peter Zijlstra <peterz at infradead.org>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 545d0ce..0c9eaf1 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -289,14 +289,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+- nbytes & AES_BLOCK_MASK);
++ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -313,14 +313,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -359,14 +359,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -383,14 +383,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -445,18 +445,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0247-dm-Make-rt-aware.patch)
@@ -0,0 +1,37 @@
+From fbb904c35472f2ea543bb1896b5f394058036367 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 14 Nov 2011 23:06:09 +0100
+Subject: [PATCH 247/303] dm: Make rt aware
+
+Use the BUG_ON_NONRT variant for the irqs_disabled() checks. RT has
+interrupts legitimately enabled here as we can't deadlock against the
+irq thread due to the "sleeping spinlocks" conversion.
+
+Reported-by: Luis Claudio R. Goncalves <lclaudio at uudg.org>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/md/dm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8953630..2dc648b2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1676,14 +1676,14 @@ static void dm_request_fn(struct request_queue *q)
+ if (map_request(ti, clone, md))
+ goto requeued;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+ }
+
+ goto out;
+
+ requeued:
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+
+ delay_and_out:
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch)
@@ -0,0 +1,39 @@
+From cb3251ddb574c2197bcde20a85b68b829a8c7b11 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 14 Dec 2011 01:03:49 +0100
+Subject: [PATCH 248/303] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
+
+We can't deal with the cpumask allocations which happen in atomic
+context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/Kconfig | 2 +-
+ lib/Kconfig | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0941e6b..90876a6 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -730,7 +730,7 @@ config IOMMU_HELPER
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
+diff --git a/lib/Kconfig b/lib/Kconfig
+index 32f3e5a..63d81e8 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -231,6 +231,7 @@ config CHECK_SIGNATURE
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0249-seqlock-Prevent-rt-starvation.patch)
@@ -0,0 +1,168 @@
+From 273127f770bc92da2b82576ba1b969f495fb5c61 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 22 Feb 2012 12:03:30 +0100
+Subject: [PATCH 249/303] seqlock: Prevent rt starvation
+
+If a low prio writer gets preempted while holding the seqlock write
+locked, a high prio reader spins forever on RT.
+
+To prevent this, let the reader grab the spinlock, so it blocks and
+eventually boosts the writer. This way the writer can proceed and
+endless spinning is prevented.
+
+For seqcount writers we disable preemption over the update code
+path. Thanks to Al Viro for disentangling some VFS code to make that
+possible.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/seqlock.h | 55 ++++++++++++++++++++++++++++++++++++++---------
+ include/net/neighbour.h | 2 +-
+ 2 files changed, 46 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index cc7b65d..4ea1409 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -125,18 +125,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+ * Sequence counter only version assumes that callers are using their
+ * own mutexing.
+ */
+-static inline void write_seqcount_begin(seqcount_t *s)
++static inline void __write_seqcount_begin(seqcount_t *s)
+ {
+ s->sequence++;
+ smp_wmb();
+ }
+
+-static inline void write_seqcount_end(seqcount_t *s)
++static inline void write_seqcount_begin(seqcount_t *s)
++{
++ preempt_disable_rt();
++ __write_seqcount_begin(s);
++}
++
++static inline void __write_seqcount_end(seqcount_t *s)
+ {
+ smp_wmb();
+ s->sequence++;
+ }
+
++static inline void write_seqcount_end(seqcount_t *s)
++{
++ __write_seqcount_end(s);
++ preempt_enable_rt();
++}
++
+ /**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+@@ -177,10 +189,33 @@ typedef struct {
+ /*
+ * Read side functions for starting and finalizing a read side section.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++ unsigned ret;
++
++repeat:
++ ret = sl->seqcount.sequence;
++ if (unlikely(ret & 1)) {
++ /*
++ * Take the lock and let the writer proceed (i.e. evtl
++ * boost it), otherwise we could loop here forever.
++ */
++ spin_lock(&sl->lock);
++ spin_unlock(&sl->lock);
++ goto repeat;
++ }
++ return ret;
++}
++#endif
+
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+@@ -195,36 +230,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ spin_lock(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+ }
+
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ spin_lock_bh(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+ }
+
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ spin_lock_irq(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+ }
+
+@@ -233,7 +268,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ return flags;
+ }
+
+@@ -243,7 +278,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 2720884..6fda9fa 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -385,7 +385,7 @@ struct neighbour_cb {
+
+ #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+
+-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+ const struct net_device *dev)
+ {
+ unsigned int seq;
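
The starvation scenario is easiest to see from the classic seqcount read-retry loop. The stand-alone sketch below (user-space C11 atomics with default sequentially consistent ordering standing in for the kernel's explicit barriers) shows the loop a reader spins in: if a low-priority writer is preempted while the count is odd, the reader never exits, which is exactly what the spin_lock()/spin_unlock() in the RT read_seqbegin() above is there to break by boosting the writer.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;   /* even: no writer, odd: write in progress */
static atomic_int  value; /* datum protected by the sequence counter */

/* writer side, cf. write_seqcount_begin()/write_seqcount_end() above */
static void write_value(int v)
{
        atomic_fetch_add(&seq, 1);      /* sequence becomes odd */
        atomic_store(&value, v);
        atomic_fetch_add(&seq, 1);      /* sequence becomes even again */
}

/* reader side: retry until a stable, even sequence was observed around
 * the read; a writer preempted while seq is odd keeps us spinning here */
static int read_value(void)
{
        unsigned start;
        int v;

        do {
                while ((start = atomic_load(&seq)) & 1)
                        ;               /* write in progress: wait */
                v = atomic_load(&value);
        } while (atomic_load(&seq) != start);

        return v;
}

int main(void)
{
        write_value(42);
        printf("read %d\n", read_value());
        return 0;
}
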
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0250-timer-Fix-hotplug-for-rt.patch)
@@ -0,0 +1,73 @@
+From 805be67d8daa007a8360d13d0263c297ea143441 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 1 Mar 2012 13:55:28 -0500
+Subject: [PATCH 250/303] timer: Fix hotplug for -rt
+
+Revert the RT patch:
+ Author: Ingo Molnar <mingo at elte.hu>
+ Date: Fri Jul 3 08:30:32 2009 -0500
+ timers: fix timer hotplug on -rt
+
+ Here we are in the CPU_DEAD notifier, and we must not sleep nor
+ enable interrupts.
+
+There's no problem with sleeping in this notifier.
+
+But the get_cpu_var() had to be converted to a get_local_var().
+
+Replace the previous fix with the get_local_var() conversion.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190344.948157137@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/timer.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 985772e..05d9be9 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1745,21 +1745,17 @@ static void __cpuinit migrate_timers(int cpu)
+ {
+ struct tvec_base *old_base;
+ struct tvec_base *new_base;
+- unsigned long flags;
+ int i;
+
+ BUG_ON(cpu_online(cpu));
+ old_base = per_cpu(tvec_bases, cpu);
+- new_base = get_cpu_var(tvec_bases);
++ new_base = get_local_var(tvec_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+- local_irq_save(flags);
+- while (!spin_trylock(&new_base->lock))
+- cpu_relax();
+- while (!spin_trylock(&old_base->lock))
+- cpu_relax();
++ spin_lock_irq(&new_base->lock);
++ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ BUG_ON(old_base->running_timer);
+
+@@ -1773,10 +1769,8 @@ static void __cpuinit migrate_timers(int cpu)
+ }
+
+ spin_unlock(&old_base->lock);
+- spin_unlock(&new_base->lock);
+- local_irq_restore(flags);
+-
+- put_cpu_var(tvec_bases);
++ spin_unlock_irq(&new_base->lock);
++ put_local_var(tvec_bases);
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch)
@@ -0,0 +1,45 @@
+From 2d8a88cb1195bbf514e617962b5535bf2c32c6ba Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 1 Mar 2012 13:55:29 -0500
+Subject: [PATCH 251/303] futex/rt: Fix possible lockup when taking pi_lock in
+ proxy handler
+
+When taking the pi_lock, we must disable interrupts because the
+pi_lock can also be taken in an interrupt handler.
+
+Use raw_spin_lock_irq() instead of raw_spin_lock().
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.165160680@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/rtmutex.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 9850dc0..b525158 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -1373,14 +1373,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+- raw_spin_lock(&task->pi_lock);
++ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+- raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+- raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&task->pi_lock);
+ #endif
+
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
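A minimal sketch of the lockup window being closed (the flow is illustrative,
not taken from the patch):

    raw_spin_lock(&task->pi_lock);          /* interrupts still enabled */
    /* <- an IRQ fires on this CPU; its handler also wants task->pi_lock
     *    and spins forever, because the interrupted context can never
     *    release it.  raw_spin_lock_irq() closes this window by keeping
     *    interrupts off for the whole critical section. */
    raw_spin_unlock(&task->pi_lock);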
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch)
@@ -0,0 +1,35 @@
+From f0c9f58b58d1a5d1fb5f9b478d00d15a1eec80d3 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 1 Mar 2012 13:55:32 -0500
+Subject: [PATCH 252/303] ring-buffer/rt: Check for irqs disabled before
+ grabbing reader lock
+
+In RT the reader lock is a mutex and we can not grab it when preemption is
+disabled. The in_atomic() check that is there does not check if irqs are
+disabled. Add that check as well.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.786365803@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/trace/ring_buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 70112f3..aa89695 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1054,7 +1054,7 @@ static inline int ok_to_lock(void)
+ if (in_nmi())
+ return 0;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+- if (in_atomic())
++ if (in_atomic() || irqs_disabled())
+ return 0;
+ #endif
+ return 1;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch)
@@ -0,0 +1,50 @@
+From c0aa6a62f84c536b0552ff992559e7a89c42067c Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 1 Mar 2012 13:55:33 -0500
+Subject: [PATCH 253/303] sched/rt: Fix wait_task_interactive() to test
+ rt_spin_lock state
+
+wait_task_inactive() lets a task sleep while waiting for another task
+to reach a certain state. But it ignores the rt_spin_lock state and can
+return an incorrect result if the task it is waiting for is blocked on
+an rt_spin_lock() and is waking up.
+
+The rt_spin_locks save the task's state in the saved_state field, and
+wait_task_inactive() must also test that state.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.979435764@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 364a863..bba3c83 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2446,7 +2446,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && unlikely(p->state != match_state)
++ && unlikely(p->saved_state != match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -2461,7 +2462,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ running = task_running(rq, p);
+ on_rq = p->on_rq;
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state
++ || p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &flags);
+
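Roughly why p->saved_state matters here (a simplified sketch of the
rt_spin_lock sleep path; the details are an assumption, not copied from the
-rt sources):

    /* A task blocking on an rt_spin_lock does approximately:
     *
     *     raw_spin_lock(&self->pi_lock);
     *     self->saved_state = self->state;     // e.g. TASK_INTERRUPTIBLE
     *     self->state = TASK_UNINTERRUPTIBLE;  // while waiting for the lock
     *     raw_spin_unlock(&self->pi_lock);
     *
     * so a caller matching on the original state, like wait_task_inactive(),
     * must check saved_state as well or it bails out spuriously. */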
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch)
@@ -0,0 +1,109 @@
+From 0b6ebd86816aea30e7b4ca813805388935c7108d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 1 Mar 2012 13:55:30 -0500
+Subject: [PATCH 254/303] lglock/rt: Use non-rt for_each_cpu() in -rt code
+
+Currently the RT version of the lglocks() does a for_each_online_cpu()
+in the name##_global_lock_online() functions. Non-rt uses its own
+mask for this, and for good reason.
+
+A task may grab a *_global_lock_online(), and in the meantime one
+of the CPUs goes offline. Now when that task does a *_global_unlock_online()
+it releases all the locks *except* the one for the CPU that went offline.
+
+Now if that CPU were to come back online, its lock is owned by a
+task that never released it when it should have.
+
+This causes all sorts of fun errors, like owners of a lock no longer
+existing, or sleeping on IO, waiting to be woken up by a task that
+happens to be blocked on the lock it never released.
+
+Convert the RT versions to use the lglock-specific cpumasks: once
+a CPU comes online, its bit in the mask is set and never cleared, even
+when the CPU goes offline. The locks for that CPU will still be taken
+and released.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.374756214@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/lglock.h | 35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index 52b289f..cdfcef3 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -203,9 +203,31 @@
+ #else /* !PREEMPT_RT_FULL */
+ #define DEFINE_LGLOCK(name) \
+ \
+- DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
++ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
++ DEFINE_SPINLOCK(name##_cpu_lock); \
++ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
++ static int \
++ name##_lg_cpu_callback(struct notifier_block *nb, \
++ unsigned long action, void *hcpu) \
++ { \
++ switch (action & ~CPU_TASKS_FROZEN) { \
++ case CPU_UP_PREPARE: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_set((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ break; \
++ case CPU_UP_CANCELED: case CPU_DEAD: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_clear((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ } \
++ return NOTIFY_OK; \
++ } \
++ static struct notifier_block name##_lg_cpu_notifier = { \
++ .notifier_call = name##_lg_cpu_callback, \
++ }; \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -214,6 +236,11 @@
+ lock = &per_cpu(name##_lock, i); \
+ rt_mutex_init(lock); \
+ } \
++ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
++ get_online_cpus(); \
++ for_each_online_cpu(i) \
++ cpu_set(i, name##_cpus); \
++ put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+@@ -254,7 +281,8 @@
+ void name##_global_lock_online(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ spin_lock(&name##_cpu_lock); \
++ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+@@ -265,11 +293,12 @@
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
++ spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
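A compressed timeline of the bug (illustrative only; "T1" is a hypothetical
task):

    /* T1: name##_global_lock_online()    locks the per-CPU mutexes of CPUs 0-3
     *     CPU 3 goes offline
     * T1: name##_global_unlock_online()  for_each_online_cpu() now skips CPU 3,
     *                                    so its mutex stays locked forever
     *     CPU 3 comes back online        the next locker blocks on a mutex whose
     *                                    owner does not know it still holds it
     *
     * With the lglock-private cpumask the unlock side walks exactly the CPUs
     * that were locked, regardless of hotplug events in between. */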
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch)
@@ -0,0 +1,124 @@
+From f53ddfa177ee15de50e95d145c6c6f8aff2f14f3 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Fri, 2 Mar 2012 10:36:57 -0500
+Subject: [PATCH 255/303] cpu: Make hotplug.lock a "sleeping" spinlock on RT
+
+Tasks can block on hotplug.lock in pin_current_cpu(), but their state
+might be != RUNNING. So the mutex wakeup will set the state
+unconditionally to RUNNING. That might cause spurious unexpected
+wakeups. We could provide a state preserving mutex_lock() function,
+but this is semantically backwards. So instead we convert hotplug.lock
+to a spinlock for RT, which already has the state-preserving
+semantics.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/cpu.c | 35 ++++++++++++++++++++++++++---------
+ 1 file changed, 26 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index fa40834..66dfb74 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
+
+ static struct {
+ struct task_struct *active_writer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* Makes the lock keep the task's state */
++ spinlock_t lock;
++#else
+ struct mutex lock; /* Synchronizes accesses to refcount, */
++#endif
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+@@ -54,10 +59,22 @@ static struct {
+ int refcount;
+ } cpu_hotplug = {
+ .active_writer = NULL,
++#ifdef CONFIG_PREEMPT_RT_FULL
++ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
++#else
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
++#endif
+ .refcount = 0,
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
++#else
++# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
++#endif
++
+ struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+@@ -87,8 +104,8 @@ retry:
+ return;
+ }
+ preempt_enable();
+- mutex_lock(&cpu_hotplug.lock);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_lock();
++ hotplug_unlock();
+ preempt_disable();
+ goto retry;
+ }
+@@ -161,9 +178,9 @@ void get_online_cpus(void)
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ cpu_hotplug.refcount++;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(get_online_cpus);
+@@ -172,10 +189,10 @@ void put_online_cpus(void)
+ {
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+ wake_up_process(cpu_hotplug.active_writer);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(put_online_cpus);
+@@ -207,11 +224,11 @@ static void cpu_hotplug_begin(void)
+ cpu_hotplug.active_writer = current;
+
+ for (;;) {
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (likely(!cpu_hotplug.refcount))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ schedule();
+ }
+ }
+@@ -219,7 +236,7 @@ static void cpu_hotplug_begin(void)
+ static void cpu_hotplug_done(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ }
+
+ #else /* #if CONFIG_HOTPLUG_CPU */
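A short sketch of the wakeup problem the conversion avoids (hypothetical
caller, not taken from the patch):

    set_current_state(TASK_INTERRUPTIBLE);   /* caller prepares to sleep   */
    pin_current_cpu();                       /* may block on hotplug.lock  */
    /* A mutex_unlock() wakeup would force the task back to TASK_RUNNING and
     * lose the state set above; the RT "sleeping spinlock" wakeup preserves
     * the task state, which is what hotplug_lock()/hotplug_unlock() now use. */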
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch)
@@ -0,0 +1,155 @@
+From 2d93e9278c2e9b30fcf36ced75ca315bf2cf5128 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 13 Nov 2011 17:17:09 +0100
+Subject: [PATCH 256/303] softirq: Check preemption after reenabling
+ interrupts
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ block/blk-iopoll.c | 3 +++
+ block/blk-softirq.c | 3 +++
+ include/linux/preempt.h | 3 +++
+ net/core/dev.c | 6 ++++++
+ 4 files changed, 15 insertions(+)
+
+diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
+index 58916af..f7ca9b4 100644
+--- a/block/blk-iopoll.c
++++ b/block/blk-iopoll.c
+@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
+ list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(blk_iopoll_sched);
+
+@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct softirq_action *h)
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
+ &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 1366a89..60a88ab 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -50,6 +50,7 @@ static void trigger_softirq(void *data)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /*
+@@ -92,6 +93,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+@@ -150,6 +152,7 @@ do_local:
+ goto do_local;
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /**
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 6450c01..58d8982 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -56,8 +56,10 @@ do { \
+
+ #ifndef CONFIG_PREEMPT_RT_BASE
+ # define preempt_enable_no_resched() __preempt_enable_no_resched()
++# define preempt_check_resched_rt() do { } while (0)
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #endif
+
+ #define preempt_enable() \
+@@ -105,6 +107,7 @@ do { \
+ #define preempt_disable_notrace() do { } while (0)
+ #define preempt_enable_no_resched_notrace() do { } while (0)
+ #define preempt_enable_notrace() do { } while (0)
++#define preempt_check_resched_rt() do { } while (0)
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c3b7e06..70198ee 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1819,6 +1819,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -1840,6 +1841,7 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
+ sd->completion_queue = skb;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ }
+ EXPORT_SYMBOL(dev_kfree_skb_irq);
+@@ -2965,6 +2967,7 @@ enqueue:
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -3789,6 +3792,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -3861,6 +3865,7 @@ void __napi_schedule(struct napi_struct *n)
+ local_irq_save(flags);
+ ____napi_schedule(&__get_cpu_var(softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -6372,6 +6377,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
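The recurring pattern added throughout these hunks, shown once in isolation
(the surrounding function is made up):

    static void example_kick_softirq(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            raise_softirq_irqoff(NET_TX_SOFTIRQ);  /* on RT this only wakes ksoftirqd */
            local_irq_restore(flags);
            preempt_check_resched_rt();            /* give the woken thread a chance to
                                                      run now; no-op without PREEMPT_RT_BASE */
    }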
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0257-rt-Introduce-cpu_chill.patch)
@@ -0,0 +1,31 @@
+From 96cb0bdc6d630c37b315fbd9ee84925ea1b2252b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 7 Mar 2012 20:51:03 +0100
+Subject: [PATCH 257/303] rt: Introduce cpu_chill()
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
+defaults to cpu_relax() for non-RT. On RT it puts the looping task to
+sleep for a tick so the preempted task can make progress.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/delay.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/include/linux/delay.h b/include/linux/delay.h
+index a6ecb34..e23a7c0 100644
+--- a/include/linux/delay.h
++++ b/include/linux/delay.h
+@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
+ msleep(seconds * 1000);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define cpu_chill() msleep(1)
++#else
++# define cpu_chill() cpu_relax()
++#endif
++
+ #endif /* defined(_LINUX_DELAY_H) */
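For context, the kind of trylock retry loop cpu_chill() is meant for (minimal
sketch; the lock names are made up):

    relock:
            spin_lock(&parent->d_lock);
            if (!spin_trylock(&child->d_lock)) {
                    spin_unlock(&parent->d_lock);
                    cpu_chill();    /* msleep(1) on RT so the lock holder can run;
                                       plain cpu_relax() otherwise */
                    goto relock;
            }
            /* ... both locks held ... */

The next two patches apply exactly this substitution to the dcache/autofs and
networking retry loops.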
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch)
@@ -0,0 +1,103 @@
+From 1cc0713df9990ea24af2338e09bfe9f5839fdee5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 7 Mar 2012 21:00:34 +0100
+Subject: [PATCH 258/303] fs: dcache: Use cpu_chill() in trylock loops
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Use cpu_chill() instead of cpu_relax() to let the system
+make progress.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ fs/autofs4/autofs_i.h | 1 +
+ fs/autofs4/expire.c | 2 +-
+ fs/dcache.c | 7 ++++---
+ fs/namespace.c | 3 ++-
+ 4 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index 650d520..fb77639 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -34,6 +34,7 @@
+ #include <linux/sched.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 450f529..43afbb0 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -170,7 +170,7 @@ again:
+ parent = p->d_parent;
+ if (!spin_trylock(&parent->d_lock)) {
+ spin_unlock(&p->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+ spin_unlock(&p->d_lock);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index bb7f4cc..0840534 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -37,6 +37,7 @@
+ #include <linux/rculist_bl.h>
+ #include <linux/prefetch.h>
+ #include <linux/ratelimit.h>
++#include <linux/delay.h>
+ #include "internal.h"
+
+ /*
+@@ -410,7 +411,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
+ if (inode && !spin_trylock(&inode->i_lock)) {
+ relock:
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ return dentry; /* try again with same dentry */
+ }
+ if (IS_ROOT(dentry))
+@@ -796,7 +797,7 @@ relock:
+
+ if (!spin_trylock(&dentry->d_lock)) {
+ spin_unlock(&dcache_lru_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+
+@@ -1979,7 +1980,7 @@ again:
+ if (dentry->d_count == 1) {
+ if (inode && !spin_trylock(&inode->i_lock)) {
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto again;
+ }
+ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 644dbde..c45da77 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -31,6 +31,7 @@
+ #include <linux/idr.h>
+ #include <linux/fs_struct.h>
+ #include <linux/fsnotify.h>
++#include <linux/delay.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+ #include "pnode.h"
+@@ -346,7 +347,7 @@ int mnt_want_write(struct vfsmount *mnt)
+ */
+ while (mnt->mnt_flags & MNT_WRITE_HOLD) {
+ preempt_enable();
+- cpu_relax();
++ cpu_chill();
+ preempt_disable();
+ }
+ /*
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch)
@@ -0,0 +1,67 @@
+From 0de94f9ae162ee101078b86f0ad803ddf589c349 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 7 Mar 2012 21:10:04 +0100
+Subject: [PATCH 259/303] net: Use cpu_chill() instead of cpu_relax()
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Use cpu_chill() instead of cpu_relax() to let the system
+make progress.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ net/packet/af_packet.c | 5 +++--
+ net/rds/ib_rdma.c | 3 ++-
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 835fcea..0b3c649 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -89,6 +89,7 @@
+ #include <linux/virtio_net.h>
+ #include <linux/errqueue.h>
+ #include <linux/net_tstamp.h>
++#include <linux/delay.h>
+
+ #ifdef CONFIG_INET
+ #include <net/inet_common.h>
+@@ -673,7 +674,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
+@@ -928,7 +929,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index e8fdb17..5a44c6e 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+ #include <linux/llist.h>
++#include <linux/delay.h>
+
+ #include "rds.h"
+ #include "ib.h"
+@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace(void)
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch)
@@ -0,0 +1,51 @@
+From 34141278ac902a887488a9f90bcbe16e1fc106b8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 24 Jul 2011 12:11:43 +0200
+Subject: [PATCH 260/303] kconfig-disable-a-few-options-rt.patch
+
+Disable stuff which is known to have issues on RT
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/Kconfig | 1 +
+ drivers/net/Kconfig | 1 +
+ mm/Kconfig | 2 +-
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 4b0669c..73fc7cd 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 654a5e9..0f5149d 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -154,6 +154,7 @@ config MACVTAP
+
+ config NETCONSOLE
+ tristate "Network console logging support"
++ depends on !PREEMPT_RT_FULL
+ ---help---
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 011b110..0526445 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -307,7 +307,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on X86 && MMU
++ depends on X86 && MMU && !PREEMPT_RT_FULL
+ select COMPACTION
+ help
+ Transparent Hugepages allows the kernel to use huge pages and
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0261-kconfig-preempt-rt-full.patch.patch)
@@ -0,0 +1,62 @@
+From 9887c53af1891f2618e570a093260a2d9eafce14 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 29 Jun 2011 14:58:57 +0200
+Subject: [PATCH 261/303] kconfig-preempt-rt-full.patch
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ init/Makefile | 2 +-
+ kernel/Kconfig.preempt | 7 +++++++
+ scripts/mkcompile_h | 4 +++-
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/init/Makefile b/init/Makefile
+index 0bf677a..6b473cd 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -29,4 +29,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 35c6f20..d0e9372 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -66,6 +66,13 @@ config PREEMPT_RTB
+ enables changes which are preliminary for the full preemptiple
+ RT kernel.
+
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ help
++ All and everything
++
+ endchoice
+
+ config PREEMPT_COUNT
+diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
+index f221ddf..5f44009 100755
+--- a/scripts/mkcompile_h
++++ b/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
+
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
+
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+
+ # Truncate to maximum length
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch)
@@ -0,0 +1,51 @@
+From 84dfe3dc79ff047ae38477f6fa7b1770214cb6d4 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Mon, 16 Apr 2012 21:51:54 -0400
+Subject: [PATCH 262/303] rt: Make migrate_disable/enable() and
+ __rt_mutex_init non-GPL only
+
+Modules that load on the normal vanilla kernel should also load on
+an -rt kernel. This does not mean we condone non-GPL modules;
+we are only being consistent.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/rtmutex.c | 2 +-
+ kernel/sched.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index b525158..9c4f6e5 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -1290,7 +1290,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+
+ debug_rt_mutex_init(lock, name);
+ }
+-EXPORT_SYMBOL_GPL(__rt_mutex_init);
++EXPORT_SYMBOL(__rt_mutex_init);
+
+ /**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+diff --git a/kernel/sched.c b/kernel/sched.c
+index bba3c83..1dfdae9 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4619,7 +4619,7 @@ void migrate_disable(void)
+ p->migrate_disable = 1;
+ preempt_enable();
+ }
+-EXPORT_SYMBOL_GPL(migrate_disable);
++EXPORT_SYMBOL(migrate_disable);
+
+ void migrate_enable(void)
+ {
+@@ -4671,7 +4671,7 @@ void migrate_enable(void)
+ unpin_current_cpu();
+ preempt_enable();
+ }
+-EXPORT_SYMBOL_GPL(migrate_enable);
++EXPORT_SYMBOL(migrate_enable);
+ #else
+ static inline void update_migrate_disable(struct task_struct *p) { }
+ #define migrate_disabled_updated(p) 0
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch)
@@ -0,0 +1,52 @@
+From e61a4b5e4157b00065c083a3646d1ada810669f6 Mon Sep 17 00:00:00 2001
+From: John Kacur <jkacur at redhat.com>
+Date: Fri, 27 Apr 2012 12:48:46 +0200
+Subject: [PATCH 263/303] scsi: qla2xxx: Use local_irq_save_nort() in
+ qla2x00_poll
+
+RT triggers the following:
+
+[ 11.307652] [<ffffffff81077b27>] __might_sleep+0xe7/0x110
+[ 11.307663] [<ffffffff8150e524>] rt_spin_lock+0x24/0x60
+[ 11.307670] [<ffffffff8150da78>] ? rt_spin_lock_slowunlock+0x78/0x90
+[ 11.307703] [<ffffffffa0272d83>] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx]
+[ 11.307736] [<ffffffffa0262307>] qla2x00_poll+0x67/0x90 [qla2xxx]
+
+The function qla2x00_poll() does local_irq_save() before calling
+qla24xx_intr_handler(), which takes a spinlock. Since spinlocks are sleepable
+on RT, it is not allowed to take them with interrupts disabled. Therefore we
+use local_irq_save_nort() instead, which saves flags without disabling interrupts.
+
+This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt
+
+Suggested-by: Thomas Gleixner
+Signed-off-by: John Kacur <jkacur at redhat.com>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: David Sommerseth <davids at redhat.com>
+Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ drivers/scsi/qla2xxx/qla_inline.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 9902834..6d01db6 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -36,12 +36,12 @@ qla2x00_poll(struct rsp_que *rsp)
+ {
+ unsigned long flags;
+ struct qla_hw_data *ha = rsp->hw;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (IS_QLA82XX(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ static inline uint8_t *
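The _nort helpers used above behave roughly as follows (sketch of the -rt
definitions; treat the exact bodies as an assumption):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_save_nort(flags)     local_save_flags(flags) /* record only */
    # define local_irq_restore_nort(flags)  (void)(flags)           /* no-op       */
    #else
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif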
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch)
@@ -0,0 +1,67 @@
+From 12a1311dc6d7431975a54d1cb0307189cf30015e Mon Sep 17 00:00:00 2001
+From: Priyanka Jain <Priyanka.Jain at freescale.com>
+Date: Thu, 17 May 2012 09:35:11 +0530
+Subject: [PATCH 264/303] net,RT:REmove preemption disabling in netif_rx()
+
+1) enqueue_to_backlog() (called from netif_rx) should be
+ bound to a particular CPU. This can be achieved by
+ disabling migration. No need to disable preemption.
+
+2) Fixes crash "BUG: scheduling while atomic: ksoftirqd"
+ in case of RT.
+ If preemption is disabled, enqueue_to_backlog() is called
+ in atomic context. And if the backlog exceeds its count,
+ kfree_skb() is called. But in RT, kfree_skb() might
+ get scheduled out, so it expects a non-atomic context.
+
+3) When CONFIG_PREEMPT_RT_FULL is not defined,
+ migrate_enable() and migrate_disable() map to
+ preempt_enable() and preempt_disable(), so there is no
+ change in functionality in the non-RT case.
+
+-Replace preempt_enable(), preempt_disable() with
+ migrate_enable(), migrate_disable() respectively
+-Replace get_cpu(), put_cpu() with get_cpu_light(),
+ put_cpu_light() respectively
+
+Signed-off-by: Priyanka Jain <Priyanka.Jain at freescale.com>
+Acked-by: Rajan Srivastava <Rajan.Srivastava at freescale.com>
+Cc: <rostedt at goodmis.orgn>
+Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 70198ee..7e8f459 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3006,7 +3006,7 @@ int netif_rx(struct sk_buff *skb)
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
+- preempt_disable();
++ migrate_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -3016,13 +3016,13 @@ int netif_rx(struct sk_buff *skb)
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
+- preempt_enable();
++ migrate_enable();
+ }
+ #else
+ {
+ unsigned int qtail;
+- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+- put_cpu();
++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
++ put_cpu_light();
+ }
+ #endif
+ return ret;
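For reference, the mapping the commit message relies on (assumption-level
sketch of the -rt definitions):

    /* Without PREEMPT_RT_FULL:                  With PREEMPT_RT_FULL:
     *   migrate_disable()  -> preempt_disable()   true migration disable, may sleep
     *   migrate_enable()   -> preempt_enable()    re-allow migration
     *   get_cpu_light()    -> get_cpu()           migrate_disable(); smp_processor_id()
     *   put_cpu_light()    -> put_cpu()           migrate_enable()
     *
     * so the non-RT behaviour of netif_rx() is unchanged by this patch. */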
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch)
@@ -0,0 +1,45 @@
+From c3018adca4a8eece8735345c91d61640b5fdc931 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 22 May 2012 21:15:10 +0200
+Subject: [PATCH 265/303] mips-remove-smp-reserve-lock.patch
+
+Instead of making the lock raw, remove it as it protects nothing.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ arch/mips/cavium-octeon/smp.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
+index efcfff4..86fce15 100644
+--- a/arch/mips/cavium-octeon/smp.c
++++ b/arch/mips/cavium-octeon/smp.c
+@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
+
+ extern void fixup_irqs(void);
+
+-static DEFINE_SPINLOCK(smp_reserve_lock);
+-
+ static int octeon_cpu_disable(void)
+ {
+ unsigned int cpu = smp_processor_id();
+@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
+ if (cpu == 0)
+ return -EBUSY;
+
+- spin_lock(&smp_reserve_lock);
+-
+ cpu_clear(cpu, cpu_online_map);
+ cpu_clear(cpu, cpu_callin_map);
+ local_irq_disable();
+@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
+ flush_cache_all();
+ local_flush_tlb_all();
+
+- spin_unlock(&smp_reserve_lock);
+-
+ return 0;
+ }
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch)
@@ -0,0 +1,264 @@
+From 9d87523905462bd112e5dae57c53cfe1867403c5 Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde at osadl.org>
+Date: Wed, 11 Jul 2012 22:05:17 +0000
+Subject: [PATCH 266/303] Latency histogramms: Cope with backwards running
+ local trace clock
+
+Thanks to the wonders of modern technology, the local trace clock can
+now run backwards. Since this never happened before, the time difference
+between now and somewhat earlier was expected to never become negative
+and was, thus, stored in an unsigned integer variable. Nowadays, we need a
+signed integer to ensure that the value is stored as an underflow in the
+related histogram. (In cases where this is not a malfunction, bipolar
+histograms can be used.)
+
+This patch takes care that all latency variables are represented as
+signed integers and negative numbers are considered as histogram
+underflows.
+
+On one of the misbehaving processors, switching to the global clock solved
+the problem:
+ echo global >/sys/kernel/debug/tracing/trace_clock
+
+Signed-off-by: Carsten Emde <C.Emde at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/sched.h | 2 +-
+ kernel/trace/latency_hist.c | 71 ++++++++++++++++++++++---------------------
+ 2 files changed, 38 insertions(+), 35 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1d9c8a1..5be568f 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1589,7 +1589,7 @@ struct task_struct {
+ #ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+ #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+- unsigned long timer_offset;
++ long timer_offset;
+ #endif
+ #endif
+ #endif /* CONFIG_TRACING */
+diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
+index 9d49fcb..d514eef 100644
+--- a/kernel/trace/latency_hist.c
++++ b/kernel/trace/latency_hist.c
+@@ -27,6 +27,8 @@
+ #include "trace.h"
+ #include <trace/events/sched.h>
+
++#define NSECS_PER_USECS 1000L
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/hist.h>
+
+@@ -46,11 +48,11 @@ enum {
+ struct hist_data {
+ atomic_t hist_mode; /* 0 log, 1 don't log */
+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
+- unsigned long min_lat;
+- unsigned long max_lat;
++ long min_lat;
++ long max_lat;
+ unsigned long long below_hist_bound_samples;
+ unsigned long long above_hist_bound_samples;
+- unsigned long long accumulate_lat;
++ long long accumulate_lat;
+ unsigned long long total_samples;
+ unsigned long long hist_array[MAX_ENTRY_NUM];
+ };
+@@ -152,8 +154,8 @@ static struct enable_data timerandwakeup_enabled_data = {
+ static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
+ #endif
+
+-void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
+- unsigned long timeroffset, cycle_t stop,
++void notrace latency_hist(int latency_type, int cpu, long latency,
++ long timeroffset, cycle_t stop,
+ struct task_struct *p)
+ {
+ struct hist_data *my_hist;
+@@ -224,7 +226,7 @@ void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
+ my_hist->hist_array[latency]++;
+
+ if (unlikely(latency > my_hist->max_lat ||
+- my_hist->min_lat == ULONG_MAX)) {
++ my_hist->min_lat == LONG_MAX)) {
+ #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+@@ -263,15 +265,14 @@ static void *l_start(struct seq_file *m, loff_t *pos)
+ atomic_dec(&my_hist->hist_mode);
+
+ if (likely(my_hist->total_samples)) {
+- unsigned long avg = (unsigned long)
+- div64_u64(my_hist->accumulate_lat,
++ long avg = (long) div64_s64(my_hist->accumulate_lat,
+ my_hist->total_samples);
+ snprintf(minstr, sizeof(minstr), "%ld",
+- (long) my_hist->min_lat - my_hist->offset);
++ my_hist->min_lat - my_hist->offset);
+ snprintf(avgstr, sizeof(avgstr), "%ld",
+- (long) avg - my_hist->offset);
++ avg - my_hist->offset);
+ snprintf(maxstr, sizeof(maxstr), "%ld",
+- (long) my_hist->max_lat - my_hist->offset);
++ my_hist->max_lat - my_hist->offset);
+ } else {
+ strcpy(minstr, "<undef>");
+ strcpy(avgstr, minstr);
+@@ -376,10 +377,10 @@ static void hist_reset(struct hist_data *hist)
+ memset(hist->hist_array, 0, sizeof(hist->hist_array));
+ hist->below_hist_bound_samples = 0ULL;
+ hist->above_hist_bound_samples = 0ULL;
+- hist->min_lat = ULONG_MAX;
+- hist->max_lat = 0UL;
++ hist->min_lat = LONG_MAX;
++ hist->max_lat = LONG_MIN;
+ hist->total_samples = 0ULL;
+- hist->accumulate_lat = 0ULL;
++ hist->accumulate_lat = 0LL;
+
+ atomic_inc(&hist->hist_mode);
+ }
+@@ -790,9 +791,9 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
+
+ stop = ftrace_now(cpu);
+ time_set++;
+- if (start && stop >= start) {
+- unsigned long latency =
+- nsecs_to_usecs(stop - start);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
+
+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
+ stop, NULL);
+@@ -808,9 +809,9 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
+
+ if (!(time_set++))
+ stop = ftrace_now(cpu);
+- if (start && stop >= start) {
+- unsigned long latency =
+- nsecs_to_usecs(stop - start);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
+
+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
+ 0, stop, NULL);
+@@ -827,9 +828,10 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
+
+ if (!time_set)
+ stop = ftrace_now(cpu);
+- if (start && stop >= start) {
+- unsigned long latency =
+- nsecs_to_usecs(stop - start);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
+ latency, 0, stop, NULL);
+ }
+@@ -908,7 +910,7 @@ static notrace void probe_wakeup_latency_hist_stop(void *v,
+ {
+ unsigned long flags;
+ int cpu = task_cpu(next);
+- unsigned long latency;
++ long latency;
+ cycle_t stop;
+ struct task_struct *cpu_wakeup_task;
+
+@@ -939,7 +941,8 @@ static notrace void probe_wakeup_latency_hist_stop(void *v,
+ */
+ stop = ftrace_now(raw_smp_processor_id());
+
+- latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);
++ latency = ((long) (stop - next->preempt_timestamp_hist)) /
++ NSECS_PER_USECS;
+
+ if (per_cpu(wakeup_sharedprio, cpu)) {
+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
+@@ -975,7 +978,7 @@ static notrace void probe_hrtimer_interrupt(void *v, int cpu,
+ (task->prio < curr->prio ||
+ (task->prio == curr->prio &&
+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
+- unsigned long latency;
++ long latency;
+ cycle_t now;
+
+ if (missed_timer_offsets_pid) {
+@@ -985,7 +988,7 @@ static notrace void probe_hrtimer_interrupt(void *v, int cpu,
+ }
+
+ now = ftrace_now(cpu);
+- latency = (unsigned long) div_s64(-latency_ns, 1000);
++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
+ task);
+ #ifdef CONFIG_WAKEUP_LATENCY_HIST
+@@ -1026,7 +1029,7 @@ static __init int latency_hist_init(void)
+ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(irqsoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
+@@ -1041,7 +1044,7 @@ static __init int latency_hist_init(void)
+ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(preemptoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
+@@ -1056,7 +1059,7 @@ static __init int latency_hist_init(void)
+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
+ my_hist = &per_cpu(preemptirqsoff_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+ }
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
+@@ -1081,14 +1084,14 @@ static __init int latency_hist_init(void)
+ &latency_hist_fops);
+ my_hist = &per_cpu(wakeup_latency_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
+ &per_cpu(wakeup_latency_hist_sharedprio, i),
+ &latency_hist_fops);
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+
+@@ -1122,7 +1125,7 @@ static __init int latency_hist_init(void)
+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
+ my_hist = &per_cpu(missed_timer_offsets, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
+@@ -1150,7 +1153,7 @@ static __init int latency_hist_init(void)
+ &latency_hist_fops);
+ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
+ atomic_set(&my_hist->hist_mode, 1);
+- my_hist->min_lat = 0xFFFFFFFFUL;
++ my_hist->min_lat = LONG_MAX;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+ mp = &per_cpu(timerandwakeup_maxlatproc, i);
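A standalone user-space illustration of the signedness issue described above
(nothing here is kernel code; the numbers are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long start = 2000000;  /* ns */
            unsigned long long stop  = 1500000;  /* trace clock ran backwards */

            unsigned long long bogus = (stop - start) / 1000;      /* old unsigned math */
            long long delta = (long long)stop - (long long)start;  /* new signed math   */
            long long fixed = delta / 1000;                        /* NSECS_PER_USECS   */

            /* prints a huge bogus value vs. -500 us, which the histogram code
             * can now file as an underflow sample */
            printf("unsigned: %llu us, signed: %lld us\n", bogus, fixed);
            return 0;
    }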
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch)
@@ -0,0 +1,70 @@
+From 614da4fefa29186028b9b9fc030fb2979534a731 Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde at osadl.org>
+Date: Wed, 11 Jul 2012 22:05:18 +0000
+Subject: [PATCH 267/303] Latency histograms: Adjust timer, if already elapsed
+ when programmed
+
+Nothing prevents a programmer from calling clock_nanosleep() with an
+already elapsed wakeup time in absolute time mode, or with too small a
+delay in relative time mode. Such timers cannot wake up in time and,
+thus, should be corrected when entered into the missed timer offsets
+latency histogram (CONFIG_MISSED_TIMER_OFFSETS_HIST).
+
+This patch marks such timers and uses a corrected expiration time.
+
+Signed-off-by: Carsten Emde <C.Emde at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/hrtimer.h | 3 +++
+ kernel/hrtimer.c | 16 ++++++++++++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 26b008b..7259cd3 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -113,6 +113,9 @@ struct hrtimer {
+ unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ ktime_t praecox;
++#endif
+ #ifdef CONFIG_TIMER_STATS
+ int start_pid;
+ void *start_site;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index fdfe1bb..31923d5 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1009,6 +1009,17 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ #endif
+ }
+
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ {
++ ktime_t now = new_base->get_time();
++
++ if (ktime_to_ns(tim) < ktime_to_ns(now))
++ timer->praecox = now;
++ else
++ timer->praecox = ktime_set(0, 0);
++ }
++#endif
++
+ hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
+ timer_stats_hrtimer_set_start_info(timer);
+@@ -1465,8 +1476,9 @@ retry:
+ timer = container_of(node, struct hrtimer, node);
+
+ trace_hrtimer_interrupt(raw_smp_processor_id(),
+- ktime_to_ns(ktime_sub(
+- hrtimer_get_expires(timer), basenow)),
++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
++ timer->praecox : hrtimer_get_expires(timer),
++ basenow)),
+ current,
+ timer->function == hrtimer_wakeup ?
+ container_of(timer, struct hrtimer_sleeper,
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch)
@@ -0,0 +1,29 @@
+From 13808375eb8603c130a8476cf044d079cedc3607 Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde at osadl.org>
+Date: Wed, 11 Jul 2012 22:05:18 +0000
+Subject: [PATCH 268/303] Disable RT_GROUP_SCHED in PREEMPT_RT_FULL
+
+Strange CPU stalls have been observed in RT when RT_GROUP_SCHED
+was configured.
+
+Disable it for now.
+
+Signed-off-by: Carsten Emde <C.Emde at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index dbc82d0..720c182 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -731,6 +731,7 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch)
@@ -0,0 +1,35 @@
+From feca7115a72ba5a999689968b4fe16e358e07484 Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde at osadl.org>
+Date: Wed, 11 Jul 2012 22:05:19 +0000
+Subject: [PATCH 269/303] Latency histograms: Detect another yet overlooked
+ sharedprio condition
+
+While waiting for an RT process to be woken up, the previous process may
+go to wait and switch to another one with the same priority which then
+becomes current. This condition was not correctly recognized and led to
+erroneously high latency recordings during periods of low CPU load.
+
+This patch correctly marks such latencies as sharedprio and prevents
+them from being recorded as actual system latency.
+
+Signed-off-by: Carsten Emde <C.Emde at osadl.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/trace/latency_hist.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
+index d514eef..6a4c869 100644
+--- a/kernel/trace/latency_hist.c
++++ b/kernel/trace/latency_hist.c
+@@ -935,6 +935,9 @@ static notrace void probe_wakeup_latency_hist_stop(void *v,
+ goto out;
+ }
+
++ if (current->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
+ /*
+ * The task we are waiting for is about to be switched to.
+ * Calculate latency and store it in histogram.
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch)
@@ -0,0 +1,74 @@
+From 512c15755029c14c24b243447097c2125029e560 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 11 Jul 2012 22:05:19 +0000
+Subject: [PATCH 270/303] slab: Prevent local lock deadlock
+
+On RT we avoid the cross-CPU function calls and take the per-CPU local
+locks instead. The code missed that taking the local lock on the CPU
+which runs the code must use the proper local-lock functions and not a
+plain spin_lock(). Otherwise it deadlocks later when trying to acquire
+the local lock with the proper function.
+
+Reported-and-tested-by: Chris Pringle <chris.pringle at miranda.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ mm/slab.c | 26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 59b7e8c..7b2d343 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -739,8 +739,26 @@ slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+ {
+ unsigned int i;
+
++ get_cpu_light();
+ for_each_online_cpu(i)
+ func(arg, i);
++ put_cpu_light();
++}
++
++static void lock_slab_on(unsigned int cpu)
++{
++ if (cpu == smp_processor_id())
++ local_lock_irq(slab_lock);
++ else
++ local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++}
++
++static void unlock_slab_on(unsigned int cpu)
++{
++ if (cpu == smp_processor_id())
++ local_unlock_irq(slab_lock);
++ else
++ local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
+ }
+ #endif
+
+@@ -2630,10 +2648,10 @@ static void do_drain(void *arg, int cpu)
+ {
+ LIST_HEAD(tmp);
+
+- spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++ lock_slab_on(cpu);
+ __do_drain(arg, cpu);
+ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
+- spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
++ unlock_slab_on(cpu);
+ free_delayed(&tmp);
+ }
+ #endif
+@@ -4101,9 +4119,9 @@ static void do_ccupdate_local(void *info)
+ #else
+ static void do_ccupdate_local(void *info, int cpu)
+ {
+- spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++ lock_slab_on(cpu);
+ __do_ccupdate_local(info, cpu);
+- spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
++ unlock_slab_on(cpu);
+ }
+ #endif
+
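Illustration (not part of the patch above): a user-space analogy of why
mixing a raw lock acquisition with a wrapper that tracks ownership
deadlocks later, as the changelog describes for slab_lock on RT. The
wrapper, names and behaviour here are hypothetical; the program
deliberately deadlocks on a default (non-recursive) glibc mutex. Compile
with -pthread.

#include <pthread.h>
#include <stdio.h>

/* A wrapper lock that records its owner so acquisitions through the
 * wrapper may nest -- loosely analogous to the local_lock machinery. */
struct wrapped_lock {
	pthread_mutex_t mtx;
	pthread_t owner;
	int nest;
};

static struct wrapped_lock wl = { .mtx = PTHREAD_MUTEX_INITIALIZER };

static void wrapped_lock_acquire(struct wrapped_lock *l)
{
	if (l->nest && pthread_equal(l->owner, pthread_self())) {
		l->nest++;		/* nested acquisition is allowed */
		return;
	}
	pthread_mutex_lock(&l->mtx);
	l->owner = pthread_self();
	l->nest = 1;
}

static void wrapped_lock_release(struct wrapped_lock *l)
{
	if (--l->nest == 0)
		pthread_mutex_unlock(&l->mtx);
}

int main(void)
{
	/* Bypass the wrapper, like the pre-patch spin_lock_irq() on the
	 * CPU that runs the code ... */
	pthread_mutex_lock(&wl.mtx);

	/* ... so the wrapper cannot see the nesting and blocks forever
	 * once the lock is taken through the proper interface. */
	printf("about to deadlock\n");
	fflush(stdout);
	wrapped_lock_acquire(&wl);	/* never returns */

	wrapped_lock_release(&wl);
	pthread_mutex_unlock(&wl.mtx);
	return 0;
}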
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch)
@@ -0,0 +1,32 @@
+From 6135ac256c5a9b31235434826bc90589ad06160b Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <mgalbraith at suse.de>
+Date: Wed, 11 Jul 2012 22:05:20 +0000
+Subject: [PATCH 271/303] fs, jbd: pull your plug when waiting for space
+
+With an -rt kernel, and a heavy sync IO load, tasks can jam
+up on journal locks without unplugging, which can lead to
+terminal IO starvation. Unplug and schedule when waiting for space.
+
+Signed-off-by: Mike Galbraith <mgalbraith at suse.de>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: Theodore Tso <tytso at mit.edu>
+Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ fs/jbd/checkpoint.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
+index 5c93ffc..ddbd223 100644
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *journal)
+ if (journal->j_flags & JFS_ABORT)
+ return;
+ spin_unlock(&journal->j_state_lock);
++ if (current->plug)
++ io_schedule();
+ mutex_lock(&journal->j_checkpoint_mutex);
+
+ /*
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch)
@@ -0,0 +1,71 @@
+From eacc7dd685945e587f068d02057eea6e4656d390 Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang at windriver.com>
+Date: Wed, 11 Jul 2012 22:05:21 +0000
+Subject: [PATCH 272/303] perf: Make swevent hrtimer run in irq instead of
+ softirq
+
+Otherwise we get a deadlock like below:
+
+[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003
+[ 1044.042752] INFO: lockdep is turned off.
+[ 1044.042754] Modules linked in:
+[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29
+[ 1044.042759] Call Trace:
+[ 1044.042761] <IRQ> [<ffffffff8107d8e5>] __schedule_bug+0x65/0x80
+[ 1044.042770] [<ffffffff8168978c>] __schedule+0x83c/0xa70
+[ 1044.042775] [<ffffffff8106bdd2>] ? prepare_to_wait+0x32/0xb0
+[ 1044.042779] [<ffffffff81689a5e>] schedule+0x2e/0xa0
+[ 1044.042782] [<ffffffff81071ebd>] hrtimer_wait_for_timer+0x6d/0xb0
+[ 1044.042786] [<ffffffff8106bb30>] ? wake_up_bit+0x40/0x40
+[ 1044.042790] [<ffffffff81071f20>] hrtimer_cancel+0x20/0x40
+[ 1044.042794] [<ffffffff8111da0c>] perf_swevent_cancel_hrtimer+0x3c/0x50
+[ 1044.042798] [<ffffffff8111da31>] task_clock_event_stop+0x11/0x40
+[ 1044.042802] [<ffffffff8111da6e>] task_clock_event_del+0xe/0x10
+[ 1044.042805] [<ffffffff8111c568>] event_sched_out+0x118/0x1d0
+[ 1044.042809] [<ffffffff8111c649>] group_sched_out+0x29/0x90
+[ 1044.042813] [<ffffffff8111ed7e>] __perf_event_disable+0x18e/0x200
+[ 1044.042817] [<ffffffff8111c343>] remote_function+0x63/0x70
+[ 1044.042821] [<ffffffff810b0aae>] generic_smp_call_function_single_interrupt+0xce/0x120
+[ 1044.042826] [<ffffffff81022bc7>] smp_call_function_single_interrupt+0x27/0x40
+[ 1044.042831] [<ffffffff8168d50c>] call_function_single_interrupt+0x6c/0x80
+[ 1044.042833] <EOI> [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
+[ 1044.042840] [<ffffffff8168b970>] ? _raw_spin_unlock_irq+0x30/0x70
+[ 1044.042844] [<ffffffff8168b976>] ? _raw_spin_unlock_irq+0x36/0x70
+[ 1044.042848] [<ffffffff810702e2>] run_hrtimer_softirq+0xc2/0x200
+[ 1044.042853] [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
+[ 1044.042857] [<ffffffff81045265>] __do_softirq_common+0xf5/0x3a0
+[ 1044.042862] [<ffffffff81045c3d>] __thread_do_softirq+0x15d/0x200
+[ 1044.042865] [<ffffffff81045dda>] run_ksoftirqd+0xfa/0x210
+[ 1044.042869] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
+[ 1044.042873] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
+[ 1044.042877] [<ffffffff8106b596>] kthread+0xb6/0xc0
+[ 1044.042881] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
+[ 1044.042886] [<ffffffff8168d994>] kernel_thread_helper+0x4/0x10
+[ 1044.042889] [<ffffffff8107d98c>] ? finish_task_switch+0x8c/0x110
+[ 1044.042894] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
+[ 1044.042897] [<ffffffff8168bd5d>] ? retint_restore_args+0xe/0xe
+[ 1044.042900] [<ffffffff8106b4e0>] ? kthreadd+0x1e0/0x1e0
+[ 1044.042902] [<ffffffff8168d990>] ? gs_change+0xb/0xb
+
+Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/events/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7d1f05e..7975149 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5428,6 +5428,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
++ hwc->hrtimer.irqsafe = 1;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch)
@@ -0,0 +1,542 @@
+From 01fe0dbdd021cb93dc1de2aa907a52184d7f285d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Mon, 16 Jul 2012 08:07:43 +0000
+Subject: [PATCH 273/303] cpu/rt: Rework cpu down for PREEMPT_RT
+
+Bringing a CPU down is a pain with the PREEMPT_RT kernel because
+tasks can be preempted in many more places than in non-RT. In
+order to handle per_cpu variables, tasks may be pinned to a CPU
+for a while, and even sleep. But these tasks need to be off the CPU
+if that CPU is going down.
+
+Several synchronization methods have been tried, but when stressed
+they failed. This is a new approach.
+
+A sync_tsk thread is still created and tasks may still block on a
+lock when the CPU is going down, but how that works is a bit different.
+When cpu_down() starts, it will create the sync_tsk and wait for it
+to report that the tasks currently pinned to the CPU are no longer
+pinned. But new tasks that are about to be pinned will still be allowed
+to do so at this time.
+
+Then the notifiers are called. Several notifiers will bring down tasks
+that will enter these locations. Some of these tasks will take locks
+of other tasks that are on the CPU. If we don't let those other tasks
+continue, but make them block until CPU down is done, the tasks that
+the notifiers are waiting on will never complete as they are waiting
+for the locks held by the tasks that are blocked.
+
+Thus we still let the task pin the CPU until the notifiers are done.
+After the notifiers run, we then make new tasks entering the pinned
+CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
+in the hotplug_pcp descriptor.
+
+To help things along, a new function in the scheduler code is created
+called migrate_me(). This function will try to migrate the current task
+off the CPU that is going down if possible. When the sync_tsk is created,
+all tasks will then try to migrate off the CPU going down. There are
+several cases where this won't work, but it helps in most cases.
+
+After the notifiers are called, if a task can't migrate off but enters
+the pinned CPU sections, it will be forced to wait on the hotplug_pcp mutex
+until the CPU down is complete. Then the scheduler will force the migration
+anyway.
+
+Also, I found that THREAD_BOUND tasks also need to be accounted for in the
+pinned CPU count, and migrate_disable() no longer treats them specially.
+This helps fix issues with ksoftirqd and workqueues that unbind on CPU down.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched.h | 7 ++
+ kernel/cpu.c | 236 +++++++++++++++++++++++++++++++++++++++++--------
+ kernel/sched.c | 82 ++++++++++++++++-
+ 3 files changed, 285 insertions(+), 40 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5be568f..fa24643 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1937,6 +1937,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
+
+ extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
++int migrate_me(void);
++void tell_sched_cpu_down_begin(int cpu);
++void tell_sched_cpu_down_done(int cpu);
++
+ #else
+ static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+@@ -1949,6 +1953,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ return -EINVAL;
+ return 0;
+ }
++static inline int migrate_me(void) { return 0; }
++static inline void tell_sched_cpu_down_begin(int cpu) { }
++static inline void tell_sched_cpu_down_done(int cpu) { }
+ #endif
+
+ #ifdef CONFIG_NO_HZ
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 66dfb74..0964e93 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -46,12 +46,7 @@ static int cpu_hotplug_disabled;
+
+ static struct {
+ struct task_struct *active_writer;
+-#ifdef CONFIG_PREEMPT_RT_FULL
+- /* Makes the lock keep the task's state */
+- spinlock_t lock;
+-#else
+ struct mutex lock; /* Synchronizes accesses to refcount, */
+-#endif
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+@@ -67,20 +62,42 @@ static struct {
+ .refcount = 0,
+ };
+
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
+-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+-#else
+-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+-#endif
+-
++/**
++ * hotplug_pcp - per cpu hotplug descriptor
++ * @unplug: set when pin_current_cpu() needs to sync tasks
++ * @sync_tsk: the task that waits for tasks to finish pinned sections
++ * @refcount: counter of tasks in pinned sections
++ * @grab_lock: set when the tasks entering pinned sections should wait
++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
++ * @mutex_init: zero if the mutex hasn't been initialized yet.
++ *
++ * Although @unplug and @sync_tsk may point to the same task, the @unplug
++ * is used as a flag and still exists after @sync_tsk has exited and
++ * @sync_tsk set to NULL.
++ */
+ struct hotplug_pcp {
+ struct task_struct *unplug;
++ struct task_struct *sync_tsk;
+ int refcount;
++ int grab_lock;
+ struct completion synced;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spinlock_t lock;
++#else
++ struct mutex mutex;
++#endif
++ int mutex_init;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++#else
++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
++#endif
++
+ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+ /**
+@@ -94,18 +111,40 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+ void pin_current_cpu(void)
+ {
+ struct hotplug_pcp *hp;
++ int force = 0;
+
+ retry:
+ hp = &__get_cpu_var(hotplug_pcp);
+
+- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
+ hp->unplug == current || (current->flags & PF_STOMPER)) {
+ hp->refcount++;
+ return;
+ }
+- preempt_enable();
+- hotplug_lock();
+- hotplug_unlock();
++
++ if (hp->grab_lock) {
++ preempt_enable();
++ hotplug_lock(hp);
++ hotplug_unlock(hp);
++ } else {
++ preempt_enable();
++ /*
++ * Try to push this task off of this CPU.
++ */
++ if (!migrate_me()) {
++ preempt_disable();
++ hp = &__get_cpu_var(hotplug_pcp);
++ if (!hp->grab_lock) {
++ /*
++ * Just let it continue it's already pinned
++ * or about to sleep.
++ */
++ force = 1;
++ goto retry;
++ }
++ preempt_enable();
++ }
++ }
+ preempt_disable();
+ goto retry;
+ }
+@@ -127,26 +166,84 @@ void unpin_current_cpu(void)
+ wake_up_process(hp->unplug);
+ }
+
+-/*
+- * FIXME: Is this really correct under all circumstances ?
+- */
++static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++}
++
+ static int sync_unplug_thread(void *data)
+ {
+ struct hotplug_pcp *hp = data;
+
+ preempt_disable();
+ hp->unplug = current;
++ wait_for_pinned_cpus(hp);
++
++ /*
++ * This thread will synchronize the cpu_down() with threads
++ * that have pinned the CPU. When the pinned CPU count reaches
++ * zero, we inform the cpu_down code to continue to the next step.
++ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- while (hp->refcount) {
+- schedule_preempt_disabled();
++ preempt_enable();
++ complete(&hp->synced);
++
++ /*
++ * If all succeeds, the next step will need tasks to wait till
++ * the CPU is offline before continuing. To do this, the grab_lock
++ * is set and tasks going into pin_current_cpu() will block on the
++ * mutex. But we still need to wait for those that are already in
++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
++ * will kick this thread out.
++ */
++ while (!hp->grab_lock && !kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++
++ /* Make sure grab_lock is seen before we see a stale completion */
++ smp_mb();
++
++ /*
++ * Now just before cpu_down() enters stop machine, we need to make
++ * sure all tasks that are in pinned CPU sections are out, and new
++ * tasks will now grab the lock, keeping them from entering pinned
++ * CPU sections.
++ */
++ if (!kthread_should_stop()) {
++ preempt_disable();
++ wait_for_pinned_cpus(hp);
++ preempt_enable();
++ complete(&hp->synced);
++ }
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+- preempt_enable();
+- complete(&hp->synced);
++
++ /*
++ * Force this thread off this CPU as it's going down and
++ * we don't want any more work on this CPU.
++ */
++ current->flags &= ~PF_THREAD_BOUND;
++ do_set_cpus_allowed(current, cpu_present_mask);
++ migrate_me();
+ return 0;
+ }
+
++static void __cpu_unplug_sync(struct hotplug_pcp *hp)
++{
++ wake_up_process(hp->sync_tsk);
++ wait_for_completion(&hp->synced);
++}
++
+ /*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+@@ -154,23 +251,83 @@ static int sync_unplug_thread(void *data)
+ static int cpu_unplug_begin(unsigned int cpu)
+ {
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+- struct task_struct *tsk;
++ int err;
++
++ /* Protected by cpu_hotplug.lock */
++ if (!hp->mutex_init) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spin_lock_init(&hp->lock);
++#else
++ mutex_init(&hp->mutex);
++#endif
++ hp->mutex_init = 1;
++ }
++
++ /* Inform the scheduler to migrate tasks off this CPU */
++ tell_sched_cpu_down_begin(cpu);
+
+ init_completion(&hp->synced);
+- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+- if (IS_ERR(tsk))
+- return (PTR_ERR(tsk));
+- kthread_bind(tsk, cpu);
+- wake_up_process(tsk);
+- wait_for_completion(&hp->synced);
++
++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++ if (IS_ERR(hp->sync_tsk)) {
++ err = PTR_ERR(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++ return err;
++ }
++ kthread_bind(hp->sync_tsk, cpu);
++
++ /*
++ * Wait for tasks to get out of the pinned sections,
++ * it's still OK if new tasks enter. Some CPU notifiers will
++ * wait for tasks that are going to enter these sections and
++ * we must not have them block.
++ */
++ __cpu_unplug_sync(hp);
++
+ return 0;
+ }
+
++static void cpu_unplug_sync(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ init_completion(&hp->synced);
++	/* The completion needs to be initialized before setting grab_lock */
++ smp_wmb();
++
++ /* Grab the mutex before setting grab_lock */
++ hotplug_lock(hp);
++ hp->grab_lock = 1;
++
++ /*
++ * The CPU notifiers have been completed.
++ * Wait for tasks to get out of pinned CPU sections and have new
++ * tasks block until the CPU is completely down.
++ */
++ __cpu_unplug_sync(hp);
++
++ /* All done with the sync thread */
++ kthread_stop(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++}
++
+ static void cpu_unplug_done(unsigned int cpu)
+ {
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ hp->unplug = NULL;
++ /* Let all tasks know cpu unplug is finished before cleaning up */
++ smp_wmb();
++
++ if (hp->sync_tsk)
++ kthread_stop(hp->sync_tsk);
++
++ if (hp->grab_lock) {
++ hotplug_unlock(hp);
++ /* protected by cpu_hotplug.lock */
++ hp->grab_lock = 0;
++ }
++ tell_sched_cpu_down_done(cpu);
+ }
+
+ void get_online_cpus(void)
+@@ -178,9 +335,9 @@ void get_online_cpus(void)
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+ return;
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ cpu_hotplug.refcount++;
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+
+ }
+ EXPORT_SYMBOL_GPL(get_online_cpus);
+@@ -189,10 +346,10 @@ void put_online_cpus(void)
+ {
+ if (cpu_hotplug.active_writer == current)
+ return;
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+ wake_up_process(cpu_hotplug.active_writer);
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+
+ }
+ EXPORT_SYMBOL_GPL(put_online_cpus);
+@@ -224,11 +381,11 @@ static void cpu_hotplug_begin(void)
+ cpu_hotplug.active_writer = current;
+
+ for (;;) {
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ if (likely(!cpu_hotplug.refcount))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+ schedule();
+ }
+ }
+@@ -236,7 +393,7 @@ static void cpu_hotplug_begin(void)
+ static void cpu_hotplug_done(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+ }
+
+ #else /* #if CONFIG_HOTPLUG_CPU */
+@@ -371,6 +528,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ goto out_release;
+ }
+
++ /* Notifiers are done. Don't let any more tasks pin this CPU. */
++ cpu_unplug_sync(cpu);
++
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+ if (err) {
+ /* CPU didn't die: tell everyone. Can't complain. */
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 1dfdae9..af7b82b 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4597,7 +4597,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -4628,7 +4628,7 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
+- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
+@@ -6554,6 +6554,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ }
+
++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
++static DEFINE_MUTEX(sched_down_mutex);
++static cpumask_t sched_down_cpumask;
++
++void tell_sched_cpu_down_begin(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_set_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++void tell_sched_cpu_down_done(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_clear_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++/**
++ * migrate_me - try to move the current task off this cpu
++ *
++ * Used by the pin_current_cpu() code to try to get tasks
++ * to move off the current CPU as it is going down.
++ * It will only move the task if the task isn't pinned to
++ * the CPU (with migrate_disable, affinity or THREAD_BOUND)
++ * and the task has to be in a RUNNING state. Otherwise the
++ * movement of the task will wake it up (change its state
++ * to running) when the task did not expect it.
++ *
++ * Returns 1 if it succeeded in moving the current task
++ * 0 otherwise.
++ */
++int migrate_me(void)
++{
++ struct task_struct *p = current;
++ struct migration_arg arg;
++ struct cpumask *cpumask;
++ struct cpumask *mask;
++ unsigned long flags;
++ unsigned int dest_cpu;
++ struct rq *rq;
++
++ /*
++ * We can not migrate tasks bounded to a CPU or tasks not
++ * running. The movement of the task will wake it up.
++ */
++ if (p->flags & PF_THREAD_BOUND || p->state)
++ return 0;
++
++ mutex_lock(&sched_down_mutex);
++ rq = task_rq_lock(p, &flags);
++
++ cpumask = &__get_cpu_var(sched_cpumasks);
++ mask = &p->cpus_allowed;
++
++ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
++
++ if (!cpumask_weight(cpumask)) {
++ /* It's only on this CPU? */
++ task_rq_unlock(rq, p, &flags);
++ mutex_unlock(&sched_down_mutex);
++ return 0;
++ }
++
++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
++
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ task_rq_unlock(rq, p, &flags);
++
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ mutex_unlock(&sched_down_mutex);
++
++ return 1;
++}
++
+ /*
+ * This is how migration works:
+ *
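Illustration (not part of the patch above): a user-space sketch of the
two-phase synchronization the changelog describes, expressed with a plain
mutex/condvar instead of the kernel's hotplug_pcp machinery, per-CPU data,
completions and sync kthread. The names (pin, unpin, unplug, refcount,
grab_lock) mirror the changelog but the code is illustrative only. Compile
with -pthread.

#include <pthread.h>
#include <stdbool.h>

struct unplug_sync {
	pthread_mutex_t mtx;
	pthread_cond_t  drained;
	int  refcount;		/* tasks currently inside a pinned section */
	bool grab_lock;		/* once set, new entrants must wait */
};

static struct unplug_sync us = {
	.mtx = PTHREAD_MUTEX_INITIALIZER,
	.drained = PTHREAD_COND_INITIALIZER,
};

static void pin(struct unplug_sync *s)		/* ~ pin_current_cpu() */
{
	pthread_mutex_lock(&s->mtx);
	while (s->grab_lock)			/* phase 2: block new entrants */
		pthread_cond_wait(&s->drained, &s->mtx);
	s->refcount++;
	pthread_mutex_unlock(&s->mtx);
}

static void unpin(struct unplug_sync *s)	/* ~ unpin_current_cpu() */
{
	pthread_mutex_lock(&s->mtx);
	if (--s->refcount == 0)
		pthread_cond_broadcast(&s->drained);
	pthread_mutex_unlock(&s->mtx);
}

static void unplug(struct unplug_sync *s)	/* ~ the cpu_down() side */
{
	/* Phase 1: wait for the sections that are already pinned; new pins
	 * are still allowed, so work done by the notifiers can progress. */
	pthread_mutex_lock(&s->mtx);
	while (s->refcount)
		pthread_cond_wait(&s->drained, &s->mtx);
	pthread_mutex_unlock(&s->mtx);

	/* ... the CPU notifiers would run at this point ... */

	/* Phase 2: stop new entrants, then drain the stragglers. */
	pthread_mutex_lock(&s->mtx);
	s->grab_lock = true;
	while (s->refcount)
		pthread_cond_wait(&s->drained, &s->mtx);
	pthread_mutex_unlock(&s->mtx);

	/* The real code clears grab_lock again in cpu_unplug_done(). */
}

int main(void)
{
	pin(&us);	/* a task enters a pinned section ... */
	unpin(&us);	/* ... and leaves it */
	unplug(&us);	/* the unplug side then has nothing left to wait for */
	return 0;
}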
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch)
@@ -0,0 +1,32 @@
+From 4864b176741223e3c994ac3ead66a190ad516d1a Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Tue, 17 Jul 2012 10:45:59 -0400
+Subject: [PATCH 274/303] cpu/rt: Fix cpu_hotplug variable initialization
+
+The commit "cpu/rt: Rework cpu down for PREEMPT_RT" changed the double
+meaning of the cpu_hotplug.lock, where it was a spinlock for RT and a
+mutex for non-RT, to just a mutex for both. But the initialization of
+the variable was not updated to reflect this change.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/cpu.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0964e93..8f87b72 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -54,11 +54,7 @@ static struct {
+ int refcount;
+ } cpu_hotplug = {
+ .active_writer = NULL,
+-#ifdef CONFIG_PREEMPT_RT_FULL
+- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+-#else
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+-#endif
+ .refcount = 0,
+ };
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch)
@@ -0,0 +1,66 @@
+From 8f0b42f9586ffa85d30b635fa04178926dd8e932 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 2 Aug 2012 18:56:52 -0400
+Subject: [PATCH 275/303] time/rt: Fix up leap-second backport for RT changes
+
+The leap-second backport broke RT, and a few changes had to be done.
+
+1) second_overflow() now encompasses ntp_leap_second, and since
+second_overflow() is called with the xtime_lock held, we cannot take that
+lock either.
+
+2) Change ktime_get_update_offsets() to use read_seqcount_begin() instead
+of read_seqbegin() (and retry with read_seqcount_retry()).
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/time/ntp.c | 6 ------
+ kernel/time/timekeeping.c | 4 ++--
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 09079b7..4b63943 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -362,9 +362,6 @@ int second_overflow(unsigned long secs)
+ int leap = 0;
+ s64 delta;
+
+- raw_spin_lock(&xtime_lock);
+- write_seqcount_begin(&xtime_seq);
+-
+ /*
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+@@ -405,9 +402,6 @@ int second_overflow(unsigned long secs)
+ break;
+ }
+
+- write_seqcount_end(&xtime_seq);
+- raw_spin_unlock(&xtime_lock);
+-
+ /* Bump the maxerror field */
+ time_maxerror += MAXFREQ / NSEC_PER_USEC;
+ if (time_maxerror > NTP_PHASE_LIMIT) {
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 9ca2a82..8cf55f2 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1295,7 +1295,7 @@ ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
+ u64 secs, nsecs;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ secs = xtime.tv_sec;
+ nsecs = xtime.tv_nsec;
+@@ -1305,7 +1305,7 @@ ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
+
+ *real = offs_real;
+ *boot = offs_boot;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+ now = ktime_sub(now, *real);
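
Illustration (not part of the patch above): the reader-retry pattern that
read_seqcount_begin()/read_seqcount_retry() implement, sketched in user
space with C11 atomics. This is an analogy, not the kernel code; the
kernel's seqcount uses more careful memory ordering and barriers than this
simplified sketch. Compile with -std=c11.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;			/* even: stable, odd: write in progress */
static int64_t tv_sec, tv_nsec;		/* data guarded by the sequence count */

static void writer_update(int64_t s, int64_t ns)
{
	atomic_fetch_add(&seq, 1);	/* make the count odd */
	tv_sec = s;
	tv_nsec = ns;
	atomic_fetch_add(&seq, 1);	/* make it even again */
}

static void reader_snapshot(int64_t *s, int64_t *ns)
{
	unsigned int start;

	for (;;) {
		start = atomic_load(&seq);
		if (start & 1)
			continue;	/* writer in progress, spin */
		*s = tv_sec;
		*ns = tv_nsec;
		if (atomic_load(&seq) == start)
			return;		/* no writer interfered: snapshot is consistent */
	}
}

int main(void)
{
	int64_t s, ns;

	writer_update(1, 500);
	reader_snapshot(&s, &ns);
	printf("%lld.%09lld\n", (long long)s, (long long)ns);
	return 0;
}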
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0276-fix-printk-flush-of-messages.patch)
@@ -0,0 +1,55 @@
+From 4995bc8d9b4cd6e4cffbd5a09c46be6afd5cf27e Mon Sep 17 00:00:00 2001
+From: Frank Rowand <frank.rowand at am.sony.com>
+Date: Wed, 16 May 2012 18:09:36 -0700
+Subject: [PATCH 276/303] fix printk flush of messages
+
+Updates console-make-rt-friendly.patch
+
+With CONFIG_PREEMPT_RT_FULL enabled, printk() output is never flushed by
+printk() itself because:
+
+ # some liberties taken in this pseudo-code to make it easier to follow
+ printk()
+ vprintk()
+ raw_spin_lock(&logbuf_lock)
+ # increment preempt_count():
+ preempt_disable()
+ result = console_trylock_for_printk()
+ retval = 0
+ # lock will always be false, because preempt_count() will be >= 1
+ lock = ... && !preempt_count()
+ if (lock)
+ retval = 1
+ return retval
+ # result will always be false since lock will always be false
+ if (result)
+ console_unlock()
+ # this is where the printk() output would be flushed
+
+On system boot some printk() output is flushed because register_console()
+and tty_open() call console_unlock().
+
+This change also fixes the problem that was previously fixed by
+preempt-rt-allow-immediate-magic-sysrq-output-for-preempt_rt_full.patch
+
+Signed-off-by: Frank Rowand <frank.rowand at am.sony.com>
+Cc: Frank <Frank_Rowand at sonyusa.com>
+Link: http://lkml.kernel.org/r/4FB44FD0.4090800@am.sony.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/printk.c b/kernel/printk.c
+index b5e609e..4d2cb7d 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -847,7 +847,7 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ int retval = 0, wake = 0;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+- !preempt_count()) || sysrq_in_progress;
++ (preempt_count() <= 1)) || sysrq_in_progress;
+ #else
+ int lock = 1;
+ #endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0277-fix-printk-flush-of-messages.patch)
@@ -0,0 +1,145 @@
+From 672ad215c2078c51134cf5312fe3ec00b2f2df7e Mon Sep 17 00:00:00 2001
+From: Frank Rowand <frank.rowand at am.sony.com>
+Date: Wed, 16 May 2012 18:05:53 -0700
+Subject: [PATCH 277/303] fix printk flush of messages
+
+Reverse preempt-rt-allow-immediate-magic-sysrq-output-for-preempt_rt_full.patch
+
+The problem addressed by that patch does not exist after applying
+console-make-rt-friendly-update.patch
+
+Signed-off-by: Frank Rowand <frank.rowand at am.sony.com>
+Link: http://lkml.kernel.org/r/4FB44EF1.9050809@am.sony.com
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+[bwh: make apply on top of debian/sysrq-mask.patch]
+---
+ drivers/tty/serial/cpm_uart/cpm_uart_core.c | 2 +-
+ drivers/tty/sysrq.c | 23 -----------------------
+ include/linux/sysrq.h | 5 -----
+ kernel/printk.c | 5 ++---
+ lib/Kconfig.debug | 22 ----------------------
+ 5 files changed, 3 insertions(+), 54 deletions(-)
+
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -1226,7 +1226,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
+ {
+ struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ unsigned long flags;
+- int nolock = oops_in_progress || sysrq_in_progress;
++ int nolock = oops_in_progress;
+
+ if (unlikely(nolock)) {
+ local_irq_save(flags);
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -492,23 +492,6 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
+ sysrq_key_table[i] = op_p;
+ }
+
+-#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
+-
+-int sysrq_in_progress;
+-
+-static void set_sysrq_in_progress(int value)
+-{
+- sysrq_in_progress = value;
+-}
+-
+-#else
+-
+-static void set_sysrq_in_progress(int value)
+-{
+-}
+-
+-#endif
+-
+ void __handle_sysrq(int key, bool check_mask)
+ {
+ struct sysrq_key_op *op_p;
+@@ -517,9 +500,6 @@ void __handle_sysrq(int key, bool check_mask)
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+-
+- set_sysrq_in_progress(1);
+-
+ /*
+ * Raise the apparent loglevel to maximum so that the sysrq header
+ * is shown to provide the user with positive feedback. We do not
+@@ -561,9 +541,6 @@ void __handle_sysrq(int key, bool check_mask)
+ printk("\n");
+ console_loglevel = orig_log_level;
+ }
+-
+- set_sysrq_in_progress(0);
+-
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+ }
+
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -38,11 +38,6 @@ struct sysrq_key_op {
+ int enable_mask;
+ };
+
+-#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
+-extern int sysrq_in_progress;
+-#else
+-#define sysrq_in_progress 0
+-#endif
+ #ifdef CONFIG_MAGIC_SYSRQ
+
+ /* Generic SysRq interface -- you may call it from any device driver, supplying
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -21,7 +21,6 @@
+ #include <linux/tty.h>
+ #include <linux/tty_driver.h>
+ #include <linux/console.h>
+-#include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
+ #include <linux/nmi.h>
+@@ -846,8 +845,8 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ {
+ int retval = 0, wake = 0;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+- int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+- (preempt_count() <= 1)) || sysrq_in_progress;
++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ (preempt_count() <= 1);
+ #else
+ int lock = 1;
+ #endif
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -62,28 +62,6 @@ config MAGIC_SYSRQ
+ Specifies the default mask for the allowed SysRq keys. This can be
+ used to disable several sensitive keys by default.
+
+-config MAGIC_SYSRQ_FORCE_PRINTK
+- bool "Force printk from Magic SysRq"
+- depends on MAGIC_SYSRQ && PREEMPT_RT_FULL
+- default n
+- help
+- Allow the output from Magic SysRq to be output immediately, even if
+- this causes large latencies. This can cause performance problems
+- for real-time processes.
+-
+- If PREEMPT_RT_FULL, printk() will not try to acquire the console lock
+- when interrupts or preemption are disabled. If the console lock is
+- not acquired the printk() output will be buffered, but will not be
+- output immediately. Some drivers call into the Magic SysRq code
+- with interrupts or preemption disabled, so the output of Magic SysRq
+- will be buffered instead of printing immediately if this option is
+- not selected.
+-
+- Even with this option selected, Magic SysRq output will be delayed
+- if the attempt to acquire the console lock fails.
+-
+- Don't say Y unless you really know what this hack does.
+-
+ config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch)
@@ -0,0 +1,125 @@
+From eccec95ad9c2b1142eded0079c335745530bd5b0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 21 Aug 2012 20:38:50 +0200
+Subject: [PATCH 278/303] random: Make it work on rt
+
+Delegate the random insertion to the forced threaded interrupt
+handler. Store the return IP of the hard interrupt handler in the irq
+descriptor and feed it into the random generator as a source of
+entropy.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ drivers/char/random.c | 10 ++++++----
+ include/linux/irqdesc.h | 1 +
+ include/linux/random.h | 2 +-
+ kernel/irq/handle.c | 7 +++++--
+ kernel/irq/manage.c | 6 ++++++
+ 5 files changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index d38af32..66c8a0f9 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -767,18 +767,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ __u32 input[4], cycles = get_cycles();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+- if (regs) {
+- __u64 ip = instruction_pointer(regs);
++ if (ip) {
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
+@@ -792,7 +790,11 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ fast_pool->last = now;
+
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++#else
++ mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++#endif
+ /*
+ * If we don't have a valid cycle counter, and we see
+ * back-to-back timer interrupts, then skip giving credit for
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index f1e2527..5f4f091 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -53,6 +53,7 @@ struct irq_desc {
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
++ u64 random_ip;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 29e217a..3995b33 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -53,7 +53,7 @@ extern void rand_initialize_irq(int irq);
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index a768885..f6b91bc 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -116,6 +116,8 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+@@ -157,8 +159,9 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ } while (action);
+
+ #ifndef CONFIG_PREEMPT_RT_FULL
+- /* FIXME: Can we unbreak that ? */
+- add_interrupt_randomness(irq, flags);
++ add_interrupt_randomness(irq, flags, ip);
++#else
++ desc->random_ip = ip;
+ #endif
+
+ if (!noirqdebug)
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index f52263a..ff7bb89 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -825,6 +825,12 @@ static int irq_thread(void *data)
+ action_ret = handler_fn(desc, action);
+ if (!noirqdebug)
+ note_interrupt(action->irq, desc, action_ret);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (u64) action);
++ migrate_enable();
++#endif
+ }
+
+ wake = atomic_dec_and_test(&desc->threads_active);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch)
@@ -0,0 +1,137 @@
+From db2475e2d37afa9d570d615beb5f54d1d09ec77e Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 4 Oct 2012 11:02:04 -0400
+Subject: [PATCH 279/303] softirq: Init softirq local lock after per cpu
+ section is set up
+
+I discovered this bug when booting 3.4-rt on my powerpc box. It crashed
+with the following report:
+
+------------[ cut here ]------------
+kernel BUG at /work/rt/stable-rt.git/kernel/rtmutex_common.h:75!
+Oops: Exception in kernel mode, sig: 5 [#1]
+PREEMPT SMP NR_CPUS=64 NUMA PA Semi PWRficient
+Modules linked in:
+NIP: c0000000004aa03c LR: c0000000004aa01c CTR: c00000000009b2ac
+REGS: c00000003e8d7950 TRAP: 0700 Not tainted (3.4.11-test-rt19)
+MSR: 9000000000029032 <SF,HV,EE,ME,IR,DR,RI> CR: 24000082 XER: 20000000
+SOFTE: 0
+TASK = c00000003e8fdcd0[11] 'ksoftirqd/1' THREAD: c00000003e8d4000 CPU: 1
+GPR00: 0000000000000001 c00000003e8d7bd0 c000000000d6cbb0 0000000000000000
+GPR04: c00000003e8fdcd0 0000000000000000 0000000024004082 c000000000011454
+GPR08: 0000000000000000 0000000080000001 c00000003e8fdcd1 0000000000000000
+GPR12: 0000000024000084 c00000000fff0280 ffffffffffffffff 000000003ffffad8
+GPR16: ffffffffffffffff 000000000072c798 0000000000000060 0000000000000000
+GPR20: 0000000000642741 000000000072c858 000000003ffffaf0 0000000000000417
+GPR24: 000000000072dcd0 c00000003e7ff990 0000000000000000 0000000000000001
+GPR28: 0000000000000000 c000000000792340 c000000000ccec78 c000000001182338
+NIP [c0000000004aa03c] .wakeup_next_waiter+0x44/0xb8
+LR [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8
+Call Trace:
+[c00000003e8d7bd0] [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 (unreliable)
+[c00000003e8d7c60] [c0000000004a0320] .rt_spin_lock_slowunlock+0x8c/0xe4
+[c00000003e8d7ce0] [c0000000004a07cc] .rt_spin_unlock+0x54/0x64
+[c00000003e8d7d60] [c0000000000636bc] .__thread_do_softirq+0x130/0x174
+[c00000003e8d7df0] [c00000000006379c] .run_ksoftirqd+0x9c/0x1a4
+[c00000003e8d7ea0] [c000000000080b68] .kthread+0xa8/0xb4
+[c00000003e8d7f90] [c00000000001c2f8] .kernel_thread+0x54/0x70
+Instruction dump:
+60000000 e86d01c8 38630730 4bff7061 60000000 ebbf0008 7c7c1b78 e81d0040
+7fe00278 7c000074 7800d182 68000001 <0b000000> e88d01c8 387d0010 38840738
+
+The rtmutex_common.h:75 is:
+
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *w;
+
+ w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+ list_entry);
+ BUG_ON(w->lock != lock);
+
+ return w;
+}
+
+Where the waiter->lock is corrupted. I saw various other random bugs
+that all had to do with the softirq lock and plist. As plist needs to be
+initialized before it is used, I investigated how this lock is
+initialized. It's initialized with:
+
+void __init softirq_early_init(void)
+{
+ local_irq_lock_init(local_softirq_lock);
+}
+
+Where:
+
+#define local_irq_lock_init(lvar) \
+ do { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) \
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
+ } while (0)
+
+As the softirq lock is a local_irq_lock, which is a per_cpu lock, the
+initialization is done for all per_cpu versions of the lock. But let's
+look at where softirq_early_init() is called from.
+
+In init/main.c: start_kernel()
+
+/*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+ softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+ printk(KERN_NOTICE "%s", linux_banner);
+ setup_arch(&command_line);
+ mm_init_owner(&init_mm, &init_task);
+ mm_init_cpumask(&init_mm);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+One of the first things that is called is the initialization of the
+softirq lock. But if you look further down, we see the per_cpu areas
+have not been set up yet. Thus, initializing a local_irq_lock() before
+the per_cpu section is set up may not work, as it initializes the
+per-cpu locks before the per-cpu areas exist.
+
+By moving the softirq_early_init() right after setup_per_cpu_areas(),
+the kernel boots fine.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Clark Williams <clark at redhat.com>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Carsten Emde <cbe at osadl.org>
+Cc: vomlehn at texas.net
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/1349362924.6755.18.camel@gandalf.local.home
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ init/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/init/main.c b/init/main.c
+index b00c71b..c1f0c45 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -490,7 +490,6 @@ asmlinkage void __init start_kernel(void)
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+- softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+@@ -501,6 +500,7 @@ asmlinkage void __init start_kernel(void)
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
++ softirq_early_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+ build_all_zonelists(NULL);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch)
@@ -0,0 +1,124 @@
+From e0e7cde7548d079913788dd5fb989f1b2110f442 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 26 Sep 2012 16:20:00 +0200
+Subject: [PATCH 280/303] mm: slab: Fix potential deadlock
+
+ =============================================
+[ INFO: possible recursive locking detected ]
+ 3.6.0-rt1+ #49 Not tainted
+ ---------------------------------------------
+ swapper/0/1 is trying to acquire lock:
+ lock_slab_on+0x72/0x77
+
+ but task is already holding lock:
+ __local_lock_irq+0x24/0x77
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&per_cpu(slab_lock, __cpu).lock);
+ lock(&per_cpu(slab_lock, __cpu).lock);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 2 locks held by swapper/0/1:
+ kmem_cache_create+0x33/0x89
+ __local_lock_irq+0x24/0x77
+
+ stack backtrace:
+ Pid: 1, comm: swapper/0 Not tainted 3.6.0-rt1+ #49
+ Call Trace:
+ __lock_acquire+0x9a4/0xdc4
+ ? __local_lock_irq+0x24/0x77
+ ? lock_slab_on+0x72/0x77
+ lock_acquire+0xc4/0x108
+ ? lock_slab_on+0x72/0x77
+ ? unlock_slab_on+0x5b/0x5b
+ rt_spin_lock+0x36/0x3d
+ ? lock_slab_on+0x72/0x77
+ ? migrate_disable+0x85/0x93
+ lock_slab_on+0x72/0x77
+ do_ccupdate_local+0x19/0x44
+ slab_on_each_cpu+0x36/0x5a
+ do_tune_cpucache+0xc1/0x305
+ enable_cpucache+0x8c/0xb5
+ setup_cpu_cache+0x28/0x182
+ __kmem_cache_create+0x34b/0x380
+ ? shmem_mount+0x1a/0x1a
+ kmem_cache_create+0x4a/0x89
+ ? shmem_mount+0x1a/0x1a
+ shmem_init+0x3e/0xd4
+ kernel_init+0x11c/0x214
+ kernel_thread_helper+0x4/0x10
+ ? retint_restore_args+0x13/0x13
+ ? start_kernel+0x3bc/0x3bc
+ ? gs_change+0x13/0x13
+
+It's not a missing annotation. It's simply wrong code and needs to be
+fixed. Instead of nesting the local and the remote CPU lock, simply
+acquire only the remote CPU lock, which is sufficient protection for
+this procedure.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/locallock.h | 8 ++++++++
+ mm/slab.c | 10 ++--------
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+index 8fbc393..0161fbb 100644
+--- a/include/linux/locallock.h
++++ b/include/linux/locallock.h
+@@ -96,6 +96,9 @@ static inline void __local_lock_irq(struct local_irq_lock *lv)
+ #define local_lock_irq(lvar) \
+ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
++#define local_lock_irq_on(lvar, cpu) \
++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
++
+ static inline void __local_unlock_irq(struct local_irq_lock *lv)
+ {
+ LL_WARN(!lv->nestcnt);
+@@ -111,6 +114,11 @@ static inline void __local_unlock_irq(struct local_irq_lock *lv)
+ put_local_var(lvar); \
+ } while (0)
+
++#define local_unlock_irq_on(lvar, cpu) \
++ do { \
++ __local_unlock_irq(&per_cpu(lvar, cpu)); \
++ } while (0)
++
+ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+ {
+ if (lv->owner != current) {
+diff --git a/mm/slab.c b/mm/slab.c
+index 7b2d343..81d3481 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -747,18 +747,12 @@ slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+
+ static void lock_slab_on(unsigned int cpu)
+ {
+- if (cpu == smp_processor_id())
+- local_lock_irq(slab_lock);
+- else
+- local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++ local_lock_irq_on(slab_lock, cpu);
+ }
+
+ static void unlock_slab_on(unsigned int cpu)
+ {
+- if (cpu == smp_processor_id())
+- local_unlock_irq(slab_lock);
+- else
+- local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++ local_unlock_irq_on(slab_lock, cpu);
+ }
+ #endif
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch)
@@ -0,0 +1,62 @@
+From bb2389ac2f86bc49ee0395aad440d6d0679759a5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 27 Sep 2012 11:11:46 +0200
+Subject: [PATCH 281/303] mm: page_alloc: Use local_lock_on() instead of plain
+ spinlock
+
+The plain spinlock, while sufficient, does not update the local_lock
+internals. Use a proper local_lock function instead to ease debugging.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/locallock.h | 11 +++++++++++
+ mm/page_alloc.c | 4 ++--
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+index 0161fbb..f1804a3 100644
+--- a/include/linux/locallock.h
++++ b/include/linux/locallock.h
+@@ -137,6 +137,12 @@ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+ _flags = __get_cpu_var(lvar).flags; \
+ } while (0)
+
++#define local_lock_irqsave_on(lvar, _flags, cpu) \
++ do { \
++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
++ _flags = per_cpu(lvar, cpu).flags; \
++ } while (0)
++
+ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ unsigned long flags)
+ {
+@@ -156,6 +162,11 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ put_local_var(lvar); \
+ } while (0)
+
++#define local_unlock_irqrestore_on(lvar, flags, cpu) \
++ do { \
++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
++ } while (0)
++
+ #define local_spin_trylock_irq(lvar, lock) \
+ ({ \
+ int __locked; \
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9849f08..f97122e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -227,9 +227,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ # define cpu_lock_irqsave(cpu, flags) \
+- spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++ local_lock_irqsave_on(pa_lock, flags, cpu)
+ # define cpu_unlock_irqrestore(cpu, flags) \
+- spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++ local_unlock_irqrestore_on(pa_lock, flags, cpu)
+ #else
+ # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+ # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
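
These macros bracket work on another CPU's per-cpu pagesets in the page allocator; a hedged sketch of a typical caller (loosely modeled on the drain path, with an assumed function name, not taken from the patch):

    /* Sketch: cpu_lock_irqsave()/cpu_unlock_irqrestore() protect another
     * CPU's pcp lists.  With this patch the RT variant goes through
     * local_lock_irqsave_on(), so the local_lock owner/nestcnt state and
     * lockdep stay consistent with the local-CPU path. */
    static void drain_remote_pages_sketch(unsigned int cpu)
    {
            unsigned long flags;

            cpu_lock_irqsave(cpu, flags);
            /* ... free pages queued on that CPU's per-cpu lists ... */
            cpu_unlock_irqrestore(cpu, flags);
    }
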
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch)
@@ -0,0 +1,123 @@
+From f241795abf649639a246996752e55a7afd9974fd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 28 Sep 2012 10:49:42 +0100
+Subject: [PATCH 282/303] rt: rwsem/rwlock: lockdep annotations
+
+rwlocks and rwsems on RT do not allow multiple readers. Annotate the
+lockdep acquire functions accordingly.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/rt.c | 46 +++++++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 21 deletions(-)
+
+diff --git a/kernel/rt.c b/kernel/rt.c
+index 092d6b3..aa10504 100644
+--- a/kernel/rt.c
++++ b/kernel/rt.c
+@@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ * write locked.
+ */
+ migrate_disable();
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
+ ret = rt_mutex_trylock(lock);
+- else if (!rwlock->read_depth)
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ } else if (!rwlock->read_depth) {
+ ret = 0;
++ }
+
+- if (ret) {
++ if (ret)
+ rwlock->read_depth++;
+- rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+- } else
++ else
+ migrate_enable();
+
+ return ret;
+@@ -242,13 +244,13 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+
+- rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+-
+ /*
+ * recursive read locks succeed when current owns the lock
+ */
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
++ }
+ rwlock->read_depth++;
+ }
+
+@@ -264,11 +266,11 @@ EXPORT_SYMBOL(rt_write_unlock);
+
+ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ {
+- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+-
+ /* Release the lock only when read_depth is down to 0 */
+- if (--rwlock->read_depth == 0)
++ if (--rwlock->read_depth == 0) {
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __rt_spin_unlock(&rwlock->lock);
++ }
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+
+@@ -315,9 +317,10 @@ EXPORT_SYMBOL(rt_up_write);
+
+ void rt_up_read(struct rw_semaphore *rwsem)
+ {
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- if (--rwsem->read_depth == 0)
++ if (--rwsem->read_depth == 0) {
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
++ }
+ }
+ EXPORT_SYMBOL(rt_up_read);
+
+@@ -366,15 +369,16 @@ int rt_down_read_trylock(struct rw_semaphore *rwsem)
+ * but not when read_depth == 0 which means that the rwsem is
+ * write locked.
+ */
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
+ ret = rt_mutex_trylock(&rwsem->lock);
+- else if (!rwsem->read_depth)
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ } else if (!rwsem->read_depth) {
+ ret = 0;
++ }
+
+- if (ret) {
++ if (ret)
+ rwsem->read_depth++;
+- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+- }
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_down_read_trylock);
+@@ -383,10 +387,10 @@ static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+ {
+ struct rt_mutex *lock = &rwsem->lock;
+
+- rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+-
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
++ }
+ rwsem->read_depth++;
+ }
+
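
The practical consequence of the annotation change is that two tasks read-locking the same lock serialize on RT, which the old read-acquire annotation hid from lockdep. A minimal illustration using the generic rwlock API (not taken from the patch):

    /* Sketch: on PREEMPT_RT_FULL both "readers" contend for the
     * underlying rt_mutex, so the two tasks run strictly one after the
     * other.  Annotating the acquire as exclusive lets lockdep model
     * exactly that instead of assuming shared readers. */
    static DEFINE_RWLOCK(example_lock);

    static void reader_side(void)
    {
            read_lock(&example_lock);
            /* critical section: effectively exclusive on RT */
            read_unlock(&example_lock);
    }
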
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch)
@@ -0,0 +1,79 @@
+From ecf358e1be06dba14abebc2d1f41c1a44f5c4cbd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 5 Oct 2012 08:56:15 +0100
+Subject: [PATCH 283/303] sched: Better debug output for might sleep
+
+might_sleep() can tell us where interrupts have been disabled, but we
+have no idea what disabled preemption. Add some debug infrastructure.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/sched.h | 4 ++++
+ kernel/sched.c | 23 +++++++++++++++++++++--
+ 2 files changed, 25 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index fa24643..e9854c5 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1612,6 +1612,10 @@ struct task_struct {
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+ #endif
++
++#ifdef CONFIG_DEBUG_PREEMPT
++ unsigned long preempt_disable_ip;
++#endif
+ };
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
+diff --git a/kernel/sched.c b/kernel/sched.c
+index af7b82b..db46cd4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4488,8 +4488,13 @@ void __kprobes add_preempt_count(int val)
+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+ PREEMPT_MASK - 10);
+ #endif
+- if (preempt_count() == val)
+- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++ if (preempt_count() == val) {
++ unsigned long ip = get_parent_ip(CALLER_ADDR1);
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
+ }
+ EXPORT_SYMBOL(add_preempt_count);
+
+@@ -4531,6 +4536,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
+ print_modules();
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
++#ifdef DEBUG_PREEMPT
++ if (in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
+
+ if (regs)
+ show_regs(regs);
+@@ -8913,6 +8925,13 @@ void __might_sleep(const char *file, int line, int preempt_offset)
+ debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
++#ifdef DEBUG_PREEMPT
++ if (!preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
+ dump_stack();
+ }
+ EXPORT_SYMBOL(__might_sleep);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch)
@@ -0,0 +1,63 @@
+From 432b9e4953c308c983185ac7600c59c0d54d6023 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 3 Oct 2012 17:21:53 +0100
+Subject: [PATCH 284/303] stomp_machine: Use mutex_trylock when called from
+ inactive cpu
+
+If the stop machinery is called from an inactive CPU we cannot use
+mutex_lock, because some other stomp machine invocation might be in
+progress and the mutex can be contended. We cannot schedule from this
+context, so trylock and loop.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/stop_machine.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 561ba3a..e98c70b 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -158,7 +158,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg,
+- struct cpu_stop_done *done)
++ struct cpu_stop_done *done, bool inactive)
+ {
+ struct cpu_stop_work *work;
+ unsigned int cpu;
+@@ -175,7 +175,12 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ * Make sure that all work is queued on all cpus before we
+ * any of the cpus can execute it.
+ */
+- mutex_lock(&stopper_lock);
++ if (!inactive) {
++ mutex_lock(&stopper_lock);
++ } else {
++ while (!mutex_trylock(&stopper_lock))
++ cpu_relax();
++ }
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ &per_cpu(stop_cpus_work, cpu));
+@@ -188,7 +193,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
+ struct cpu_stop_done done;
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+- queue_stop_cpus_work(cpumask, fn, arg, &done);
++ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
+ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+@@ -601,7 +606,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+ set_state(&smdata, STOPMACHINE_PREPARE);
+ cpu_stop_init_done(&done, num_active_cpus());
+ queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+- &done);
++ &done, true);
+ ret = stop_machine_cpu_stop(&smdata);
+
+ /* Busy wait for completion. */
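
The trylock-and-spin idiom used above is the usual way to take a sleeping mutex from a context that must not schedule; in isolation (illustrative only, assumed helper name) it reduces to:

    /* Sketch: acquire a mutex from a context that cannot block.
     * cpu_relax() keeps the busy-wait polite; forward progress relies on
     * the current holder running on another, active CPU. */
    static void grab_mutex_atomically(struct mutex *m)
    {
            while (!mutex_trylock(m))
                    cpu_relax();
    }
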
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch)
@@ -0,0 +1,36 @@
+From f4df1770c4b440ef73d89244e23d89f37474bccd Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Tue, 16 Oct 2012 17:00:19 -0400
+Subject: [PATCH 285/303] slab: Fix up stable merge of slab init_lock_keys()
+
+There was a stable fix that moved the init_lock_keys() to after
+the enable_cpucache(). But -rt changed this function to
+init_cachep_lock_keys(). This moves the init afterwards to
+match the stable fix.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ mm/slab.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 81d3481..fff347f 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1747,15 +1747,12 @@ void __init kmem_cache_init_late(void)
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next) {
+- init_cachep_lock_keys(cachep);
+ if (enable_cpucache(cachep, GFP_NOWAIT))
+ BUG();
++ init_cachep_lock_keys(cachep);
+ }
+ mutex_unlock(&cache_chain_mutex);
+
+- /* Annotate slab for lockdep -- annotate the malloc caches */
+- init_lock_keys();
+-
+ /* Done! */
+ g_cpucache_up = FULL;
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch)
@@ -0,0 +1,40 @@
+From 6ba9b460906d4d04744a5cde9bd95b48be63684d Mon Sep 17 00:00:00 2001
+From: Watanabe <shunsuke.watanabe at tel.com>
+Date: Sun, 28 Oct 2012 11:13:44 +0100
+Subject: [PATCH 286/303] hrtimer: Raise softirq if hrtimer irq stalled
+
+When the hrtimer stall detection hits, the softirq is not raised.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ kernel/hrtimer.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 31923d5..7021e6d 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1525,11 +1525,7 @@ retry:
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
+-
+- if (raise)
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+- return;
++ goto out;
+ }
+
+ /*
+@@ -1573,6 +1569,9 @@ retry:
+ tick_program_event(expires_next, 1);
+ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+ ktime_to_ns(delta));
++out:
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ /*
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch)
@@ -0,0 +1,27 @@
+From 32165cc175e3febb64c77d8d702e1b88fe8d0391 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 28 Oct 2012 13:26:09 +0000
+Subject: [PATCH 287/303] rcu: Disable RCU_FAST_NO_HZ on RT
+
+This uses a timer_list timer from the irq disabled guts of the idle
+code. Disable it for now to prevent wreckage.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ init/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 720c182..aa6545f 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -469,7 +469,7 @@ config RCU_FANOUT_EXACT
+
+ config RCU_FAST_NO_HZ
+ bool "Accelerate last non-dyntick-idle CPU's grace periods"
+- depends on TREE_RCU && NO_HZ && SMP
++ depends on TREE_RCU && NO_HZ && SMP && !PREEMPT_RT_FULL
+ default n
+ help
+ This option causes RCU to attempt to accelerate grace periods
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch)
@@ -0,0 +1,102 @@
+From 09b677f2fc687259c1046b5b6fa136630d765d0f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Sun, 28 Oct 2012 11:18:08 +0100
+Subject: [PATCH 288/303] net: netfilter: Serialize xt_write_recseq sections
+ on RT
+
+The netfilter code relies only on the implicit semantics of
+local_bh_disable() for serializing xt_write_recseq sections. RT breaks
+that and needs explicit serialization here.
+
+Reported-by: Peter LaDow <petela at gocougs.wsu.edu>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/locallock.h | 4 ++++
+ include/linux/netfilter/x_tables.h | 7 +++++++
+ net/netfilter/core.c | 6 ++++++
+ 3 files changed, 17 insertions(+)
+
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+index f1804a3..a5eea5d 100644
+--- a/include/linux/locallock.h
++++ b/include/linux/locallock.h
+@@ -25,6 +25,9 @@ struct local_irq_lock {
+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
++ DECLARE_PER_CPU(struct local_irq_lock, lvar)
++
+ #define local_irq_lock_init(lvar) \
+ do { \
+ int __cpu; \
+@@ -220,6 +223,7 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ #else /* PREEMPT_RT_BASE */
+
+ #define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
+
+ static inline void local_irq_lock_init(int lvar) { }
+
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 32cddf7..bed90da2 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -186,6 +186,7 @@ struct xt_counters_info {
+ #ifdef __KERNEL__
+
+ #include <linux/netdevice.h>
++#include <linux/locallock.h>
+
+ /**
+ * struct xt_action_param - parameters for matches/targets
+@@ -466,6 +467,8 @@ extern void xt_free_table_info(struct xt_table_info *info);
+ */
+ DECLARE_PER_CPU(seqcount_t, xt_recseq);
+
++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
++
+ /**
+ * xt_write_recseq_begin - start of a write section
+ *
+@@ -480,6 +483,9 @@ static inline unsigned int xt_write_recseq_begin(void)
+ {
+ unsigned int addend;
+
++ /* RT protection */
++ local_lock(xt_write_lock);
++
+ /*
+ * Low order bit of sequence is set if we already
+ * called xt_write_recseq_begin().
+@@ -510,6 +516,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
+ /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+ smp_wmb();
+ __this_cpu_add(xt_recseq.sequence, addend);
++ local_unlock(xt_write_lock);
+ }
+
+ /*
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index afca6c7..aa3f87b 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -20,11 +20,17 @@
+ #include <linux/proc_fs.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+
+ #include "nf_internals.h"
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
++EXPORT_PER_CPU_SYMBOL(xt_write_lock);
++#endif
++
+ static DEFINE_MUTEX(afinfo_mutex);
+
+ const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
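
From a caller's point of view nothing changes: a write section still brackets the counter update, but on RT the begin/end pair now also takes and releases the per-cpu xt_write_lock added above. A hedged sketch of such a section (loosely modeled on the ip_tables traversal, not verbatim):

    /* Sketch: the classic xt_write_recseq section around a counter
     * update.  On RT, xt_write_recseq_begin()/xt_write_recseq_end() now
     * serialize via xt_write_lock in addition to flipping the per-cpu
     * recseq counter. */
    static void update_counter_sketch(struct xt_counters *ctr,
                                      unsigned int bytes)
    {
            unsigned int addend;

            local_bh_disable();
            addend = xt_write_recseq_begin();
            ADD_COUNTER(*ctr, bytes, 1);
            xt_write_recseq_end(addend);
            local_bh_enable();
    }
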
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch)
@@ -0,0 +1,36 @@
+From 8596c5bddab48a58771790aeefc64581d4c00dec Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 20 Dec 2012 14:58:00 +0100
+Subject: [PATCH 289/303] sched: Adjust sched_reset_on_fork when nothing else
+ changes
+
+If the policy and priority remain unchanged a possible modification of
+sched_reset_on_fork gets lost in the early exit path.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable at vger.kernel.org
+Cc: stable-rt at vger.kernel.org
+---
+ kernel/sched.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index db46cd4..1101ef2 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5720,11 +5720,13 @@ recheck:
+ }
+
+ /*
+- * If not changing anything there's no need to proceed further:
++ * If not changing anything there's no need to proceed
++ * further, but store a possible modification of
++ * reset_on_fork.
+ */
+ if (unlikely(policy == p->policy && (!rt_policy(policy) ||
+ param->sched_priority == p->rt_priority))) {
+-
++ p->sched_reset_on_fork = reset_on_fork;
+ __task_rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return 0;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch)
@@ -0,0 +1,71 @@
+From d573cca0ae6ee9a699e2005c21532effa8c8a428 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 4 Dec 2012 08:56:41 +0100
+Subject: [PATCH 290/303] sched: Queue RT tasks to head when prio drops
+
+The following scenario does not work correctly:
+
+Runqueue of CPUx contains two runnable and pinned tasks:
+ T1: SCHED_FIFO, prio 80
+ T2: SCHED_FIFO, prio 80
+
+T1 is on the cpu and executes the following syscalls (classic priority
+ceiling scenario):
+
+ sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90);
+ ...
+ sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80);
+ ...
+
+Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back
+to sleep the scheduler picks T2. Surprise!
+
+The same happens w/o actual preemption when T1 is forced into the
+scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes
+pick_next_task() which returns T2. So T1 gets preempted and scheduled
+out.
+
+This happens because sched_setscheduler() dequeues T1 from the prio 90
+list and then enqueues it on the tail of the prio 80 list behind T2.
+This violates the POSIX spec and surprises user space which relies on
+the guarantee that SCHED_FIFO tasks are not scheduled out unless they
+give the CPU up voluntarily or are preempted by a higher priority
+task. In the latter case the preempted task must get back on the CPU
+after the preempting task schedules out again.
+
+We fixed a similar issue already in commit 60db48c (sched: Queue a
+deboosted task to the head of the RT prio queue). The same treatment
+is necessary for sched_setscheduler(). So enqueue to head of the prio
+bucket list if the priority of the task is lowered.
+
+It might be possible that existing user space relies on the current
+behaviour, but it can be considered highly unlikely due to the corner
+case nature of the application scenario.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable at vger.kernel.org
+Cc: stable-rt at vger.kernel.org
+---
+ kernel/sched.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 1101ef2..f97f894 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5768,8 +5768,13 @@ recheck:
+
+ if (running)
+ p->sched_class->set_curr_task(rq);
+- if (on_rq)
+- activate_task(rq, p, 0);
++ if (on_rq) {
++ /*
++ * We enqueue to tail when the priority of a task is
++ * increased (user space view).
++ */
++ activate_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
++ }
+
+ check_class_changed(rq, p, prev_class, oldprio);
+ task_rq_unlock(rq, p, &flags);
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch)
@@ -0,0 +1,160 @@
+From c2f7832d9388d76e564cc12afc4acda8995afa93 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 20 Dec 2012 15:13:49 +0100
+Subject: [PATCH 291/303] sched: Consider pi boosting in setscheduler
+
+If the policy or priority of a PI boosted task is modified by a
+setscheduler() call we unconditionally dequeue and requeue the task if
+it is on the runqueue, even if the new priority is lower than the
+current effective boosted priority. This can result in undesired
+reordering of the priority bucket list.
+
+If the new priority is less than or equal to the current effective
+priority we just store the new parameters in the task struct and leave
+the scheduler class and the runqueue untouched. This is handled when
+the task deboosts itself. Only if the new priority is higher than the
+effective boosted priority do we apply the change immediately.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable at vger.kernel.org
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/sched.h | 5 +++++
+ kernel/rtmutex.c | 12 ++++++++++++
+ kernel/sched.c | 39 +++++++++++++++++++++++++++++++--------
+ 3 files changed, 48 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e9854c5..380e503 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2127,6 +2127,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+ #ifdef CONFIG_RT_MUTEXES
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
++extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+ static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+ {
+@@ -2137,6 +2138,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
+ {
+ return p->normal_prio;
+ }
++static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
++{
++ return 0;
++}
+ # define rt_mutex_adjust_pi(p) do { } while (0)
+ static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+ {
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 9c4f6e5..6075f176e 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -124,6 +124,18 @@ int rt_mutex_getprio(struct task_struct *task)
+ }
+
+ /*
++ * Called by sched_setscheduler() to check whether the priority change
++ * is overruled by a possible priority boosting.
++ */
++int rt_mutex_check_prio(struct task_struct *task, int newprio)
++{
++ if (!task_has_pi_waiters(task))
++ return 0;
++
++ return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio;
++}
++
++/*
+ * Adjust the priority of a task, after its pi_waiters got modified.
+ *
+ * This can be both boosting and unboosting. task->pi_lock must be held.
+diff --git a/kernel/sched.c b/kernel/sched.c
+index f97f894..c6ff5be 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5364,7 +5364,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+- * Used by the rt_mutex code to implement priority inheritance logic.
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
+ */
+ void rt_mutex_setprio(struct task_struct *p, int prio)
+ {
+@@ -5587,20 +5588,25 @@ static struct task_struct *find_process_by_pid(pid_t pid)
+ return pid ? find_task_by_vpid(pid) : current;
+ }
+
+-/* Actually do priority change: must hold rq lock. */
+-static void
+-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
++static void __setscheduler_params(struct task_struct *p, int policy, int prio)
+ {
+ p->policy = policy;
+ p->rt_priority = prio;
+ p->normal_prio = normal_prio(p);
++ set_load_weight(p);
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void
++__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
++{
++ __setscheduler_params(p, policy, prio);
+ /* we are holding p->pi_lock already */
+ p->prio = rt_mutex_getprio(p);
+ if (rt_prio(p->prio))
+ p->sched_class = &rt_sched_class;
+ else
+ p->sched_class = &fair_sched_class;
+- set_load_weight(p);
+ }
+
+ /*
+@@ -5625,6 +5631,7 @@ static bool check_same_owner(struct task_struct *p)
+ static int __sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param, bool user)
+ {
++ int newprio = MAX_RT_PRIO - 1 - param->sched_priority;
+ int retval, oldprio, oldpolicy = -1, on_rq, running;
+ unsigned long flags;
+ const struct sched_class *prev_class;
+@@ -5753,6 +5760,25 @@ recheck:
+ task_rq_unlock(rq, p, &flags);
+ goto recheck;
+ }
++
++ p->sched_reset_on_fork = reset_on_fork;
++ oldprio = p->prio;
++
++ /*
++ * Special case for priority boosted tasks.
++ *
++ * If the new priority is lower or equal (user space view)
++ * than the current (boosted) priority, we just store the new
++ * normal parameters and do not touch the scheduler class and
++ * the runqueue. This will be done when the task deboost
++ * itself.
++ */
++ if (rt_mutex_check_prio(p, newprio)) {
++ __setscheduler_params(p, policy, param->sched_priority);
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++
+ on_rq = p->on_rq;
+ running = task_current(rq, p);
+ if (on_rq)
+@@ -5760,9 +5786,6 @@ recheck:
+ if (running)
+ p->sched_class->put_prev_task(rq, p);
+
+- p->sched_reset_on_fork = reset_on_fork;
+-
+- oldprio = p->prio;
+ prev_class = p->sched_class;
+ __setscheduler(rq, p, policy, param->sched_priority);
+
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch)
@@ -0,0 +1,49 @@
+From 6206a6798d99bc23abb9f7d3d1ee86df4f2309af Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 8 Jan 2013 21:36:51 +0100
+Subject: [PATCH 292/303] drivers-tty-pl011-irq-disable-madness.patch
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ drivers/tty/serial/amba-pl011.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index fe9f111..1fbaf66 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1761,13 +1761,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
++ /*
++ * local_irq_save(flags);
++ *
++ * This local_irq_save() is nonsense. If we come in via sysrq
++ * handling then interrupts are already disabled. Aside of
++ * that the port.sysrq check is racy on SMP regardless.
++ */
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ else
+- spin_lock(&uap->port.lock);
++ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -1789,8 +1795,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ writew(old_cr, uap->port.membase + UART011_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ clk_disable(uap->clk);
+ }
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch)
@@ -0,0 +1,44 @@
+From 07a3aef9cf51bf39f42e0053bf7b69b3f5cb9fb9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 9 Jan 2013 12:11:12 +0100
+Subject: [PATCH 293/303] mmci: Remove bogus local_irq_save()
+
+On !RT the interrupt handler runs with interrupts disabled. On RT it
+runs in a thread, so there is no need to disable interrupts at all.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ drivers/mmc/host/mmci.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 0726e59..5d7bf83 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -859,15 +859,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+- unsigned long flags;
+ u32 status;
+
+ status = readl(base + MMCISTATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+
+- local_irq_save(flags);
+-
+ do {
+ unsigned int remain, len;
+ char *buffer;
+@@ -907,8 +904,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
+
+ sg_miter_stop(sg_miter);
+
+- local_irq_restore(flags);
+-
+ /*
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch)
@@ -0,0 +1,24 @@
+From 932d7704d73bbd5d4feee760bd65f4d5e647587d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 9 Jan 2013 23:03:29 +0100
+Subject: [PATCH 294/303] sched: Init idle->on_rq in init_idle()
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/sched.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index c6ff5be..ef5fe63 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6520,6 +6520,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
+ rcu_read_unlock();
+
+ rq->curr = rq->idle = idle;
++ idle->on_rq = 1;
+ #if defined(CONFIG_SMP)
+ idle->on_cpu = 1;
+ #endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch)
@@ -0,0 +1,28 @@
+From e4c734d653b9bcda6ba21c8581d9391dfd2c4a30 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 9 Jan 2013 23:34:08 +0100
+Subject: [PATCH 295/303] sched: Check for idle task in might_sleep()
+
+Idle is not allowed to call sleeping functions ever!
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/sched.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index ef5fe63..03015b2 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -8938,7 +8938,8 @@ void __might_sleep(const char *file, int line, int preempt_offset)
+ static unsigned long prev_jiffy; /* ratelimiting */
+
+ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
+- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
+ system_state != SYSTEM_RUNNING || oops_in_progress)
+ return;
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch)
@@ -0,0 +1,42 @@
+From 2da770e15842abdc5282b527b6c51fa37bbe7bbf Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 10 Jan 2013 09:50:51 +0100
+Subject: [PATCH 296/303] mm: swap: Initialize local locks early
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ mm/swap.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/mm/swap.c b/mm/swap.c
+index e3f7d6f..c428897 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -772,6 +772,15 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
+
+ EXPORT_SYMBOL(pagevec_lookup);
+
++/* Early setup for the local locks */
++static int __init swap_init_locks(void)
++{
++ local_irq_lock_init(rotate_lock);
++ local_irq_lock_init(swap_lock);
++ return 1;
++}
++early_initcall(swap_init_locks);
++
+ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
+ pgoff_t *index, int tag, unsigned nr_pages)
+ {
+@@ -789,9 +798,6 @@ void __init swap_setup(void)
+ {
+ unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
+
+- local_irq_lock_init(rotate_lock);
+- local_irq_lock_init(swap_lock);
+-
+ #ifdef CONFIG_SWAP
+ bdi_init(swapper_space.backing_dev_info);
+ #endif
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch)
@@ -0,0 +1,46 @@
+From 748f03f190528b4342810e9270a1e1fa3033f279 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 13 Feb 2013 10:59:53 +0100
+Subject: [PATCH 297/303] x86/32: Use kmap switch for non highmem as well
+
+Even with CONFIG_HIGHMEM=n we need to take care of the "atomic"
+mappings which are installed via iomap_atomic.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ arch/x86/kernel/process_32.c | 2 +-
+ include/linux/sched.h | 4 +++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 20f1573..66ee590 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -340,7 +340,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
+-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Save @prev's kmap_atomic stack
+ */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 380e503..41c0979 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1608,9 +1608,11 @@ struct task_struct {
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ #endif
+-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++#ifdef CONFIG_PREEMPT_RT_FULL
++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
++# endif
+ #endif
+
+ #ifdef CONFIG_DEBUG_PREEMPT
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch)
@@ -0,0 +1,183 @@
+From c009497912e59cd37b937a587bd34136220a0222 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Wed, 13 Feb 2013 09:26:05 -0500
+Subject: [PATCH 298/303] acpi/rt: Convert acpi_gbl_hardware lock back to a
+ raw_spinlock_t
+
+We hit the following bug with 3.6-rt:
+
+[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002
+[ 5.898991] no locks held by swapper/3/0.
+[ 5.898993] Modules linked in:
+[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
+[ 5.898997] Call Trace:
+[ 5.899011] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
+[ 5.899028] [<ffffffff81577923>] __schedule+0x793/0x7a0
+[ 5.899032] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
+[ 5.899034] [<ffffffff81577b89>] schedule+0x29/0x70
+[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002
+[ 5.899037] no locks held by swapper/7/0.
+[ 5.899039] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
+[ 5.899040] Modules linked in:
+[ 5.899041]
+[ 5.899045] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
+[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
+[ 5.899047] Call Trace:
+[ 5.899049] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
+[ 5.899052] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
+[ 5.899054] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
+[ 5.899056] [<ffffffff81577923>] __schedule+0x793/0x7a0
+[ 5.899059] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
+[ 5.899062] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
+[ 5.899068] [<ffffffff8130be64>] acpi_write_bit_register+0x33/0xb0
+[ 5.899071] [<ffffffff81577b89>] schedule+0x29/0x70
+[ 5.899072] [<ffffffff8130be13>] ? acpi_read_bit_register+0x33/0x51
+[ 5.899074] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
+[ 5.899077] [<ffffffff8131d1fc>] acpi_idle_enter_bm+0x8a/0x28e
+[ 5.899079] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
+[ 5.899081] [<ffffffff8107e5da>] ? this_cpu_load+0x1a/0x30
+[ 5.899083] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
+[ 5.899087] [<ffffffff8144c759>] cpuidle_enter+0x19/0x20
+[ 5.899088] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
+[ 5.899090] [<ffffffff8144c777>] cpuidle_enter_state+0x17/0x50
+[ 5.899092] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
+[ 5.899094] [<ffffffff8144d1a1>] cpuidle899101] [<ffffffff8130be13>] ?
+
+As the acpi code disables interrupts in acpi_idle_enter_bm and calls
+code that grabs the acpi lock, it causes issues, because that lock is
+currently a sleeping lock on RT.
+
+The lock was converted from a raw to a sleeping lock due to some
+previous issues, and tests at the time suggested it didn't matter.
+Unfortunately, it did matter for one of our boxes.
+
+This patch converts the lock back to a raw lock. I've run this code on a
+few of my own machines, one being my laptop that uses the acpi quite
+extensively. I've been able to suspend and resume without issues.
+
+[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ]
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: John Kacur <jkacur at gmail.com>
+Cc: Clark Williams <clark at redhat.com>
+Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/acpi/acpica/acglobal.h | 2 +-
+ drivers/acpi/acpica/hwregs.c | 4 ++--
+ drivers/acpi/acpica/hwxface.c | 4 ++--
+ drivers/acpi/acpica/utmutex.c | 4 ++--
+ include/acpi/platform/aclinux.h | 14 ++++++++++++++
+ 5 files changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
+index 76dc02f1..ca6bbe7 100644
+--- a/drivers/acpi/acpica/acglobal.h
++++ b/drivers/acpi/acpica/acglobal.h
+@@ -236,7 +236,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+ * interrupt level
+ */
+ ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
++ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+
+ /*****************************************************************************
+ *
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index cc70f3f..1df2ce6 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -263,14 +263,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
+ ACPI_BITMASK_ALL_FIXED_STATUS,
+ ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /* Clear the fixed events in PM1 A/B */
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITMASK_ALL_FIXED_STATUS);
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+
+ if (ACPI_FAILURE(status))
+ goto exit;
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index c2793a8..5b28769 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -387,7 +387,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /*
+ * At this point, we know that the parent register is one of the
+@@ -448,7 +448,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+
+ unlock_and_exit:
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
+index 7d797e2..b611bf3 100644
+--- a/drivers/acpi/acpica/utmutex.c
++++ b/drivers/acpi/acpica/utmutex.c
+@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
+ return_ACPI_STATUS (status);
+ }
+
+- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+@@ -135,7 +135,7 @@ void acpi_ut_mutex_terminate(void)
+ /* Delete the spinlocks */
+
+ acpi_os_delete_lock(acpi_gbl_gpe_lock);
+- acpi_os_delete_lock(acpi_gbl_hardware_lock);
++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
+
+ /* Delete the reader/writer lock */
+
+diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
+index f4b2eff..0e70789 100644
+--- a/include/acpi/platform/aclinux.h
++++ b/include/acpi/platform/aclinux.h
+@@ -73,6 +73,7 @@
+
+ #define acpi_cache_t struct kmem_cache
+ #define acpi_spinlock spinlock_t *
++#define acpi_raw_spinlock raw_spinlock_t *
+ #define acpi_cpu_flags unsigned long
+
+ #else /* !__KERNEL__ */
+@@ -176,6 +177,19 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
+ lock ? AE_OK : AE_NO_MEMORY; \
+ })
+
++#define acpi_os_create_raw_lock(__handle) \
++({ \
++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
++ \
++ if (lock) { \
++ *(__handle) = lock; \
++ raw_spin_lock_init(*(__handle)); \
++ } \
++ lock ? AE_OK : AE_NO_MEMORY; \
++})
++
++#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
++
+ #endif /* __KERNEL__ */
+
+ #endif /* __ACLINUX_H__ */
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch)
@@ -0,0 +1,40 @@
+From 8f49003abe24dbc3439f17ce590f6021818c28b9 Mon Sep 17 00:00:00 2001
+From: "Bu, Yitian" <ybu at qti.qualcomm.com>
+Date: Mon, 18 Feb 2013 12:53:37 +0000
+Subject: [PATCH 299/303] printk: Fix rq->lock vs logbuf_lock unlock lock
+ inversion
+
+commit 07354eb1a74d1 ("locking printk: Annotate logbuf_lock as raw")
+reintroduced a lock inversion problem which was fixed in commit
+0b5e1c5255 ("printk: Release console_sem after logbuf_lock"). This
+probably happened when fixing up patch rejects.
+
+Restore the ordering and unlock logbuf_lock before releasing
+console_sem.
+
+Signed-off-by: ybu <ybu at qti.qualcomm.com>
+Cc: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Cc: stable at vger.kernel.org
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/E807E903FE6CBE4D95E420FBFCC273B827413C@nasanexd01h.na.qualcomm.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 972cc56..37b9b99 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -867,9 +867,9 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ }
+ }
+ printk_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
+- raw_spin_unlock(&logbuf_lock);
+ return retval;
+ }
+ static const char recursion_bug_msg [] =
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch)
@@ -0,0 +1,125 @@
+From 1e92509213ae045e263acfc9afa3ed90f5dc6ccf Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 14 Feb 2013 21:01:06 +0100
+Subject: [PATCH 300/303] serial: Imx: Fix recursive locking bug
+
+commit 9ec1882df2 (tty: serial: imx: console write routing is unsafe
+on SMP) introduced a recursive locking bug in imx_console_write().
+
+The callchain is:
+
+imx_rxint()
+ spin_lock_irqsave(&sport->port.lock,flags);
+ ...
+ uart_handle_sysrq_char();
+ sysrq_function();
+ printk();
+ imx_console_write();
+ spin_lock_irqsave(&sport->port.lock,flags); <--- DEAD
+
+The bad news is that the kernel debugging facilities can detect the
+problem, but the printks never surface on the serial console for
+obvious reasons.
+
+There is a similar issue with oops_in_progress. If the kernel crashes
+we really don't want to be stuck on the lock and unable to tell what
+happened.
+
+In general, most UP-originated drivers miss these checks, and nobody
+ever notices because CONFIG_PROVE_LOCKING still seems to be ignored by
+a large number of developers.
+
+The solution is to avoid locking in the sysrq case and trylock in the
+oops_in_progress case.
+
+This scheme is used in other drivers as well and it would be nice if
+we could move this to a common place, so the usual copy/paste/modify
+bugs can be avoided.
+
+Now there is another issue with this scheme:
+
+CPU0 CPU1
+printk()
+ rxint()
+ sysrq_detection() -> sets port->sysrq
+ return from interrupt
+ console_write()
+ if (port->sysrq)
+ avoid locking
+
+port->sysrq is reset with the next receive character. So as long as
+port->sysrq is not reset, which can take arbitrarily long if no
+further receive character follows the break, all console writes
+happen unlocked.
+
+While the current writer is protected against other console writers by
+the console sem, it's unprotected against open/close or other
+operations which fiddle with the port. That's what the above mentioned
+commit tried to solve.
+
+That's an issue in all drivers which use that scheme and unfortunately
+there is no easy workaround. The only solution is to have a separate
+indicator port->sysrq_cpu. uart_handle_sysrq_char() then sets it to
+smp_processor_id() before calling into handle_sysrq() and resets it to
+-1 after that. Then change the locking check to:
+
+ if (port->sysrq_cpu == smp_processor_id())
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(port->lock, flags);
+ else
+ spin_lock_irqsave(port->lock, flags);
+
+That would force all other cpus into the spin_lock path. Problem
+solved, but that's way beyond the scope of this fix and really wants
+to be implemented in a common function which calls the uart specific
+write function to avoid another gazillion of hard to debug
+copy/paste/modify bugs.
+
+Reported-and-tested-by: Tim Sander <tim at krieglstein.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Jiri Slaby <jslaby at suse.cz>
+Cc: Xinyu Chen <xinyu.chen at freescale.com>
+Cc: Dirk Behme <dirk.behme at de.bosch.com>
+Cc: Shawn Guo <shawn.guo at linaro.org>
+Cc: Tim Sander <tim at krieglstein.org>
+Cc: Sascha Hauer <s.hauer at pengutronix.de>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1302142006050.22263@ionos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ drivers/tty/serial/imx.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 8e68f79..5f5c214 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1115,8 +1115,14 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+ struct imx_port *sport = imx_ports[co->index];
+ unsigned int old_ucr1, old_ucr2, ucr1;
+ unsigned long flags;
++ int locked = 1;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ if (sport->port.sysrq)
++ locked = 0;
++ else if (oops_in_progress)
++ locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ else
++ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /*
+ * First, save UCR1/2 and then disable interrupts
+@@ -1144,7 +1150,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+ writel(old_ucr1, sport->port.membase + UCR1);
+ writel(old_ucr2, sport->port.membase + UCR2);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ if (locked)
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ /*
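
Stripped of the UART specifics, the guard added above has the shape below in
every driver that carries it. This is a sketch only, with a hypothetical
demo_console_write(); as the changelog notes, no common helper for this
exists yet:

    #include <linux/kernel.h>          /* oops_in_progress */
    #include <linux/serial_core.h>     /* struct uart_port  */

    static void demo_console_write(struct uart_port *port)
    {
            unsigned long flags;
            int locked = 1;

            if (port->sysrq)
                    locked = 0;        /* rx interrupt path already holds port->lock */
            else if (oops_in_progress)
                    locked = spin_trylock_irqsave(&port->lock, flags);
            else
                    spin_lock_irqsave(&port->lock, flags);

            /* ... write the characters to the hardware ... */

            if (locked)
                    spin_unlock_irqrestore(&port->lock, flags);
    }
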
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch)
@@ -0,0 +1,341 @@
+From 053dc208381d2dc76ca340ef1cdd4b92e1311e86 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 12 Dec 2011 12:29:04 +0100
+Subject: [PATCH 301/303] wait-simple: Simple waitqueue implementation
+
+wait_queue is a swiss army knife and in most cases the complexity is
+not needed. For RT, waitqueues are a constant source of trouble as we
+can't convert the head lock to a raw spinlock due to fancy and
+long-lasting callbacks.
+
+Provide a slim version, which allows RT to replace wait queues. This
+should go mainline as well, as it lowers memory consumption and
+runtime overhead.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+[backport by: Tim Sander <tim.sander at hbm.com> ]
+---
+ include/linux/wait-simple.h | 172 +++++++++++++++++++++++++++++++++++++++++++
+ kernel/Makefile | 2 +-
+ kernel/wait-simple.c | 119 ++++++++++++++++++++++++++++++
+ 3 files changed, 292 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/wait-simple.h
+ create mode 100644 kernel/wait-simple.c
+
+diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
+new file mode 100644
+index 0000000..44fee60
+--- /dev/null
++++ b/include/linux/wait-simple.h
+@@ -0,0 +1,172 @@
++#ifndef _LINUX_WAIT_SIMPLE_H
++#define _LINUX_WAIT_SIMPLE_H
++
++#include <linux/spinlock.h>
++#include <linux/list.h>
++
++#include <asm/current.h>
++
++struct swaiter {
++ struct task_struct *task;
++ struct list_head node;
++};
++
++#define DEFINE_SWAITER(name) \
++ struct swaiter name = { \
++ .task = current, \
++ .node = LIST_HEAD_INIT((name).node), \
++ }
++
++struct swait_head {
++ raw_spinlock_t lock;
++ struct list_head list;
++};
++
++#define SWAIT_HEAD_INITIALIZER(name) { \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
++ .list = LIST_HEAD_INIT((name).list), \
++ }
++
++#define DEFINE_SWAIT_HEAD(name) \
++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
++
++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
++
++#define init_swait_head(swh) \
++ do { \
++ static struct lock_class_key __key; \
++ \
++ __init_swait_head((swh), &__key); \
++ } while (0)
++
++/*
++ * Waiter functions
++ */
++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
++extern void swait_finish(struct swait_head *head, struct swaiter *w);
++
++/*
++ * Wakeup functions
++ */
++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
++
++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
++
++/*
++ * Event API
++ */
++#define __swait_event(wq, condition) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ schedule(); \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event - sleep until a condition gets true
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ */
++#define swait_event(wq, condition) \
++do { \
++ if (condition) \
++ break; \
++ __swait_event(wq, condition); \
++} while (0)
++
++#define __swait_event_interruptible(wq, condition, ret) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ schedule(); \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event_interruptible - sleep until a condition gets true
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ */
++#define swait_event_interruptible(wq, condition) \
++({ \
++ int __ret = 0; \
++ if (!(condition)) \
++ __swait_event_interruptible(wq, condition, __ret); \
++ __ret; \
++})
++
++#define __swait_event_timeout(wq, condition, ret) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ ret = schedule_timeout(ret); \
++ if (!ret) \
++ break; \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * The function returns 0 if the @timeout elapsed, and the remaining
++ * jiffies if the condition evaluated to true before the timeout elapsed.
++ */
++#define swait_event_timeout(wq, condition, timeout) \
++({ \
++ long __ret = timeout; \
++ if (!(condition)) \
++ __swait_event_timeout(wq, condition, __ret); \
++ __ret; \
++})
++
++#endif
+diff --git a/kernel/Makefile b/kernel/Makefile
+index c961d3a..0b0ed50 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -11,7 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
+ hrtimer.o nsproxy.o srcu.o semaphore.o \
+ notifier.o ksysfs.o sched_clock.o cred.o \
+ async.o range.o
+-obj-y += groups.o
++obj-y += groups.o wait-simple.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace debug files and internal ftrace files
+diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c
+new file mode 100644
+index 0000000..c35ec78
+--- /dev/null
++++ b/kernel/wait-simple.c
+@@ -0,0 +1,119 @@
++/*
++ * Simple waitqueues without fancy flags and callbacks
++ *
++ * (C) 2011 Thomas Gleixner <tglx at linutronix.de>
++ *
++ * Based on kernel/wait.c
++ *
++ * For licencing details see kernel-base/COPYING
++ */
++#include <linux/init.h>
++#include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/wait-simple.h>
++
++/* Adds w to head->list. Must be called with head->lock locked. */
++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
++{
++ list_add(&w->node, &head->list);
++}
++
++/* Removes w from head->list. Must be called with head->lock locked. */
++static inline void __swait_dequeue(struct swaiter *w)
++{
++ list_del_init(&w->node);
++}
++
++/* Check whether a head has waiters enqueued */
++static inline bool swait_head_has_waiters(struct swait_head *h)
++{
++ return !list_empty(&h->list);
++}
++
++void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
++{
++ raw_spin_lock_init(&head->lock);
++ lockdep_set_class(&head->lock, key);
++ INIT_LIST_HEAD(&head->list);
++}
++EXPORT_SYMBOL_GPL(__init_swait_head);
++
++void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
++{
++ w->task = current;
++ if (list_empty(&w->node))
++ __swait_enqueue(head, w);
++}
++
++void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&head->lock, flags);
++ swait_prepare_locked(head, w);
++ __set_current_state(state);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++}
++EXPORT_SYMBOL_GPL(swait_prepare);
++
++void swait_finish_locked(struct swait_head *head, struct swaiter *w)
++{
++ __set_current_state(TASK_RUNNING);
++ if (w->task)
++ __swait_dequeue(w);
++}
++
++void swait_finish(struct swait_head *head, struct swaiter *w)
++{
++ unsigned long flags;
++
++ __set_current_state(TASK_RUNNING);
++ if (w->task) {
++ raw_spin_lock_irqsave(&head->lock, flags);
++ __swait_dequeue(w);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++ }
++}
++EXPORT_SYMBOL_GPL(swait_finish);
++
++unsigned int
++__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
++{
++ struct swaiter *curr, *next;
++ int woken = 0;
++
++ list_for_each_entry_safe(curr, next, &head->list, node) {
++ if (wake_up_state(curr->task, state)) {
++ __swait_dequeue(curr);
++ /*
++ * The waiting task can free the waiter as
++ * soon as curr->task = NULL is written,
++ * without taking any locks. A memory barrier
++ * is required here to prevent the following
++ * store to curr->task from getting ahead of
++ * the dequeue operation.
++ */
++ smp_wmb();
++ curr->task = NULL;
++ if (++woken == num)
++ break;
++ }
++ }
++ return woken;
++}
++
++unsigned int
++__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
++{
++ unsigned long flags;
++ int woken;
++
++ if (!swait_head_has_waiters(head))
++ return 0;
++
++ raw_spin_lock_irqsave(&head->lock, flags);
++ woken = __swait_wake_locked(head, state, num);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++ return woken;
++}
++EXPORT_SYMBOL_GPL(__swait_wake);
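
The API introduced above is deliberately small: define a head, sleep on a
condition, wake from (possibly interrupt-disabled) context. A usage sketch
with hypothetical demo_* names, mirroring the kthread conversion in the
rcutiny patch that follows:

    #include <linux/kthread.h>
    #include <linux/wait-simple.h>

    static DEFINE_SWAIT_HEAD(demo_wq);
    static unsigned long demo_work;

    /* Consumer: sleeps until demo_work is set. */
    static int demo_thread(void *arg)
    {
            while (!kthread_should_stop()) {
                    swait_event_interruptible(demo_wq, demo_work != 0);
                    demo_work = 0;
                    /* ... process the work ... */
            }
            return 0;
    }

    /* Producer: safe to call with interrupts disabled. */
    static void demo_kick(void)
    {
            demo_work = 1;
            swait_wake(&demo_wq);
    }
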
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch)
@@ -0,0 +1,83 @@
+From bfb19e23ef6cdf32bb07efb2a729c8747c4fbd1e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 3 Dec 2012 16:25:21 +0100
+Subject: [PATCH 302/303] rcutiny: Use simple waitqueue
+
+Simple waitqueues can be handled from interrupt disabled contexts.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ kernel/rcutiny_plugin.h | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
+index 2b0484a..f0a6606 100644
+--- a/kernel/rcutiny_plugin.h
++++ b/kernel/rcutiny_plugin.h
+@@ -26,6 +26,7 @@
+ #include <linux/module.h>
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
++#include <linux/wait-simple.h>
+
+ /* Global control variables for rcupdate callback mechanism. */
+ struct rcu_ctrlblk {
+@@ -250,7 +251,7 @@ static void show_tiny_preempt_stats(struct seq_file *m)
+
+ /* Controls for rcu_kthread() kthread. */
+ static struct task_struct *rcu_kthread_task;
+-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
++static DEFINE_SWAIT_HEAD(rcu_kthread_wq);
+ static unsigned long have_rcu_kthread_work;
+
+ /*
+@@ -720,7 +721,7 @@ void synchronize_rcu(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu);
+
+-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
++static DEFINE_SWAIT_HEAD(sync_rcu_preempt_exp_wq);
+ static unsigned long sync_rcu_preempt_exp_count;
+ static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+@@ -742,7 +743,7 @@ static int rcu_preempted_readers_exp(void)
+ */
+ static void rcu_report_exp_done(void)
+ {
+- wake_up(&sync_rcu_preempt_exp_wq);
++ swait_wake(&sync_rcu_preempt_exp_wq);
+ }
+
+ /*
+@@ -794,8 +795,8 @@ void synchronize_rcu_expedited(void)
+ else {
+ rcu_initiate_boost();
+ local_irq_restore(flags);
+- wait_event(sync_rcu_preempt_exp_wq,
+- !rcu_preempted_readers_exp());
++ swait_event(sync_rcu_preempt_exp_wq,
++ !rcu_preempted_readers_exp());
+ }
+
+ /* Clean up and exit. */
+@@ -882,7 +883,7 @@ static void rcu_preempt_process_callbacks(void)
+ static void invoke_rcu_callbacks(void)
+ {
+ have_rcu_kthread_work = 1;
+- wake_up(&rcu_kthread_wq);
++ swake_up(&rcu_kthread_wq);
+ }
+
+ /*
+@@ -899,8 +900,8 @@ static int rcu_kthread(void *arg)
+ unsigned long flags;
+
+ for (;;) {
+- wait_event_interruptible(rcu_kthread_wq,
+- have_rcu_kthread_work != 0);
++ swait_event_interruptible(rcu_kthread_wq,
++ have_rcu_kthread_work != 0);
+ morework = rcu_boost();
+ local_irq_save(flags);
+ work = have_rcu_kthread_work;
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch (from r19949, dists/sid/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch)
@@ -0,0 +1,16 @@
+From 60b354dbc916f4106aa691719ae2a14c510c60e7 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt at redhat.com>
+Date: Fri, 22 Feb 2013 12:08:15 -0500
+Subject: [PATCH 303/303] Linux 3.2.40-rt60 REBASE
+
+---
+ localversion-rt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/localversion-rt b/localversion-rt
+index b2111a2..66fa05e 100644
+--- a/localversion-rt
++++ b/localversion-rt
+@@ -1 +1 @@
+--rt24
++-rt60
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/series
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/series Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/series Sun Apr 7 23:16:11 2013 (r19970)
@@ -133,172 +133,171 @@
0133-hrtimers-prepare-full-preemption.patch
0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch
-0136-hrtimer-Add-missing-debug_activate-aid-Was-Re-ANNOUN.patch
-0137-hrtimer-fix-reprogram-madness.patch.patch
-0138-timer-fd-Prevent-live-lock.patch
-0139-posix-timers-thread-posix-cpu-timers-on-rt.patch
-0140-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
-0141-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
-0142-sched-delay-put-task.patch.patch
-0143-sched-limit-nr-migrate.patch.patch
-0144-sched-mmdrop-delayed.patch.patch
-0145-sched-rt-mutex-wakeup.patch.patch
-0146-sched-prevent-idle-boost.patch.patch
-0147-sched-might-sleep-do-not-account-rcu-depth.patch.patch
-0148-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
-0149-sched-cond-resched.patch.patch
-0150-cond-resched-softirq-fix.patch.patch
-0151-sched-no-work-when-pi-blocked.patch.patch
-0152-cond-resched-lock-rt-tweak.patch.patch
-0153-sched-disable-ttwu-queue.patch.patch
-0154-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
-0155-sched-ttwu-Return-success-when-only-changing-the-sav.patch
-0156-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
-0157-stomp-machine-mark-stomper-thread.patch.patch
-0158-stomp-machine-raw-lock.patch.patch
-0159-hotplug-Lightweight-get-online-cpus.patch
-0160-hotplug-sync_unplug-No.patch
-0161-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
-0162-sched-migrate-disable.patch.patch
-0163-hotplug-use-migrate-disable.patch.patch
-0164-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
-0165-ftrace-migrate-disable-tracing.patch.patch
-0166-tracing-Show-padding-as-unsigned-short.patch
-0167-migrate-disable-rt-variant.patch.patch
-0168-sched-Optimize-migrate_disable.patch
-0169-sched-Generic-migrate_disable.patch
-0170-sched-rt-Fix-migrate_enable-thinko.patch
-0171-sched-teach-migrate_disable-about-atomic-contexts.patch
-0172-sched-Postpone-actual-migration-disalbe-to-schedule.patch
-0173-sched-Do-not-compare-cpu-masks-in-scheduler.patch
-0174-sched-Have-migrate_disable-ignore-bounded-threads.patch
-0175-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
-0176-ftrace-crap.patch.patch
-0177-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
-0178-net-netif_rx_ni-migrate-disable.patch.patch
-0179-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
-0180-lockdep-rt.patch.patch
-0181-mutex-no-spin-on-rt.patch.patch
-0182-softirq-local-lock.patch.patch
-0183-softirq-Export-in_serving_softirq.patch
-0184-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
-0185-softirq-Fix-unplug-deadlock.patch
-0186-softirq-disable-softirq-stacks-for-rt.patch.patch
-0187-softirq-make-fifo.patch.patch
-0188-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
-0189-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
-0190-local-vars-migrate-disable.patch.patch
-0191-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
-0192-rtmutex-lock-killable.patch.patch
-0193-rtmutex-futex-prepare-rt.patch.patch
-0194-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
-0195-rt-mutex-add-sleeping-spinlocks-support.patch.patch
-0196-spinlock-types-separate-raw.patch.patch
-0197-rtmutex-avoid-include-hell.patch.patch
-0198-rt-add-rt-spinlocks.patch.patch
-0199-rt-add-rt-to-mutex-headers.patch.patch
-0200-rwsem-add-rt-variant.patch.patch
-0201-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
-0202-rwlocks-Fix-section-mismatch.patch
-0203-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
-0204-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
-0205-rcu-Frob-softirq-test.patch
-0206-rcu-Merge-RCU-bh-into-RCU-preempt.patch
-0207-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
-0208-rcu-more-fallout.patch.patch
-0209-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
-0210-rt-rcutree-Move-misplaced-prototype.patch
-0211-lglocks-rt.patch.patch
-0212-serial-8250-Clean-up-the-locking-for-rt.patch
-0213-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
-0214-drivers-tty-fix-omap-lock-crap.patch.patch
-0215-rt-Improve-the-serial-console-PASS_LIMIT.patch
-0216-fs-namespace-preemption-fix.patch
-0217-mm-protect-activate-switch-mm.patch.patch
-0218-fs-block-rt-support.patch.patch
-0219-fs-ntfs-disable-interrupt-only-on-RT.patch
-0220-x86-Convert-mce-timer-to-hrtimer.patch
-0221-x86-stackprotector-Avoid-random-pool-on-rt.patch
-0222-x86-Use-generic-rwsem_spinlocks-on-rt.patch
-0223-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
-0224-workqueue-use-get-cpu-light.patch.patch
-0225-epoll.patch.patch
-0226-mm-vmalloc.patch.patch
-0227-debugobjects-rt.patch.patch
-0228-jump-label-rt.patch.patch
-0229-skbufhead-raw-lock.patch.patch
-0230-x86-no-perf-irq-work-rt.patch.patch
-0231-console-make-rt-friendly.patch.patch
-0232-printk-Disable-migration-instead-of-preemption.patch
-0233-power-use-generic-rwsem-on-rt.patch
-0234-power-disable-highmem-on-rt.patch.patch
-0235-arm-disable-highmem-on-rt.patch.patch
-0236-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
-0237-mips-disable-highmem-on-rt.patch.patch
-0238-net-Avoid-livelock-in-net_tx_action-on-RT.patch
-0239-ping-sysrq.patch.patch
-0240-kgdb-serial-Short-term-workaround.patch
-0241-add-sys-kernel-realtime-entry.patch
-0242-mm-rt-kmap_atomic-scheduling.patch
-0243-ipc-sem-Rework-semaphore-wakeups.patch
-0244-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
-0245-x86-kvm-require-const-tsc-for-rt.patch.patch
-0246-scsi-fcoe-rt-aware.patch.patch
-0247-x86-crypto-Reduce-preempt-disabled-regions.patch
-0248-dm-Make-rt-aware.patch
-0249-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
-0250-seqlock-Prevent-rt-starvation.patch
-0251-timer-Fix-hotplug-for-rt.patch
-0252-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
-0253-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
-0254-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
-0255-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
-0256-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
-0257-softirq-Check-preemption-after-reenabling-interrupts.patch
-0258-rt-Introduce-cpu_chill.patch
-0259-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
-0260-net-Use-cpu_chill-instead-of-cpu_relax.patch
-0261-kconfig-disable-a-few-options-rt.patch.patch
-0262-kconfig-preempt-rt-full.patch.patch
-0263-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
-0264-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
-0265-net-RT-REmove-preemption-disabling-in-netif_rx.patch
-0266-mips-remove-smp-reserve-lock.patch.patch
-0267-Latency-histogramms-Cope-with-backwards-running-loca.patch
-0268-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
-0269-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
-0270-Latency-histograms-Detect-another-yet-overlooked-sha.patch
-0271-slab-Prevent-local-lock-deadlock.patch
-0272-fs-jbd-pull-your-plug-when-waiting-for-space.patch
-0273-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
-0274-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
-0275-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
-0276-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
+0136-hrtimer-fix-reprogram-madness.patch.patch
+0137-timer-fd-Prevent-live-lock.patch
+0138-posix-timers-thread-posix-cpu-timers-on-rt.patch
+0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
+0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
+0141-sched-delay-put-task.patch.patch
+0142-sched-limit-nr-migrate.patch.patch
+0143-sched-mmdrop-delayed.patch.patch
+0144-sched-rt-mutex-wakeup.patch.patch
+0145-sched-prevent-idle-boost.patch.patch
+0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch
+0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
+0148-sched-cond-resched.patch.patch
+0149-cond-resched-softirq-fix.patch.patch
+0150-sched-no-work-when-pi-blocked.patch.patch
+0151-cond-resched-lock-rt-tweak.patch.patch
+0152-sched-disable-ttwu-queue.patch.patch
+0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
+0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch
+0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+0156-stomp-machine-mark-stomper-thread.patch.patch
+0157-stomp-machine-raw-lock.patch.patch
+0158-hotplug-Lightweight-get-online-cpus.patch
+0159-hotplug-sync_unplug-No.patch
+0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
+0161-sched-migrate-disable.patch.patch
+0162-hotplug-use-migrate-disable.patch.patch
+0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
+0164-ftrace-migrate-disable-tracing.patch.patch
+0165-tracing-Show-padding-as-unsigned-short.patch
+0166-migrate-disable-rt-variant.patch.patch
+0167-sched-Optimize-migrate_disable.patch
+0168-sched-Generic-migrate_disable.patch
+0169-sched-rt-Fix-migrate_enable-thinko.patch
+0170-sched-teach-migrate_disable-about-atomic-contexts.patch
+0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch
+0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch
+0173-sched-Have-migrate_disable-ignore-bounded-threads.patch
+0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
+0175-ftrace-crap.patch.patch
+0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
+0177-net-netif_rx_ni-migrate-disable.patch.patch
+0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
+0179-lockdep-rt.patch.patch
+0180-mutex-no-spin-on-rt.patch.patch
+0181-softirq-local-lock.patch.patch
+0182-softirq-Export-in_serving_softirq.patch
+0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
+0184-softirq-Fix-unplug-deadlock.patch
+0185-softirq-disable-softirq-stacks-for-rt.patch.patch
+0186-softirq-make-fifo.patch.patch
+0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
+0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
+0189-local-vars-migrate-disable.patch.patch
+0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
+0191-rtmutex-lock-killable.patch.patch
+0192-rtmutex-futex-prepare-rt.patch.patch
+0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch
+0195-spinlock-types-separate-raw.patch.patch
+0196-rtmutex-avoid-include-hell.patch.patch
+0197-rt-add-rt-spinlocks.patch.patch
+0198-rt-add-rt-to-mutex-headers.patch.patch
+0199-rwsem-add-rt-variant.patch.patch
+0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
+0201-rwlocks-Fix-section-mismatch.patch
+0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
+0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
+0204-rcu-Frob-softirq-test.patch
+0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch
+0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
+0207-rcu-more-fallout.patch.patch
+0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
+0209-rt-rcutree-Move-misplaced-prototype.patch
+0210-lglocks-rt.patch.patch
+0211-serial-8250-Clean-up-the-locking-for-rt.patch
+0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
+0213-drivers-tty-fix-omap-lock-crap.patch.patch
+0214-rt-Improve-the-serial-console-PASS_LIMIT.patch
+0215-fs-namespace-preemption-fix.patch
+0216-mm-protect-activate-switch-mm.patch.patch
+0217-fs-block-rt-support.patch.patch
+0218-fs-ntfs-disable-interrupt-only-on-RT.patch
+0219-x86-Convert-mce-timer-to-hrtimer.patch
+0220-x86-stackprotector-Avoid-random-pool-on-rt.patch
+0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch
+0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
+0223-workqueue-use-get-cpu-light.patch.patch
+0224-epoll.patch.patch
+0225-mm-vmalloc.patch.patch
+0226-debugobjects-rt.patch.patch
+0227-jump-label-rt.patch.patch
+0228-skbufhead-raw-lock.patch.patch
+0229-x86-no-perf-irq-work-rt.patch.patch
+0230-console-make-rt-friendly.patch.patch
+0231-printk-Disable-migration-instead-of-preemption.patch
+0232-power-use-generic-rwsem-on-rt.patch
+0233-power-disable-highmem-on-rt.patch.patch
+0234-arm-disable-highmem-on-rt.patch.patch
+0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
+0236-mips-disable-highmem-on-rt.patch.patch
+0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch
+0238-ping-sysrq.patch.patch
+0239-kgdb-serial-Short-term-workaround.patch
+0240-add-sys-kernel-realtime-entry.patch
+0241-mm-rt-kmap_atomic-scheduling.patch
+0242-ipc-sem-Rework-semaphore-wakeups.patch
+0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
+0244-x86-kvm-require-const-tsc-for-rt.patch.patch
+0245-scsi-fcoe-rt-aware.patch.patch
+0246-x86-crypto-Reduce-preempt-disabled-regions.patch
+0247-dm-Make-rt-aware.patch
+0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
+0249-seqlock-Prevent-rt-starvation.patch
+0250-timer-Fix-hotplug-for-rt.patch
+0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
+0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
+0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
+0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
+0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
+0256-softirq-Check-preemption-after-reenabling-interrupts.patch
+0257-rt-Introduce-cpu_chill.patch
+0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
+0259-net-Use-cpu_chill-instead-of-cpu_relax.patch
+0260-kconfig-disable-a-few-options-rt.patch.patch
+0261-kconfig-preempt-rt-full.patch.patch
+0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
+0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
+0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch
+0265-mips-remove-smp-reserve-lock.patch.patch
+0266-Latency-histogramms-Cope-with-backwards-running-loca.patch
+0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
+0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
+0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch
+0270-slab-Prevent-local-lock-deadlock.patch
+0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch
+0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
+0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
+0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
+0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
+0276-fix-printk-flush-of-messages.patch
0277-fix-printk-flush-of-messages.patch
-0278-fix-printk-flush-of-messages.patch
-0279-random-Make-it-work-on-rt.patch
-0280-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
-0281-mm-slab-Fix-potential-deadlock.patch
-0282-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
-0283-rt-rwsem-rwlock-lockdep-annotations.patch
-0284-sched-Better-debug-output-for-might-sleep.patch
-0285-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
-0286-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
-0287-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
-0288-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
-0289-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
-0290-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
-0291-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
-0292-sched-Consider-pi-boosting-in-setscheduler.patch
-0293-drivers-tty-pl011-irq-disable-madness.patch.patch
-0294-mmci-Remove-bogus-local_irq_save.patch
-0295-sched-Init-idle-on_rq-in-init_idle.patch
-0296-sched-Check-for-idle-task-in-might_sleep.patch
-0297-mm-swap-Initialize-local-locks-early.patch
-0298-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
-0299-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
-0300-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
-0301-serial-Imx-Fix-recursive-locking-bug.patch
-0302-wait-simple-Simple-waitqueue-implementation.patch
-0303-rcutiny-Use-simple-waitqueue.patch
-0304-Linux-3.2.39-rt59-REBASE.patch
+0278-random-Make-it-work-on-rt.patch
+0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
+0280-mm-slab-Fix-potential-deadlock.patch
+0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
+0282-rt-rwsem-rwlock-lockdep-annotations.patch
+0283-sched-Better-debug-output-for-might-sleep.patch
+0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
+0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
+0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
+0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
+0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
+0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
+0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
+0291-sched-Consider-pi-boosting-in-setscheduler.patch
+0292-drivers-tty-pl011-irq-disable-madness.patch.patch
+0293-mmci-Remove-bogus-local_irq_save.patch
+0294-sched-Init-idle-on_rq-in-init_idle.patch
+0295-sched-Check-for-idle-task-in-might_sleep.patch
+0296-mm-swap-Initialize-local-locks-early.patch
+0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
+0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
+0300-serial-Imx-Fix-recursive-locking-bug.patch
+0301-wait-simple-Simple-waitqueue-implementation.patch
+0302-rcutiny-Use-simple-waitqueue.patch
+0303-Linux-3.2.40-rt60-REBASE.patch
Copied: dists/squeeze-backports/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch (from r19949, dists/sid/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch)
@@ -0,0 +1,41 @@
+From 0f905a43ce955b638139bd84486194770a6a2c08 Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming at intel.com>
+Date: Tue, 20 Nov 2012 13:07:46 +0000
+Subject: [PATCH] x86, efi: Fix processor-specific memcpy() build error
+
+Building for Athlon/Duron/K7 results in the following build error,
+
+arch/x86/boot/compressed/eboot.o: In function `__constant_memcpy3d':
+eboot.c:(.text+0x385): undefined reference to `_mmx_memcpy'
+arch/x86/boot/compressed/eboot.o: In function `efi_main':
+eboot.c:(.text+0x1a22): undefined reference to `_mmx_memcpy'
+
+because the boot stub code doesn't link with the kernel proper, and
+therefore doesn't have access to the 3DNow version of memcpy. So,
+follow the example of misc.c and #undef memcpy so that we use the
+version provided by misc.c.
+
+See https://bugzilla.kernel.org/show_bug.cgi?id=50391
+
+Reported-by: Al Viro <viro at zeniv.linux.org.uk>
+Reported-by: Ryan Underwood <nemesis at icequake.net>
+Cc: H. Peter Anvin <hpa at zytor.com>
+Cc: stable at vger.kernel.org
+Signed-off-by: Matt Fleming <matt.fleming at intel.com>
+---
+ arch/x86/boot/compressed/eboot.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index c760e07..e87b0ca 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -12,6 +12,8 @@
+ #include <asm/setup.h>
+ #include <asm/desc.h>
+
++#undef memcpy /* Use memcpy from misc.c */
++
+ #include "eboot.h"
+
+ static efi_system_table_t *sys_table;
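
The two added lines are the whole fix; the pattern they rely on is the one
misc.c already uses in the boot stub: undefine the macro the headers provide
and fall back to a definition that needs no kernel-proper symbols. A sketch
only, assuming a plain byte copy (the real misc.c version differs):

    #include <linux/types.h>
    #include <linux/string.h>   /* may map memcpy to an arch-optimised variant */

    #undef memcpy               /* the boot stub cannot link the optimised one */

    void *memcpy(void *dest, const void *src, size_t n)
    {
            unsigned char *d = dest;
            const unsigned char *s = src;

            while (n--)
                    *d++ = *s++;
            return dest;
    }
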
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -10,12 +10,12 @@
Signed-off-by: K. Y. Srinivasan <kys at microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+[bwh: Adjusted to apply after backported commit 9d2696e658ef
+ '[SCSI] storvsc: Initialize the sglist']
---
drivers/staging/hv/storvsc_drv.c | 630 +++++++++++++++++++-------------------
1 file changed, 313 insertions(+), 317 deletions(-)
-diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
-index 204b3ca..7c9fa19 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -276,7 +276,6 @@ struct hv_storvsc_request {
@@ -26,7 +26,7 @@
struct hv_multipage_buffer data_buffer;
struct vstor_packet vstor_packet;
-@@ -436,6 +435,227 @@ get_in_err:
+@@ -436,6 +435,228 @@ get_in_err:
}
@@ -89,6 +89,7 @@
+ if (!bounce_sgl)
+ return NULL;
+
++ sg_init_table(bounce_sgl, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
@@ -254,7 +255,7 @@
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
-@@ -562,24 +782,101 @@ cleanup:
+@@ -562,23 +783,100 @@ cleanup:
return ret;
}
@@ -296,7 +297,6 @@
- * MODE_SENSE command with cmd[2] == 0x1c
- *
- * Setup srb and scsi status so this won't be fatal.
-- * We do this so we can distinguish truly fatal failues
+ * If there is an error; offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side.
@@ -366,11 +366,10 @@
+ * MODE_SENSE command with cmd[2] == 0x1c
+ *
+ * Setup srb and scsi status so this won't be fatal.
-+ * We do this so we can distinguish truly fatal failues
+ * We do this so we can distinguish truly fatal failues
* (srb status == 0x4) and off-line the device in that case.
*/
-
-@@ -625,7 +922,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
+@@ -625,7 +923,7 @@ static void storvsc_on_io_completion(str
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
@@ -379,7 +378,7 @@
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
-@@ -875,229 +1172,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
+@@ -875,230 +1173,6 @@ static int storvsc_device_configure(stru
return 0;
}
@@ -442,6 +441,7 @@
- if (!bounce_sgl)
- return NULL;
-
+- sg_init_table(bounce_sgl, num_pages);
- for (i = 0; i < num_pages; i++) {
- page_buf = alloc_page(GFP_ATOMIC);
- if (!page_buf)
@@ -609,7 +609,7 @@
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
sector_t capacity, int *info)
{
-@@ -1166,83 +1240,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+@@ -1172,83 +1246,6 @@ static int storvsc_host_reset_handler(st
return SUCCESS;
}
@@ -693,7 +693,7 @@
static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
bool allowed = true;
-@@ -1318,7 +1315,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1324,7 +1321,6 @@ static int storvsc_queuecommand(struct S
break;
}
@@ -701,6 +701,3 @@
request->context = cmd_request;/* scmnd; */
vm_srb->port_number = host_dev->port;
---
-1.7.9.5
-
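
This refresh mainly re-bases the patch on top of the backported
'[SCSI] storvsc: Initialize the sglist' change, which is why the moved
bounce-buffer allocation now carries an sg_init_table() call: a freshly
allocated scatterlist array must have its link bits cleared and its end
marker set before the entries are populated. A minimal sketch of that
allocation pattern, with a hypothetical demo_alloc_sgl() helper:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct scatterlist *demo_alloc_sgl(unsigned int nents, gfp_t gfp)
    {
            struct scatterlist *sgl;

            sgl = kcalloc(nents, sizeof(*sgl), gfp);
            if (!sgl)
                    return NULL;

            /* Clear link bits and set the end marker so sg_next() and
             * sg_is_last() behave once the entries are filled in. */
            sg_init_table(sgl, nents);
            return sgl;
    }
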
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch Sun Apr 7 23:16:11 2013 (r19970)
@@ -19,9 +19,9 @@
Signed-off-by: K. Y. Srinivasan <kys at microsoft.com>
Acked-by: James Bottomley <JBottomley at Parallels.com>
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
-[bwh: Adjusted to apply after commit 5c1b10ab7f93d24f29b5630286e323d1c5802d5c
- ('storvsc: Account for in-transit packets in the RESET path') backported
- in 3.2.33, moving that fix to the new file]
+[bwh: Adjusted to apply after backported commits 5c1b10ab7f93
+ '[SCSI] storvsc: Account for in-transit packets in the RESET path' and
+ 9d2696e658ef '[SCSI] storvsc: Initialize the sglist']
---
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -60,7 +60,7 @@
sd_mod-objs := sd.o
--- /dev/null
+++ b/drivers/scsi/storvsc_drv.c
-@@ -0,0 +1,1553 @@
+@@ -0,0 +1,1554 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
@@ -530,6 +530,7 @@
+ if (!bounce_sgl)
+ return NULL;
+
++ sg_init_table(bounce_sgl, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
@@ -1659,7 +1660,7 @@
-Haiyang Zhang <haiyangz at microsoft.com>, and K. Y. Srinivasan <kys at microsoft.com>
--- a/drivers/staging/hv/storvsc_drv.c
+++ /dev/null
-@@ -1,1553 +0,0 @@
+@@ -1,1554 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
@@ -2129,6 +2130,7 @@
- if (!bounce_sgl)
- return NULL;
-
+- sg_init_table(bounce_sgl, num_pages);
- for (i = 0; i < num_pages; i++) {
- page_buf = alloc_page(GFP_ATOMIC);
- if (!page_buf)
Copied: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch (from r19949, dists/sid/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch Sun Apr 7 23:16:11 2013 (r19970, copy of r19949, dists/sid/linux/debian/patches/features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch)
@@ -0,0 +1,145 @@
+From: Olaf Hering <olaf at aepfle.de>
+Date: Tue, 18 Sep 2012 17:48:01 +0200
+Subject: ata_piix: reenable MS Virtual PC guests
+
+commit d9904344fc4052fbe7e4dc137eba0dcdadf326bd upstream.
+
+An earlier commit cd006086fa5d91414d8ff9ff2b78fbb593878e3c ("ata_piix:
+defer disks to the Hyper-V drivers by default") broke MS Virtual PC
+guests. Hyper-V guests and Virtual PC guests have nearly identical DMI
+info. As a result, the driver currently ignores the emulated hardware
+in Virtual PC guests and defers the handling to hv_blkvsc. Since Virtual
+PC does not offer paravirtualized drivers, no disks will be found in the
+guest.
+
+One difference in the DMI info is the product version. This patch adds a
+match for MS Virtual PC 2007 and "unignores" the emulated hardware.
+
+This was reported for openSuSE 12.1 in bugzilla:
+https://bugzilla.novell.com/show_bug.cgi?id=737532
+
+Here is a detailed list of DMI info from example guests:
+
+hwinfo --bios:
+
+virtual pc guest:
+
+ System Info: #1
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "VS2005R2"
+ Serial: "3178-9905-1533-4840-9282-0569-59"
+ UUID: undefined, but settable
+ Wake-up: 0x06 (Power Switch)
+ Board Info: #2
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "5.0"
+ Serial: "3178-9905-1533-4840-9282-0569-59"
+ Chassis Info: #3
+ Manufacturer: "Microsoft Corporation"
+ Version: "5.0"
+ Serial: "3178-9905-1533-4840-9282-0569-59"
+ Asset Tag: "7188-3705-6309-9738-9645-0364-00"
+ Type: 0x03 (Desktop)
+ Bootup State: 0x03 (Safe)
+ Power Supply State: 0x03 (Safe)
+ Thermal State: 0x01 (Other)
+ Security Status: 0x01 (Other)
+
+win2k8 guest:
+
+ System Info: #1
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "7.0"
+ Serial: "9106-3420-9819-5495-1514-2075-48"
+ UUID: undefined, but settable
+ Wake-up: 0x06 (Power Switch)
+ Board Info: #2
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "7.0"
+ Serial: "9106-3420-9819-5495-1514-2075-48"
+ Chassis Info: #3
+ Manufacturer: "Microsoft Corporation"
+ Version: "7.0"
+ Serial: "9106-3420-9819-5495-1514-2075-48"
+ Asset Tag: "7076-9522-6699-1042-9501-1785-77"
+ Type: 0x03 (Desktop)
+ Bootup State: 0x03 (Safe)
+ Power Supply State: 0x03 (Safe)
+ Thermal State: 0x01 (Other)
+ Security Status: 0x01 (Other)
+
+win2k12 guest:
+
+ System Info: #1
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "7.0"
+ Serial: "8179-1954-0187-0085-3868-2270-14"
+ UUID: undefined, but settable
+ Wake-up: 0x06 (Power Switch)
+ Board Info: #2
+ Manufacturer: "Microsoft Corporation"
+ Product: "Virtual Machine"
+ Version: "7.0"
+ Serial: "8179-1954-0187-0085-3868-2270-14"
+ Chassis Info: #3
+ Manufacturer: "Microsoft Corporation"
+ Version: "7.0"
+ Serial: "8179-1954-0187-0085-3868-2270-14"
+ Asset Tag: "8374-0485-4557-6331-0620-5845-25"
+ Type: 0x03 (Desktop)
+ Bootup State: 0x03 (Safe)
+ Power Supply State: 0x03 (Safe)
+ Thermal State: 0x01 (Other)
+ Security Status: 0x01 (Other)
+
+Signed-off-by: Olaf Hering <olaf at aepfle.de>
+Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ drivers/ata/ata_piix.c | 25 ++++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index ef773e1..bec35f4 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -1585,12 +1585,31 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
+ },
+ { } /* terminate list */
+ };
+- const struct dmi_system_id *dmi = dmi_first_match(ignore_hyperv);
++ static const struct dmi_system_id allow_virtual_pc[] = {
++ {
++ /* In MS Virtual PC guests the DMI ident is nearly
++ * identical to a Hyper-V guest. One difference is the
++ * product version which is used here to identify
++ * a Virtual PC guest. This entry allows ata_piix to
++ * drive the emulated hardware.
++ */
++ .ident = "MS Virtual PC 2007",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR,
++ "Microsoft Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
++ },
++ },
++ { } /* terminate list */
++ };
++ const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
++ const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
+
+- if (dmi && prefer_ms_hyperv) {
++ if (ignore && !allow && prefer_ms_hyperv) {
+ host->flags |= ATA_HOST_IGNORE_ATA;
+ dev_info(host->dev, "%s detected, ATA device ignore set\n",
+- dmi->ident);
++ ignore->ident);
+ }
+ #endif
+ }
Modified: dists/squeeze-backports/linux/debian/patches/series
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/series Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/series Sun Apr 7 23:16:11 2013 (r19970)
@@ -67,7 +67,6 @@
features/all/fs-prevent-use-after-free-in-auditing-when-symlink-f.patch
# Update all Hyper-V drivers to 3.4-rc1 (no longer staging)
-features/x86/hyperv/0001-NLS-improve-UTF8-UTF16-string-conversion-routine.patch
features/x86/hyperv/0002-HID-Move-the-hid-hyperv-driver-out-of-staging.patch
features/x86/hyperv/0003-Staging-hv-storvsc-Use-mempools-to-allocate-struct-s.patch
features/x86/hyperv/0004-Staging-hv-storvsc-Cleanup-error-handling-in-the-pro.patch
@@ -146,6 +145,7 @@
features/x86/hyperv/0077-hv-remove-the-second-argument-of-k-un-map_atomic.patch
features/x86/hyperv/0078-libata-add-a-host-flag-to-ignore-detected-ATA-device.patch
features/x86/hyperv/0079-ata_piix-defer-disks-to-the-Hyper-V-drivers-by-defau.patch
+features/x86/hyperv/0080-ata_piix-reenable-ms-virtual-pc-guests.patch
features/x86/efi-stub/0001-x86-Add-missing-bzImage-fields-to-struct-setup_heade.patch
features/x86/efi-stub/0002-x86-Don-t-use-magic-strings-for-EFI-loader-signature.patch
@@ -394,17 +394,13 @@
bugfix/all/PCI-PM-Runtime-make-PCI-traces-quieter.patch
features/all/USB-add-USB_VENDOR_AND_INTERFACE_INFO-macro.patch
-bugfix/all/usb-Add-quirk-detection-based-on-interface-informati.patch
-bugfix/all/usb-Add-USB_QUIRK_RESET_RESUME-for-all-Logitech-UVC-.patch
bugfix/alpha/alpha-use-large-data-model.diff
features/arm/ahci-Add-JMicron-362-device-IDs.patch
-bugfix/all/speakup-lower-default-software-speech-rate.patch
debian/perf-hide-abi-change-in-3.2.30.patch
debian/iwlwifi-do-not-request-unreleased-firmware.patch
debian/hid-avoid-ABI-change-in-3.2.31.patch
debian/xfrm-avoid-ABI-change-in-3.2.31.patch
debian/fs-writeback-avoid-ABI-change-in-3.2.32.patch
-bugfix/x86/asus-laptop-Do-not-call-HWRS-on-init.patch
features/all/xen/microcode.patch
debian/ALSA-avoid-ABI-change-in-3.2.34.patch
@@ -419,12 +415,6 @@
bugfix/all/firmware_class-log-every-success-and-failure.patch
bugfix/all/firmware-remove-redundant-log-messages-from-drivers.patch
-bugfix/all/usermodehelper-introduce-umh_complete.patch
-bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch
-bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch
-bugfix/all/kmod-introduce-call_modprobe-helper.patch
-bugfix/all/kmod-make-__request_module-killable.patch
-bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
bugfix/all/megaraid_sas-fix-memory-leak-if-SGL-has-zero-length-entries.patch
debian/audit-increase-AUDIT_NAMES.patch
features/all/asix-Adds-support-for-Lenovo-10-100-USB-dongle.patch
@@ -448,11 +438,6 @@
features/all/iguanair/0010-media-iguanair-do-not-modify-transmit-buffer.patch
features/all/iguanair/0011-media-iguanair-cannot-send-data-from-the-stack.patch
features/all/rt2800-add-chipset-revision-RT5390R-support.patch
-bugfix/all/fs-cachefiles-add-support-for-large-files-in-filesys.patch
-bugfix/all/ext4-rewrite-punch-hole-to-use-ext4_ext_remove_space.patch
-bugfix/all/ext4-fix-hole-punch-failure-when-depth-is-greater-th.patch
-bugfix/all/ext4-fix-kernel-BUG-on-large-scale-rm-rf-commands.patch
-bugfix/all/md-protect-against-crash-upon-fsync-on-ro-array.patch
debian/net-avoid-ABI-break-in-3.2.37.patch
features/all/net-define-netdev_features_t.patch
@@ -489,7 +474,6 @@
bugfix/x86/drm-i915-add-quirk-to-invert-brightness-on-emachines-e725.patch
bugfix/x86/drm-i915-add-quirk-to-invert-brightness-on-packard-bell-ncl20.patch
bugfix/all/drm-nouveau-fix-init-with-agpgart-uninorth.patch
-bugfix/x86/drm-i915-EBUSY-status-handling-added-to-i915_gem_fau.patch
bugfix/x86/drm-i915-Close-race-between-processing-unpin-task-an.patch
bugfix/all/drm-radeon-dce32-use-fractional-fb-dividers-for-high.patch
bugfix/all/drm-radeon-fix-amd-afusion-gpu-setup-aka-sumo-v2.patch
@@ -624,8 +608,34 @@
features/all/line6/0105-staging-line6-drop-dump-requests-from-pod-startup.patch
features/all/line6/0106-staging-line6-drop-unused-dumprequest-code.patch
bugfix/all/mm-Try-harder-to-allocate-vmemmap-blocks.patch
-bugfix/x86/efi-Clear-EFI_RUNTIME_SERVICES-rather-than-EFI_BOOT-.patch
-bugfix/x86/x86-efi-Make-noefi-really-disable-EFI-runtime-serivc.patch
-bugfix/all/mm-fix-pageblock-bitmap-allocation.patch
-bugfix/all/USB-usb-storage-unusual_devs-update-for-Super-TOP-SA.patch
debian/x86-efi-avoid-abi-change-in-3.2.38.patch
+features/x86/efi-stub/0019-x86-efi-Fix-processor-specific-memcpy-build-error.patch
+debian/pps-avoid-abi-change-in-3.2.40.patch
+bugfix/x86/drm-i915-Unconditionally-initialise-the-interrupt-wo.patch
+debian/efi-autoload-efivars.patch
+bugfix/all/kexec-remove-KMSG_DUMP_KEXEC.patch
+bugfix/all/kmsg_dump-don-t-run-on-non-error-paths-by-default.patch
+bugfix/all/i915-initialize-CADL-in-opregion.patch
+bugfix/all/signal-fix-use-of-missing-sa_restorer-field.patch
+bugfix/all/kernel-signal.c-use-__ARCH_HAS_SA_RESTORER-instead-o.patch
+bugfix/all/rds-limit-the-size-allocated-by-rds_message_alloc.patch
+bugfix/all/rtnl-fix-info-leak-on-rtm_getlink-request-for-vf-devices.patch
+bugfix/all/dcbnl-fix-various-netlink-info-leaks.patch
+bugfix/s390/s390-mm-fix-flush_tlb_kernel_range.patch
+bugfix/powerpc/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
+bugfix/all/vhost-net-fix-heads-usage-of-ubuf_info.patch
+bugfix/all/udf-avoid-info-leak-on-export.patch
+bugfix/all/isofs-avoid-info-leak-on-export.patch
+debian/dm-avoid-ABI-change-in-3.2.41.patch
+bugfix/all/efivars-Allow-disabling-use-as-a-pstore-backend.patch
+bugfix/all/efivars-Add-module-parameter-to-disable-use-as-a-pst.patch
+bugfix/all/efivars-Fix-check-for-CONFIG_EFI_VARS_PSTORE_DEFAULT.patch
+bugfix/all/efi_pstore-Introducing-workqueue-updating-sysfs.patch
+bugfix/all/efivars-explicitly-calculate-length-of-VariableName.patch
+bugfix/all/efivars-Handle-duplicate-names-from-get_next_variabl.patch
+bugfix/all/efivars-pstore-do-not-check-size-when-erasing-variable.patch
+debian/efivars-remove-check-for-50-full-on-write.patch
+bugfix/x86/drm-i915-bounds-check-execbuffer-relocation-count.patch
+bugfix/x86/KVM-x86-fix-for-buffer-overflow-in-handling-of-MSR_K.patch
+bugfix/x86/KVM-x86-Convert-MSR_KVM_SYSTEM_TIME-to-use-gfn_to_hv.patch
+bugfix/all/KVM-Fix-bounds-checking-in-ioapic-indirect-register-.patch
Modified: dists/squeeze-backports/linux/debian/patches/series-rt
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/series-rt Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/patches/series-rt Sun Apr 7 23:16:11 2013 (r19970)
@@ -133,172 +133,171 @@
features/all/rt/0133-hrtimers-prepare-full-preemption.patch
features/all/rt/0134-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
features/all/rt/0135-hrtimer-Don-t-call-the-timer-handler-from-hrtimer_st.patch
-features/all/rt/0136-hrtimer-Add-missing-debug_activate-aid-Was-Re-ANNOUN.patch
-features/all/rt/0137-hrtimer-fix-reprogram-madness.patch.patch
-features/all/rt/0138-timer-fd-Prevent-live-lock.patch
-features/all/rt/0139-posix-timers-thread-posix-cpu-timers-on-rt.patch
-features/all/rt/0140-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
-features/all/rt/0141-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
-features/all/rt/0142-sched-delay-put-task.patch.patch
-features/all/rt/0143-sched-limit-nr-migrate.patch.patch
-features/all/rt/0144-sched-mmdrop-delayed.patch.patch
-features/all/rt/0145-sched-rt-mutex-wakeup.patch.patch
-features/all/rt/0146-sched-prevent-idle-boost.patch.patch
-features/all/rt/0147-sched-might-sleep-do-not-account-rcu-depth.patch.patch
-features/all/rt/0148-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
-features/all/rt/0149-sched-cond-resched.patch.patch
-features/all/rt/0150-cond-resched-softirq-fix.patch.patch
-features/all/rt/0151-sched-no-work-when-pi-blocked.patch.patch
-features/all/rt/0152-cond-resched-lock-rt-tweak.patch.patch
-features/all/rt/0153-sched-disable-ttwu-queue.patch.patch
-features/all/rt/0154-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
-features/all/rt/0155-sched-ttwu-Return-success-when-only-changing-the-sav.patch
-features/all/rt/0156-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
-features/all/rt/0157-stomp-machine-mark-stomper-thread.patch.patch
-features/all/rt/0158-stomp-machine-raw-lock.patch.patch
-features/all/rt/0159-hotplug-Lightweight-get-online-cpus.patch
-features/all/rt/0160-hotplug-sync_unplug-No.patch
-features/all/rt/0161-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
-features/all/rt/0162-sched-migrate-disable.patch.patch
-features/all/rt/0163-hotplug-use-migrate-disable.patch.patch
-features/all/rt/0164-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
-features/all/rt/0165-ftrace-migrate-disable-tracing.patch.patch
-features/all/rt/0166-tracing-Show-padding-as-unsigned-short.patch
-features/all/rt/0167-migrate-disable-rt-variant.patch.patch
-features/all/rt/0168-sched-Optimize-migrate_disable.patch
-features/all/rt/0169-sched-Generic-migrate_disable.patch
-features/all/rt/0170-sched-rt-Fix-migrate_enable-thinko.patch
-features/all/rt/0171-sched-teach-migrate_disable-about-atomic-contexts.patch
-features/all/rt/0172-sched-Postpone-actual-migration-disalbe-to-schedule.patch
-features/all/rt/0173-sched-Do-not-compare-cpu-masks-in-scheduler.patch
-features/all/rt/0174-sched-Have-migrate_disable-ignore-bounded-threads.patch
-features/all/rt/0175-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
-features/all/rt/0176-ftrace-crap.patch.patch
-features/all/rt/0177-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
-features/all/rt/0178-net-netif_rx_ni-migrate-disable.patch.patch
-features/all/rt/0179-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
-features/all/rt/0180-lockdep-rt.patch.patch
-features/all/rt/0181-mutex-no-spin-on-rt.patch.patch
-features/all/rt/0182-softirq-local-lock.patch.patch
-features/all/rt/0183-softirq-Export-in_serving_softirq.patch
-features/all/rt/0184-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
-features/all/rt/0185-softirq-Fix-unplug-deadlock.patch
-features/all/rt/0186-softirq-disable-softirq-stacks-for-rt.patch.patch
-features/all/rt/0187-softirq-make-fifo.patch.patch
-features/all/rt/0188-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
-features/all/rt/0189-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
-features/all/rt/0190-local-vars-migrate-disable.patch.patch
-features/all/rt/0191-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
-features/all/rt/0192-rtmutex-lock-killable.patch.patch
-features/all/rt/0193-rtmutex-futex-prepare-rt.patch.patch
-features/all/rt/0194-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
-features/all/rt/0195-rt-mutex-add-sleeping-spinlocks-support.patch.patch
-features/all/rt/0196-spinlock-types-separate-raw.patch.patch
-features/all/rt/0197-rtmutex-avoid-include-hell.patch.patch
-features/all/rt/0198-rt-add-rt-spinlocks.patch.patch
-features/all/rt/0199-rt-add-rt-to-mutex-headers.patch.patch
-features/all/rt/0200-rwsem-add-rt-variant.patch.patch
-features/all/rt/0201-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
-features/all/rt/0202-rwlocks-Fix-section-mismatch.patch
-features/all/rt/0203-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
-features/all/rt/0204-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
-features/all/rt/0205-rcu-Frob-softirq-test.patch
-features/all/rt/0206-rcu-Merge-RCU-bh-into-RCU-preempt.patch
-features/all/rt/0207-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
-features/all/rt/0208-rcu-more-fallout.patch.patch
-features/all/rt/0209-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
-features/all/rt/0210-rt-rcutree-Move-misplaced-prototype.patch
-features/all/rt/0211-lglocks-rt.patch.patch
-features/all/rt/0212-serial-8250-Clean-up-the-locking-for-rt.patch
-features/all/rt/0213-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
-features/all/rt/0214-drivers-tty-fix-omap-lock-crap.patch.patch
-features/all/rt/0215-rt-Improve-the-serial-console-PASS_LIMIT.patch
-features/all/rt/0216-fs-namespace-preemption-fix.patch
-features/all/rt/0217-mm-protect-activate-switch-mm.patch.patch
-features/all/rt/0218-fs-block-rt-support.patch.patch
-features/all/rt/0219-fs-ntfs-disable-interrupt-only-on-RT.patch
-features/all/rt/0220-x86-Convert-mce-timer-to-hrtimer.patch
-features/all/rt/0221-x86-stackprotector-Avoid-random-pool-on-rt.patch
-features/all/rt/0222-x86-Use-generic-rwsem_spinlocks-on-rt.patch
-features/all/rt/0223-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
-features/all/rt/0224-workqueue-use-get-cpu-light.patch.patch
-features/all/rt/0225-epoll.patch.patch
-features/all/rt/0226-mm-vmalloc.patch.patch
-features/all/rt/0227-debugobjects-rt.patch.patch
-features/all/rt/0228-jump-label-rt.patch.patch
-features/all/rt/0229-skbufhead-raw-lock.patch.patch
-features/all/rt/0230-x86-no-perf-irq-work-rt.patch.patch
-features/all/rt/0231-console-make-rt-friendly.patch.patch
-features/all/rt/0232-printk-Disable-migration-instead-of-preemption.patch
-features/all/rt/0233-power-use-generic-rwsem-on-rt.patch
-features/all/rt/0234-power-disable-highmem-on-rt.patch.patch
-features/all/rt/0235-arm-disable-highmem-on-rt.patch.patch
-features/all/rt/0236-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
-features/all/rt/0237-mips-disable-highmem-on-rt.patch.patch
-features/all/rt/0238-net-Avoid-livelock-in-net_tx_action-on-RT.patch
-features/all/rt/0239-ping-sysrq.patch.patch
-features/all/rt/0240-kgdb-serial-Short-term-workaround.patch
-features/all/rt/0241-add-sys-kernel-realtime-entry.patch
-features/all/rt/0242-mm-rt-kmap_atomic-scheduling.patch
-features/all/rt/0243-ipc-sem-Rework-semaphore-wakeups.patch
-features/all/rt/0244-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
-features/all/rt/0245-x86-kvm-require-const-tsc-for-rt.patch.patch
-features/all/rt/0246-scsi-fcoe-rt-aware.patch.patch
-features/all/rt/0247-x86-crypto-Reduce-preempt-disabled-regions.patch
-features/all/rt/0248-dm-Make-rt-aware.patch
-features/all/rt/0249-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
-features/all/rt/0250-seqlock-Prevent-rt-starvation.patch
-features/all/rt/0251-timer-Fix-hotplug-for-rt.patch
-features/all/rt/0252-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
-features/all/rt/0253-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
-features/all/rt/0254-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
-features/all/rt/0255-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
-features/all/rt/0256-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
-features/all/rt/0257-softirq-Check-preemption-after-reenabling-interrupts.patch
-features/all/rt/0258-rt-Introduce-cpu_chill.patch
-features/all/rt/0259-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
-features/all/rt/0260-net-Use-cpu_chill-instead-of-cpu_relax.patch
-features/all/rt/0261-kconfig-disable-a-few-options-rt.patch.patch
-features/all/rt/0262-kconfig-preempt-rt-full.patch.patch
-features/all/rt/0263-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
-features/all/rt/0264-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
-features/all/rt/0265-net-RT-REmove-preemption-disabling-in-netif_rx.patch
-features/all/rt/0266-mips-remove-smp-reserve-lock.patch.patch
-features/all/rt/0267-Latency-histogramms-Cope-with-backwards-running-loca.patch
-features/all/rt/0268-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
-features/all/rt/0269-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
-features/all/rt/0270-Latency-histograms-Detect-another-yet-overlooked-sha.patch
-features/all/rt/0271-slab-Prevent-local-lock-deadlock.patch
-features/all/rt/0272-fs-jbd-pull-your-plug-when-waiting-for-space.patch
-features/all/rt/0273-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
-features/all/rt/0274-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
-features/all/rt/0275-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
-features/all/rt/0276-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
+features/all/rt/0136-hrtimer-fix-reprogram-madness.patch.patch
+features/all/rt/0137-timer-fd-Prevent-live-lock.patch
+features/all/rt/0138-posix-timers-thread-posix-cpu-timers-on-rt.patch
+features/all/rt/0139-posix-timers-Shorten-posix_cpu_timers-CPU-kernel-thr.patch
+features/all/rt/0140-posix-timers-Avoid-wakeups-when-no-timers-are-active.patch
+features/all/rt/0141-sched-delay-put-task.patch.patch
+features/all/rt/0142-sched-limit-nr-migrate.patch.patch
+features/all/rt/0143-sched-mmdrop-delayed.patch.patch
+features/all/rt/0144-sched-rt-mutex-wakeup.patch.patch
+features/all/rt/0145-sched-prevent-idle-boost.patch.patch
+features/all/rt/0146-sched-might-sleep-do-not-account-rcu-depth.patch.patch
+features/all/rt/0147-sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
+features/all/rt/0148-sched-cond-resched.patch.patch
+features/all/rt/0149-cond-resched-softirq-fix.patch.patch
+features/all/rt/0150-sched-no-work-when-pi-blocked.patch.patch
+features/all/rt/0151-cond-resched-lock-rt-tweak.patch.patch
+features/all/rt/0152-sched-disable-ttwu-queue.patch.patch
+features/all/rt/0153-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
+features/all/rt/0154-sched-ttwu-Return-success-when-only-changing-the-sav.patch
+features/all/rt/0155-stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+features/all/rt/0156-stomp-machine-mark-stomper-thread.patch.patch
+features/all/rt/0157-stomp-machine-raw-lock.patch.patch
+features/all/rt/0158-hotplug-Lightweight-get-online-cpus.patch
+features/all/rt/0159-hotplug-sync_unplug-No.patch
+features/all/rt/0160-hotplug-Reread-hotplug_pcp-on-pin_current_cpu-retry.patch
+features/all/rt/0161-sched-migrate-disable.patch.patch
+features/all/rt/0162-hotplug-use-migrate-disable.patch.patch
+features/all/rt/0163-hotplug-Call-cpu_unplug_begin-before-DOWN_PREPARE.patch
+features/all/rt/0164-ftrace-migrate-disable-tracing.patch.patch
+features/all/rt/0165-tracing-Show-padding-as-unsigned-short.patch
+features/all/rt/0166-migrate-disable-rt-variant.patch.patch
+features/all/rt/0167-sched-Optimize-migrate_disable.patch
+features/all/rt/0168-sched-Generic-migrate_disable.patch
+features/all/rt/0169-sched-rt-Fix-migrate_enable-thinko.patch
+features/all/rt/0170-sched-teach-migrate_disable-about-atomic-contexts.patch
+features/all/rt/0171-sched-Postpone-actual-migration-disalbe-to-schedule.patch
+features/all/rt/0172-sched-Do-not-compare-cpu-masks-in-scheduler.patch
+features/all/rt/0173-sched-Have-migrate_disable-ignore-bounded-threads.patch
+features/all/rt/0174-sched-clear-pf-thread-bound-on-fallback-rq.patch.patch
+features/all/rt/0175-ftrace-crap.patch.patch
+features/all/rt/0176-ring-buffer-Convert-reader_lock-from-raw_spin_lock-i.patch
+features/all/rt/0177-net-netif_rx_ni-migrate-disable.patch.patch
+features/all/rt/0178-softirq-Sanitize-softirq-pending-for-NOHZ-RT.patch
+features/all/rt/0179-lockdep-rt.patch.patch
+features/all/rt/0180-mutex-no-spin-on-rt.patch.patch
+features/all/rt/0181-softirq-local-lock.patch.patch
+features/all/rt/0182-softirq-Export-in_serving_softirq.patch
+features/all/rt/0183-hardirq.h-Define-softirq_count-as-OUL-to-kill-build-.patch
+features/all/rt/0184-softirq-Fix-unplug-deadlock.patch
+features/all/rt/0185-softirq-disable-softirq-stacks-for-rt.patch.patch
+features/all/rt/0186-softirq-make-fifo.patch.patch
+features/all/rt/0187-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch
+features/all/rt/0188-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch
+features/all/rt/0189-local-vars-migrate-disable.patch.patch
+features/all/rt/0190-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
+features/all/rt/0191-rtmutex-lock-killable.patch.patch
+features/all/rt/0192-rtmutex-futex-prepare-rt.patch.patch
+features/all/rt/0193-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+features/all/rt/0194-rt-mutex-add-sleeping-spinlocks-support.patch.patch
+features/all/rt/0195-spinlock-types-separate-raw.patch.patch
+features/all/rt/0196-rtmutex-avoid-include-hell.patch.patch
+features/all/rt/0197-rt-add-rt-spinlocks.patch.patch
+features/all/rt/0198-rt-add-rt-to-mutex-headers.patch.patch
+features/all/rt/0199-rwsem-add-rt-variant.patch.patch
+features/all/rt/0200-rt-Add-the-preempt-rt-lock-replacement-APIs.patch
+features/all/rt/0201-rwlocks-Fix-section-mismatch.patch
+features/all/rt/0202-timer-handle-idle-trylock-in-get-next-timer-irq.patc.patch
+features/all/rt/0203-RCU-Force-PREEMPT_RCU-for-PREEMPT-RT.patch
+features/all/rt/0204-rcu-Frob-softirq-test.patch
+features/all/rt/0205-rcu-Merge-RCU-bh-into-RCU-preempt.patch
+features/all/rt/0206-rcu-Fix-macro-substitution-for-synchronize_rcu_bh-on.patch
+features/all/rt/0207-rcu-more-fallout.patch.patch
+features/all/rt/0208-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch
+features/all/rt/0209-rt-rcutree-Move-misplaced-prototype.patch
+features/all/rt/0210-lglocks-rt.patch.patch
+features/all/rt/0211-serial-8250-Clean-up-the-locking-for-rt.patch
+features/all/rt/0212-serial-8250-Call-flush_to_ldisc-when-the-irq-is-thre.patch
+features/all/rt/0213-drivers-tty-fix-omap-lock-crap.patch.patch
+features/all/rt/0214-rt-Improve-the-serial-console-PASS_LIMIT.patch
+features/all/rt/0215-fs-namespace-preemption-fix.patch
+features/all/rt/0216-mm-protect-activate-switch-mm.patch.patch
+features/all/rt/0217-fs-block-rt-support.patch.patch
+features/all/rt/0218-fs-ntfs-disable-interrupt-only-on-RT.patch
+features/all/rt/0219-x86-Convert-mce-timer-to-hrtimer.patch
+features/all/rt/0220-x86-stackprotector-Avoid-random-pool-on-rt.patch
+features/all/rt/0221-x86-Use-generic-rwsem_spinlocks-on-rt.patch
+features/all/rt/0222-x86-Disable-IST-stacks-for-debug-int-3-stack-fault-f.patch
+features/all/rt/0223-workqueue-use-get-cpu-light.patch.patch
+features/all/rt/0224-epoll.patch.patch
+features/all/rt/0225-mm-vmalloc.patch.patch
+features/all/rt/0226-debugobjects-rt.patch.patch
+features/all/rt/0227-jump-label-rt.patch.patch
+features/all/rt/0228-skbufhead-raw-lock.patch.patch
+features/all/rt/0229-x86-no-perf-irq-work-rt.patch.patch
+features/all/rt/0230-console-make-rt-friendly.patch.patch
+features/all/rt/0231-printk-Disable-migration-instead-of-preemption.patch
+features/all/rt/0232-power-use-generic-rwsem-on-rt.patch
+features/all/rt/0233-power-disable-highmem-on-rt.patch.patch
+features/all/rt/0234-arm-disable-highmem-on-rt.patch.patch
+features/all/rt/0235-ARM-at91-tclib-Default-to-tclib-timer-for-RT.patch
+features/all/rt/0236-mips-disable-highmem-on-rt.patch.patch
+features/all/rt/0237-net-Avoid-livelock-in-net_tx_action-on-RT.patch
+features/all/rt/0238-ping-sysrq.patch.patch
+features/all/rt/0239-kgdb-serial-Short-term-workaround.patch
+features/all/rt/0240-add-sys-kernel-realtime-entry.patch
+features/all/rt/0241-mm-rt-kmap_atomic-scheduling.patch
+features/all/rt/0242-ipc-sem-Rework-semaphore-wakeups.patch
+features/all/rt/0243-sysrq-Allow-immediate-Magic-SysRq-output-for-PREEMPT.patch
+features/all/rt/0244-x86-kvm-require-const-tsc-for-rt.patch.patch
+features/all/rt/0245-scsi-fcoe-rt-aware.patch.patch
+features/all/rt/0246-x86-crypto-Reduce-preempt-disabled-regions.patch
+features/all/rt/0247-dm-Make-rt-aware.patch
+features/all/rt/0248-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch
+features/all/rt/0249-seqlock-Prevent-rt-starvation.patch
+features/all/rt/0250-timer-Fix-hotplug-for-rt.patch
+features/all/rt/0251-futex-rt-Fix-possible-lockup-when-taking-pi_lock-in-.patch
+features/all/rt/0252-ring-buffer-rt-Check-for-irqs-disabled-before-grabbi.patch
+features/all/rt/0253-sched-rt-Fix-wait_task_interactive-to-test-rt_spin_l.patch
+features/all/rt/0254-lglock-rt-Use-non-rt-for_each_cpu-in-rt-code.patch
+features/all/rt/0255-cpu-Make-hotplug.lock-a-sleeping-spinlock-on-RT.patch
+features/all/rt/0256-softirq-Check-preemption-after-reenabling-interrupts.patch
+features/all/rt/0257-rt-Introduce-cpu_chill.patch
+features/all/rt/0258-fs-dcache-Use-cpu_chill-in-trylock-loops.patch
+features/all/rt/0259-net-Use-cpu_chill-instead-of-cpu_relax.patch
+features/all/rt/0260-kconfig-disable-a-few-options-rt.patch.patch
+features/all/rt/0261-kconfig-preempt-rt-full.patch.patch
+features/all/rt/0262-rt-Make-migrate_disable-enable-and-__rt_mutex_init-n.patch
+features/all/rt/0263-scsi-qla2xxx-Use-local_irq_save_nort-in-qla2x00_poll.patch
+features/all/rt/0264-net-RT-REmove-preemption-disabling-in-netif_rx.patch
+features/all/rt/0265-mips-remove-smp-reserve-lock.patch.patch
+features/all/rt/0266-Latency-histogramms-Cope-with-backwards-running-loca.patch
+features/all/rt/0267-Latency-histograms-Adjust-timer-if-already-elapsed-w.patch
+features/all/rt/0268-Disable-RT_GROUP_SCHED-in-PREEMPT_RT_FULL.patch
+features/all/rt/0269-Latency-histograms-Detect-another-yet-overlooked-sha.patch
+features/all/rt/0270-slab-Prevent-local-lock-deadlock.patch
+features/all/rt/0271-fs-jbd-pull-your-plug-when-waiting-for-space.patch
+features/all/rt/0272-perf-Make-swevent-hrtimer-run-in-irq-instead-of-soft.patch
+features/all/rt/0273-cpu-rt-Rework-cpu-down-for-PREEMPT_RT.patch
+features/all/rt/0274-cpu-rt-Fix-cpu_hotplug-variable-initialization.patch
+features/all/rt/0275-time-rt-Fix-up-leap-second-backport-for-RT-changes.patch
+features/all/rt/0276-fix-printk-flush-of-messages.patch
features/all/rt/0277-fix-printk-flush-of-messages.patch
-features/all/rt/0278-fix-printk-flush-of-messages.patch
-features/all/rt/0279-random-Make-it-work-on-rt.patch
-features/all/rt/0280-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
-features/all/rt/0281-mm-slab-Fix-potential-deadlock.patch
-features/all/rt/0282-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
-features/all/rt/0283-rt-rwsem-rwlock-lockdep-annotations.patch
-features/all/rt/0284-sched-Better-debug-output-for-might-sleep.patch
-features/all/rt/0285-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
-features/all/rt/0286-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
-features/all/rt/0287-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
-features/all/rt/0288-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
-features/all/rt/0289-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
-features/all/rt/0290-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
-features/all/rt/0291-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
-features/all/rt/0292-sched-Consider-pi-boosting-in-setscheduler.patch
-features/all/rt/0293-drivers-tty-pl011-irq-disable-madness.patch.patch
-features/all/rt/0294-mmci-Remove-bogus-local_irq_save.patch
-features/all/rt/0295-sched-Init-idle-on_rq-in-init_idle.patch
-features/all/rt/0296-sched-Check-for-idle-task-in-might_sleep.patch
-features/all/rt/0297-mm-swap-Initialize-local-locks-early.patch
-features/all/rt/0298-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
-features/all/rt/0299-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
-features/all/rt/0300-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
-features/all/rt/0301-serial-Imx-Fix-recursive-locking-bug.patch
-features/all/rt/0302-wait-simple-Simple-waitqueue-implementation.patch
-features/all/rt/0303-rcutiny-Use-simple-waitqueue.patch
-features/all/rt/0304-Linux-3.2.39-rt59-REBASE.patch
+features/all/rt/0278-random-Make-it-work-on-rt.patch
+features/all/rt/0279-softirq-Init-softirq-local-lock-after-per-cpu-sectio.patch
+features/all/rt/0280-mm-slab-Fix-potential-deadlock.patch
+features/all/rt/0281-mm-page_alloc-Use-local_lock_on-instead-of-plain-spi.patch
+features/all/rt/0282-rt-rwsem-rwlock-lockdep-annotations.patch
+features/all/rt/0283-sched-Better-debug-output-for-might-sleep.patch
+features/all/rt/0284-stomp_machine-Use-mutex_trylock-when-called-from-ina.patch
+features/all/rt/0285-slab-Fix-up-stable-merge-of-slab-init_lock_keys.patch
+features/all/rt/0286-hrtimer-Raise-softirq-if-hrtimer-irq-stalled.patch
+features/all/rt/0287-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch
+features/all/rt/0288-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch
+features/all/rt/0289-sched-Adjust-sched_reset_on_fork-when-nothing-else-c.patch
+features/all/rt/0290-sched-Queue-RT-tasks-to-head-when-prio-drops.patch
+features/all/rt/0291-sched-Consider-pi-boosting-in-setscheduler.patch
+features/all/rt/0292-drivers-tty-pl011-irq-disable-madness.patch.patch
+features/all/rt/0293-mmci-Remove-bogus-local_irq_save.patch
+features/all/rt/0294-sched-Init-idle-on_rq-in-init_idle.patch
+features/all/rt/0295-sched-Check-for-idle-task-in-might_sleep.patch
+features/all/rt/0296-mm-swap-Initialize-local-locks-early.patch
+features/all/rt/0297-x86-32-Use-kmap-switch-for-non-highmem-as-well.patch
+features/all/rt/0298-acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+features/all/rt/0299-printk-Fix-rq-lock-vs-logbuf_lock-unlock-lock-invers.patch
+features/all/rt/0300-serial-Imx-Fix-recursive-locking-bug.patch
+features/all/rt/0301-wait-simple-Simple-waitqueue-implementation.patch
+features/all/rt/0302-rcutiny-Use-simple-waitqueue.patch
+features/all/rt/0303-Linux-3.2.40-rt60-REBASE.patch
Modified: dists/squeeze-backports/linux/debian/rules.real
==============================================================================
--- dists/squeeze-backports/linux/debian/rules.real Tue Apr 2 04:52:07 2013 (r19969)
+++ dists/squeeze-backports/linux/debian/rules.real Sun Apr 7 23:16:11 2013 (r19970)
@@ -198,6 +198,7 @@
mkdir -p $(OUT_DIR)
cp -a CREDITS MAINTAINERS README REPORTING-BUGS Documentation $(OUT_DIR)
rm -rf $(OUT_DIR)/Documentation/DocBook
+ set -o pipefail; \
cd $(DIR)/Documentation/DocBook; \
find * -name '*.html' -print \
| \
@@ -210,6 +211,7 @@
install-manual: DH_OPTIONS = -p$(PACKAGE_NAME)
install-manual: $(STAMPS_DIR)/build-doc
dh_prep
+ set -o pipefail; \
find $(DIR)/Documentation/DocBook/man/ -name '*.9' | xargs dh_installman
+$(MAKE_SELF) install-base GENCONTROL_ARGS='$(call DEFINE_MULTIARCH,foreign)'
@@ -231,11 +233,12 @@
dh_testroot
dh_prep
+ set -o pipefail; \
cd $(SOURCE_DIR); \
( \
echo Makefile; \
find arch/$(KERNEL_ARCH) -maxdepth 1 -name 'Makefile*' -print; \
- find arch/$(KERNEL_ARCH) -name 'module.lds' -print; \
+ find arch/$(KERNEL_ARCH) \( -name 'module.lds' -o -name 'Kbuild.platforms' -o -name 'Platform' \) -print; \
find $$(find arch/$(KERNEL_ARCH) \( -name include -o -name scripts \) -type d -print) -print; \
find include -name 'asm*' -prune -o -print; \
find include/asm-generic -print; \
@@ -362,6 +365,7 @@
chmod a+x $(CURDIR)/debian/bin/no-depmod
+$(MAKE_CLEAN) -C $(DIR) modules_install DEPMOD='$(CURDIR)/debian/bin/no-depmod' INSTALL_MOD_PATH='$(CURDIR)'/$(PACKAGE_DIR) INSTALL_MOD_STRIP=1
ifeq ($(DEBUG),True)
+ set -o pipefail; \
find $(PACKAGE_DIR) -name '*.ko' | sed 's|$(PACKAGE_DIR)/lib/modules/$(REAL_VERSION)/kernel/||' | while read module ; do \
objcopy --add-gnu-debuglink=$(DIR)/$$module $(PACKAGE_DIR)/lib/modules/$(REAL_VERSION)/kernel/$$module || exit; \
done
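
The rules.real hunks above prepend "set -o pipefail;" to recipe lines that pipe find output into xargs, sed, or a while loop. By default a shell pipeline's exit status is that of its last command, so a failure early in the pipe (for example find or objcopy) would not fail the build; pipefail makes any failing member propagate. A minimal sketch of the behaviour, not taken from the commit and assuming the recipe shell supports pipefail (bash does; older dash releases do not):

    #!/bin/bash
    # Without pipefail: the pipeline's status comes from `cat`,
    # so the failure of `false` is silently ignored.
    false | cat
    echo "status without pipefail: $?"   # prints 0

    # With pipefail: any failing member makes the whole pipeline fail,
    # so make sees a non-zero status and aborts the recipe.
    set -o pipefail
    false | cat
    echo "status with pipefail: $?"      # prints 1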