[linux] 01/01: [rt] Update to 4.8-rt1 and re-enable

debian-kernel at lists.debian.org
Tue Oct 11 18:59:25 UTC 2016


This is an automated email from the git hooks/post-receive script.

benh pushed a commit to branch master
in repository linux.

commit 387dbb7803bdaf494fb7115f0025b243b7f7c56b
Author: Ben Hutchings <ben at decadent.org.uk>
Date:   Tue Oct 11 19:58:48 2016 +0100

    [rt] Update to 4.8-rt1 and re-enable
---
 debian/changelog                                   |   1 +
 debian/config/defines                              |   2 +-
 ...irq-in-translation-section-permission-fau.patch |   2 +-
 ...CK-printk-drop-the-logbuf_lock-more-often.patch |  12 +-
 ...64-downgrade-preempt_disable-d-region-to-.patch |   8 +-
 ...lapic-mark-LAPIC-timer-handler-as-irqsafe.patch |   4 +-
 ...vert-acpi_gbl_hardware-lock-back-to-a-raw.patch |  12 +-
 .../rt/arch-arm64-Add-lazy-preempt-support.patch   |  32 ++-
 ...t-remove-irq-handler-when-clock-is-unused.patch |  52 ++--
 ...-at91-tclib-default-to-tclib-timer-for-rt.patch |   2 +-
 .../all/rt/arm-convert-boot-lock-to-raw.patch      |  22 +-
 .../all/rt/arm-enable-highmem-for-rt.patch         |   2 +-
 .../all/rt/arm-highmem-flush-tlb-on-unmap.patch    |   2 +-
 .../features/all/rt/arm-preempt-lazy-support.patch |  70 +++++-
 .../features/all/rt/arm-unwind-use_raw_lock.patch  |   2 +-
 .../rt/arm64-xen--Make-XEN-depend-on-non-rt.patch  |   4 +-
 .../all/rt/at91_dont_enable_disable_clock.patch    |   2 +-
 .../all/rt/ata-disable-interrupts-if-non-rt.patch  |   2 +-
 ...st-pone-notifier-to-POST_D.patchto-POST_D.patch |   4 +-
 .../features/all/rt/block-blk-mq-use-swait.patch   |  10 +-
 .../block-mq-don-t-complete-requests-via-IPI.patch |  16 +-
 .../all/rt/block-mq-drop-preempt-disable.patch     |   8 +-
 .../features/all/rt/block-mq-use-cpu_light.patch   |   2 +-
 .../block-shorten-interrupt-disabled-regions.patch |  12 +-
 .../features/all/rt/block-use-cpu-chill.patch      |   2 +-
 .../all/rt/bug-rt-dependend-variants.patch         |   2 +-
 ...ps-scheduling-while-atomic-in-cgroup-code.patch |  74 ++++--
 .../cgroups-use-simple-wait-in-css_release.patch   |   8 +-
 ...-drivers-timer-atmel-pit-fix-double-free_.patch |   2 +-
 ...clocksource-tclib-allow-higher-clockrates.patch |   2 +-
 .../all/rt/completion-use-simple-wait-queues.patch |  24 +-
 .../all/rt/cond-resched-lock-rt-tweak.patch        |   2 +-
 .../features/all/rt/cond-resched-softirq-rt.patch  |   8 +-
 ...g-Document-why-PREEMPT_RT-uses-a-spinlock.patch |   2 +-
 ...ke-hotplug-lock-a-sleeping-spinlock-on-rt.patch |  22 +-
 .../features/all/rt/cpu-rt-rework-cpu-down.patch   |  48 ++--
 ...l-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch |  14 +-
 .../all/rt/cpu_down_move_migrate_enable_back.patch |   6 +-
 ...req-drop-K8-s-driver-from-beeing-selected.patch |   4 +-
 .../all/rt/cpumask-disable-offstack-on-rt.patch    |   6 +-
 ...educe-preempt-disabled-regions-more-algos.patch |   2 +-
 .../patches/features/all/rt/debugobjects-rt.patch  |   4 +-
 .../patches/features/all/rt/dm-make-rt-aware.patch |  10 +-
 ...ck-zram-Replace-bit-spinlocks-with-rtmute.patch |  28 +--
 .../rt/drivers-net-8139-disable-irq-nosync.patch   |   4 +-
 .../rt/drivers-net-vortex-fix-locking-issues.patch |   2 +-
 ...ers-random-reduce-preempt-disabled-region.patch |   6 +-
 .../all/rt/drivers-tty-fix-omap-lock-crap.patch    |   2 +-
 .../rt/drivers-tty-pl011-irq-disable-madness.patch |   6 +-
 ...15-drop-trace_i915_gem_ring_dispatch-onrt.patch |   6 +-
 ...ock_irq()_in_intel_pipe_update_startend().patch |  16 +-
 ...empt_disableenable_rt()_where_recommended.patch |  10 +-
 ...ack-don-t-disable-preemption-during-trace.patch |  16 +-
 .../features/all/rt/epoll-use-get-cpu-light.patch  |   2 +-
 .../all/rt/fs-aio-simple-simple-work.patch         |  12 +-
 .../features/all/rt/fs-block-rt-support.patch      |   2 +-
 .../features/all/rt/fs-dcache-include-wait.h.patch |  24 ++
 .../rt/fs-dcache-init-in_lookup_hashtable.patch    |  28 +++
 .../fs-dcache-use-cpu-chill-in-trylock-loops.patch |  44 +++-
 ...ache-use-swait_queue-instead-of-waitqueue.patch | 215 ++++++++++++++++
 .../all/rt/fs-jbd-replace-bh_state-lock.patch      |   2 +-
 ...bd2-pull-your-plug-when-waiting-for-space.patch |   2 +-
 .../all/rt/fs-namespace-preemption-fix.patch       |   2 +-
 .../fs-nfs-turn-rmdir_sem-into-a-semaphore.patch   | 139 +++++++++++
 .../all/rt/fs-ntfs-disable-interrupt-non-rt.patch  |   2 +-
 .../rt/fs-replace-bh_uptodate_lock-for-rt.patch    |  14 +-
 .../all/rt/ftrace-migrate-disable-tracing.patch    |   8 +-
 ...e-lock-unlock-symetry-versus-pi_lock-and-.patch |  43 ++++
 .../features/all/rt/futex-requeue-pi-fix.patch     |   2 +-
 .../all/rt/genirq-disable-irqpoll-on-rt.patch      |   2 +-
 ...ot-invoke-the-affinity-callback-via-a-wor.patch | 160 +++++-------
 .../features/all/rt/genirq-force-threading.patch   |   4 +-
 ...pdate-irq_set_irqchip_state-documentation.patch |   4 +-
 .../rt/gpu_don_t_check_for_the_lock_owner.patch    |  33 +++
 ...-set_cpus_allowed_ptr-in-sync_unplug_thre.patch |   2 +-
 .../all/rt/hotplug-light-get-online-cpus.patch     |  14 +-
 ...lug-sync_unplug-no-27-5cn-27-in-task-name.patch |   2 +-
 .../all/rt/hotplug-use-migrate-disable.patch       |   6 +-
 ...-Move-schedule_work-call-to-helper-thread.patch |  64 ++---
 .../all/rt/hrtimer-enfore-64byte-alignment.patch   |   2 +-
 ...up-hrtimer-callback-changes-for-preempt-r.patch |  55 +++--
 .../all/rt/hrtimers-prepare-full-preemption.patch  |  12 +-
 ...tor-Don-t-ignore-threshold-module-paramet.patch |   2 +-
 ...tor-Update-hwlat_detector-to-add-outer-lo.patch |   2 +-
 ...tector-Use-thread-instead-of-stop-machine.patch |   2 +-
 ...tector-Use-trace_clock_local-if-available.patch |   2 +-
 debian/patches/features/all/rt/hwlatdetect.patch   |   4 +-
 ...warning-from-i915-when-running-on-PREEMPT.patch |   6 +-
 .../all/rt/ide-use-nort-local-irq-variants.patch   |   2 +-
 .../all/rt/idr-use-local-lock-for-protection.patch |   2 +-
 .../rt/infiniband-mellanox-ib-use-nort-irq.patch   |   6 +-
 .../all/rt/inpt-gameport-use-local-irq-nort.patch  |   2 +-
 .../rt/introduce_migrate_disable_cpu_light.patch   |  24 +-
 .../all/rt/iommu-amd--Use-WARN_ON_NORT.patch       |   6 +-
 ...don-t-disable-preempt-around-this_cpu_ptr.patch |  82 ++++++
 ...don-t-disable-preemption-while-accessing-.patch |  59 +++++
 ...-msg-Implement-lockless-pipelined-wakeups.patch |   2 +-
 .../all/rt/ipc-sem-rework-semaphore-wakeups.patch  |  10 +-
 ...-softirq-processing-in-irq-thread-context.patch |   6 +-
 ...irqwork-Move-irq-safe-work-to-irq-context.patch |  12 +-
 ...qwork-push_most_work_into_softirq_context.patch |  14 +-
 ...ckdep-annotation-in-add_transaction_credi.patch |  65 +++++
 debian/patches/features/all/rt/jump-label-rt.patch |   6 +-
 .../all/rt/kconfig-disable-a-few-options-rt.patch  |   6 +-
 .../features/all/rt/kconfig-preempt-rt-full.patch  |   2 +-
 .../kernel-SRCU-provide-a-static-initializer.patch |   2 +-
 ...fix-cpu-down-problem-if-kthread-s-cpu-is-.patch |   8 +-
 .../rt/kernel-futex-don-t-deboost-too-early.patch  | 162 ++++++++++++
 ...plug-restore-original-cpu-mask-oncpu-down.patch |  10 +-
 ...ate_disable-do-fastpath-in-atomic-irqs-of.patch |   6 +-
 ...-mark-perf_cpu_context-s-timer-as-irqsafe.patch |   4 +-
 ...tk-Don-t-try-to-print-from-IRQ-NMI-region.patch |   6 +-
 .../rt/kernel-softirq-unlock-with-irqs-on.patch    |   2 +-
 .../features/all/rt/kgb-serial-hackaround.patch    |   4 +-
 debian/patches/features/all/rt/latency-hist.patch  |  16 +-
 .../latency_hist-update-sched_wakeup-probe.patch   |   2 +-
 .../all/rt/latencyhist-disable-jump-labels.patch   |   2 +-
 .../leds-trigger-disable-CPU-trigger-on-RT.patch   |   4 +-
 debian/patches/features/all/rt/lglocks-rt.patch    |   4 +-
 .../rt/list_bl-fixup-bogus-lockdep-warning.patch   |   2 +-
 .../list_bl.h-make-list-head-locking-RT-safe.patch |   2 +-
 .../all/rt/local-irq-rt-depending-variants.patch   |   2 +-
 .../all/rt/locallock-add-local_lock_on.patch       |   2 +-
 debian/patches/features/all/rt/localversion.patch  |   4 +-
 ...et-gcc-about-dangerous-__builtin_return_a.patch | 111 +++++++++
 .../rt/lockdep-no-softirq-accounting-on-rt.patch   |   6 +-
 ...ftest-fix-warnings-due-to-missing-PREEMPT.patch |   2 +-
 ...-do-hardirq-context-test-for-raw-spinlock.patch |   2 +-
 ...ktorture-Do-NOT-include-rwlock.h-directly.patch |   2 +-
 ...inglglocks_Use_preempt_enabledisable_nort.patch |  35 +++
 .../features/all/rt/md-disable-bcache.patch        |   2 +-
 .../all/rt/md-raid5-percpu-handling-rt-aware.patch |  10 +-
 .../all/rt/mips-disable-highmem-on-rt.patch        |   4 +-
 .../mm--rt--Fix-generic-kmap_atomic-for-RT.patch   |   2 +-
 ...dev-don-t-disable-IRQs-in-wb_congested_pu.patch |   2 +-
 .../all/rt/mm-bounce-local-irq-save-nort.patch     |   2 +-
 .../all/rt/mm-convert-swap-to-percpu-locked.patch  |  54 ++--
 .../features/all/rt/mm-disable-sloub-rt.patch      |   8 +-
 .../patches/features/all/rt/mm-enable-slub.patch   |  88 +++----
 ...don-t-plant-shadow-entries-without-radix-.patch | 186 ++++++++++++++
 ...fix-mapping-nrpages-double-accounting-in-.patch |  41 +++
 .../features/all/rt/mm-make-vmstat-rt-aware.patch  |  66 ++++-
 ...ol-Don-t-call-schedule_work_on-in-preempt.patch |   6 +-
 .../all/rt/mm-memcontrol-do_not_disable_irq.patch  |  17 +-
 ...ol-mem_cgroup_migrate-replace-another-loc.patch |  12 +-
 ...m-page-alloc-use-local-lock-on-target-cpu.patch |   4 +-
 ...m-page_alloc-reduce-lock-sections-further.patch |  64 ++---
 .../mm-page_alloc-rt-friendly-per-cpu-pages.patch  |  55 ++---
 .../rt/mm-perform-lru_add_drain_all-remotely.patch |  45 ++--
 .../all/rt/mm-protect-activate-switch-mm.patch     |   4 +-
 .../all/rt/mm-rt-kmap-atomic-scheduling.patch      |   4 +-
 .../mm-scatterlist-dont-disable-irqs-on-RT.patch   |   2 +-
 .../all/rt/mm-vmalloc-use-get-cpu-light.patch      |  12 +-
 ...et-do-not-protect-workingset_shadow_nodes.patch |  76 +++---
 ...-Use-get-put_cpu_light-in-zs_map_object-z.patch | 148 ++++++++++-
 .../all/rt/mmci-remove-bogus-irq-save.patch        |   6 +-
 .../all/rt/move_sched_delayed_work_to_helper.patch |  56 ++---
 .../features/all/rt/mutex-no-spin-on-rt.patch      |   2 +-
 .../net-Qdisc-use-a-seqlock-instead-seqcount.patch | 274 +++++++++++++++++++++
 .../all/rt/net-add-a-lock-around-icmp_sk.patch     |  73 ++++++
 ...k-the-missing-serialization-in-ip_send_un.patch |  94 +++++++
 ...r-local-irq-disable-alloc-atomic-headache.patch |  10 +-
 ...cpuhotplug-drain-input_pkt_queue-lockless.patch |   4 +-
 ...otect-users-of-napi_alloc_cache-against-r.patch |  16 +-
 ...ays-take-qdisc-s-busylock-in-__dev_xmit_s.patch |   6 +-
 ...-iptable-xt-write-recseq-begin-rt-fallout.patch |  10 +-
 .../rt/net-make-devnet_rename_seq-a-mutex.patch    |  14 +-
 ...xmit_recursion-to-per-task-variable-on-RT.patch | 119 +++++----
 .../all/rt/net-prevent-abba-deadlock.patch         |   4 +-
 ...-a-way-to-delegate-processing-a-softirq-t.patch |   6 +-
 ...ev_deactivate_many-use-msleep-1-instead-o.patch |   4 +-
 .../features/all/rt/net-use-cpu-chill.patch        |  10 +-
 .../features/all/rt/net-wireless-warn-nort.patch   |   6 +-
 ...onize-rcu_expedited_conditional-on-non-rt.patch |   4 +-
 .../features/all/rt/oleg-signal-rt-fix.patch       |   6 +-
 .../all/rt/panic-disable-random-on-rt.patch        |   4 +-
 ...troduce-rcu-bh-qs-where-safe-from-softirq.patch |  14 +-
 .../rt/pci-access-use-__wake_up_all_locked.patch   |   2 +-
 .../features/all/rt/percpu_ida-use-locklocks.patch |   2 +-
 .../all/rt/perf-make-swevent-hrtimer-irqsafe.patch |   4 +-
 .../features/all/rt/peter_zijlstra-frob-rcu.patch  |   4 +-
 .../features/all/rt/peterz-srcu-crypto-chain.patch |   2 +-
 .../features/all/rt/pid.h-include-atomic.h.patch   |   2 +-
 debian/patches/features/all/rt/ping-sysrq.patch    |   6 +-
 .../all/rt/posix-timers-no-broadcast.patch         |   2 +-
 ...osix-timers-thread-posix-cpu-timers-on-rt.patch |  12 +-
 .../all/rt/power-disable-highmem-on-rt.patch       |   4 +-
 .../all/rt/power-use-generic-rwsem-on-rt.patch     |   2 +-
 ...-Disable-in-kernel-MPIC-emulation-for-PRE.patch |   2 +-
 .../all/rt/powerpc-preempt-lazy-support.patch      |  34 +--
 ...-device-init.c-adapt-to-completions-using.patch |   2 +-
 .../features/all/rt/preempt-lazy-support.patch     | 170 +++++++------
 .../features/all/rt/preempt-nort-rt-variants.patch |   2 +-
 ...intk-27-boot-param-to-help-with-debugging.patch |   4 +-
 debian/patches/features/all/rt/printk-kill.patch   |  14 +-
 .../patches/features/all/rt/printk-rt-aware.patch  |  12 +-
 .../ptrace-fix-ptrace-vs-tasklist_lock-race.patch  |  14 +-
 .../features/all/rt/radix-tree-rt-aware.patch      |  36 +--
 .../all/rt/random-make-it-work-on-rt.patch         |  49 ++--
 .../rbtree-include-rcu.h-because-we-use-it.patch   |  25 ++
 ...Eliminate-softirq-processing-from-rcutree.patch |  26 +-
 .../all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch  |   4 +-
 .../all/rt/rcu-make-RCU_BOOST-default-on-RT.patch  |   6 +-
 .../rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch | 144 +++++++++--
 ..._bh_qs-disable-irq-while-calling-rcu_pree.patch |   4 +-
 ...-migrate_disable-race-with-cpu-hotplug-3f.patch |   2 +-
 ...t_full-arm-coredump-fails-for-cpu-3e-3d-4.patch |   4 +-
 .../features/all/rt/relay-fix-timer-madness.patch  |   4 +-
 ...ping-function-called-from-invalid-context.patch |   2 +-
 .../patches/features/all/rt/rt-add-rt-locks.patch  | 188 +++++++++-----
 .../features/all/rt/rt-introduce-cpu-chill.patch   |   4 +-
 .../features/all/rt/rt-local-irq-lock.patch        |   2 +-
 ...cking-Reenable-migration-accross-schedule.patch |  10 +-
 .../features/all/rt/rt-preempt-base-config.patch   |   2 +-
 .../features/all/rt/rt-serial-warn-fix.patch       |   2 +-
 ...x--Handle-non-enqueued-waiters-gracefully.patch |   2 +-
 .../rt/rtmutex-add-a-first-shot-of-ww_mutex.patch  |  30 +--
 .../all/rt/rtmutex-avoid-include-hell.patch        |   2 +-
 .../features/all/rt/rtmutex-futex-prepare-rt.patch |  12 +-
 .../features/all/rt/rtmutex-lock-killable.patch    |   2 +-
 .../all/rt/rtmutex-trylock-is-okay-on-RT.patch     |   6 +-
 .../features/all/rt/rtmutex_dont_include_rcu.patch | 172 +++++++++----
 ...i-dont-t-disable-interrupts-in-qc_issue-h.patch |   4 +-
 .../sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch |   4 +-
 ...-deadline-dl_task_timer-has-to-be-irqsafe.patch |   4 +-
 .../features/all/rt/sched-delay-put-task.patch     |  16 +-
 .../rt/sched-disable-rt-group-sched-on-rt.patch    |   4 +-
 .../features/all/rt/sched-disable-ttwu-queue.patch |   2 +-
 .../features/all/rt/sched-limit-nr-migrate.patch   |   4 +-
 ...ched-might-sleep-do-not-account-rcu-depth.patch |   8 +-
 .../features/all/rt/sched-mmdrop-delayed.patch     |  62 +++--
 .../features/all/rt/sched-rt-mutex-wakeup.patch    |  12 +-
 ...hed-ttwu-ensure-success-return-is-correct.patch |   4 +-
 ...ueue-Only-wake-up-idle-workers-if-not-blo.patch |   4 +-
 .../features/all/rt/scsi-fcoe-rt-aware.patch       |  43 ++--
 ...ping-function-called-from-invalid-context.patch |   2 +-
 .../all/rt/seqlock-prevent-rt-starvation.patch     |  12 +-
 .../all/rt/signal-fix-up-rcu-wreckage.patch        |   2 +-
 .../rt/signal-revert-ptrace-preempt-magic.patch    |   2 +-
 ...low-rt-tasks-to-cache-one-sigqueue-struct.patch |   6 +-
 .../features/all/rt/skbufhead-raw-lock.patch       |  18 +-
 .../all/rt/slub-disable-SLUB_CPU_PARTIAL.patch     |   4 +-
 .../all/rt/slub-enable-irqs-for-no-wait.patch      |   8 +-
 ...-snd_pcm_stream_lock-irqs_disabled-splats.patch |   2 +-
 .../rt/softirq-disable-softirq-stacks-for-rt.patch |  18 +-
 .../features/all/rt/softirq-preempt-fix-3-re.patch |  16 +-
 .../features/all/rt/softirq-split-locks.patch      |  18 +-
 ...irq-split-timer-softirqs-out-of-ksoftirqd.patch |   2 +-
 .../sparc64-use-generic-rwsem-spinlocks-rt.patch   |   4 +-
 .../all/rt/spinlock-types-separate-raw.patch       |   2 +-
 ...ne-create-lg_global_trylock_relax-primiti.patch |  16 +-
 ...ne-use-lg_global_trylock_relax-to-dead-wi.patch |  10 +-
 .../features/all/rt/stop-machine-raw-lock.patch    |  16 +-
 ...ne-convert-stop_machine_run-to-PREEMPT_RT.patch |   4 +-
 ...ake-svc_xprt_do_enqueue-use-get_cpu_light.patch |   8 +-
 .../rt/suspend-prevernt-might-sleep-splats.patch   |  20 +-
 .../features/all/rt/sysfs-realtime-entry.patch     |   2 +-
 ...klets-from-going-into-infinite-spin-in-rt.patch |   8 +-
 .../thermal-Defer-thermal-wakups-to-threads.patch  |   2 +-
 .../rt/tick-broadcast--Make-hrtimer-irqsafe.patch  |   4 +-
 .../all/rt/timekeeping-split-jiffies-lock.patch    |   6 +-
 ...delay-waking-softirqs-from-the-jiffy-tick.patch |   4 +-
 .../features/all/rt/timer-fd-avoid-live-lock.patch |   4 +-
 .../all/rt/timer-make-the-base-lock-raw.patch      | 181 ++++++++++++++
 .../rt/timers-prepare-for-full-preemption.patch    |  91 ++++---
 ...cy-hist-Consider-new-argument-when-probin.patch |   2 +-
 ...e_version_for_preemptoff_hist_trace_point.patch |   2 +-
 ...count-for-preempt-off-in-preempt_schedule.patch |   6 +-
 ...l-8250-don-t-take-the-trylock-during-oops.patch |   5 +-
 ...t-remove-preemption-disabling-in-netif_rx.patch |   6 +-
 .../all/rt/usb-use-_nort-in-giveback.patch         |   4 +-
 .../features/all/rt/user-use-local-irq-nort.patch  |   2 +-
 .../features/all/rt/wait.h-include-atomic.h.patch  |   2 +-
 ...ue-work-around-irqsafe-timer-optimization.patch |   4 +-
 ...rk-simple-Simple-work-queue-implemenation.patch |   2 +-
 .../all/rt/workqueue-distangle-from-rq-lock.patch  |  30 +--
 .../all/rt/workqueue-prevent-deadlock-stall.patch  |  18 +-
 .../features/all/rt/workqueue-use-locallock.patch  |  22 +-
 .../features/all/rt/workqueue-use-rcu.patch        |  46 ++--
 .../all/rt/x86-UV-raw_spinlock-conversion.patch    |  50 +---
 ...86-crypto-reduce-preempt-disabled-regions.patch |  12 +-
 .../x86-highmem-add-a-already-used-pte-check.patch |   2 +-
 .../all/rt/x86-io-apic-migra-no-unmask.patch       |   4 +-
 .../all/rt/x86-kvm-require-const-tsc-for-rt.patch  |   4 +-
 .../features/all/rt/x86-mce-timer-hrtimer.patch    |  30 +--
 .../x86-mce-use-swait-queue-for-mce-wakeups.patch  |   8 +-
 .../patches/features/all/rt/x86-preempt-lazy.patch | 100 ++++++--
 ...x86-signal-delay-calling-signals-on-32bit.patch |   2 +-
 .../all/rt/x86-stackprot-no-random-on-rt.patch     |   2 +-
 .../all/rt/x86-use-gen-rwsem-spinlocks-rt.patch    |   4 +-
 debian/patches/series-rt                           |  59 ++---
 291 files changed, 4231 insertions(+), 1802 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 7d56167..3c0f522 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -25,6 +25,7 @@ linux (4.8.1-1~exp1) UNRELEASED; urgency=medium
     reading the kernel log by default (sysctl: kernel.dmesg_restrict)
   * bug script: Optionally use sudo to read a restricted kernel log, and fall
     back to writing a placeholder
+  * [rt] Update to 4.8-rt1 and re-enable
 
  -- Ben Hutchings <ben at decadent.org.uk>  Sat, 01 Oct 2016 21:51:33 +0100
 
diff --git a/debian/config/defines b/debian/config/defines
index 774c0ea..da2967d 100644
--- a/debian/config/defines
+++ b/debian/config/defines
@@ -47,7 +47,7 @@ debug-info: true
 signed-modules: true
 
 [featureset-rt_base]
-enabled: false
+enabled: true
 
 [description]
 part-long-up: This kernel is not suitable for SMP (multi-processor,
diff --git a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index 7335c9f..822724b 100644
--- a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
 From: "Yadi.hu" <yadi.hu at windriver.com>
 Date: Wed, 10 Dec 2014 10:32:09 +0800
 Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Probably happens on all ARM, with
 CONFIG_PREEMPT_RT_FULL
diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 3b662f1..857280e 100644
--- a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 21 Mar 2013 19:01:05 +0100
 Subject: printk: Drop the logbuf_lock more often
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The lock is held with irqs off. The latency drops 500us+ on my arm box
 with a "full" buffer after executing "dmesg" on the shell.
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1268,6 +1268,7 @@ static int syslog_print_all(char __user
+@@ -1399,6 +1399,7 @@ static int syslog_print_all(char __user
  {
  	char *text;
  	int len = 0;
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
  	if (!text)
-@@ -1279,6 +1280,14 @@ static int syslog_print_all(char __user
+@@ -1410,6 +1411,14 @@ static int syslog_print_all(char __user
  		u64 seq;
  		u32 idx;
  		enum log_flags prev;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		/*
  		 * Find first record that fits, including all following records,
-@@ -1294,6 +1303,14 @@ static int syslog_print_all(char __user
+@@ -1425,6 +1434,14 @@ static int syslog_print_all(char __user
  			prev = msg->flags;
  			idx = log_next(idx);
  			seq++;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  
  		/* move first record forward until length fits into the buffer */
-@@ -1307,6 +1324,14 @@ static int syslog_print_all(char __user
+@@ -1438,6 +1455,14 @@ static int syslog_print_all(char __user
  			prev = msg->flags;
  			idx = log_next(idx);
  			seq++;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  
  		/* last message fitting into this dump */
-@@ -1347,6 +1372,7 @@ static int syslog_print_all(char __user
+@@ -1478,6 +1503,7 @@ static int syslog_print_all(char __user
  		clear_seq = log_next_seq;
  		clear_idx = log_next_idx;
  	}
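
For context: the hunks above only renumber an unchanged RT hack. Inside
syslog_print_all()'s long scan loops, the patch periodically drops and
re-takes logbuf_lock (restarting the scan if records were overwritten in
the meantime) so the irqs-off region stays short. A minimal sketch of that
shape, with illustrative helper names rather than the literal printk.c
code:

	static DEFINE_RAW_SPINLOCK(logbuf_lock);	/* stand-in for printk's lock */
	int num_msg = 0;

	raw_spin_lock_irq(&logbuf_lock);
try_again:
	num_msg = 0;
	while (more_records()) {		/* illustrative helper */
		if (++num_msg > 5) {
			num_msg = 0;
			/* breathe: let other CPUs take the lock */
			raw_spin_unlock_irq(&logbuf_lock);
			raw_spin_lock_irq(&logbuf_lock);
			if (log_moved_on())	/* scan state now stale */
				goto try_again;
		}
		scan_one_record();		/* illustrative helper */
	}
	raw_spin_unlock_irq(&logbuf_lock);
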
diff --git a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 3a724e6..2b5da54 100644
--- a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:01 -0600
 Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
 the vgic and timer states to prevent the calling task from migrating to
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/arm/kvm/arm.c
 +++ b/arch/arm/kvm/arm.c
-@@ -581,7 +581,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -584,7 +584,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  		 * involves poking the GIC, which must be done in a
  		 * non-preemptible context.
  		 */
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		kvm_pmu_flush_hwstate(vcpu);
  		kvm_timer_flush_hwstate(vcpu);
  		kvm_vgic_flush_hwstate(vcpu);
-@@ -602,7 +602,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -605,7 +605,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  			kvm_pmu_sync_hwstate(vcpu);
  			kvm_timer_sync_hwstate(vcpu);
  			kvm_vgic_sync_hwstate(vcpu);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			continue;
  		}
  
-@@ -658,7 +658,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -661,7 +661,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  
  		kvm_vgic_sync_hwstate(vcpu);
  
diff --git a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 15c075b..c467b1e 100644
--- a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Marcelo Tosatti <mtosatti at redhat.com>
 Date: Wed, 8 Apr 2015 20:33:25 -0300
 Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Since the lapic timer handler only wakes up a simple waitqueue,
 it can be executed from hardirq context.
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/x86/kvm/lapic.c
 +++ b/arch/x86/kvm/lapic.c
-@@ -1870,6 +1870,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -1938,6 +1938,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
  	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
  		     HRTIMER_MODE_ABS_PINNED);
  	apic->lapic_timer.timer.function = apic_timer_fn;
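
For context: the single line this patch adds (it falls just outside the
quoted context) is, in earlier revisions of this series, the RT-specific
marking of the LAPIC hrtimer as irqsafe, presumably unchanged here:

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;
	/* RT only: run the handler from hardirq context instead of the
	 * softirq-based hrtimer path; the .irqsafe field exists only with
	 * the RT patch applied */
	apic->lapic_timer.timer.irqsafe = 1;
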
diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 5bd1dfe..ae5606d 100644
--- a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Wed, 13 Feb 2013 09:26:05 -0500
 Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We hit the following bug with 3.6-rt:
 
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /* Mutex for _OSI support */
 --- a/drivers/acpi/acpica/hwregs.c
 +++ b/drivers/acpi/acpica/hwregs.c
-@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
+@@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
  			  ACPI_BITMASK_ALL_FIXED_STATUS,
  			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
  
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto exit;
 --- a/drivers/acpi/acpica/hwxface.c
 +++ b/drivers/acpi/acpica/hwxface.c
-@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32
+@@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32
  		return_ACPI_STATUS(AE_BAD_PARAMETER);
  	}
  
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * At this point, we know that the parent register is one of the
-@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32
+@@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32
  
  unlock_and_exit:
  
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* Delete the reader/writer lock */
 --- a/include/acpi/platform/aclinux.h
 +++ b/include/acpi/platform/aclinux.h
-@@ -127,6 +127,7 @@
+@@ -131,6 +131,7 @@
  
  #define acpi_cache_t                        struct kmem_cache
  #define acpi_spinlock                       spinlock_t *
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #define acpi_cpu_flags                      unsigned long
  
  /* Use native linux version of acpi_os_allocate_zeroed */
-@@ -145,6 +146,20 @@
+@@ -149,6 +150,20 @@
  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
  
diff --git a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
index c118f00..458d457 100644
--- a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
 From: Anders Roxell <anders.roxell at linaro.org>
 Date: Thu, 14 May 2015 17:52:17 +0200
 Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 arm64 is missing support for PREEMPT_RT. The main feature which is
 lacking is support for lazy preemption. The arch-specific entry code,
@@ -13,21 +13,21 @@ indicate that support for full RT preemption is now available.
 Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
 ---
  arch/arm64/Kconfig                   |    1 +
- arch/arm64/include/asm/thread_info.h |    3 +++
+ arch/arm64/include/asm/thread_info.h |    6 +++++-
  arch/arm64/kernel/asm-offsets.c      |    1 +
  arch/arm64/kernel/entry.S            |   13 ++++++++++---
- 4 files changed, 15 insertions(+), 3 deletions(-)
+ 4 files changed, 17 insertions(+), 4 deletions(-)
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -81,6 +81,7 @@ config ARM64
+@@ -90,6 +90,7 @@ config ARM64
+ 	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
- 	select HAVE_RCU_TABLE_FREE
 +	select HAVE_PREEMPT_LAZY
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_RCU_TABLE_FREE
  	select HAVE_SYSCALL_TRACEPOINTS
- 	select IOMMU_DMA if IOMMU_SUPPORT
- 	select IRQ_DOMAIN
 --- a/arch/arm64/include/asm/thread_info.h
 +++ b/arch/arm64/include/asm/thread_info.h
 @@ -49,6 +49,7 @@ struct thread_info {
@@ -54,9 +54,19 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  #define _TIF_NOHZ		(1 << TIF_NOHZ)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+@@ -132,7 +135,8 @@ static inline struct thread_info *curren
+ #define _TIF_32BIT		(1 << TIF_32BIT)
+ 
+ #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+-				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
++				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
++				 _TIF_NEED_RESCHED_LAZY)
+ 
+ #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
 --- a/arch/arm64/kernel/asm-offsets.c
 +++ b/arch/arm64/kernel/asm-offsets.c
-@@ -36,6 +36,7 @@ int main(void)
+@@ -37,6 +37,7 @@ int main(void)
    BLANK();
    DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
    DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
@@ -66,7 +76,7 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
    DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
 --- a/arch/arm64/kernel/entry.S
 +++ b/arch/arm64/kernel/entry.S
-@@ -411,11 +411,16 @@ ENDPROC(el1_sync)
+@@ -434,11 +434,16 @@ ENDPROC(el1_sync)
  
  #ifdef CONFIG_PREEMPT
  	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
@@ -86,7 +96,7 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  #endif
  #ifdef CONFIG_TRACE_IRQFLAGS
  	bl	trace_hardirqs_on
-@@ -429,6 +434,7 @@ ENDPROC(el1_irq)
+@@ -452,6 +457,7 @@ ENDPROC(el1_irq)
  1:	bl	preempt_schedule_irq		// irq en/disable is done inside
  	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
  	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
@@ -94,7 +104,7 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  	ret	x24
  #endif
  
-@@ -675,6 +681,7 @@ ENDPROC(cpu_switch_to)
+@@ -708,6 +714,7 @@ ENDPROC(cpu_switch_to)
   */
  work_pending:
  	tbnz	x1, #TIF_NEED_RESCHED, work_resched
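
For context, the preemption check the arm64 entry code grows above can be
read in C roughly as follows (illustrative only; preempt_lazy_count() and
TIF_NEED_RESCHED_LAZY exist only with the RT patch applied):

	if (!preempt_count() && need_resched()) {
		/* hard resched request: preempt immediately */
		preempt_schedule_irq();
	} else if (!preempt_count() && !preempt_lazy_count() &&
		   test_thread_flag(TIF_NEED_RESCHED_LAZY)) {
		/* lazy resched: SCHED_OTHER tasks only, deferred cheaply */
		preempt_schedule_irq();
	}
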
diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 7016663..06068da 100644
--- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Sat, 6 Mar 2010 17:47:10 +0100
 Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Set up and remove the interrupt handler in clock event mode selection.
 This avoids calling the (shared) interrupt handler when the device is
@@ -14,9 +14,9 @@ commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
 Patch based on what Sami Pietikäinen <Sami.Pietikainen at wapice.com> suggested].
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- drivers/clocksource/timer-atmel-pit.c |   17 +++++++++--------
- drivers/clocksource/timer-atmel-st.c  |   32 ++++++++++++++++++++++----------
- 2 files changed, 31 insertions(+), 18 deletions(-)
+ drivers/clocksource/timer-atmel-pit.c |   18 +++++++++---------
+ drivers/clocksource/timer-atmel-st.c  |   34 ++++++++++++++++++++++------------
+ 2 files changed, 31 insertions(+), 21 deletions(-)
 
 --- a/drivers/clocksource/timer-atmel-pit.c
 +++ b/drivers/clocksource/timer-atmel-pit.c
@@ -45,24 +45,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* update clocksource counter */
  	data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
-@@ -181,7 +190,6 @@ static void __init at91sam926x_pit_commo
- {
- 	unsigned long	pit_rate;
- 	unsigned	bits;
--	int		ret;
- 
- 	/*
- 	 * Use our actual MCK to figure out how many MCK/16 ticks per
-@@ -206,13 +214,6 @@ static void __init at91sam926x_pit_commo
- 	data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- 	clocksource_register_hz(&data->clksrc, pit_rate);
+@@ -211,15 +220,6 @@ static int __init at91sam926x_pit_common
+ 		return ret;
+ 	}
  
 -	/* Set up irq handler */
 -	ret = request_irq(data->irq, at91sam926x_pit_interrupt,
 -			  IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
 -			  "at91_tick", data);
--	if (ret)
--		panic(pr_fmt("Unable to setup IRQ\n"));
+-	if (ret) {
+-		pr_err("Unable to setup IRQ\n");
+-		return ret;
+-	}
 -
  	/* Set up and register clockevents */
  	data->clkevt.name = "pit";
@@ -116,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* PIT for periodic irqs; fixed rate of 1/HZ */
  	irqmask = AT91_ST_PITS;
  	regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
-@@ -198,7 +217,7 @@ static void __init atmel_st_timer_init(s
+@@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(st
  {
  	struct clk *sclk;
  	unsigned int sclk_rate, val;
@@ -124,24 +118,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	int ret;
  
  	regmap_st = syscon_node_to_regmap(node);
- 	if (IS_ERR(regmap_st))
-@@ -210,17 +229,10 @@ static void __init atmel_st_timer_init(s
+ 	if (IS_ERR(regmap_st)) {
+@@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(st
  	regmap_read(regmap_st, AT91_ST_SR, &val);
  
  	/* Get the interrupts property */
 -	irq  = irq_of_parse_and_map(node, 0);
--	if (!irq)
+-	if (!irq) {
 +	atmel_st_irq  = irq_of_parse_and_map(node, 0);
-+	if (!atmel_st_irq)
- 		panic(pr_fmt("Unable to get IRQ from DT\n"));
++	if (!atmel_st_irq) {
+ 		pr_err("Unable to get IRQ from DT\n");
+ 		return -EINVAL;
+ 	}
  
 -	/* Make IRQs happen for the system timer */
 -	ret = request_irq(irq, at91rm9200_timer_interrupt,
 -			  IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
 -			  "at91_tick", regmap_st);
--	if (ret)
--		panic(pr_fmt("Unable to setup IRQ\n"));
+-	if (ret) {
+-		pr_err("Unable to setup IRQ\n");
+-		return ret;
+-	}
 -
  	sclk = of_clk_get(node, 0);
- 	if (IS_ERR(sclk))
- 		panic(pr_fmt("Unable to get slow clock\n"));
+ 	if (IS_ERR(sclk)) {
+ 		pr_err("Unable to get slow clock\n");
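
For context: the effect of the patch, before and after the 4.8 rebase, is
to move IRQ setup out of driver init and into the clock event state
changes, so the shared handler is only registered while the device is in
use. A hedged sketch of that shape, with illustrative names rather than
the literal timer-atmel-pit.c code ("data" stands for the driver's
private state, captured as in the real driver):

	static int pit_clkevt_set_periodic(struct clock_event_device *dev)
	{
		/* request the (shared) IRQ only when the timer is used */
		int ret = request_irq(data->irq, at91sam926x_pit_interrupt,
				      IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
				      "at91_tick", data);
		if (ret)
			return ret;
		/* ... program periodic mode ... */
		return 0;
	}

	static int pit_clkevt_shutdown(struct clock_event_device *dev)
	{
		/* ... stop the timer ... */
		free_irq(data->irq, data);	/* no handler while unused */
		return 0;
	}
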
diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
index d79082a..eeb53e6 100644
--- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 May 2010 18:29:35 +0200
 Subject: ARM: at91: tclib: Default to tclib timer for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT is not too happy about the shared timer interrupt in AT91
 devices. Default to tclib timer for RT.
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
index 7f78397..f968802 100644
--- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
+++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
@@ -1,7 +1,7 @@
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Mon, 19 Sep 2011 14:51:14 -0700
 Subject: arm: Convert arm boot_lock to raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The arm boot_lock is used by the secondary processor startup code.  The locking
 task is the idle thread, which has idle->sched_class == &idle_sched_class.
@@ -168,16 +168,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
 --- a/arch/arm/mach-omap2/omap-smp.c
 +++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -43,7 +43,7 @@
- /* SCU base address */
- static void __iomem *scu_base;
+@@ -64,7 +64,7 @@ static const struct omap_smp_config omap
+ 	.startup_addr = omap5_secondary_startup,
+ };
  
 -static DEFINE_SPINLOCK(boot_lock);
 +static DEFINE_RAW_SPINLOCK(boot_lock);
  
  void __iomem *omap4_get_scu_base(void)
  {
-@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigne
+@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigne
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned
+@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -197,7 +197,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned
+@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned
  	 * Now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -368,7 +368,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/arch/arm/plat-versatile/platsmp.c
 +++ b/arch/arm/plat-versatile/platsmp.c
-@@ -30,7 +30,7 @@ static void write_pen_release(int val)
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
  	sync_cache_w(&pen_release);
  }
  
@@ -377,7 +377,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  void versatile_secondary_init(unsigned int cpu)
  {
-@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i
+@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned i
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in
+@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned in
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -397,7 +397,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * This is really belt and braces; we hold unintended secondary
-@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in
+@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned in
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
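
For context, the rule this conversion relies on: with PREEMPT_RT applied,
a plain spinlock_t becomes a sleeping rtmutex, which the idle thread
running this bring-up path must never block on, while raw_spinlock_t keeps
the busy-waiting semantics. A minimal sketch:

	static DEFINE_RAW_SPINLOCK(boot_lock);	/* still spins on RT */

	raw_spin_lock(&boot_lock);		/* safe from the idle task */
	/* ... synchronise with the secondary CPU ... */
	raw_spin_unlock(&boot_lock);
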
diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
index 7188e41..b5db91e 100644
--- a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm: Enable highmem for rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 fixup highmem for ARM.
 
diff --git a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
index 6ea9e19..24021a2 100644
--- a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 21:37:27 +0100
 Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The TLB should be flushed on unmap, thus making the mapping entry
 invalid. This is only done in the non-debug case, which does not look
diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
index c48735e..d270b50 100644
--- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
@@ -1,22 +1,23 @@
 Subject: arm: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Implement the arm pieces for lazy preempt.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  arch/arm/Kconfig                   |    1 +
- arch/arm/include/asm/thread_info.h |    3 +++
+ arch/arm/include/asm/thread_info.h |    8 ++++++--
  arch/arm/kernel/asm-offsets.c      |    1 +
- arch/arm/kernel/entry-armv.S       |   13 +++++++++++--
+ arch/arm/kernel/entry-armv.S       |   19 ++++++++++++++++---
+ arch/arm/kernel/entry-common.S     |    9 +++++++--
  arch/arm/kernel/signal.c           |    3 ++-
- 5 files changed, 18 insertions(+), 3 deletions(-)
+ 6 files changed, 33 insertions(+), 8 deletions(-)
 
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -71,6 +71,7 @@ config ARM
+@@ -75,6 +75,7 @@ config ARM
  	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -34,11 +35,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	mm_segment_t		addr_limit;	/* address limit */
  	struct task_struct	*task;		/* main task structure */
  	__u32			cpu;		/* cpu */
-@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(stru
+@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(stru
+ #define TIF_SYSCALL_TRACE	4	/* syscall trace active */
  #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
  #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
- #define TIF_SECCOMP		7	/* seccomp syscall filtering active */
-+#define TIF_NEED_RESCHED_LAZY	8
+-#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
++#define TIF_SECCOMP		8	/* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY	7
  
  #define TIF_NOHZ		12	/* in adaptive nohz mode */
  #define TIF_USING_IWMMXT	17
@@ -50,6 +53,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define _TIF_UPROBE		(1 << TIF_UPROBE)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru
+  * Change these and you break ASM code in entry-common.S
+  */
+ #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+-				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++				 _TIF_NEED_RESCHED_LAZY)
+ 
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
 --- a/arch/arm/kernel/asm-offsets.c
 +++ b/arch/arm/kernel/asm-offsets.c
 @@ -65,6 +65,7 @@ int main(void)
@@ -62,9 +75,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
    DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
 --- a/arch/arm/kernel/entry-armv.S
 +++ b/arch/arm/kernel/entry-armv.S
-@@ -215,11 +215,18 @@ ENDPROC(__dabt_svc)
+@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
+ 
  #ifdef CONFIG_PREEMPT
- 	get_thread_info tsk
  	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 -	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
  	teq	r8, #0				@ if preempt count != 0
@@ -83,15 +96,48 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  	svc_exit r5, irq = 1			@ return from exception
-@@ -234,6 +241,8 @@ ENDPROC(__irq_svc)
+@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
  1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
  	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
  	tst	r0, #_TIF_NEED_RESCHED
 +	bne	1b
 +	tst	r0, #_TIF_NEED_RESCHED_LAZY
  	reteq	r8				@ go again
- 	b	1b
+-	b	1b
++	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
++	teq	r0, #0				@ if preempt lazy count != 0
++	beq	1b
++	ret	r8				@ go again
++
  #endif
+ 
+ __und_fault:
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -36,7 +36,9 @@
+  UNWIND(.cantunwind	)
+ 	disable_irq_notrace			@ disable interrupts
+ 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++	bne	fast_work_pending
++	tst	r1, #_TIF_SECCOMP
+ 	bne	fast_work_pending
+ 
+ 	/* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+ 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+ 	disable_irq_notrace			@ disable interrupts
+ 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++	bne 	do_slower_path
++	tst	r1, #_TIF_SECCOMP
+ 	beq	no_work_pending
++do_slower_path:
+  UNWIND(.fnend		)
+ ENDPROC(ret_fast_syscall)
+ 
 --- a/arch/arm/kernel/signal.c
 +++ b/arch/arm/kernel/signal.c
 @@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un
diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
index af969eb..3570942 100644
--- a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
+++ b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 20 Sep 2013 14:31:54 +0200
 Subject: arm/unwind: use a raw_spin_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Mostly, unwind is done with irqs enabled; however, SLUB may call it with
 irqs disabled while creating a new SLUB cache.
diff --git a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 574dafc..4bb3d52 100644
--- a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm64/xen: Make XEN depend on !RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 12 Oct 2015 11:18:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 It's not ready and probably never will be, unless the Xen folks have a
 look at it.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -624,7 +624,7 @@ config XEN_DOM0
+@@ -689,7 +689,7 @@ config XEN_DOM0
  
  config XEN
  	bool "Xen guest support on ARM64"
diff --git a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
index acba749..411a6a8 100644
--- a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
+++ b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 09 Mar 2016 10:51:06 +0100
 Subject: arm: at91: do not disable/enable clocks in a row
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Currently the driver will disable the clock and enable it one line later
 if it is switching from periodic mode into one-shot mode.
diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
index 4b4d10d..70d2d8a 100644
--- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
+++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Fri, 3 Jul 2009 08:44:29 -0500
 Subject: ata: Do not disable interrupts in ide code for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the local_irq_*_nort variants.
 
diff --git a/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
index c825fe1..b9bf7c3 100644
--- a/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
+++ b/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Sat, 3 May 2014 11:00:29 +0200
 Subject: blk-mq: revert raw locks, post pone notifier to POST_DEAD
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The blk_mq_cpu_notify_lock should be raw because some CPU down levels
 are called with interrupts off. The notifier itself currently calls one
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -1641,7 +1641,7 @@ static int blk_mq_hctx_notify(void *data
+@@ -1687,7 +1687,7 @@ static int blk_mq_hctx_notify(void *data
  {
  	struct blk_mq_hw_ctx *hctx = data;
  
diff --git a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
index 8afb529..7381090 100644
--- a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
+++ b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 11:01:26 +0100
 Subject: block: blk-mq: Use swait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -660,7 +660,7 @@ int blk_queue_enter(struct request_queue
+@@ -662,7 +662,7 @@ int blk_queue_enter(struct request_queue
  		if (nowait)
  			return -EBUSY;
  
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				!atomic_read(&q->mq_freeze_depth) ||
  				blk_queue_dying(q));
  		if (blk_queue_dying(q))
-@@ -680,7 +680,7 @@ static void blk_queue_usage_counter_rele
+@@ -682,7 +682,7 @@ static void blk_queue_usage_counter_rele
  	struct request_queue *q =
  		container_of(ref, struct request_queue, q_usage_counter);
  
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void blk_rq_timed_out_timer(unsigned long data)
-@@ -749,7 +749,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -751,7 +751,7 @@ struct request_queue *blk_alloc_queue_no
  	q->bypass_depth = 1;
  	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
  
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -458,7 +458,7 @@ struct request_queue {
+@@ -468,7 +468,7 @@ struct request_queue {
  	struct throtl_data *td;
  #endif
  	struct rcu_head		rcu_head;
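
For context: swait is the simple waitqueue API merged in v4.6
(include/linux/swait.h). Its wake side is cheap and, under RT, usable
where the full waitqueue's locking is not. The shape used by this patch,
sketched with an illustrative condition helper:

	static DECLARE_SWAIT_QUEUE_HEAD(mq_freeze_wq);

	/* sleeper side, e.g. blk_queue_enter() */
	swait_event(mq_freeze_wq, freeze_depth_is_zero());

	/* waker side, e.g. the q_usage_counter release path */
	swake_up_all(&mq_freeze_wq);
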
diff --git a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
index fd28740..d927352 100644
--- a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 15:10:08 +0100
 Subject: block/mq: don't complete requests via IPI
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The IPI runs in hardirq context and there are sleeping locks. This patch
 moves the completion into a workqueue.
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	rq->__sector = (sector_t) -1;
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct re
+@@ -197,6 +197,9 @@ static void blk_mq_rq_ctx_init(struct re
  	rq->resid_len = 0;
  	rq->sense = NULL;
  
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	INIT_LIST_HEAD(&rq->timeout_list);
  	rq->timeout = 0;
  
-@@ -323,6 +326,17 @@ void blk_mq_end_request(struct request *
+@@ -379,6 +382,17 @@ void blk_mq_end_request(struct request *
  }
  EXPORT_SYMBOL(blk_mq_end_request);
  
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void __blk_mq_complete_request_remote(void *data)
  {
  	struct request *rq = data;
-@@ -330,6 +344,8 @@ static void __blk_mq_complete_request_re
+@@ -386,6 +400,8 @@ static void __blk_mq_complete_request_re
  	rq->q->softirq_done_fn(rq);
  }
  
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void blk_mq_ipi_complete_request(struct request *rq)
  {
  	struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -346,10 +362,14 @@ static void blk_mq_ipi_complete_request(
+@@ -402,10 +418,14 @@ static void blk_mq_ipi_complete_request(
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
  	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
 --- a/include/linux/blk-mq.h
 +++ b/include/linux/blk-mq.h
-@@ -218,6 +218,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -222,6 +222,7 @@ static inline u16 blk_mq_unique_tag_to_t
  
  struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
  struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
@@ -92,11 +92,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void blk_mq_start_request(struct request *rq);
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -90,6 +90,7 @@ struct request {
+@@ -89,6 +89,7 @@ struct request {
  	struct list_head queuelist;
  	union {
  		struct call_single_data csd;
 +		struct work_struct work;
- 		unsigned long fifo_time;
+ 		u64 fifo_time;
  	};
  
diff --git a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
index 0bbf93a..45bf4e3 100644
--- a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 preempt_disable() and get_cpu() don't play well together with the sleeping
 locks the code path tries to acquire later.
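
get_cpu_light() is an -RT helper: instead of disabling preemption it only
disables migration, so the section between the get and put may still take
sleeping spinlocks. A minimal sketch of the substitution (do_percpu_work()
and process_on() are hypothetical stand-ins):

    static void do_percpu_work(void)
    {
            int cpu;

            /* -RT: migrate_disable(), preemption stays enabled;
             * !RT: behaves exactly like get_cpu() */
            cpu = get_cpu_light();
            process_on(cpu);        /* may take sleeping locks on -RT */
            put_cpu_light();
    }
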
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -341,7 +341,7 @@ static void blk_mq_ipi_complete_request(
+@@ -397,7 +397,7 @@ static void blk_mq_ipi_complete_request(
  		return;
  	}
  
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
-@@ -353,7 +353,7 @@ static void blk_mq_ipi_complete_request(
+@@ -409,7 +409,7 @@ static void blk_mq_ipi_complete_request(
  	} else {
  		rq->q->softirq_done_fn(rq);
  	}
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void __blk_mq_complete_request(struct request *rq)
-@@ -868,14 +868,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+@@ -938,14 +938,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
  		return;
  
  	if (!async) {
diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
index 8af0165..97d1db0 100644
--- a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
+++ b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 10:37:23 +0200
 Subject: block: mq: use cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There is a might-sleep splat because get_cpu() disables preemption and we
 later grab a lock. As a workaround for this we use get_cpu_light().
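
The splat comes from this shape of code, where a spinlock turns into a
sleeping rtmutex on -RT (schematic, not the actual blk-mq code):

    static void broken_on_rt(void)
    {
            int cpu = get_cpu();            /* disables preemption */

            spin_lock(&some_lock);          /* sleeps on -RT -> splat */
            do_work(cpu);
            spin_unlock(&some_lock);
            put_cpu();
    }

Swapping get_cpu()/put_cpu() for get_cpu_light()/put_cpu_light() keeps the
task on the CPU without forbidding the sleep.
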
diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
index 1a31010..14eb578 100644
--- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: block: Shorten interrupt disabled regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Jun 2011 19:47:02 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Moving the blk_sched_flush_plug() call out of the interrupt/preempt
 disabled region in the scheduler allows us to replace
@@ -48,7 +48,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -3209,7 +3209,7 @@ static void queue_unplugged(struct reque
+@@ -3171,7 +3171,7 @@ static void queue_unplugged(struct reque
  		blk_run_queue_async(q);
  	else
  		__blk_run_queue(q);
@@ -57,7 +57,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  }
  
  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3257,7 +3257,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3219,7 +3219,6 @@ EXPORT_SYMBOL(blk_check_plugged);
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
@@ -65,7 +65,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
-@@ -3277,11 +3276,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3239,11 +3238,6 @@ void blk_flush_plug_list(struct blk_plug
  	q = NULL;
  	depth = 0;
  
@@ -77,7 +77,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
-@@ -3294,7 +3288,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3256,7 +3250,7 @@ void blk_flush_plug_list(struct blk_plug
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
@@ -86,7 +86,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  		}
  
  		/*
-@@ -3321,8 +3315,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3283,8 +3277,6 @@ void blk_flush_plug_list(struct blk_plug
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);
diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch
index c75ae41..2f4e1af 100644
--- a/debian/patches/features/all/rt/block-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: block: Use cpu_chill() for retry loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 20 Dec 2012 18:28:26 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Steven also observed a live lock when there was a
diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
index 8a3d77f..678bfa1 100644
--- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
+++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code.
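
The variants are thin wrappers; a sketch of how such macros can be defined
(the macro bodies are not visible in this hunk, so treat the exact names and
spelling as illustrative):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define BUG_ON_RT(c)           BUG_ON(c)
    # define BUG_ON_NORT(c)         do { } while (0)
    # define WARN_ON_RT(c)          WARN_ON(c)
    # define WARN_ON_NORT(c)        do { } while (0)
    #else
    # define BUG_ON_RT(c)           do { } while (0)
    # define BUG_ON_NORT(c)         BUG_ON(c)
    # define WARN_ON_RT(c)          do { } while (0)
    # define WARN_ON_NORT(c)        WARN_ON(c)
    #endif

The dm-rq patch further down in this series uses the BUG_ON_NORT variant in
exactly this spirit.
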
 
diff --git a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index 456af41..1bf7837 100644
--- a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 21 Jun 2014 10:09:48 +0200
 Subject: memcontrol: Prevent scheduling while atomic in cgroup code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 mm, memcg: make refill_stock() use get_cpu_light()
 
@@ -33,33 +33,73 @@ What happens:
 
 Fix it by replacing get/put_cpu_var() with get/put_cpu_light().
 
-
 Reported-by: Nikita Yushchenko <nyushchenko at dev.rtsoft.ru>
 Signed-off-by: Mike Galbraith <umgwanakikbuti at gmail.com>
+[bigeasy: use memcg_stock_ll as a locallock since it is now IRQ-off region]
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- mm/memcontrol.c |    7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
+ mm/memcontrol.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1828,14 +1828,17 @@ static void drain_local_stock(struct wor
-  */
- static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- {
--	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
-+	struct memcg_stock_pcp *stock;
-+	int cpu = get_cpu_light();
-+
-+	stock = &per_cpu(memcg_stock, cpu);
+@@ -1727,6 +1727,7 @@ struct memcg_stock_pcp {
+ #define FLUSHING_CACHED_CHARGE	0
+ };
+ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
++static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
+ static DEFINE_MUTEX(percpu_charge_mutex);
+ 
+ /**
+@@ -1749,7 +1750,7 @@ static bool consume_stock(struct mem_cgr
+ 	if (nr_pages > CHARGE_BATCH)
+ 		return ret;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(memcg_stock_ll, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -1757,7 +1758,7 @@ static bool consume_stock(struct mem_cgr
+ 		ret = true;
+ 	}
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(memcg_stock_ll, flags);
+ 
+ 	return ret;
+ }
+@@ -1784,13 +1785,13 @@ static void drain_local_stock(struct wor
+ 	struct memcg_stock_pcp *stock;
+ 	unsigned long flags;
  
+-	local_irq_save(flags);
++	local_lock_irqsave(memcg_stock_ll, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	drain_stock(stock);
+ 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(memcg_stock_ll, flags);
+ }
+ 
+ /*
+@@ -1802,7 +1803,7 @@ static void refill_stock(struct mem_cgro
+ 	struct memcg_stock_pcp *stock;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(memcg_stock_ll, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
  	if (stock->cached != memcg) { /* reset if necessary */
- 		drain_stock(stock);
- 		stock->cached = memcg;
+@@ -1811,7 +1812,7 @@ static void refill_stock(struct mem_cgro
  	}
  	stock->nr_pages += nr_pages;
--	put_cpu_var(memcg_stock);
-+	put_cpu_light();
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(memcg_stock_ll, flags);
  }
  
  /*
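
The conversion above follows the standard -RT local-lock pattern: on
mainline kernels local_lock_irqsave() compiles down to plain
local_irq_save(), while on -RT it takes a per-CPU rtmutex, keeping the
section preemptible. Schematically (the wrapper function is illustrative;
the lock name matches the hunks above):

    static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);

    static void touch_stock(void)
    {
            unsigned long flags;

            local_lock_irqsave(memcg_stock_ll, flags);
            /* ... manipulate this_cpu_ptr(&memcg_stock) ... */
            local_unlock_irqrestore(memcg_stock_ll, flags);
    }
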
diff --git a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
index 5cc6cfd..f5354fd 100644
--- a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
+++ b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 15:52:24 +0100
 Subject: cgroups: use simple wait in css_release()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 To avoid:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
-@@ -5005,10 +5005,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -5027,10 +5027,10 @@ static void css_free_rcu_fn(struct rcu_h
  	queue_work(cgroup_destroy_wq, &css->destroy_work);
  }
  
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct cgroup_subsys *ss = css->ss;
  	struct cgroup *cgrp = css->cgroup;
  
-@@ -5049,8 +5049,8 @@ static void css_release(struct percpu_re
+@@ -5071,8 +5071,8 @@ static void css_release(struct percpu_re
  	struct cgroup_subsys_state *css =
  		container_of(ref, struct cgroup_subsys_state, refcnt);
  
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5694,6 +5694,7 @@ static int __init cgroup_wq_init(void)
+@@ -5716,6 +5716,7 @@ static int __init cgroup_wq_init(void)
  	 */
  	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
  	BUG_ON(!cgroup_destroy_wq);
diff --git a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
index 8377d56..2a309e9 100644
--- a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
+++ b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
@@ -1,7 +1,7 @@
 From: Alexandre Belloni <alexandre.belloni at free-electrons.com>
 Date: Thu, 17 Mar 2016 21:09:43 +0100
 Subject: [PATCH] clockevents/drivers/timer-atmel-pit: fix double free_irq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 clockevents_exchange_device() changes the state from detached to shutdown
 and so at that point the IRQ has not yet been requested.
diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
index 68e2da4..f23afd8 100644
--- a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
+++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Mon, 8 Mar 2010 18:57:04 +0100
 Subject: clocksource: TCLIB: Allow higher clock rates for clock events
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 By default the TCLIB uses the 32KiHz base clock rate for clock events.
 Add a compile-time selection to allow higher clock resolution.
diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
index aa786cf..ddde3a8 100644
--- a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
+++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
@@ -1,7 +1,7 @@
 Subject: completion: Use simple wait queues
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 11 Jan 2013 11:23:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Completions have no long-lasting callbacks and therefore do not need
 the complex waitqueue variant. Use simple waitqueues, which reduces the
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		break;
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1393,7 +1393,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1509,7 +1509,7 @@ static void ffs_data_put(struct ffs_data
  		pr_info("%s(): freeing\n", __func__);
  		ffs_data_clear(ffs);
  		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
 --- a/include/linux/suspend.h
 +++ b/include/linux/suspend.h
-@@ -194,6 +194,12 @@ struct platform_freeze_ops {
+@@ -193,6 +193,12 @@ struct platform_freeze_ops {
  	void (*end)(void);
  };
  
@@ -137,8 +137,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  struct mm_struct;
 --- a/kernel/power/hibernate.c
 +++ b/kernel/power/hibernate.c
-@@ -649,6 +649,10 @@ static void power_down(void)
- 		cpu_relax();
+@@ -681,6 +681,10 @@ static int load_image_and_restore(void)
+ 	return error;
  }
  
 +#ifndef CONFIG_SUSPEND
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * hibernate - Carry out system hibernation, including saving the image.
   */
-@@ -661,6 +665,8 @@ int hibernate(void)
+@@ -694,6 +698,8 @@ int hibernate(void)
  		return -EPERM;
  	}
  
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	lock_system_sleep();
  	/* The snapshot device should not be opened while we're running */
  	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -726,6 +732,7 @@ int hibernate(void)
+@@ -771,6 +777,7 @@ int hibernate(void)
  	atomic_inc(&snapshot_device_available);
   Unlock:
  	unlock_system_sleep();
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -521,6 +521,8 @@ static int enter_state(suspend_state_t s
+@@ -523,6 +523,8 @@ static int enter_state(suspend_state_t s
  	return error;
  }
  
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * pm_suspend - Externally visible function for suspending the system.
   * @state: System sleep state to enter.
-@@ -535,6 +537,8 @@ int pm_suspend(suspend_state_t state)
+@@ -537,6 +539,8 @@ int pm_suspend(suspend_state_t state)
  	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
  		return -EINVAL;
  
@@ -185,7 +185,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	error = enter_state(state);
  	if (error) {
  		suspend_stats.fail++;
-@@ -542,6 +546,7 @@ int pm_suspend(suspend_state_t state)
+@@ -544,6 +548,7 @@ int pm_suspend(suspend_state_t state)
  	} else {
  		suspend_stats.success++;
  	}
@@ -287,7 +287,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  EXPORT_SYMBOL(completion_done);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3141,7 +3141,10 @@ void migrate_disable(void)
+@@ -3317,7 +3317,10 @@ void migrate_disable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  	if (p->migrate_disable) {
-@@ -3168,7 +3171,10 @@ void migrate_enable(void)
+@@ -3344,7 +3347,10 @@ void migrate_enable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
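
Stepping back to the core of this patch: on -RT a completion is backed by a
simple waitqueue. Roughly (a sketch in terms of the swait API, with an
illustrative struct name -- the real patch changes struct completion
itself):

    #include <linux/swait.h>

    struct completion_rt {
            unsigned int done;
            struct swait_queue_head wait;   /* raw-spinlock based */
    };

    static void complete_rt(struct completion_rt *x)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&x->wait.lock, flags);
            x->done++;
            swake_up_locked(&x->wait);
            raw_spin_unlock_irqrestore(&x->wait.lock, flags);
    }
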
diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
index c837292..abe09dd 100644
--- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
 Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT does not increment preempt count when a 'sleeping' spinlock is
 locked. Update PREEMPT_LOCK_OFFSET for that case.
diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
index 3614b28..9fa301b 100644
--- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
+++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Take RT softirq semantics into account in cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Jul 2011 09:56:44 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The softirq semantics work differently on -RT. There is no SOFTIRQ_MASK in
 the preemption counter which leads to the BUG_ON() statement in
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3029,12 +3029,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -3258,12 +3258,16 @@ extern int __cond_resched_lock(spinlock_
  	__cond_resched_lock(lock);				\
  })
  
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4812,6 +4812,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5021,6 +5021,7 @@ int __cond_resched_lock(spinlock_t *lock
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -4825,6 +4826,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5034,6 +5035,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index 29df05a..b7ed14b 100644
--- a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 5 Dec 2013 09:16:52 -0500
 Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The patch:
 
diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 6b1ac95..58abaf4 100644
--- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 02 Mar 2012 10:36:57 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Tasks can block on hotplug.lock in pin_current_cpu(), but their state
 might be != RUNNING. So the mutex wakeup will set the state
@@ -20,8 +20,8 @@ Cc: Clark Williams <clark.williams at gmail.com>
 Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- kernel/cpu.c |   34 +++++++++++++++++++++++++++-------
- 1 file changed, 27 insertions(+), 7 deletions(-)
+ kernel/cpu.c |   32 +++++++++++++++++++++++++-------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -153,12 +159,26 @@ static struct {
+@@ -153,12 +159,24 @@ static struct {
  } cpu_hotplug = {
  	.active_writer = NULL,
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -57,19 +57,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  };
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock()		rt_spin_lock(&cpu_hotplug.lock)
-+# define hotplug_trylock()	rt_spin_trylock(&cpu_hotplug.lock)
-+# define hotplug_unlock()	rt_spin_unlock(&cpu_hotplug.lock)
++# define hotplug_lock()		rt_spin_lock__no_mg(&cpu_hotplug.lock)
++# define hotplug_unlock()	rt_spin_unlock__no_mg(&cpu_hotplug.lock)
 +#else
 +# define hotplug_lock()		mutex_lock(&cpu_hotplug.lock)
-+# define hotplug_trylock()	mutex_trylock(&cpu_hotplug.lock)
 +# define hotplug_unlock()	mutex_unlock(&cpu_hotplug.lock)
 +#endif
 +
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -195,8 +215,8 @@ void pin_current_cpu(void)
+@@ -195,8 +213,8 @@ void pin_current_cpu(void)
  		return;
  	}
  	preempt_enable();
@@ -80,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_disable();
  	goto retry;
  }
-@@ -269,9 +289,9 @@ void get_online_cpus(void)
+@@ -269,9 +287,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -92,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -324,11 +344,11 @@ void cpu_hotplug_begin(void)
+@@ -324,11 +342,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -106,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -337,7 +357,7 @@ void cpu_hotplug_begin(void)
+@@ -337,7 +355,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
index 0d622d2..d6ce11f 100644
--- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
+++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Mon, 16 Jul 2012 08:07:43 +0000
 Subject: cpu/rt: Rework cpu down for PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Bringing a CPU down is a pain with the PREEMPT_RT kernel because
 tasks can be preempted in many more places than in non-RT. In
@@ -51,13 +51,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
  include/linux/sched.h |    7 +
- kernel/cpu.c          |  240 ++++++++++++++++++++++++++++++++++++++++----------
+ kernel/cpu.c          |  238 +++++++++++++++++++++++++++++++++++++++++---------
  kernel/sched/core.c   |   78 ++++++++++++++++
- 3 files changed, 281 insertions(+), 44 deletions(-)
+ 3 files changed, 281 insertions(+), 42 deletions(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2325,6 +2325,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2429,6 +2429,10 @@ extern void do_set_cpus_allowed(struct t
  
  extern int set_cpus_allowed_ptr(struct task_struct *p,
  				const struct cpumask *new_mask);
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #else
  static inline void do_set_cpus_allowed(struct task_struct *p,
  				      const struct cpumask *new_mask)
-@@ -2337,6 +2341,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2441,6 +2445,9 @@ static inline int set_cpus_allowed_ptr(s
  		return -EINVAL;
  	return 0;
  }
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -158,27 +152,13 @@ static struct {
+@@ -158,25 +152,13 @@ static struct {
  #endif
  } cpu_hotplug = {
  	.active_writer = NULL,
@@ -114,19 +114,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  };
  
 -#ifdef CONFIG_PREEMPT_RT_FULL
--# define hotplug_lock()		rt_spin_lock(&cpu_hotplug.lock)
--# define hotplug_trylock()	rt_spin_trylock(&cpu_hotplug.lock)
--# define hotplug_unlock()	rt_spin_unlock(&cpu_hotplug.lock)
+-# define hotplug_lock()		rt_spin_lock__no_mg(&cpu_hotplug.lock)
+-# define hotplug_unlock()	rt_spin_unlock__no_mg(&cpu_hotplug.lock)
 -#else
 -# define hotplug_lock()		mutex_lock(&cpu_hotplug.lock)
--# define hotplug_trylock()	mutex_trylock(&cpu_hotplug.lock)
 -# define hotplug_unlock()	mutex_unlock(&cpu_hotplug.lock)
 -#endif
 -
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -186,12 +166,42 @@ static struct {
+@@ -184,12 +166,42 @@ static struct {
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
@@ -159,8 +157,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  };
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
-+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
 +#else
 +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
 +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
@@ -169,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
  
  /**
-@@ -205,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
  void pin_current_cpu(void)
  {
  	struct hotplug_pcp *hp;
@@ -213,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_disable();
  	goto retry;
  }
-@@ -237,26 +268,84 @@ void unpin_current_cpu(void)
+@@ -235,26 +268,84 @@ void unpin_current_cpu(void)
  		wake_up_process(hp->unplug);
  }
  
@@ -305,7 +303,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Start the sync_unplug_thread on the target cpu and wait for it to
   * complete.
-@@ -264,23 +353,83 @@ static int sync_unplug_thread(void *data
+@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data
  static int cpu_unplug_begin(unsigned int cpu)
  {
  	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -396,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  void get_online_cpus(void)
-@@ -289,9 +438,9 @@ void get_online_cpus(void)
+@@ -287,9 +438,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -408,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -344,11 +493,11 @@ void cpu_hotplug_begin(void)
+@@ -342,11 +493,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -422,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -357,7 +506,7 @@ void cpu_hotplug_begin(void)
+@@ -355,7 +506,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
@@ -431,7 +429,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	cpuhp_lock_release();
  }
  
-@@ -838,6 +987,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu
  	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  	smpboot_park_threads(cpu);
  
@@ -443,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	 * interrupt affinities.
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1091,6 +1091,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -1129,6 +1129,84 @@ void do_set_cpus_allowed(struct task_str
  		enqueue_task(rq, p, ENQUEUE_RESTORE);
  }
  
@@ -485,8 +483,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	struct migration_arg arg;
 +	struct cpumask *cpumask;
 +	struct cpumask *mask;
-+	unsigned long flags;
 +	unsigned int dest_cpu;
++	struct rq_flags rf;
 +	struct rq *rq;
 +
 +	/*
@@ -497,7 +495,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		return 0;
 +
 +	mutex_lock(&sched_down_mutex);
-+	rq = task_rq_lock(p, &flags);
++	rq = task_rq_lock(p, &rf);
 +
 +	cpumask = this_cpu_ptr(&sched_cpumasks);
 +	mask = &p->cpus_allowed;
@@ -506,7 +504,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
 +	if (!cpumask_weight(cpumask)) {
 +		/* It's only on this CPU? */
-+		task_rq_unlock(rq, p, &flags);
++		task_rq_unlock(rq, p, &rf);
 +		mutex_unlock(&sched_down_mutex);
 +		return 0;
 +	}
@@ -516,7 +514,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	arg.task = p;
 +	arg.dest_cpu = dest_cpu;
 +
-+	task_rq_unlock(rq, p, &flags);
++	task_rq_unlock(rq, p, &rf);
 +
 +	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 +	tlb_migrate_finish(p->mm);
diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 6d392bb..d84bf67 100644
--- a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 4 Mar 2014 12:28:32 -0500
 Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We hit another bug that was caused by switching cpu_chill() from
 msleep() to hrtimer_nanosleep().
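
With TASK_INTERRUPTIBLE, a task with a signal pending returns from the
sleep immediately, so a cpu_chill() retry loop can spin without ever
chilling. The resulting cpu_chill() is roughly (reassembled from the hunks
below; the one-millisecond value and the wrapper name come from this
series, so treat details as illustrative):

    void cpu_chill(void)
    {
            struct timespec tu = {
                    .tv_nsec = NSEC_PER_MSEC,
            };
            unsigned int freeze_flag = current->flags & PF_NOFREEZE;

            current->flags |= PF_NOFREEZE;
            __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
                                TASK_UNINTERRUPTIBLE);
            if (!freeze_flag)
                    current->flags &= ~PF_NOFREEZE;
    }
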
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1669,12 +1669,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1649,12 +1649,13 @@ void hrtimer_init_sleeper(struct hrtimer
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
  
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		hrtimer_start_expires(&t->timer, mode);
  
  		if (likely(t->task))
-@@ -1716,7 +1717,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1696,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(s
  				HRTIMER_MODE_ABS);
  	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
  
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	rmtp = restart->nanosleep.rmtp;
-@@ -1733,8 +1735,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1713,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(s
  	return ret;
  }
  
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct restart_block *restart;
  	struct hrtimer_sleeper t;
-@@ -1747,7 +1751,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1727,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *
  
  	hrtimer_init_on_stack(&t.timer, clockid, mode);
  	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	/* Absolute timers do not update the rmtp value and restart: */
-@@ -1774,6 +1778,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1754,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *
  	return ret;
  }
  
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
  		struct timespec __user *, rmtp)
  {
-@@ -1800,7 +1810,8 @@ void cpu_chill(void)
+@@ -1780,7 +1790,8 @@ void cpu_chill(void)
  	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
  
  	current->flags |= PF_NOFREEZE;
diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
index f649457..74398f9 100644
--- a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
+++ b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
@@ -1,7 +1,7 @@
 From:	Tiejun Chen <tiejun.chen at windriver.com>
 Subject: cpu_down: move migrate_enable() back
 Date:	Thu, 7 Nov 2013 10:06:07 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
 use migrate_enable()/migrate_disable() to replace that combination
@@ -35,7 +35,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen at windriver.com>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1125,6 +1125,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1117,6 +1117,7 @@ static int __ref _cpu_down(unsigned int
  		goto restore_cpus;
  	}
  
@@ -43,7 +43,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen at windriver.com>
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
  	if (ret) {
-@@ -1172,7 +1173,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1164,7 +1165,6 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
diff --git a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index 8dde00e..237bf72 100644
--- a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 9 Apr 2015 15:23:01 +0200
 Subject: cpufreq: drop K8's driver from beeing selected
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Ralf posted a picture of a backtrace from
 
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/cpufreq/Kconfig.x86
 +++ b/drivers/cpufreq/Kconfig.x86
-@@ -123,7 +123,7 @@ config X86_POWERNOW_K7_ACPI
+@@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
  
  config X86_POWERNOW_K8
  	tristate "AMD Opteron/Athlon64 PowerNow!"
diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
index 6f3158b..2233f40 100644
--- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
+++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 14 Dec 2011 01:03:49 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We can't deal with the cpumask allocations which happen in atomic
 context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -892,7 +892,7 @@ config IOMMU_HELPER
+@@ -888,7 +888,7 @@ config IOMMU_HELPER
  config MAXSMP
  	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
  	depends on X86_64 && SMP && DEBUG_KERNEL
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	  If unsure, say N.
 --- a/lib/Kconfig
 +++ b/lib/Kconfig
-@@ -397,6 +397,7 @@ config CHECK_SIGNATURE
+@@ -400,6 +400,7 @@ config CHECK_SIGNATURE
  
  config CPUMASK_OFFSTACK
  	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 84a0f8a..fb20500 100644
--- a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Feb 2014 17:24:04 +0100
 Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Don Estabrook reported
 | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch
index 780ed84..cd02d92 100644
--- a/debian/patches/features/all/rt/debugobjects-rt.patch
+++ b/debian/patches/features/all/rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
 Subject: debugobjects: Make RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Avoid filling the pool / allocating memory with irqs off.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/lib/debugobjects.c
 +++ b/lib/debugobjects.c
-@@ -309,7 +309,10 @@ static void
+@@ -308,7 +308,10 @@ static void
  	struct debug_obj *obj;
  	unsigned long flags;
  
diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch
index c9a963f..c329172 100644
--- a/debian/patches/features/all/rt/dm-make-rt-aware.patch
+++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: dm: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 14 Nov 2011 23:06:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the BUG_ON_NORT variant for the irqs_disabled() checks. RT has
 interrupts legitimately enabled here as we can't deadlock against the
@@ -11,12 +11,12 @@ Reported-by: Luis Claudio R. Goncalves <lclaudio at uudg.org>
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- drivers/md/dm.c |    2 +-
+ drivers/md/dm-rq.c |    2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
 
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -2187,7 +2187,7 @@ static void dm_request_fn(struct request
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -802,7 +802,7 @@ static void dm_old_request_fn(struct req
  		/* Establish tio->ti before queuing work (map_tio_request) */
  		tio->ti = ti;
  		queue_kthread_work(&md->kworker, &tio->work);
diff --git a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index aab89d4..9f426ba 100644
--- a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Thu, 31 Mar 2016 04:08:28 +0200
 Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
  for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 They're nondeterministic, and lead to ___might_sleep() splats in -rt.
 OTOH, they're a lot less wasteful than an rtmutex per page.
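
The replacement keeps the ZRAM_ACCESS bit for the !RT case and adds a real
spinlock per table entry for -RT; a sketch of the helpers introduced at the
bottom of zram_drv.h (condensed, names as in the hunks below):

    #ifdef CONFIG_PREEMPT_RT_BASE
    static inline void zram_lock_table(struct zram_table_entry *table)
    {
            spin_lock(&table->lock);        /* rtmutex-backed on -RT */
            __set_bit(ZRAM_ACCESS, &table->value);
    }

    static inline void zram_unlock_table(struct zram_table_entry *table)
    {
            __clear_bit(ZRAM_ACCESS, &table->value);
            spin_unlock(&table->lock);
    }
    #else
    static inline void zram_lock_table(struct zram_table_entry *table)
    {
            bit_spin_lock(ZRAM_ACCESS, &table->value);
    }

    static inline void zram_unlock_table(struct zram_table_entry *table)
    {
            bit_spin_unlock(ZRAM_ACCESS, &table->value);
    }
    #endif
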
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/block/zram/zram_drv.c
 +++ b/drivers/block/zram/zram_drv.c
-@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
+@@ -519,6 +519,8 @@ static struct zram_meta *zram_meta_alloc
  		goto out_error;
  	}
  
@@ -25,9 +25,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return meta;
  
  out_error:
-@@ -568,12 +570,12 @@ static int zram_decompress_page(struct z
+@@ -567,12 +569,12 @@ static int zram_decompress_page(struct z
  	unsigned long handle;
- 	size_t size;
+ 	unsigned int size;
  
 -	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 +	zram_lock_table(&meta->table[index]);
@@ -40,16 +40,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		clear_page(mem);
  		return 0;
  	}
-@@ -584,7 +586,7 @@ static int zram_decompress_page(struct z
- 	else
- 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
+@@ -587,7 +589,7 @@ static int zram_decompress_page(struct z
+ 		zcomp_stream_put(zram->comp);
+ 	}
  	zs_unmap_object(meta->mem_pool, handle);
 -	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 +	zram_unlock_table(&meta->table[index]);
  
  	/* Should NEVER happen. Return bio error if it does. */
  	if (unlikely(ret)) {
-@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *z
+@@ -607,14 +609,14 @@ static int zram_bvec_read(struct zram *z
  	struct zram_meta *meta = zram->meta;
  	page = bvec->bv_page;
  
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (is_partial_io(bvec))
  		/* Use  a temporary buffer to decompress the page */
-@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *
+@@ -691,10 +693,10 @@ static int zram_bvec_write(struct zram *
  		if (user_mem)
  			kunmap_atomic(user_mem);
  		/* Free memory associated with this sector now. */
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		atomic64_inc(&zram->stats.zero_pages);
  		ret = 0;
-@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *
+@@ -785,12 +787,12 @@ static int zram_bvec_write(struct zram *
  	 * Free memory associated with this sector
  	 * before overwriting unused sectors.
  	 */
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* Update stats */
  	atomic64_add(clen, &zram->stats.compr_data_size);
-@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram
+@@ -833,9 +835,9 @@ static void zram_bio_discard(struct zram
  	}
  
  	while (n >= PAGE_SIZE) {
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		atomic64_inc(&zram->stats.notify_free);
  		index++;
  		n -= PAGE_SIZE;
-@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct
+@@ -964,9 +966,9 @@ static void zram_slot_free_notify(struct
  	zram = bdev->bd_disk->private_data;
  	meta = zram->meta;
  
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/drivers/block/zram/zram_drv.h
 +++ b/drivers/block/zram/zram_drv.h
-@@ -72,6 +72,9 @@ enum zram_pageflags {
+@@ -73,6 +73,9 @@ enum zram_pageflags {
  struct zram_table_entry {
  	unsigned long handle;
  	unsigned long value;
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  };
  
  struct zram_stats {
-@@ -119,4 +122,42 @@ struct zram {
+@@ -120,4 +123,42 @@ struct zram {
  	 */
  	bool claim; /* Protected by bdev->bd_mutex */
  };
diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
index 6f8564a..fb28ad9 100644
--- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
+++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:24 -0500
 Subject: drivers/net: Use disable_irq_nosync() in 8139too
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use disable_irq_nosync() instead of disable_irq() as this might be
 called in atomic context with netpoll.
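
disable_irq() waits for a running handler to finish and may sleep;
disable_irq_nosync() only masks the interrupt line. A schematic netpoll
controller showing why the nosync variant is needed (hypothetical driver
names):

    static void my_poll_controller(struct net_device *dev)
    {
            const int irq = dev->irq;

            disable_irq_nosync(irq);        /* disable_irq() might sleep here */
            my_interrupt(irq, dev);         /* run the handler by hand */
            enable_irq(irq);
    }
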
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/net/ethernet/realtek/8139too.c
 +++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(stru
+@@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(stru
  	struct rtl8139_private *tp = netdev_priv(dev);
  	const int irq = tp->pci_dev->irq;
  
diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
index 94b29f2..6829c57 100644
--- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
+++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 3 Jul 2009 08:30:00 -0500
 Subject: drivers/net: vortex fix locking issues
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Argh, cut and paste wasn't enough...
 
diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
index 76f8a30..15b2842 100644
--- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
+++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:30 -0500
 Subject: drivers: random: Reduce preempt disabled region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 No need to keep preemption disabled across the whole function.
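
The hunks below simply drop the preempt_disable()/preempt_enable() pair
around the sampling; schematically (not the literal random.c code,
mix_sample() is a hypothetical sink):

    static void sample_entropy(void)
    {
            struct { unsigned long jiffies, cycles; } sample;

            /* preempt_disable();  -- removed: reading jiffies and the
             * cycle counter needs no CPU pinning */
            sample.jiffies = jiffies;
            sample.cycles = random_get_entropy();
            /* preempt_enable(); */

            mix_sample(&sample);
    }
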
 
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -796,8 +796,6 @@ static void add_timer_randomness(struct
+@@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct
  	} sample;
  	long delta, delta2, delta3;
  
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	sample.jiffies = jiffies;
  	sample.cycles = random_get_entropy();
  	sample.num = num;
-@@ -838,7 +836,6 @@ static void add_timer_randomness(struct
+@@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct
  		 */
  		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
  	}
diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
index aef5671..54f7194 100644
--- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/omap: Make the locking RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the
 optimisation we are looking for. Redo it to make it work on -RT and
diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
index 48d1934..ee7d65b 100644
--- a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/pl011: Make the locking work on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the optimisation
 we are looking for. Redo it to make it work on -RT and non-RT.
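
The locking scheme kept by the patch is the usual console-write pattern:
trylock when sysrq or an oops is in progress, so a CPU that crashed while
already holding the port lock can still get its output out. Simplified from
the hunks below:

    unsigned long flags;
    int locked = 1;

    if (uap->port.sysrq || oops_in_progress)
            locked = spin_trylock_irqsave(&uap->port.lock, flags);
    else
            spin_lock_irqsave(&uap->port.lock, flags);

    /* ... emit the console string ... */

    if (locked)
            spin_unlock_irqrestore(&uap->port.lock, flags);
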
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/tty/serial/amba-pl011.c
 +++ b/drivers/tty/serial/amba-pl011.c
-@@ -2166,13 +2166,19 @@ pl011_console_write(struct console *co,
+@@ -2167,13 +2167,19 @@ pl011_console_write(struct console *co,
  
  	clk_enable(uap->clk);
  
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 *	First save the CR then disable the interrupts
-@@ -2196,8 +2202,7 @@ pl011_console_write(struct console *co,
+@@ -2197,8 +2203,7 @@ pl011_console_write(struct console *co,
  		pl011_write(old_cr, uap, REG_CR);
  
  	if (locked)
diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index a94717c..9f10129 100644
--- a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 25 Apr 2013 18:12:52 +0200
 Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This tracepoint is responsible for:
 
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1314,7 +1314,9 @@ i915_gem_ringbuffer_submission(struct i9
+@@ -1302,7 +1302,9 @@ i915_gem_ringbuffer_submission(struct i9
  	if (ret)
  		return ret;
  
@@ -56,4 +56,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +#endif
  
  	i915_gem_execbuffer_move_to_active(vmas, params->request);
- 	i915_gem_execbuffer_retire_commands(params);
+ 
diff --git a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index de99cf7..1e6b57c 100644
--- a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -1,7 +1,7 @@
 Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 09:01:42 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 
 [    8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static bool
  format_is_yuv(uint32_t format)
-@@ -64,6 +65,8 @@ static int usecs_to_scanlines(const stru
+@@ -64,6 +65,8 @@ int intel_usecs_to_scanlines(const struc
  			    1000 * adjusted_mode->crtc_htotal);
  }
  
@@ -79,8 +79,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * intel_pipe_update_start() - start update of a set of display registers
   * @crtc: the crtc of which the registers are going to be updated
-@@ -96,7 +99,7 @@ void intel_pipe_update_start(struct inte
- 	min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+@@ -94,7 +97,7 @@ void intel_pipe_update_start(struct inte
+ 	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
  	max = vblank_start - 1;
  
 -	local_irq_disable();
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (min <= 0 || max <= 0)
  		return;
-@@ -126,11 +129,11 @@ void intel_pipe_update_start(struct inte
+@@ -124,11 +127,11 @@ void intel_pipe_update_start(struct inte
  			break;
  		}
  
@@ -102,9 +102,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	finish_wait(wq, &wait);
-@@ -164,7 +167,7 @@ void intel_pipe_update_end(struct intel_
- 
- 	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+@@ -180,7 +183,7 @@ void intel_pipe_update_end(struct intel_
+ 		crtc->base.state->event = NULL;
+ 	}
  
 -	local_irq_enable();
 +	local_unlock_irq(pipe_update_lock);
diff --git a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index e16ff2f..28529f2 100644
--- a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
 Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 DRM folks identified the spots, so use them.
 
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/i915_irq.c
 +++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -830,6 +830,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(stru
  	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Get optional system timestamp before query. */
  	if (stime)
-@@ -881,6 +882,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(stru
  		*etime = ktime_get();
  
  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/drivers/gpu/drm/radeon/radeon_display.c
 +++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1863,6 +1863,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1869,6 +1869,7 @@ int radeon_get_crtc_scanoutpos(struct dr
  	struct radeon_device *rdev = dev->dev_private;
  
  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Get optional system timestamp before query. */
  	if (stime)
-@@ -1955,6 +1956,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1961,6 +1962,7 @@ int radeon_get_crtc_scanoutpos(struct dr
  		*etime = ktime_get();
  
  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch b/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch
index bec2ca6..e12d456 100644
--- a/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch
+++ b/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Sun, 16 Aug 2015 14:27:50 +0200
 Subject: dump stack: don't disable preemption during trace
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 I see large latencies here during a stack dump on x86. The
 preempt_disable() and get_cpu() should forbid moving the task to another
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int graph = 0;
  	u32 *prev_esp;
  
-@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task
+@@ -84,7 +84,7 @@ void dump_trace(struct task_struct *task
  			break;
  		touch_nmi_watchdog();
  	}
@@ -46,19 +46,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
 -	const unsigned cpu = get_cpu();
 +	const unsigned cpu = get_cpu_light();
- 	struct thread_info *tinfo;
  	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
  	unsigned long dummy;
-@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task
+ 	unsigned used = 0;
+@@ -239,7 +239,7 @@ void dump_trace(struct task_struct *task
  	 * This handles the process stack:
  	 */
- 	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ 	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
 -	put_cpu();
 +	put_cpu_light();
  }
  EXPORT_SYMBOL(dump_trace);
  
-@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *t
+@@ -253,7 +253,7 @@ show_stack_log_lvl(struct task_struct *t
  	int cpu;
  	int i;
  
@@ -67,8 +67,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	cpu = smp_processor_id();
  
  	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *t
- 			pr_cont(" %016lx", *stack++);
+@@ -299,7 +299,7 @@ show_stack_log_lvl(struct task_struct *t
+ 		stack++;
  		touch_nmi_watchdog();
  	}
 -	preempt_enable();
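
For reference, the substitution this patch applies, as a minimal sketch
(get_cpu_light()/put_cpu_light() are the -RT primitives used throughout
this series: they pin the task via migrate_disable() rather than
disabling preemption, so a long stack walk stays preemptible):

    /* mainline: preemption is off for the whole stack walk */
    const unsigned cpu = get_cpu();       /* preempt_disable() + CPU id */
    /* ... walk the stack, touch_nmi_watchdog(), ... */
    put_cpu();                            /* preempt_enable() */

    /* -RT: only migration is forbidden; the task stays preemptible */
    const unsigned cpu = get_cpu_light(); /* migrate_disable() + CPU id */
    /* ... same walk ... */
    put_cpu_light();                      /* migrate_enable() */
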
diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
index bdf675b..2f6dbf1 100644
--- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: fs/epoll: Do not disable preemption on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 16:35:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 ep_call_nested() takes a sleeping lock so we can't disable preemption.
 The light version is enough since ep_call_nested() doesn't mind being
diff --git a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
index 5c710c9..16f7192 100644
--- a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
+++ b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 16 Feb 2015 18:49:10 +0100
 Subject: fs/aio: simple simple work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * signals when all in-flight requests are done
-@@ -253,6 +254,7 @@ static int __init aio_setup(void)
+@@ -258,6 +259,7 @@ static int __init aio_setup(void)
  		.mount		= aio_mount,
  		.kill_sb	= kill_anon_super,
  	};
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	aio_mnt = kern_mount(&aio_fs);
  	if (IS_ERR(aio_mnt))
  		panic("Failed to create aio fs mount.");
-@@ -568,9 +570,9 @@ static int kiocb_cancel(struct aio_kiocb
+@@ -578,9 +580,9 @@ static int kiocb_cancel(struct aio_kiocb
  	return cancel(&kiocb->common);
  }
  
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	pr_debug("freeing %p\n", ctx);
  
-@@ -589,8 +591,8 @@ static void free_ioctx_reqs(struct percp
+@@ -599,8 +601,8 @@ static void free_ioctx_reqs(struct percp
  	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
  		complete(&ctx->rq_wait->comp);
  
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -598,9 +600,9 @@ static void free_ioctx_reqs(struct percp
+@@ -608,9 +610,9 @@ static void free_ioctx_reqs(struct percp
   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
   * now it's safe to cancel any that need to be.
   */
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct aio_kiocb *req;
  
  	spin_lock_irq(&ctx->ctx_lock);
-@@ -619,6 +621,14 @@ static void free_ioctx_users(struct perc
+@@ -629,6 +631,14 @@ static void free_ioctx_users(struct perc
  	percpu_ref_put(&ctx->reqs);
  }
  
diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch
index d26cf92..f1f0341 100644
--- a/debian/patches/features/all/rt/fs-block-rt-support.patch
+++ b/debian/patches/features/all/rt/fs-block-rt-support.patch
@@ -1,7 +1,7 @@
 Subject: block: Turn off warning which is bogus on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jun 2011 17:05:09 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On -RT the context is always with IRQs enabled. Ignore this warning on -RT.
 
diff --git a/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch b/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
new file mode 100644
index 0000000..4c52db3
--- /dev/null
+++ b/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Sep 2016 11:55:23 +0200
+Subject: fs/dcache: include wait.h
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Since commit d9171b934526 ("parallel lookups machinery, part 4 (and
+last)") dcache.h uses wait.h but does not include it. This works as
+long as wait.h happens to be included earlier and fails otherwise.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/dcache.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -11,6 +11,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/lockref.h>
+ #include <linux/stringhash.h>
++#include <linux/wait.h>
+ 
+ struct path;
+ struct vfsmount;
diff --git a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
new file mode 100644
index 0000000..cd770f0
--- /dev/null
+++ b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Sep 2016 17:57:03 +0200
+Subject: [PATCH] fs/dcache: init in_lookup_hashtable
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+in_lookup_hashtable was introduced in commit 94bdd655caba ("parallel
+lookups machinery, part 3") and never explicitly initialized; since it
+sits in zero-initialized data it is all zeros anyway. But we need
+proper initialization for -RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ fs/dcache.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3601,6 +3601,11 @@ EXPORT_SYMBOL(d_genocide);
+ 
+ void __init vfs_caches_init_early(void)
+ {
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
++		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
++
+ 	dcache_init_early();
+ 	inode_init_early();
+ }
diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 5defe34..63ccd31 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
 Subject: fs: dcache: Use cpu_chill() in trylock loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -12,9 +12,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  fs/autofs4/autofs_i.h |    1 +
  fs/autofs4/expire.c   |    2 +-
- fs/dcache.c           |    5 +++--
+ fs/dcache.c           |   20 ++++++++++++++++----
  fs/namespace.c        |    3 ++-
- 4 files changed, 7 insertions(+), 4 deletions(-)
+ 4 files changed, 20 insertions(+), 6 deletions(-)
 
 --- a/fs/autofs4/autofs_i.h
 +++ b/fs/autofs4/autofs_i.h
@@ -47,16 +47,38 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/hash.h>
-@@ -578,7 +579,7 @@ static struct dentry *dentry_kill(struct
+@@ -750,6 +751,8 @@ static inline bool fast_dput(struct dent
+  */
+ void dput(struct dentry *dentry)
+ {
++	struct dentry *parent;
++
+ 	if (unlikely(!dentry))
+ 		return;
  
- failed:
- 	spin_unlock(&dentry->d_lock);
--	cpu_relax();
-+	cpu_chill();
- 	return dentry; /* try again with same dentry */
- }
+@@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
+ 	return;
  
-@@ -2316,7 +2317,7 @@ void d_delete(struct dentry * dentry)
+ kill_it:
+-	dentry = dentry_kill(dentry);
+-	if (dentry) {
+-		cond_resched();
++	parent = dentry_kill(dentry);
++	if (parent) {
++		int r;
++
++		if (parent == dentry) {
++			/* the task with the highest priority won't schedule */
++			r = cond_resched();
++			if (!r)
++				cpu_chill();
++		} else {
++			dentry = parent;
++		}
+ 		goto repeat;
+ 	}
+ }
+@@ -2321,7 +2333,7 @@ void d_delete(struct dentry * dentry)
  	if (dentry->d_lockref.count == 1) {
  		if (!spin_trylock(&inode->i_lock)) {
  			spin_unlock(&dentry->d_lock);
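
For context, the trylock/retry shape being converted here, sketched from
the d_delete() case above (cpu_chill() on -RT is a short hrtimer sleep
rather than a busy-wait, so a preempted lock holder actually gets CPU
time to release the lock):

    again:
        spin_lock(&dentry->d_lock);
        if (!spin_trylock(&inode->i_lock)) {
            spin_unlock(&dentry->d_lock);
            /* was cpu_relax(): on -RT a high-priority spinner could
             * starve the preempted i_lock holder forever */
            cpu_chill();
            goto again;
        }
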
diff --git a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
new file mode 100644
index 0000000..10179af
--- /dev/null
+++ b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -0,0 +1,215 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Sep 2016 14:35:49 +0200
+Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
+which disables preemption. As a workaround, convert it to swait.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ fs/cifs/readdir.c       |    2 +-
+ fs/dcache.c             |   27 +++++++++++++++------------
+ fs/fuse/dir.c           |    2 +-
+ fs/namei.c              |    4 ++--
+ fs/nfs/dir.c            |    4 ++--
+ fs/nfs/unlink.c         |    4 ++--
+ fs/proc/base.c          |    2 +-
+ fs/proc/proc_sysctl.c   |    2 +-
+ include/linux/dcache.h  |    4 ++--
+ include/linux/nfs_xdr.h |    2 +-
+ kernel/sched/swait.c    |    1 +
+ 11 files changed, 29 insertions(+), 25 deletions(-)
+
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent,
+ 	struct inode *inode;
+ 	struct super_block *sb = parent->d_sb;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 
+ 	cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+ 
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2393,21 +2393,24 @@ static inline void end_dir_add(struct in
+ 
+ static void d_wait_lookup(struct dentry *dentry)
+ {
+-	if (d_in_lookup(dentry)) {
+-		DECLARE_WAITQUEUE(wait, current);
+-		add_wait_queue(dentry->d_wait, &wait);
+-		do {
+-			set_current_state(TASK_UNINTERRUPTIBLE);
+-			spin_unlock(&dentry->d_lock);
+-			schedule();
+-			spin_lock(&dentry->d_lock);
+-		} while (d_in_lookup(dentry));
+-	}
++	struct swait_queue __wait;
++
++	if (!d_in_lookup(dentry))
++		return;
++
++	INIT_LIST_HEAD(&__wait.task_list);
++	do {
++		prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
++		spin_unlock(&dentry->d_lock);
++		schedule();
++		spin_lock(&dentry->d_lock);
++	} while (d_in_lookup(dentry));
++	finish_swait(dentry->d_wait, &__wait);
+ }
+ 
+ struct dentry *d_alloc_parallel(struct dentry *parent,
+ 				const struct qstr *name,
+-				wait_queue_head_t *wq)
++				struct swait_queue_head *wq)
+ {
+ 	unsigned int hash = name->hash;
+ 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+@@ -2516,7 +2519,7 @@ void __d_lookup_done(struct dentry *dent
+ 	hlist_bl_lock(b);
+ 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+ 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+-	wake_up_all(dentry->d_wait);
++	swake_up_all(dentry->d_wait);
+ 	dentry->d_wait = NULL;
+ 	hlist_bl_unlock(b);
+ 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1174,7 +1174,7 @@ static int fuse_direntplus_link(struct f
+ 	struct inode *dir = d_inode(parent);
+ 	struct fuse_conn *fc;
+ 	struct inode *inode;
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 
+ 	if (!o->nodeid) {
+ 		/*
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1629,7 +1629,7 @@ static struct dentry *lookup_slow(const
+ {
+ 	struct dentry *dentry = ERR_PTR(-ENOENT), *old;
+ 	struct inode *inode = dir->d_inode;
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 
+ 	inode_lock_shared(inode);
+ 	/* Don't go there if it's already dead */
+@@ -3086,7 +3086,7 @@ static int lookup_open(struct nameidata
+ 	struct dentry *dentry;
+ 	int error, create_error = 0;
+ 	umode_t mode = op->mode;
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 
+ 	if (unlikely(IS_DEADDIR(dir_inode)))
+ 		return -ENOENT;
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -485,7 +485,7 @@ static
+ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ {
+ 	struct qstr filename = QSTR_INIT(entry->name, entry->len);
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 	struct dentry *dentry;
+ 	struct dentry *alias;
+ 	struct inode *dir = d_inode(parent);
+@@ -1484,7 +1484,7 @@ int nfs_atomic_open(struct inode *dir, s
+ 		    struct file *file, unsigned open_flags,
+ 		    umode_t mode, int *opened)
+ {
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 	struct nfs_open_context *ctx;
+ 	struct dentry *res;
+ 	struct iattr attr = { .ia_valid = ATTR_OPEN };
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -12,7 +12,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/sched.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/namei.h>
+ #include <linux/fsnotify.h>
+ 
+@@ -205,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry,
+ 		goto out_free_name;
+ 	}
+ 	data->res.dir_attr = &data->dir_attr;
+-	init_waitqueue_head(&data->wq);
++	init_swait_queue_head(&data->wq);
+ 
+ 	status = -EBUSY;
+ 	spin_lock(&dentry->d_lock);
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1819,7 +1819,7 @@ bool proc_fill_cache(struct file *file,
+ 
+ 	child = d_hash_and_lookup(dir, &qname);
+ 	if (!child) {
+-		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++		DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 		child = d_alloc_parallel(dir, &qname, &wq);
+ 		if (IS_ERR(child))
+ 			goto end_instantiate;
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -627,7 +627,7 @@ static bool proc_sys_fill_cache(struct f
+ 
+ 	child = d_lookup(dir, &qname);
+ 	if (!child) {
+-		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++		DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ 		child = d_alloc_parallel(dir, &qname, &wq);
+ 		if (IS_ERR(child))
+ 			return false;
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -101,7 +101,7 @@ struct dentry {
+ 
+ 	union {
+ 		struct list_head d_lru;		/* LRU list */
+-		wait_queue_head_t *d_wait;	/* in-lookup ones only */
++		struct swait_queue_head *d_wait;	/* in-lookup ones only */
+ 	};
+ 	struct list_head d_child;	/* child of parent list */
+ 	struct list_head d_subdirs;	/* our children */
+@@ -231,7 +231,7 @@ extern void d_set_d_op(struct dentry *de
+ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+ extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+-					wait_queue_head_t *);
++					struct swait_queue_head *);
+ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+ extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+ extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1484,7 +1484,7 @@ struct nfs_unlinkdata {
+ 	struct nfs_removeargs args;
+ 	struct nfs_removeres res;
+ 	struct dentry *dentry;
+-	wait_queue_head_t wq;
++	struct swait_queue_head wq;
+ 	struct rpc_cred	*cred;
+ 	struct nfs_fattr dir_attr;
+ 	long timeout;
+--- a/kernel/sched/swait.c
++++ b/kernel/sched/swait.c
+@@ -74,6 +74,7 @@ void swake_up_all(struct swait_queue_hea
+ 	if (!swait_active(q))
+ 		return;
+ 
++	WARN_ON(irqs_disabled());
+ 	raw_spin_lock_irq(&q->lock);
+ 	list_splice_init(&q->task_list, &tmp);
+ 	while (!list_empty(&tmp)) {
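
If the swait API is unfamiliar, a minimal waiter/waker sketch built from
the calls used above (simple wait queues use a raw spinlock internally
and swake_up_all() splices the waiter list, dropping its lock between
individual wakeups — the properties relied on here; "condition" is a
placeholder for the real wait condition):

    DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
    struct swait_queue wait;

    /* waiter */
    INIT_LIST_HEAD(&wait.task_list);
    do {
        prepare_to_swait(&wq, &wait, TASK_UNINTERRUPTIBLE);
        if (condition)
            break;
        schedule();
    } while (1);
    finish_swait(&wq, &wait);

    /* waker, from another task */
    condition = true;
    swake_up_all(&wq);
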
diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
index 75c6fc8..d8eade8 100644
--- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
+++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 10:11:25 +0100
 Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 bit_spin_locks break under RT.
 
diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
index b6f4e53..f95711f 100644
--- a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
+++ b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 17 Feb 2014 17:30:03 +0100
 Subject: fs: jbd2: pull your plug when waiting for space
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Two cp processes running in parallel managed to stall the ext4 fs. It
 seems that the journal code is either waiting for locks or sleeping,
 waiting for
diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
index dd548f1..c39feaf 100644
--- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
+++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 19 Jul 2009 08:44:27 -0500
 Subject: fs: namespace preemption fix
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On RT we cannot loop with preemption disabled here as
 mnt_make_readonly() might have been preempted. We can safely enable
diff --git a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
new file mode 100644
index 0000000..d9bdb3a
--- /dev/null
+++ b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -0,0 +1,139 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Thu, 15 Sep 2016 10:51:27 +0200
+Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+The RW semaphore had a reader side which used the _non_owner version
+because it most likely took the reader lock in one thread and released
+it in another, which would cause lockdep to complain if the "regular"
+version were used.
+On -RT we need the owner because the rw lock is turned into a rtmutex.
+Semaphores, on the other hand, are plain and simple and should work as
+expected. We can't have multiple readers, but on -RT we don't allow
+multiple readers anyway, so that is not a loss.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ fs/nfs/dir.c           |    8 ++++++++
+ fs/nfs/inode.c         |    4 ++++
+ fs/nfs/unlink.c        |   31 +++++++++++++++++++++++++++----
+ include/linux/nfs_fs.h |    4 ++++
+ 4 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1799,7 +1799,11 @@ int nfs_rmdir(struct inode *dir, struct
+ 
+ 	trace_nfs_rmdir_enter(dir, dentry);
+ 	if (d_really_is_positive(dentry)) {
++#ifdef CONFIG_PREEMPT_RT_BASE
++		down(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ 		down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ 		/* Ensure the VFS deletes this inode */
+ 		switch (error) {
+@@ -1809,7 +1813,11 @@ int nfs_rmdir(struct inode *dir, struct
+ 		case -ENOENT:
+ 			nfs_dentry_handle_enoent(dentry);
+ 		}
++#ifdef CONFIG_PREEMPT_RT_BASE
++		up(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ 		up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ 	} else
+ 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ 	trace_nfs_rmdir_exit(dir, dentry, error);
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
+ 	nfsi->nrequests = 0;
+ 	nfsi->commit_info.ncommit = 0;
+ 	atomic_set(&nfsi->commit_info.rpcs_out, 0);
++#ifdef CONFIG_PREEMPT_RT_BASE
++	sema_init(&nfsi->rmdir_sem, 1);
++#else
+ 	init_rwsem(&nfsi->rmdir_sem);
++#endif
+ 	nfs4_init_once(nfsi);
+ }
+ 
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct
+ 		rpc_restart_call_prepare(task);
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void nfs_down_anon(struct semaphore *sema)
++{
++	down(sema);
++}
++
++static void nfs_up_anon(struct semaphore *sema)
++{
++	up(sema);
++}
++
++#else
++static void nfs_down_anon(struct rw_semaphore *rwsem)
++{
++	down_read_non_owner(rwsem);
++}
++
++static void nfs_up_anon(struct rw_semaphore *rwsem)
++{
++	up_read_non_owner(rwsem);
++}
++#endif
++
+ /**
+  * nfs_async_unlink_release - Release the sillydelete data.
+  * @task: rpc_task of the sillydelete
+@@ -64,7 +87,7 @@ static void nfs_async_unlink_release(voi
+ 	struct dentry *dentry = data->dentry;
+ 	struct super_block *sb = dentry->d_sb;
+ 
+-	up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
++	nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
+ 	d_lookup_done(dentry);
+ 	nfs_free_unlinkdata(data);
+ 	dput(dentry);
+@@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry
+ 	struct inode *dir = d_inode(dentry->d_parent);
+ 	struct dentry *alias;
+ 
+-	down_read_non_owner(&NFS_I(dir)->rmdir_sem);
++	nfs_down_anon(&NFS_I(dir)->rmdir_sem);
+ 	alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
+ 	if (IS_ERR(alias)) {
+-		up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++		nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ 		return 0;
+ 	}
+ 	if (!d_in_lookup(alias)) {
+@@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry
+ 			ret = 0;
+ 		spin_unlock(&alias->d_lock);
+ 		dput(alias);
+-		up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++		nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ 		/*
+ 		 * If we'd displaced old cached devname, free it.  At that
+ 		 * point dentry is definitely not a root, so we won't need
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -165,7 +165,11 @@ struct nfs_inode {
+ 
+ 	/* Readers: in-flight sillydelete RPC calls */
+ 	/* Writers: rmdir */
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct semaphore	rmdir_sem;
++#else
+ 	struct rw_semaphore	rmdir_sem;
++#endif
+ 
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ 	struct nfs4_cached_acl	*nfs4_acl;
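
The asymmetry that forced the _non_owner variant, sketched (the reader
side is taken when the sillydelete RPC is set up and released from the
RPC release callback in rpciod — a different task, so an rtmutex-backed
rwsem would have no meaningful owner to priority-boost):

    /* task A, unlink path: take the reader side, then fire the RPC */
    nfs_down_anon(&NFS_I(dir)->rmdir_sem);
    /* ... rpc_run_task(...), task A returns ... */

    /* task B, rpciod release callback: drop it */
    nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
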
diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
index b237357..7b5a92a 100644
--- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Fri, 3 Jul 2009 08:44:12 -0500
 Subject: fs: ntfs: disable interrupt only on !RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
 > * Nick Piggin <nickpiggin at yahoo.com.au> wrote:
diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
index 9c095c3..ee1b315 100644
--- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 09:18:52 +0100
 Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Wrap the bit_spin_lock calls into a separate inline and add the RT
 replacements with a real spinlock.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/buffer.c
 +++ b/fs/buffer.c
-@@ -300,8 +300,7 @@ static void end_buffer_async_read(struct
+@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct
  	 * decide that the page is now completely done.
  	 */
  	first = page_buffers(page);
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	clear_buffer_async_read(bh);
  	unlock_buffer(bh);
  	tmp = bh;
-@@ -314,8 +313,7 @@ static void end_buffer_async_read(struct
+@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct
  		}
  		tmp = tmp->b_this_page;
  	} while (tmp != bh);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * If none of the buffers had errors and they are all
-@@ -327,9 +325,7 @@ static void end_buffer_async_read(struct
+@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct
  	return;
  
  still_busy:
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -357,8 +353,7 @@ void end_buffer_async_write(struct buffe
+@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffe
  	}
  
  	first = page_buffers(page);
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	clear_buffer_async_write(bh);
  	unlock_buffer(bh);
-@@ -370,15 +365,12 @@ void end_buffer_async_write(struct buffe
+@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffe
  		}
  		tmp = tmp->b_this_page;
  	}
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(end_buffer_async_write);
  
-@@ -3314,6 +3306,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3384,6 +3376,7 @@ struct buffer_head *alloc_buffer_head(gf
  	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
  	if (ret) {
  		INIT_LIST_HEAD(&ret->b_assoc_buffers);
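
The wrapper inlines themselves lie outside these hunks; their shape is
roughly the following sketch (the real definitions in this series live
in include/linux/buffer_head.h, with a b_uptodate_lock spinlock added to
struct buffer_head for the -RT case):

    static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
    {
        unsigned long flags;

    #ifndef CONFIG_PREEMPT_RT_BASE
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
    #else
        spin_lock_irqsave(&bh->b_uptodate_lock, flags);
    #endif
        return flags;
    }

    static inline void bh_uptodate_unlock_irqrestore(struct buffer_head *bh,
                                                     unsigned long flags)
    {
    #ifndef CONFIG_PREEMPT_RT_BASE
        bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
        local_irq_restore(flags);
    #else
        spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
    #endif
    }
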
diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
index 8225833..b14c2d0 100644
--- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:56:42 +0200
 Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TRACE_EVENT_TYPE_MAX						\
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1669,6 +1669,8 @@ tracing_generic_entry_update(struct trac
+@@ -1909,6 +1909,8 @@ tracing_generic_entry_update(struct trac
  		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
  		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
  
-@@ -2566,9 +2568,10 @@ static void print_lat_help_header(struct
+@@ -2897,9 +2899,10 @@ static void print_lat_help_header(struct
  		    "#                | / _----=> need-resched    \n"
  		    "#                || / _---=> hardirq/softirq \n"
  		    "#                ||| / _--=> preempt-depth   \n"
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
 --- a/kernel/trace/trace_events.c
 +++ b/kernel/trace/trace_events.c
-@@ -188,6 +188,8 @@ static int trace_define_common_fields(vo
+@@ -187,6 +187,8 @@ static int trace_define_common_fields(vo
  	__common_field(unsigned char, flags);
  	__common_field(unsigned char, preempt_count);
  	__common_field(int, pid);
diff --git a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
new file mode 100644
index 0000000..c17a065
--- /dev/null
+++ b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -0,0 +1,43 @@
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Fri, 1 Mar 2013 11:17:42 +0100
+Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+In exit_pi_state_list() we have the following locking construct:
+
+   spin_lock(&hb->lock);
+   raw_spin_lock_irq(&curr->pi_lock);
+
+   ...
+   spin_unlock(&hb->lock);
+
+In !RT this works, but on RT the migrate_enable() function, which is
+called from spin_unlock(), sees atomic context due to the held pi_lock
+and just decrements the migrate_disable_atomic counter of the
+task. Now the next call to migrate_disable() sees the counter being
+negative and issues a warning. That check should be in
+migrate_enable() already.
+
+Fix this by dropping pi_lock before unlocking hb->lock and reacquiring
+pi_lock afterwards. This is safe as the loop code reevaluates head
+again under the pi_lock.
+
+Reported-by: Yong Zhang <yong.zhang at windriver.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/futex.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -895,7 +895,9 @@ void exit_pi_state_list(struct task_stru
+ 		 * task still owns the PI-state:
+ 		 */
+ 		if (head->next != next) {
++			raw_spin_unlock_irq(&curr->pi_lock);
+ 			spin_unlock(&hb->lock);
++			raw_spin_lock_irq(&curr->pi_lock);
+ 			continue;
+ 		}
+ 
diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
index 113441d..116d97b 100644
--- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
+++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Requeue with timeout causes a bug with PREEMPT_RT_FULL.
 
diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
index b93b787..7ff1846 100644
--- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:57 -0500
 Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Creates long latencies for no value
 
diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index f157b79..c0b9537 100644
--- a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -1,122 +1,72 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 21 Aug 2013 17:48:46 +0200
 Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Joe Korty reported that __irq_set_affinity_locked() schedules a
 workqueue while holding a raw lock, which results in a might_sleep()
 warning.
-This patch moves the invokation into a process context so that we only
-wakeup() a process while holding the lock.
+This patch uses swork_queue() instead.
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- include/linux/interrupt.h |    2 +
- kernel/irq/manage.c       |   79 ++++++++++++++++++++++++++++++++++++++++++++--
- 2 files changed, 78 insertions(+), 3 deletions(-)
+ drivers/scsi/qla2xxx/qla_isr.c |    4 +++
+ include/linux/interrupt.h      |    5 ++++
+ kernel/irq/manage.c            |   43 ++++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 49 insertions(+), 3 deletions(-)
 
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *
+ 		* kref_put().
+ 		*/
+ 		kref_get(&qentry->irq_notify.kref);
++#ifdef CONFIG_PREEMPT_RT_BASE
++		swork_queue(&qentry->irq_notify.swork);
++#else
+ 		schedule_work(&qentry->irq_notify.work);
++#endif
+ 	}
+ 
+ 	/*
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -217,6 +217,7 @@ extern void resume_device_irqs(void);
-  * @irq:		Interrupt to which notification applies
-  * @kref:		Reference count, for internal use
-  * @work:		Work item, for internal use
-+ * @list:		List item for deferred callbacks
-  * @notify:		Function to be called on change.  This will be
-  *			called in process context.
-  * @release:		Function to be called on release.  This will be
-@@ -228,6 +229,7 @@ struct irq_affinity_notify {
+@@ -14,6 +14,7 @@
+ #include <linux/hrtimer.h>
+ #include <linux/kref.h>
+ #include <linux/workqueue.h>
++#include <linux/swork.h>
+ 
+ #include <linux/atomic.h>
+ #include <asm/ptrace.h>
+@@ -229,7 +230,11 @@ extern void resume_device_irqs(void);
+ struct irq_affinity_notify {
  	unsigned int irq;
  	struct kref kref;
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct swork_event swork;
++#else
  	struct work_struct work;
-+	struct list_head list;
++#endif
  	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
  	void (*release)(struct kref *ref);
  };
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -181,6 +181,62 @@ static inline void
- irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
- #endif
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-+static struct task_struct *set_affinity_helper;
-+static LIST_HEAD(affinity_list);
-+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-+
-+static int set_affinity_thread(void *unused)
-+{
-+	while (1) {
-+		struct irq_affinity_notify *notify;
-+		int empty;
-+
-+		set_current_state(TASK_INTERRUPTIBLE);
-+
-+		raw_spin_lock_irq(&affinity_list_lock);
-+		empty = list_empty(&affinity_list);
-+		raw_spin_unlock_irq(&affinity_list_lock);
-+
-+		if (empty)
-+			schedule();
-+		if (kthread_should_stop())
-+			break;
-+		set_current_state(TASK_RUNNING);
-+try_next:
-+		notify = NULL;
-+
-+		raw_spin_lock_irq(&affinity_list_lock);
-+		if (!list_empty(&affinity_list)) {
-+			notify = list_first_entry(&affinity_list,
-+					struct irq_affinity_notify, list);
-+			list_del_init(&notify->list);
-+		}
-+		raw_spin_unlock_irq(&affinity_list_lock);
-+
-+		if (!notify)
-+			continue;
-+		_irq_affinity_notify(notify);
-+		goto try_next;
-+	}
-+	return 0;
-+}
-+
-+static void init_helper_thread(void)
-+{
-+	if (set_affinity_helper)
-+		return;
-+	set_affinity_helper = kthread_run(set_affinity_thread, NULL,
-+			"affinity-cb");
-+	WARN_ON(IS_ERR(set_affinity_helper));
-+}
-+#else
-+
-+static inline void init_helper_thread(void) { }
-+
-+#endif
-+
- int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
- 			bool force)
- {
-@@ -220,7 +276,17 @@ int irq_set_affinity_locked(struct irq_d
+@@ -235,7 +235,12 @@ int irq_set_affinity_locked(struct irq_d
  
  	if (desc->affinity_notify) {
  		kref_get(&desc->affinity_notify->kref);
 +
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+		raw_spin_lock(&affinity_list_lock);
-+		if (list_empty(&desc->affinity_notify->list))
-+			list_add_tail(&affinity_list,
-+					&desc->affinity_notify->list);
-+		raw_spin_unlock(&affinity_list_lock);
-+		wake_up_process(set_affinity_helper);
++#ifdef CONFIG_PREEMPT_RT_BASE
++		swork_queue(&desc->affinity_notify->swork);
 +#else
  		schedule_work(&desc->affinity_notify->work);
 +#endif
  	}
  	irqd_set(data, IRQD_AFFINITY_SET);
  
-@@ -258,10 +324,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -273,10 +278,8 @@ int irq_set_affinity_hint(unsigned int i
  }
  EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
  
@@ -128,26 +78,52 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct irq_desc *desc = irq_to_desc(notify->irq);
  	cpumask_var_t cpumask;
  	unsigned long flags;
-@@ -283,6 +347,13 @@ static void irq_affinity_notify(struct w
+@@ -298,6 +301,35 @@ static void irq_affinity_notify(struct w
  	kref_put(&notify->kref, notify->release);
  }
  
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void init_helper_thread(void)
++{
++	static int init_sworker_once;
++
++	if (init_sworker_once)
++		return;
++	if (WARN_ON(swork_get()))
++		return;
++	init_sworker_once = 1;
++}
++
++static void irq_affinity_notify(struct swork_event *swork)
++{
++	struct irq_affinity_notify *notify =
++		container_of(swork, struct irq_affinity_notify, swork);
++	_irq_affinity_notify(notify);
++}
++
++#else
++
 +static void irq_affinity_notify(struct work_struct *work)
 +{
 +	struct irq_affinity_notify *notify =
 +		container_of(work, struct irq_affinity_notify, work);
 +	_irq_affinity_notify(notify);
 +}
++#endif
 +
  /**
   *	irq_set_affinity_notifier - control notification of IRQ affinity changes
   *	@irq:		Interrupt for which to enable/disable notification
-@@ -312,6 +383,8 @@ irq_set_affinity_notifier(unsigned int i
+@@ -326,7 +358,12 @@ irq_set_affinity_notifier(unsigned int i
+ 	if (notify) {
  		notify->irq = irq;
  		kref_init(&notify->kref);
- 		INIT_WORK(&notify->work, irq_affinity_notify);
-+		INIT_LIST_HEAD(&notify->list);
++#ifdef CONFIG_PREEMPT_RT_BASE
++		INIT_SWORK(&notify->swork, irq_affinity_notify);
 +		init_helper_thread();
++#else
+ 		INIT_WORK(&notify->work, irq_affinity_notify);
++#endif
  	}
  
  	raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch
index 54572fb..0ada9c7 100644
--- a/debian/patches/features/all/rt/genirq-force-threading.patch
+++ b/debian/patches/features/all/rt/genirq-force-threading.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Force interrupt thread on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Force threaded irqs and optimize the code (force_irqthreads)
 accordingly.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -390,9 +390,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -398,9 +398,13 @@ extern int irq_set_irqchip_state(unsigne
  				 bool state);
  
  #ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
index 3a34fa6..c9a5b23 100644
--- a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:00 -0600
 Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On -rt kernels, the use of migrate_disable()/migrate_enable() is
 sufficient to guarantee a task isn't moved to another CPU.  Update the
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -2084,7 +2084,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2111,7 +2111,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
   *	This call sets the internal irqchip state of an interrupt,
   *	depending on the value of @which.
   *
diff --git a/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch b/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
new file mode 100644
index 0000000..1b4c278
--- /dev/null
+++ b/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: gpu: don't check for the lock owner.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_gem_shrinker.c |    2 +-
+ drivers/gpu/drm/msm/msm_gem_shrinker.c   |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mu
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
++#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mu
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index fb2d632..53e3538 100644
--- a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 24 Mar 2015 08:14:49 +0100
 Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 do_set_cpus_allowed() is not safe vs ->sched_class change.
 
diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
index 6f7f4b1..5aa610c 100644
--- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
+++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Lightweight get online cpus
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 get_online_cpus() is a heavyweight function which involves a global
 mutex. migrate_disable() wants a simpler construct which prevents only
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -221,9 +221,6 @@ static inline void cpu_notifier_register
+@@ -192,9 +192,6 @@ static inline void cpu_notifier_register
  #endif /* CONFIG_SMP */
  extern struct bus_type cpu_subsys;
  
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_HOTPLUG_CPU
  /* Stop CPUs going up and down. */
  
-@@ -233,6 +230,8 @@ extern void get_online_cpus(void);
+@@ -204,6 +201,8 @@ extern void get_online_cpus(void);
  extern void put_online_cpus(void);
  extern void cpu_hotplug_disable(void);
  extern void cpu_hotplug_enable(void);
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
  #define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
  #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
-@@ -250,6 +249,8 @@ static inline void cpu_hotplug_done(void
+@@ -221,6 +220,8 @@ static inline void cpu_hotplug_done(void
  #define put_online_cpus()	do { } while (0)
  #define cpu_hotplug_disable()	do { } while (0)
  #define cpu_hotplug_enable()	do { } while (0)
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  void get_online_cpus(void)
  {
-@@ -807,6 +901,8 @@ static int __ref _cpu_down(unsigned int
+@@ -799,6 +893,8 @@ static int __ref _cpu_down(unsigned int
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int prev_state, ret = 0;
  	bool hasdied = false;
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -814,7 +910,27 @@ static int __ref _cpu_down(unsigned int
+@@ -806,7 +902,27 @@ static int __ref _cpu_down(unsigned int
  	if (!cpu_present(cpu))
  		return -EINVAL;
  
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	cpuhp_tasks_frozen = tasks_frozen;
  
-@@ -853,6 +969,8 @@ static int __ref _cpu_down(unsigned int
+@@ -845,6 +961,8 @@ static int __ref _cpu_down(unsigned int
  
  	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
  out:
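
The lighter construct added here, in usage-sketch form (a per-CPU
refcount that only an unplug of the current CPU has to wait for, rather
than the global hotplug mutex; in this series migrate_disable() and
migrate_enable() end up being the callers):

    pin_current_cpu();      /* cheap; blocks only a concurrent unplug
                             * of *this* CPU, task stays preemptible */
    /* ... per-CPU work that must not race with hot-unplug ... */
    unpin_current_cpu();
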
diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 9486a88..e2f31d4 100644
--- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: sync_unplug: No "\n" in task name
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Sun, 16 Oct 2011 18:56:43 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Otherwise the output will look a little odd.
 
diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
index cad9f6b..9719bf9 100644
--- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
+++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Use migrate disable on unplug
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 19:35:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Migration needs to be disabled across the unplug handling to make
 sure that the unplug thread is off the unplugged cpu.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -918,14 +918,13 @@ static int __ref _cpu_down(unsigned int
+@@ -910,14 +910,13 @@ static int __ref _cpu_down(unsigned int
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
-@@ -974,6 +973,7 @@ static int __ref _cpu_down(unsigned int
+@@ -966,6 +965,7 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index 175fdf9..c461514 100644
--- a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Date: Mon, 16 Sep 2013 14:09:19 -0700
 Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When running the ltp leapsec_timer test, the following call trace is caught:
 
@@ -43,72 +43,46 @@ Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
 from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
 makes a similar change.
 
-add a helper thread which does the call to schedule_work and wake up that
-thread instead of calling schedule_work directly.
-
-
 Signed-off-by: Yang Shi <yang.shi at windriver.com>
+[bigeasy: use swork_queue() instead a helper thread]
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- kernel/time/hrtimer.c |   40 ++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 40 insertions(+)
+ kernel/time/hrtimer.c |   24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
 
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -48,6 +48,7 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
- #include <linux/timer.h>
-+#include <linux/kthread.h>
- #include <linux/freezer.h>
- 
- #include <asm/uaccess.h>
-@@ -707,6 +708,44 @@ static void clock_was_set_work(struct wo
- 
- static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+@@ -696,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
+ 	retrigger_next_event(NULL);
+ }
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT can not call schedule_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *clock_set_delay_thread;
-+static bool do_clock_set_delay;
 +
-+static int run_clock_set_delay(void *ignore)
++static struct swork_event clock_set_delay_work;
++
++static void run_clock_set_delay(struct swork_event *event)
 +{
-+	while (!kthread_should_stop()) {
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		if (do_clock_set_delay) {
-+			do_clock_set_delay = false;
-+			schedule_work(&hrtimer_work);
-+		}
-+		schedule();
-+	}
-+	__set_current_state(TASK_RUNNING);
-+	return 0;
++	clock_was_set();
 +}
 +
 +void clock_was_set_delayed(void)
 +{
-+	do_clock_set_delay = true;
-+	/* Make visible before waking up process */
-+	smp_wmb();
-+	wake_up_process(clock_set_delay_thread);
++	swork_queue(&clock_set_delay_work);
 +}
 +
 +static __init int create_clock_set_delay_thread(void)
 +{
-+	clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
-+	BUG_ON(!clock_set_delay_thread);
++	WARN_ON(swork_get());
++	INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
 +	return 0;
 +}
 +early_initcall(create_clock_set_delay_thread);
 +#else /* PREEMPT_RT_FULL */
- /*
-  * Called from timekeeping and resume code to reprogramm the hrtimer
-  * interrupt device on all cpus.
-@@ -715,6 +754,7 @@ void clock_was_set_delayed(void)
++
+ static void clock_was_set_work(struct work_struct *work)
+ {
+ 	clock_was_set();
+@@ -711,6 +734,7 @@ void clock_was_set_delayed(void)
  {
  	schedule_work(&hrtimer_work);
  }
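
The hunk above swaps the open-coded helper kthread for the simple-work (swork) API: the side that must not sleep only records an event and wakes a thread, which then runs clock_was_set() in a schedulable context. A minimal userspace sketch of that hand-off pattern, assuming POSIX threads (the kernel's swork_queue()/INIT_SWORK machinery differs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending;

/* Helper thread: runs the work in a context that is allowed to sleep. */
static void *helper(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&cond, &lock);
	pending = false;
	pthread_mutex_unlock(&lock);
	printf("deferred clock_was_set()-style work runs here\n");
	return NULL;
}

/* "Interrupt" side: only flags the work and wakes the helper. (A real
 * hard-irq path could not take a mutex; the demo uses one purely to keep
 * the condition-variable usage correct.) */
static void queue_work_event(void)
{
	pthread_mutex_lock(&lock);
	pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, helper, NULL);
	queue_work_event();
	pthread_join(t, NULL);
	return 0;
}

The point of the rework visible in the diff is that the queueing side shrinks to a single swork_queue() call, so no dedicated kthread has to be managed per user.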
diff --git a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
index f0e9194..155611d 100644
--- a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
+++ b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 23 Dec 2015 20:57:41 +0100
 Subject: hrtimer: enfore 64byte alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
 a list_head expired to struct hrtimer_clock_base and with it we run into
diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index e08c604..8ab3b1c 100644
--- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:31 -0500
 Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 In preempt-rt we can not call the callbacks which take sleeping locks
 from the timer interrupt context.
@@ -16,10 +16,10 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  include/linux/hrtimer.h  |    7 ++
  kernel/sched/core.c      |    1 
  kernel/sched/rt.c        |    1 
- kernel/time/hrtimer.c    |  137 +++++++++++++++++++++++++++++++++++++++++++----
+ kernel/time/hrtimer.c    |  144 ++++++++++++++++++++++++++++++++++++++++++++---
  kernel/time/tick-sched.c |    1 
  kernel/watchdog.c        |    1 
- 6 files changed, 139 insertions(+), 9 deletions(-)
+ 6 files changed, 146 insertions(+), 9 deletions(-)
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
@@ -67,7 +67,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	unsigned int			clock_was_set_seq;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -306,6 +306,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -345,6 +345,7 @@ static void init_rq_hrtick(struct rq *rq
  
  	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	rq->hrtick_timer.function = hrtick;
@@ -87,7 +87,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -724,11 +724,8 @@ static inline int hrtimer_is_hres_enable
+@@ -720,11 +720,8 @@ static inline int hrtimer_is_hres_enable
  static inline void hrtimer_switch_to_hres(void) { }
  static inline void
  hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -101,7 +101,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
  static inline void retrigger_next_event(void *arg) { }
  
-@@ -877,7 +874,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -873,7 +870,7 @@ void hrtimer_wait_for_timer(const struct
  {
  	struct hrtimer_clock_base *base = timer->base;
  
@@ -110,7 +110,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		wait_event(base->cpu_base->wait,
  				!(hrtimer_callback_running(timer)));
  }
-@@ -927,6 +924,11 @@ static void __remove_hrtimer(struct hrti
+@@ -923,6 +920,11 @@ static void __remove_hrtimer(struct hrti
  	if (!(state & HRTIMER_STATE_ENQUEUED))
  		return;
  
@@ -122,7 +122,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	if (!timerqueue_del(&base->active, &timer->node))
  		cpu_base->active_bases &= ~(1 << base->index);
  
-@@ -1167,6 +1169,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1163,6 +1165,7 @@ static void __hrtimer_init(struct hrtime
  
  	base = hrtimer_clockid_to_base(clock_id);
  	timer->base = &cpu_base->clock_base[base];
@@ -130,7 +130,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	timerqueue_init(&timer->node);
  
  #ifdef CONFIG_TIMER_STATS
-@@ -1207,6 +1210,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1203,6 +1206,7 @@ bool hrtimer_active(const struct hrtimer
  		seq = raw_read_seqcount_begin(&cpu_base->seq);
  
  		if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -138,7 +138,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		    cpu_base->running == timer)
  			return true;
  
-@@ -1305,12 +1309,112 @@ static void __run_hrtimer(struct hrtimer
+@@ -1301,12 +1305,112 @@ static void __run_hrtimer(struct hrtimer
  	cpu_base->running = NULL;
  }
  
@@ -251,7 +251,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  
  	for (; active; base++, active >>= 1) {
  		struct timerqueue_node *node;
-@@ -1350,9 +1454,14 @@ static void __hrtimer_run_queues(struct
+@@ -1346,9 +1450,14 @@ static void __hrtimer_run_queues(struct
  			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
  				break;
  
@@ -267,7 +267,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1494,8 +1603,6 @@ void hrtimer_run_queues(void)
+@@ -1490,8 +1599,6 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -276,7 +276,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  /*
-@@ -1517,6 +1624,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1513,6 +1620,7 @@ static enum hrtimer_restart hrtimer_wake
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
  {
  	sl->timer.function = hrtimer_wakeup;
@@ -284,7 +284,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	sl->task = task;
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1651,6 +1759,7 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1647,6 +1755,7 @@ int hrtimers_prepare_cpu(unsigned int cp
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
  		cpu_base->clock_base[i].cpu_base = cpu_base;
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -292,38 +292,43 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	}
  
  	cpu_base->cpu = cpu;
-@@ -1755,11 +1864,21 @@ static struct notifier_block hrtimers_nb
- 	.notifier_call = hrtimer_cpu_notify,
- };
+@@ -1723,9 +1832,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ 
+ #endif /* CONFIG_HOTPLUG_CPU */
  
 +#ifdef CONFIG_PREEMPT_RT_BASE
++
 +static void run_hrtimer_softirq(struct softirq_action *h)
 +{
 +	hrtimer_rt_run_pending();
 +}
++
++static void hrtimers_open_softirq(void)
++{
++	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
++}
++
++#else
++static void hrtimers_open_softirq(void) { }
 +#endif
 +
  void __init hrtimers_init(void)
  {
- 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
- 			  (void *)(long)smp_processor_id());
- 	register_cpu_notifier(&hrtimers_nb);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-+#endif
+ 	hrtimers_prepare_cpu(smp_processor_id());
++	hrtimers_open_softirq();
  }
  
  /**
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -1213,6 +1213,7 @@ void tick_setup_sched_timer(void)
+@@ -1195,6 +1195,7 @@ void tick_setup_sched_timer(void)
  	 * Emulate tick processing via per-CPU hrtimers:
  	 */
  	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 +	ts->sched_timer.irqsafe = 1;
  	ts->sched_timer.function = tick_sched_timer;
  
- 	/* Get the next period (per cpu) */
+ 	/* Get the next period (per-CPU) */
 --- a/kernel/watchdog.c
 +++ b/kernel/watchdog.c
 @@ -523,6 +523,7 @@ static void watchdog_enable(unsigned int
diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
index e04ff81..5e9a9ec 100644
--- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
+++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: hrtimers: Prepare full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Make cancellation of a running callback in softirq context safe
 against preemption.
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -860,6 +860,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -856,6 +856,32 @@ u64 hrtimer_forward(struct hrtimer *time
  }
  EXPORT_SYMBOL_GPL(hrtimer_forward);
  
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * enqueue_hrtimer - internal function to (re)start a timer
   *
-@@ -1077,7 +1103,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1073,7 +1099,7 @@ int hrtimer_cancel(struct hrtimer *timer
  
  		if (ret >= 0)
  			return ret;
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1468,6 +1494,8 @@ void hrtimer_run_queues(void)
+@@ -1464,6 +1490,8 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -104,16 +104,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1627,6 +1655,9 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1623,6 +1651,9 @@ int hrtimers_prepare_cpu(unsigned int cp
  
  	cpu_base->cpu = cpu;
  	hrtimer_init_hres(cpu_base);
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +	init_waitqueue_head(&cpu_base->wait);
 +#endif
+ 	return 0;
  }
  
- #ifdef CONFIG_HOTPLUG_CPU
 --- a/kernel/time/itimer.c
 +++ b/kernel/time/itimer.c
 @@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
diff --git a/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch b/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
index 852ac07..e7f4e5f 100644
--- a/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
+++ b/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <bitbucket at online.de>
 Date: Fri, 30 Aug 2013 07:57:25 +0200
 Subject: hwlat-detector: Don't ignore threshold module parameter
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If the user specified a threshold at module load time, use it.
 
diff --git a/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
index b0e311d..fbbb93f 100644
--- a/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
+++ b/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 19 Aug 2013 17:33:25 -0400
 Subject: hwlat-detector: Update hwlat_detector to add outer loop detection
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The hwlat_detector reads two timestamps in a row, then reports any
 gap between those calls. The problem is, it misses everything between
diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch
index ba42648..9fcaf32 100644
--- a/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch
+++ b/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 19 Aug 2013 17:33:27 -0400
 Subject: hwlat-detector: Use thread instead of stop machine
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There's no reason to use stop machine to search for hardware latency.
 Simply disabling interrupts while running the loop will do enough to
diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch b/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch
index 86a27aa..06fb296 100644
--- a/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch
+++ b/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 19 Aug 2013 17:33:26 -0400
 Subject: hwlat-detector: Use trace_clock_local if available
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 As ktime_get() calls into the timing code which does a read_seq(), it
 may be affected by other CPUs that touch that lock. To remove this
diff --git a/debian/patches/features/all/rt/hwlatdetect.patch b/debian/patches/features/all/rt/hwlatdetect.patch
index 030cd4e..e1a8767 100644
--- a/debian/patches/features/all/rt/hwlatdetect.patch
+++ b/debian/patches/features/all/rt/hwlatdetect.patch
@@ -1,7 +1,7 @@
 Subject: hwlatdetect.patch
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 13:53:12 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Jon Masters developed this wonderful SMI detector. For details please
 consult Documentation/hwlat_detector.txt. It could be ported to Linux
@@ -123,7 +123,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
  	depends on PCI
 --- a/drivers/misc/Makefile
 +++ b/drivers/misc/Makefile
-@@ -39,6 +39,7 @@ obj-$(CONFIG_C2PORT)		+= c2port/
+@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT)		+= c2port/
  obj-$(CONFIG_HMC6352)		+= hmc6352.o
  obj-y				+= eeprom/
  obj-y				+= cb710/
diff --git a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 49672be..3f30f97 100644
--- a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Clark Williams <williams at redhat.com>
 Date: Tue, 26 May 2015 10:43:43 -0500
 Subject: i915: bogus warning from i915 when running on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
 handler, which whines constantly on the RT kernel (since the interrupt
@@ -19,9 +19,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11475,7 +11475,7 @@ void intel_check_page_flip(struct drm_de
+@@ -11613,7 +11613,7 @@ void intel_check_page_flip(struct drm_i9
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	struct intel_unpin_work *work;
+ 	struct intel_flip_work *work;
  
 -	WARN_ON(!in_interrupt());
 +	WARN_ON_NONRT(!in_interrupt());
diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
index 6d240b8..074c5de 100644
--- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
+++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: ide: Do not disable interrupts for PREEMPT-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the local_irq_*_nort variants.
 
diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
index 173cbbb..9c911c8 100644
--- a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
+++ b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: idr: Use local lock instead of preempt enable/disable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We need to protect the per cpu variable and prevent migration.
 
diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
index 6aacb3f..2c63120 100644
--- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
@@ -1,7 +1,7 @@
 From: Sven-Thorsten Dietrich <sdietrich at novell.com>
 Date: Fri, 3 Jul 2009 08:30:35 -0500
 Subject: infiniband: Mellanox IB driver patch use _nort() primitives
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Fixes an in_atomic stack-dump when the Mellanox module is loaded into the RT
 kernel.
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -883,7 +883,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -897,7 +897,7 @@ void ipoib_mcast_restart_task(struct wor
  
  	ipoib_dbg_mcast(priv, "restarting multicast task\n");
  
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	netif_addr_lock(dev);
  	spin_lock(&priv->lock);
  
-@@ -965,7 +965,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -979,7 +979,7 @@ void ipoib_mcast_restart_task(struct wor
  
  	spin_unlock(&priv->lock);
  	netif_addr_unlock(dev);
diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
index a827cbc..6e7a957 100644
--- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the _nort() primitives.
 
diff --git a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
index a6a662d..63d9ca9 100644
--- a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
+++ b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
@@ -1,7 +1,7 @@
 Subject: Intrduce migrate_disable() + cpu_light()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Jun 2011 15:42:38 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Introduce migrate_disable(). The task can't be pushed to another CPU but can
 be preempted.
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -221,6 +221,9 @@ static inline void cpu_notifier_register
+@@ -192,6 +192,9 @@ static inline void cpu_notifier_register
  #endif /* CONFIG_SMP */
  extern struct bus_type cpu_subsys;
  
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PREEMPT_NOTIFIERS
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1429,6 +1429,12 @@ struct task_struct {
+@@ -1495,6 +1495,12 @@ struct task_struct {
  #endif
  
  	unsigned int policy;
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int nr_cpus_allowed;
  	cpumask_t cpus_allowed;
  
-@@ -1875,14 +1881,6 @@ extern int arch_task_struct_size __read_
+@@ -1946,14 +1952,6 @@ extern int arch_task_struct_size __read_
  # define arch_task_struct_size (sizeof(struct task_struct))
  #endif
  
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TNF_MIGRATED	0x01
  #define TNF_NO_GROUP	0x02
  #define TNF_SHARED	0x04
-@@ -3164,6 +3162,31 @@ static inline void set_task_cpu(struct t
+@@ -3394,6 +3392,31 @@ static inline void set_task_cpu(struct t
  
  #endif /* CONFIG_SMP */
  
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * boot command line:
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1051,6 +1051,11 @@ void do_set_cpus_allowed(struct task_str
+@@ -1089,6 +1089,11 @@ void do_set_cpus_allowed(struct task_str
  
  	lockdep_assert_held(&p->pi_lock);
  
@@ -163,16 +163,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	queued = task_on_rq_queued(p);
  	running = task_current(rq, p);
  
-@@ -1112,7 +1117,7 @@ static int __set_cpus_allowed_ptr(struct
- 	do_set_cpus_allowed(p, new_mask);
+@@ -1168,7 +1173,7 @@ static int __set_cpus_allowed_ptr(struct
+ 	}
  
  	/* Can the task run on the task's current CPU? If so, we're done */
 -	if (cpumask_test_cpu(task_cpu(p), new_mask))
 +	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
  		goto out;
  
- 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -3061,6 +3066,69 @@ static inline void schedule_debug(struct
+ 	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+@@ -3237,6 +3242,69 @@ static inline void schedule_debug(struct
  	schedstat_inc(this_rq(), sched_count);
  }
  
@@ -244,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   */
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
-@@ -559,6 +559,9 @@ void print_rt_rq(struct seq_file *m, int
+@@ -552,6 +552,9 @@ void print_rt_rq(struct seq_file *m, int
  	P(rt_throttled);
  	PN(rt_time);
  	PN(rt_runtime);
@@ -254,7 +254,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #undef PN
  #undef P
-@@ -954,6 +957,10 @@ void proc_sched_show_task(struct task_st
+@@ -947,6 +950,10 @@ void proc_sched_show_task(struct task_st
  #endif
  	P(policy);
  	P(prio);
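
The task_struct hunk above adds a bare nesting counter; the contract it enforces can be modeled in isolation. A hedged sketch of just that rule (the real patch additionally has to reconcile cpus_allowed and scheduler state):

#include <assert.h>
#include <stdio.h>

/* Minimal model of the nesting contract, assuming only the counter shown
 * in the task_struct hunk; names mirror the patch but this is not the
 * kernel implementation. */
struct task {
	int migrate_disable;	/* > 0: task must stay on its current CPU */
};

static void migrate_disable(struct task *p)
{
	p->migrate_disable++;	/* nests; only the outermost call matters */
}

static void migrate_enable(struct task *p)
{
	assert(p->migrate_disable > 0);	/* unbalanced calls are a bug */
	if (--p->migrate_disable == 0) {
		/* task is migratable again; a pending migration request
		 * would be serviced at this point */
	}
}

static int __migrate_disabled(const struct task *p)
{
	return p->migrate_disable != 0;
}

int main(void)
{
	struct task t = { 0 };

	migrate_disable(&t);
	migrate_disable(&t);		/* nested section */
	printf("pinned: %d\n", __migrate_disabled(&t));	/* prints 1 */
	migrate_enable(&t);
	migrate_enable(&t);
	printf("pinned: %d\n", __migrate_disabled(&t));	/* prints 0 */
	return 0;
}

Unlike preempt_disable(), the pinned task here may still be preempted; only cross-CPU movement is forbidden, which is exactly why __set_cpus_allowed_ptr() in the diff treats a migrate-disabled task as already on a valid CPU.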
diff --git a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
index 09b7841..21b0937 100644
--- a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
@@ -1,7 +1,7 @@
 Subject: iommu/amd: Use WARN_ON_NORT in __attach_device()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:22:23 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT does not disable interrupts here, but the protection is still
 correct. Fixup the WARN_ON so it won't yell on RT.
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/iommu/amd_iommu.c
 +++ b/drivers/iommu/amd_iommu.c
-@@ -2165,10 +2165,10 @@ static int __attach_device(struct iommu_
+@@ -1832,10 +1832,10 @@ static int __attach_device(struct iommu_
  	int ret;
  
  	/*
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* lock domain */
  	spin_lock(&domain->lock);
-@@ -2331,10 +2331,10 @@ static void __detach_device(struct iommu
+@@ -2003,10 +2003,10 @@ static void __detach_device(struct iommu
  	struct protection_domain *domain;
  
  	/*
diff --git a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
new file mode 100644
index 0000000..72fd2ae
--- /dev/null
+++ b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -0,0 +1,82 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Thu, 15 Sep 2016 16:58:19 +0200
+Subject: [PATCH] iommu/iova: don't disable preempt around this_cpu_ptr()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Commit 583248e6620a ("iommu/iova: Disable preemption around use of
+this_cpu_ptr()") disables preemption while accessing a per-CPU variable.
+This does keep lockdep quiet. However I don't see why it would be
+bad if we got migrated to another CPU after the access.
+__iova_rcache_insert() and __iova_rcache_get() immediately lock the
+variable after obtaining it - before accessing its members.
+_If_ we get migrated away after retrieving the address of cpu_rcache
+before taking the lock then the *other* task on the same CPU will
+retrieve the same address of cpu_rcache and will spin on the lock.
+
+alloc_iova_fast() disables preemption while invoking
+free_cpu_cached_iovas() on each CPU. The function itself uses
+per_cpu_ptr(), which does not trigger a warning (like this_cpu_ptr()
+does) because it assumes the caller knows what it is doing: the data
+structure may be accessed from a different CPU, so the caller needs
+protection against concurrent access.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/iommu/iova.c |    9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/smp.h>
+ #include <linux/bitops.h>
++#include <linux/cpu.h>
+ 
+ static bool iova_rcache_insert(struct iova_domain *iovad,
+ 			       unsigned long pfn,
+@@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iova
+ 
+ 		/* Try replenishing IOVAs by flushing rcache. */
+ 		flushed_rcache = true;
+-		preempt_disable();
+ 		for_each_online_cpu(cpu)
+ 			free_cpu_cached_iovas(cpu, iovad);
+-		preempt_enable();
+ 		goto retry;
+ 	}
+ 
+@@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct
+ 	bool can_insert = false;
+ 	unsigned long flags;
+ 
+-	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ 	spin_lock_irqsave(&cpu_rcache->lock, flags);
+ 
+ 	if (!iova_magazine_full(cpu_rcache->loaded)) {
+@@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct
+ 		iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+ 
+ 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+-	put_cpu_ptr(rcache->cpu_rcaches);
+ 
+ 	if (mag_to_free) {
+ 		iova_magazine_free_pfns(mag_to_free, iovad);
+@@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(s
+ 	bool has_pfn = false;
+ 	unsigned long flags;
+ 
+-	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ 	spin_lock_irqsave(&cpu_rcache->lock, flags);
+ 
+ 	if (!iova_magazine_empty(cpu_rcache->loaded)) {
+@@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(s
+ 		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+ 
+ 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+-	put_cpu_ptr(rcache->cpu_rcaches);
+ 
+ 	return iova_pfn;
+ }
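
The safety argument made in this patch, that looking up the per-CPU pointer without pinning is fine because every member access happens under cpu_rcache->lock, can be illustrated with a small userspace analogy, assuming one lock-protected bucket per "CPU" (hypothetical names, not the iova API):

#include <pthread.h>
#include <stdio.h>

#define NBUCKETS 4

struct bucket {
	pthread_mutex_t lock;
	int count;
};

static struct bucket buckets[NBUCKETS];

static void insert(int cpu_hint)
{
	/* Analogue of raw_cpu_ptr(): the hint may be stale by the time we
	 * take the lock. The worst case is two threads contending on the
	 * same bucket lock, never unlocked access. */
	struct bucket *b = &buckets[cpu_hint % NBUCKETS];

	pthread_mutex_lock(&b->lock);
	b->count++;		/* members are only touched under the lock */
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&buckets[i].lock, NULL);
	insert(0);
	insert(0);		/* a "migrated" caller hits the same bucket */
	printf("bucket 0: %d\n", buckets[0].count);
	return 0;
}

Disabling preemption (get_cpu_ptr()) would only guarantee the hint stays fresh, which buys nothing once the data is lock-protected, and on RT it forbids taking the sleeping spinlock at all.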
diff --git a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
new file mode 100644
index 0000000..c125cac
--- /dev/null
+++ b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -0,0 +1,59 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Thu, 15 Sep 2016 17:16:44 +0200
+Subject: [PATCH] iommu/vt-d: don't disable preemption while accessing
+ deferred_flush()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+get_cpu() disables preemption and returns the current CPU number. The
+CPU number is later only used once while retrieving the address of the
+local CPU's deferred_flush pointer.
+We can instead use raw_cpu_ptr() while we remain preemptible. The worst
+thing that can happen is that flush_unmaps_timeout() is invoked multiple
+times: once by taskA after seeing HIGH_WATER_MARK and then preempted to
+another CPU and then by taskB which saw HIGH_WATER_MARK on the same CPU
+as taskA. It is also likely that ->size went from HIGH_WATER_MARK to 0
+right after its read because another CPU invoked flush_unmaps_timeout()
+for this CPU.
+The access to flush_data is protected by a spinlock so even if we get
+migrated to another CPU or preempted - the data structure is protected.
+
+While at it, I marked deferred_flush static since I can't find a
+reference to it outside of this file.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/iommu/intel-iommu.c |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -479,7 +479,7 @@ struct deferred_flush_data {
+ 	struct deferred_flush_table *tables;
+ };
+ 
+-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
++static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+ 
+ /* bitmap for indexing intel_iommus */
+ static int g_num_of_iommus;
+@@ -3626,10 +3626,8 @@ static void add_unmap(struct dmar_domain
+ 	struct intel_iommu *iommu;
+ 	struct deferred_flush_entry *entry;
+ 	struct deferred_flush_data *flush_data;
+-	unsigned int cpuid;
+ 
+-	cpuid = get_cpu();
+-	flush_data = per_cpu_ptr(&deferred_flush, cpuid);
++	flush_data = raw_cpu_ptr(&deferred_flush);
+ 
+ 	/* Flush all CPUs' entries to avoid deferring too much.  If
+ 	 * this becomes a bottleneck, can just flush us, and rely on
+@@ -3662,8 +3660,6 @@ static void add_unmap(struct dmar_domain
+ 	}
+ 	flush_data->size++;
+ 	spin_unlock_irqrestore(&flush_data->lock, flags);
+-
+-	put_cpu();
+ }
+ 
+ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
diff --git a/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch b/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch
index 3b66585..c23b845 100644
--- a/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch
+++ b/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 30 Oct 2015 11:59:07 +0100
 Subject: ipc/msg: Implement lockless pipelined wakeups
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This patch moves the wake_up_process() invocation so it is not done under
 the perm->lock by making use of a lockless wake_q. With this change, the
diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
index 8e085a5..1b9efd2 100644
--- a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
+++ b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
@@ -1,7 +1,7 @@
 Subject: ipc/sem: Rework semaphore wakeups
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 14 Sep 2011 11:57:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Current sysv sems have a weird ass wakeup scheme that involves keeping
 preemption disabled over a potential O(n^2) loop and busy waiting on
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/ipc/sem.c
 +++ b/ipc/sem.c
-@@ -697,6 +697,13 @@ static int perform_atomic_semop(struct s
+@@ -686,6 +686,13 @@ static int perform_atomic_semop(struct s
  static void wake_up_sem_queue_prepare(struct list_head *pt,
  				struct sem_queue *q, int error)
  {
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (list_empty(pt)) {
  		/*
  		 * Hold preempt off so that we don't get preempted and have the
-@@ -708,6 +715,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -697,6 +704,7 @@ static void wake_up_sem_queue_prepare(st
  	q->pid = error;
  
  	list_add_tail(&q->list, pt);
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -721,6 +729,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -710,6 +718,7 @@ static void wake_up_sem_queue_prepare(st
   */
  static void wake_up_sem_queue_do(struct list_head *pt)
  {
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct sem_queue *q, *t;
  	int did_something;
  
-@@ -733,6 +742,7 @@ static void wake_up_sem_queue_do(struct
+@@ -722,6 +731,7 @@ static void wake_up_sem_queue_do(struct
  	}
  	if (did_something)
  		preempt_enable();
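
The names in the hunks above, wake_up_sem_queue_prepare() and wake_up_sem_queue_do(), refer to a two-phase wakeup: waiters are collected while the semaphore lock is held and woken only afterwards, which the RT hunks bypass by waking the task directly. A rough userspace model of that split, with hypothetical types rather than the kernel's sem_queue list:

#include <stdio.h>

struct waiter {
	struct waiter *next;
	int id;
};

static struct waiter *pending;	/* built while holding the sem lock */

static void wake_prepare(struct waiter *w)
{
	w->next = pending;	/* phase 1: record only, don't wake yet */
	pending = w;
}

static void wake_do(void)
{
	/* phase 2: runs after the lock is dropped, so a woken task that
	 * immediately needs the lock doesn't end up spinning on us */
	for (struct waiter *w = pending; w; w = w->next)
		printf("waking waiter %d\n", w->id);
	pending = NULL;
}

int main(void)
{
	struct waiter a = { .next = NULL, .id = 1 };
	struct waiter b = { .next = NULL, .id = 2 };

	wake_prepare(&a);	/* under the lock in the real code */
	wake_prepare(&b);
	/* ...unlock would happen here... */
	wake_do();
	return 0;
}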
diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 597f25c..a9e8816 100644
--- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Allow disabling of softirq processing in irq thread context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 Jan 2012 13:01:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The processing of softirqs in irq thread context is a performance gain
 for the non-rt workloads of a system, but it's counterproductive for
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -938,7 +938,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -881,7 +881,15 @@ irq_forced_thread_fn(struct irq_desc *de
  	local_bh_disable();
  	ret = action->thread_fn(action->irq, action->dev_id);
  	irq_finalize_oneshot(desc, action);
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  
-@@ -1388,6 +1396,9 @@ static int
+@@ -1338,6 +1346,9 @@ static int
  			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  		}
  
diff --git a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
index c796fa5..b4450dd 100644
--- a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: Move irq safe work to irq context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 15 Nov 2015 18:40:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On architectures where arch_irq_work_has_interrupt() returns false, we
 end up running the irq safe work from the softirq context. That
@@ -56,7 +56,7 @@ Cc: stable-rt at vger.kernel.org
   * Synchronize against the irq_work @entry, ensures the entry is not
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
+@@ -1630,7 +1630,7 @@ void update_process_times(int user_tick)
  	scheduler_tick();
  	run_local_timers();
  	rcu_check_callbacks(user_tick);
@@ -65,14 +65,14 @@ Cc: stable-rt at vger.kernel.org
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1498,9 +1498,7 @@ static void run_timer_softirq(struct sof
+@@ -1670,9 +1670,7 @@ static void run_timer_softirq(struct sof
  {
- 	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+ 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
 -#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
 -	irq_work_tick();
 -#endif
 +	irq_work_tick_soft();
  
- 	if (time_after_eq(jiffies, base->timer_jiffies))
- 		__run_timers(base);
+ 	__run_timers(base);
+ 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
diff --git a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
index 9356ec1..01e0820 100644
--- a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: push most work into softirq context
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Initially we deferred all irqwork into softirq because we didn't want the
 latency spikes if perf or another user was busy and delayed the RT task.
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
+@@ -1630,7 +1630,7 @@ void update_process_times(int user_tick)
  	scheduler_tick();
  	run_local_timers();
  	rcu_check_callbacks(user_tick);
@@ -173,14 +173,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1498,6 +1498,10 @@ static void run_timer_softirq(struct sof
+@@ -1670,6 +1670,10 @@ static void run_timer_softirq(struct sof
  {
- 	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+ 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
 +	irq_work_tick();
 +#endif
 +
- 	if (time_after_eq(jiffies, base->timer_jiffies))
- 		__run_timers(base);
- }
+ 	__run_timers(base);
+ 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/debian/patches/features/all/rt/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch b/debian/patches/features/all/rt/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
new file mode 100644
index 0000000..5778c08
--- /dev/null
+++ b/debian/patches/features/all/rt/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
@@ -0,0 +1,65 @@
+From: Jan Kara <jack at suse.cz>
+Date: Mon, 19 Sep 2016 14:30:43 +0200
+Subject: [PATCH] jbd2: Fix lockdep annotation in add_transaction_credits()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Thomas has reported a lockdep splat hitting in
+add_transaction_credits(). The problem is that that function calls
+jbd2_might_wait_for_commit() while holding j_state_lock which is wrong
+(we do not really wait for transaction commit while holding that lock).
+
+Fix the problem by moving jbd2_might_wait_for_commit() into places where
+we are ready to wait for transaction commit and thus j_state_lock is
+unlocked.
+
+Fixes: 1eaa566d368b214d99cbb973647c1b0b8102a9ae
+Reported-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Jan Kara <jack at suse.cz>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ fs/jbd2/transaction.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -159,6 +159,7 @@ static void wait_transaction_locked(jour
+ 	read_unlock(&journal->j_state_lock);
+ 	if (need_to_start)
+ 		jbd2_log_start_commit(journal, tid);
++	jbd2_might_wait_for_commit(journal);
+ 	schedule();
+ 	finish_wait(&journal->j_wait_transaction_locked, &wait);
+ }
+@@ -182,8 +183,6 @@ static int add_transaction_credits(journ
+ 	int needed;
+ 	int total = blocks + rsv_blocks;
+ 
+-	jbd2_might_wait_for_commit(journal);
+-
+ 	/*
+ 	 * If the current transaction is locked down for commit, wait
+ 	 * for the lock to be released.
+@@ -214,6 +213,7 @@ static int add_transaction_credits(journ
+ 		if (atomic_read(&journal->j_reserved_credits) + total >
+ 		    journal->j_max_transaction_buffers) {
+ 			read_unlock(&journal->j_state_lock);
++			jbd2_might_wait_for_commit(journal);
+ 			wait_event(journal->j_wait_reserved,
+ 				   atomic_read(&journal->j_reserved_credits) + total <=
+ 				   journal->j_max_transaction_buffers);
+@@ -238,6 +238,7 @@ static int add_transaction_credits(journ
+ 	if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+ 		atomic_sub(total, &t->t_outstanding_credits);
+ 		read_unlock(&journal->j_state_lock);
++		jbd2_might_wait_for_commit(journal);
+ 		write_lock(&journal->j_state_lock);
+ 		if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+ 			__jbd2_log_wait_for_space(journal);
+@@ -255,6 +256,7 @@ static int add_transaction_credits(journ
+ 		sub_reserved_credits(journal, rsv_blocks);
+ 		atomic_sub(total, &t->t_outstanding_credits);
+ 		read_unlock(&journal->j_state_lock);
++		jbd2_might_wait_for_commit(journal);
+ 		wait_event(journal->j_wait_reserved,
+ 			 atomic_read(&journal->j_reserved_credits) + rsv_blocks
+ 			 <= journal->j_max_transaction_buffers / 2);
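
The pattern of this fix generalizes: a might-sleep style assertion is only truthful at points where the code can actually block, so it must sit outside any section holding a non-sleepable lock. A hedged, self-contained C model of that rule, using hypothetical helpers rather than the jbd2/lockdep API:

#include <assert.h>
#include <stdio.h>

/* Hypothetical lock-depth tracking; the kernel uses lockdep instead. */
static int read_locks_held;

static void state_read_lock(void)   { read_locks_held++; }
static void state_read_unlock(void) { read_locks_held--; }

/* Analogue of jbd2_might_wait_for_commit(): asserts that blocking is
 * permitted here. Calling it with the lock held was the reported splat. */
static void might_wait_for_commit(void)
{
	assert(read_locks_held == 0);
}

static void add_credits(int need_to_wait)
{
	state_read_lock();
	/* ...examine journal state under the lock... */
	if (need_to_wait) {
		state_read_unlock();
		might_wait_for_commit();  /* correct: lock already dropped */
		/* ...the wait_event() would follow here... */
		return;
	}
	state_read_unlock();
}

int main(void)
{
	add_credits(1);
	puts("assertion placed after unlock: ok");
	return 0;
}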
diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch
index 526783d..dd67054 100644
--- a/debian/patches/features/all/rt/jump-label-rt.patch
+++ b/debian/patches/features/all/rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
 Subject: jump-label: disable if stop_machine() is used
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Some architectures are using stop_machine() while switching the opcode which
 leads to latency spikes.
@@ -25,10 +25,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -35,7 +35,7 @@ config ARM
- 	select HARDIRQS_SW_RESEND
+@@ -36,7 +36,7 @@ config ARM
  	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
  	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ 	select HAVE_ARCH_HARDENED_USERCOPY
 -	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 +	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
  	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
index 0b86d1e..0c26257 100644
--- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Disable config options which are not RT compatible
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Disable stuff which is known to have issues on RT
 
@@ -23,12 +23,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	help
 --- a/mm/Kconfig
 +++ b/mm/Kconfig
-@@ -391,7 +391,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
  
  config TRANSPARENT_HUGEPAGE
  	bool "Transparent Hugepage Support"
 -	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
 +	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
  	select COMPACTION
+ 	select RADIX_TREE_MULTIORDER
  	help
- 	  Transparent Hugepages allows the kernel to use huge pages and
diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
index f4b7130..a714089 100644
--- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
+++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Add PREEMPT_RT_FULL
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 14:58:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Introduce the final symbol for PREEMPT_RT_FULL.
 
diff --git a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
index 4edb5db..d3b8e9a 100644
--- a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
+++ b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 19 Mar 2013 14:44:30 +0100
 Subject: kernel/SRCU: provide a static initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There are macros providing a static initializer for three of the four
 possible notifier types, namely:
diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index edbc95b..58c735d 100644
--- a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 7 Jun 2013 22:37:06 +0200
 Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If a kthread is pinned to CPUx and CPUx is going down then we get into
 trouble:
@@ -76,9 +76,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return 0;
  }
  
-@@ -991,6 +1001,7 @@ static int takedown_cpu(unsigned int cpu
- 	else
- 		synchronize_rcu();
+@@ -983,6 +993,7 @@ static int takedown_cpu(unsigned int cpu
+ 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ 	int err;
  
 +	__cpu_unplug_wait(cpu);
  	/* Park the smpboot threads */
diff --git a/debian/patches/features/all/rt/kernel-futex-don-t-deboost-too-early.patch b/debian/patches/features/all/rt/kernel-futex-don-t-deboost-too-early.patch
new file mode 100644
index 0000000..6e377ad
--- /dev/null
+++ b/debian/patches/features/all/rt/kernel-futex-don-t-deboost-too-early.patch
@@ -0,0 +1,162 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Thu, 29 Sep 2016 18:49:22 +0200
+Subject: [PATCH] kernel/futex: don't deboost too early
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+The sequence:
+ T1 holds futex
+ T2 blocks on futex and boosts T1
+ T1 unlocks futex and holds hb->lock
+ T1 unlocks rt mutex, so T1 has no more pi waiters
+ T3 blocks on hb->lock and adds itself to the pi waiters list of T1
+ T1 unlocks hb->lock and deboosts itself
+ T4 preempts T1 so the wakeup of T2 gets delayed
+
+As a workaround I attempt here to unlock the hb->lock without a deboost
+and perform the deboost after the wake up of the waiter.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/spinlock.h    |    6 ++++
+ include/linux/spinlock_rt.h |    2 +
+ kernel/futex.c              |    2 -
+ kernel/locking/rtmutex.c    |   53 ++++++++++++++++++++++++++++++++++++++------
+ 4 files changed, 55 insertions(+), 8 deletions(-)
+
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -355,6 +355,12 @@ static __always_inline void spin_unlock(
+ 	raw_spin_unlock(&lock->rlock);
+ }
+ 
++static __always_inline int spin_unlock_no_deboost(spinlock_t *lock)
++{
++	raw_spin_unlock(&lock->rlock);
++	return 0;
++}
++
+ static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ {
+ 	raw_spin_unlock_bh(&lock->rlock);
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -26,6 +26,7 @@ extern void __lockfunc rt_spin_lock(spin
+ extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+ extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+ extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock);
+ extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+ extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+ extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+@@ -111,6 +112,7 @@ static inline unsigned long spin_lock_tr
+ #define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+ 
+ #define spin_unlock(lock)			rt_spin_unlock(lock)
++#define spin_unlock_no_deboost(lock)		rt_spin_unlock_no_deboost(lock)
+ 
+ #define spin_unlock_bh(lock)				\
+ 	do {						\
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1368,7 +1368,7 @@ static int wake_futex_pi(u32 __user *uad
+ 	 * deboost first (and lose our higher priority), then the task might get
+ 	 * scheduled away before the wake up can take place.
+ 	 */
+-	spin_unlock(&hb->lock);
++	deboost |= spin_unlock_no_deboost(&hb->lock);
+ 	wake_up_q(&wake_q);
+ 	wake_up_q_sleeper(&wake_sleeper_q);
+ 	if (deboost)
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -933,13 +933,14 @@ static inline void rt_spin_lock_fastlock
+ 		slowfn(lock);
+ }
+ 
+-static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+-					   void  (*slowfn)(struct rt_mutex *lock))
++static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
++					  int (*slowfn)(struct rt_mutex *lock))
+ {
+-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+ 		rt_mutex_deadlock_account_unlock(current);
+-	else
+-		slowfn(lock);
++		return 0;
++	}
++	return slowfn(lock);
+ }
+ #ifdef CONFIG_SMP
+ /*
+@@ -1074,7 +1075,7 @@ static void mark_wakeup_next_waiter(stru
+ /*
+  * Slow path to release a rt_mutex spin_lock style
+  */
+-static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+ {
+ 	unsigned long flags;
+ 	WAKE_Q(wake_q);
+@@ -1089,7 +1090,7 @@ static void  noinline __sched rt_spin_lo
+ 	if (!rt_mutex_has_waiters(lock)) {
+ 		lock->owner = NULL;
+ 		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+-		return;
++		return 0;
+ 	}
+ 
+ 	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
+@@ -1100,6 +1101,33 @@ static void  noinline __sched rt_spin_lo
+ 
+ 	/* Undo pi boosting.when necessary */
+ 	rt_mutex_adjust_prio(current);
++	return 0;
++}
++
++static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
++{
++	unsigned long flags;
++	WAKE_Q(wake_q);
++	WAKE_Q(wake_sleeper_q);
++
++	raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++	debug_rt_mutex_unlock(lock);
++
++	rt_mutex_deadlock_account_unlock(current);
++
++	if (!rt_mutex_has_waiters(lock)) {
++		lock->owner = NULL;
++		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++		return 0;
++	}
++
++	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
++
++	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++	wake_up_q(&wake_q);
++	wake_up_q_sleeper(&wake_sleeper_q);
++	return 1;
+ }
+ 
+ void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
+@@ -1157,6 +1185,17 @@ void __lockfunc rt_spin_unlock(spinlock_
+ }
+ EXPORT_SYMBOL(rt_spin_unlock);
+ 
++int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
++{
++	int ret;
++
++	/* NOTE: we always pass in '1' for nested, for simplicity */
++	spin_release(&lock->dep_map, 1, _RET_IP_);
++	ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
++	migrate_enable();
++	return ret;
++}
++
+ void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+ {
+ 	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 1286490..89b376d 100644
--- a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 14 Jun 2013 17:16:35 +0200
 Subject: kernel/hotplug: restore original cpu mask oncpu/down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If a task which is allowed to run only on CPU X puts CPU Y down then it
 will be allowed on all CPUs but not on CPU Y after it comes back from
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1096,6 +1096,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1088,6 +1088,7 @@ static int __ref _cpu_down(unsigned int
  	bool hasdied = false;
  	int mycpu;
  	cpumask_var_t cpumask;
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -1106,6 +1107,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1098,6 +1099,12 @@ static int __ref _cpu_down(unsigned int
  	/* Move the downtaker off the unplug cpu */
  	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
  		return -ENOMEM;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
-@@ -1114,7 +1121,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1106,7 +1113,8 @@ static int __ref _cpu_down(unsigned int
  	if (mycpu == cpu) {
  	printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
  		migrate_enable();
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	cpu_hotplug_begin();
-@@ -1168,6 +1176,9 @@ static int __ref _cpu_down(unsigned int
+@@ -1160,6 +1168,9 @@ static int __ref _cpu_down(unsigned int
  	/* This post dead nonsense must die */
  	if (!ret && hasdied)
  		cpu_notify_nofail(CPU_POST_DEAD, cpu);
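
The description's save/restore is spread over several hunks whose bodies
sit mostly outside the refreshed context; a hedged sketch of the
resulting flow (the cpumask_org name follows the -RT patch and is not
visible above; error handling trimmed):

static int sketch_cpu_down(unsigned int cpu)
{
	cpumask_var_t cpumask, cpumask_org;
	int err = 0;

	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}
	/* Remember the caller's affinity before pinning it away from
	 * the dying CPU, and restore it on every exit path. */
	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);

	/* ... actually take the CPU down here ... */

	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
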
diff --git a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index 02f4767..405eff4 100644
--- a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 9 Feb 2016 18:18:01 +0100
 Subject: kernel: migrate_disable() do fastpath in atomic &
  irqs-off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 With interrupts off it makes no sense to do the long path since we can't
 leave the CPU anyway. Also we might end up in a recursion with lockdep.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3117,7 +3117,7 @@ void migrate_disable(void)
+@@ -3293,7 +3293,7 @@ void migrate_disable(void)
  {
  	struct task_struct *p = current;
  
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_SCHED_DEBUG
  		p->migrate_disable_atomic++;
  #endif
-@@ -3144,7 +3144,7 @@ void migrate_enable(void)
+@@ -3320,7 +3320,7 @@ void migrate_enable(void)
  {
  	struct task_struct *p = current;
  
diff --git a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 4ba5309..d6a524c 100644
--- a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 4 Feb 2016 16:38:10 +0100
 Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Otherwise we get a WARN_ON() backtrace and some events are reported as
 "not counted".
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -963,6 +963,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1042,6 +1042,7 @@ static void __perf_mux_hrtimer_init(stru
  	raw_spin_lock_init(&cpuctx->hrtimer_lock);
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
  	timer->function = perf_mux_hrtimer_handler;
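
Only the hunk offset moved here; the addition is the single assignment
after the context shown, using the irqsafe field that the -RT tree adds
to struct hrtimer:

raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
timer->irqsafe = 1;	/* run in hard IRQ context rather than the
			 * softirq thread, avoiding the WARN_ON() */
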
diff --git a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index 2eda1c5..1fe5340 100644
--- a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 19 May 2016 17:45:27 +0200
 Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On -RT we try to acquire sleeping locks which might lead to warnings
 from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1502,6 +1502,11 @@ static void call_console_drivers(int lev
+@@ -1631,6 +1631,11 @@ static void call_console_drivers(int lev
  	if (!console_drivers)
  		return;
  
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	migrate_disable();
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
-@@ -2434,6 +2439,11 @@ void console_unblank(void)
+@@ -2565,6 +2570,11 @@ void console_unblank(void)
  {
  	struct console *c;
  
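
The added lines lie outside the refreshed context in both hunks; a
hedged reconstruction of the guard placed at the top of
call_console_drivers() and console_unblank():

/* On -RT the console drivers take sleeping locks; calling them from
 * IRQ or NMI context would trigger lockdep / spin_try_lock() warnings,
 * so bail out early there and let printk catch up later. */
if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
	if (in_irq() || in_nmi())
		return;
}
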
diff --git a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
index bedb52f..beffb44 100644
--- a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
+++ b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 9 Feb 2016 18:17:18 +0100
 Subject: kernel: softirq: unlock with irqs on
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We unlock the lock while the interrupts are off. This isn't a problem
 now but will become one because the migrate_disable() + enable are not
diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
index e8eb5c0..812da0c 100644
--- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch
+++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
@@ -1,7 +1,7 @@
 From: Jason Wessel <jason.wessel at windriver.com>
 Date: Thu, 28 Jul 2011 12:42:23 -0500
 Subject: kgdb/serial: Short term workaround
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
 >  - KGDB (not yet disabled) is reportedly unusable on -rt right now due
@@ -34,7 +34,7 @@ Jason.
  #include <linux/uaccess.h>
  #include <linux/pm_runtime.h>
  #include <linux/timer.h>
-@@ -3094,6 +3095,8 @@ void serial8250_console_write(struct uar
+@@ -3112,6 +3113,8 @@ void serial8250_console_write(struct uar
  
  	if (port->sysrq || oops_in_progress)
  		locked = 0;
diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch
index 5ae254e..33a3a13 100644
--- a/debian/patches/features/all/rt/latency-hist.patch
+++ b/debian/patches/features/all/rt/latency-hist.patch
@@ -1,7 +1,7 @@
 Subject: tracing: Add latency histograms
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 14:03:41 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This patch provides a recording mechanism to store data of potential
 sources of system latencies. The recordings separately determine the
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int				start_pid;
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1821,6 +1821,12 @@ struct task_struct {
+@@ -1892,6 +1892,12 @@ struct task_struct {
  	/* bitmask and counter of trace recursion */
  	unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
@@ -368,7 +368,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #include "tick-internal.h"
  
-@@ -995,7 +996,16 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -991,7 +992,16 @@ void hrtimer_start_range_ns(struct hrtim
  	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
  
  	timer_stats_hrtimer_set_start_info(timer);
@@ -385,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	leftmost = enqueue_hrtimer(timer, new_base);
  	if (!leftmost)
  		goto unlock;
-@@ -1269,6 +1279,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1265,6 +1275,8 @@ static void __run_hrtimer(struct hrtimer
  	cpu_base->running = NULL;
  }
  
@@ -394,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
  {
  	struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1288,6 +1300,15 @@ static void __hrtimer_run_queues(struct
+@@ -1284,6 +1296,15 @@ static void __hrtimer_run_queues(struct
  
  			timer = container_of(node, struct hrtimer, node);
  
@@ -437,7 +437,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  config PREEMPT_TRACER
  	bool "Preemption-off Latency Tracer"
  	default n
-@@ -211,6 +229,24 @@ config PREEMPT_TRACER
+@@ -212,6 +230,24 @@ config PREEMPT_TRACER
  	  enabled. This option and the irqs-off timing option can be
  	  used together or separately.)
  
@@ -462,7 +462,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  config SCHED_TRACER
  	bool "Scheduling Latency Tracer"
  	select GENERIC_TRACER
-@@ -221,6 +257,74 @@ config SCHED_TRACER
+@@ -222,6 +258,74 @@ config SCHED_TRACER
  	  This tracer tracks the latency of the highest priority task
  	  to be scheduled in, starting from the point it has woken up.
  
@@ -539,7 +539,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	depends on !GENERIC_TRACER
 --- a/kernel/trace/Makefile
 +++ b/kernel/trace/Makefile
-@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
+@@ -41,6 +41,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
  obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
  obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
  obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
index 51148ed..10ae756 100644
--- a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
+++ b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
@@ -1,7 +1,7 @@
 Subject: latency_hist: Update sched_wakeup probe
 From: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
 Date: Sun, 25 Oct 2015 18:06:05 -0400
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 "sched: Introduce the 'trace_sched_waking' tracepoint" introduces a
 prototype change for the sched_wakeup probe: the "success" argument is
diff --git a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
index e7eedd5..e038f18 100644
--- a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
+++ b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 4 Feb 2016 14:08:06 +0100
 Subject: latencyhist: disable jump-labels
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 At least on X86 we die a recursive death
 
diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 50d0011..91e52d6 100644
--- a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 23 Jan 2014 14:45:59 +0100
 Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 as it triggers:
 |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/leds/trigger/Kconfig
 +++ b/drivers/leds/trigger/Kconfig
-@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT
+@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
  
  config LEDS_TRIGGER_CPU
  	bool "LED CPU Trigger"
diff --git a/debian/patches/features/all/rt/lglocks-rt.patch b/debian/patches/features/all/rt/lglocks-rt.patch
index 6cf511d..236c5cc 100644
--- a/debian/patches/features/all/rt/lglocks-rt.patch
+++ b/debian/patches/features/all/rt/lglocks-rt.patch
@@ -1,7 +1,7 @@
 Subject: lglocks: Provide a RT safe variant
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 15 Jun 2011 11:02:21 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 lglocks by themselves will spin in order to get the lock. This will end up
 badly if a task with the highest priority keeps spinning while a task
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +# define lg_do_unlock(l)	arch_spin_unlock(l)
 +#else
 +# define lg_lock_ptr		struct rt_mutex
-+# define lg_do_lock(l)		__rt_spin_lock(l)
++# define lg_do_lock(l)		__rt_spin_lock__no_mg(l)
 +# define lg_do_unlock(l)	__rt_spin_unlock(l)
 +#endif
  /*
diff --git a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
index a23d584..8ad50ab 100644
--- a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 31 Mar 2016 00:04:25 -0500
 Subject: [PATCH] list_bl: fixup bogus lockdep warning
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 At first glance, the use of 'static inline' seems appropriate for
 INIT_HLIST_BL_HEAD().
diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
index 7e9b75e..de0fad6 100644
--- a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Fri, 21 Jun 2013 15:07:25 -0400
 Subject: list_bl: Make list head locking RT safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 As per changes in include/linux/jbd_common.h for avoiding the
 bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
index a8a30fe..899c3a8 100644
--- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
+++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 22:34:14 +0200
 Subject: rt: local_irq_* variants depending on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Add local_irq_*_(no)rt variants which are mainly used to break
 interrupt disabled sections on PREEMPT_RT or to explicitly disable
diff --git a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
index 7f28d4e..7dcedeb 100644
--- a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
+++ b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 27 May 2016 15:11:51 +0200
 Subject: [PATCH] locallock: add local_lock_on()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch
index 8a12298..f955f3c 100644
--- a/debian/patches/features/all/rt/localversion.patch
+++ b/debian/patches/features/all/rt/localversion.patch
@@ -1,7 +1,7 @@
 Subject: Add localversion for -RT release
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt5
++-rt1
diff --git a/debian/patches/features/all/rt/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch b/debian/patches/features/all/rt/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
new file mode 100644
index 0000000..9e9f1b2
--- /dev/null
+++ b/debian/patches/features/all/rt/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
@@ -0,0 +1,111 @@
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 8 Sep 2016 12:34:33 -0400
+Subject: [PATCH] lockdep: Quiet gcc about dangerous __builtin_return_address()
+ operations
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+[
+  Boris, does this quiet gcc for you?
+  I haven't fully tested this yet, as I still don't have a compiler
+  that does the warning.
+]
+
+Gcc's new warnings about __builtin_return_address(n) operations with
+n > 0 are popping up around the kernel. The operation is dangerous, and
+the warning is "good to know". But there are instances where we use
+__builtin_return_address(n) with n > 0 and are aware of the issues,
+and work around them. And it's used mostly for tracing and debugging. In
+these cases, the warning becomes a distraction and is not helpful.
+
+To get better lock issue traces, a function like get_lock_parent_ip()
+uses __builtin_return_address() to find the caller of the lock, and
+skip over the internal callers of the lock itself. Currently it is only
+used in the kernel/ directory and only if certain configs are enabled.
+
+Create a new config called CONFIG_USING_GET_LOCK_PARENT_IP that gets
+selected when another config relies on get_lock_parent_ip(), and this
+will now enable the function get_lock_parent_ip(), otherwise it won't be
+defined. It will also disable the frame-address warnings from gcc in
+the kernel directory.
+
+Reported-by: Borislav Petkov <bp at alien8.de>
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+---
+ include/linux/ftrace.h |    2 ++
+ kernel/Makefile        |    7 +++++++
+ kernel/trace/Kconfig   |    1 +
+ lib/Kconfig.debug      |   10 ++++++++++
+ 4 files changed, 20 insertions(+)
+
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -714,6 +714,7 @@ static inline void __ftrace_enabled_rest
+ #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+ #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+ 
++#ifdef CONFIG_USING_GET_LOCK_PARENT_IP
+ static inline unsigned long get_lock_parent_ip(void)
+ {
+ 	unsigned long addr = CALLER_ADDR0;
+@@ -725,6 +726,7 @@ static inline unsigned long get_lock_par
+ 		return addr;
+ 	return CALLER_ADDR2;
+ }
++#endif
+ 
+ #ifdef CONFIG_IRQSOFF_TRACER
+   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -11,6 +11,13 @@ obj-y     = fork.o exec_domain.o panic.o
+ 	    notifier.o ksysfs.o cred.o reboot.o \
+ 	    async.o range.o smpboot.o
+ 
++# Tracing may do some dangerous __builtin_return_address() operations
++# We know they are dangerous, we don't need gcc telling us that.
++ifdef CONFIG_USING_GET_LOCK_PARENT_IP
++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
++KBUILD_CFLAGS += $(FRAME_CFLAGS)
++endif
++
+ obj-$(CONFIG_MULTIUSER) += groups.o
+ 
+ ifdef CONFIG_FUNCTION_TRACER
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -197,6 +197,7 @@ config PREEMPT_TRACER
+ 	select RING_BUFFER_ALLOW_SWAP
+ 	select TRACER_SNAPSHOT
+ 	select TRACER_SNAPSHOT_PER_CPU_SWAP
++	select USING_GET_LOCK_PARENT_IP
+ 	help
+ 	  This option measures the time spent in preemption-off critical
+ 	  sections, with microsecond accuracy.
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -977,6 +977,7 @@ config TIMER_STATS
+ config DEBUG_PREEMPT
+ 	bool "Debug preemptible kernel"
+ 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
++	select USING_GET_LOCK_PARENT_IP
+ 	default y
+ 	help
+ 	  If you say Y here then the kernel will use a debug variant of the
+@@ -1159,8 +1160,17 @@ config LOCK_TORTURE_TEST
+ 
+ endmenu # lock debugging
+ 
++config USING_GET_LOCK_PARENT_IP
++        bool
++	help
++	  Enables the use of the function get_lock_parent_ip() that
++	  will use __builtin_return_address(n) with n > 0 causing
++	  some gcc warnings. When this is selected, those warnings
++	  will be suppressed.
++
+ config TRACE_IRQFLAGS
+ 	bool
++	select USING_GET_LOCK_PARENT_IP
+ 	help
+ 	  Enables hooks to interrupt enabling and disabling for
+ 	  either tracing or lock debugging.
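
For reference, the helper this new config option gates, with the lines
elided between the two ftrace.h hunks above filled back in from
mainline include/linux/ftrace.h:

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	/* Walk outward until we leave the locking internals, so lock
	 * debugging reports the real caller, not spin_lock() itself. */
	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}
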
diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
index ad9dea5..0b1a14e 100644
--- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: Make it RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 teach lockdep that we don't really do softirqs on -RT.
 
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #if defined(CONFIG_IRQSOFF_TRACER) || \
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -3648,6 +3648,7 @@ static void check_flags(unsigned long fl
+@@ -3686,6 +3686,7 @@ static void check_flags(unsigned long fl
  		}
  	}
  
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * We dont accurately track softirq state in e.g.
  	 * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3662,6 +3663,7 @@ static void check_flags(unsigned long fl
+@@ -3700,6 +3701,7 @@ static void check_flags(unsigned long fl
  			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
  		}
  	}
diff --git a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 44933c5..59378b6 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <josh.cartwright at ni.com>
 Date: Wed, 28 Jan 2015 13:08:45 -0600
 Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 "lockdep: Selftest: Only do hardirq context test for raw spinlock"
 disabled the execution of certain tests with PREEMPT_RT_FULL, but did
diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index b3437ec..e38a993 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 From: Yong Zhang <yong.zhang at windriver.com>
 
diff --git a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index 7ceff59..1eea5de 100644
--- a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -1,7 +1,7 @@
 From: "Wolfgang M. Reimer" <linuxball at gmail.com>
 Date: Tue, 21 Jul 2015 16:20:07 +0200
 Subject: locking: locktorture: Do NOT include rwlock.h directly
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Including rwlock.h directly will cause kernel builds to fail
 if CONFIG_PREEMPT_RT_FULL is defined. The correct header file
diff --git a/debian/patches/features/all/rt/lockinglglocks_Use_preempt_enabledisable_nort.patch b/debian/patches/features/all/rt/lockinglglocks_Use_preempt_enabledisable_nort.patch
new file mode 100644
index 0000000..686a140
--- /dev/null
+++ b/debian/patches/features/all/rt/lockinglglocks_Use_preempt_enabledisable_nort.patch
@@ -0,0 +1,35 @@
+Subject: locking/lglocks: Use preempt_enable/disable_nort() in lg_double_[un]lock
+From: Mike Galbraith <umgwanakikbuti at gmail.com>
+Date: Sat, 27 Feb 2016 08:34:43 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Let's not do that when snagging an rtmutex.
+
+Signed-off-by: Mike Galbraith <umgwanakilbuti at gmail.com>
+Cc: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Cc: linux-rt-users <linux-rt-users at vger.kernel.org>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/locking/lglock.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/locking/lglock.c
++++ b/kernel/locking/lglock.c
+@@ -86,7 +86,7 @@ void lg_double_lock(struct lglock *lg, i
+ 	if (cpu2 < cpu1)
+ 		swap(cpu1, cpu2);
+ 
+-	preempt_disable();
++	preempt_disable_nort();
+ 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ 	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
+ 	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
+@@ -97,7 +97,7 @@ void lg_double_unlock(struct lglock *lg,
+ 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
+ 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
+-	preempt_enable();
++	preempt_enable_nort();
+ }
+ 
+ void lg_global_lock(struct lglock *lg)
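
The _nort variants come from an earlier patch in the series; roughly,
they compile away on -RT (where lg_do_lock() is an rtmutex and may
sleep, so preemption must stay enabled) and keep their stock meaning
otherwise:

#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_nort()		barrier()
# define preempt_enable_nort()		barrier()
#else
# define preempt_disable_nort()		preempt_disable()
# define preempt_enable_nort()		preempt_enable()
#endif
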
diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch
index a980069..45e56ba 100644
--- a/debian/patches/features/all/rt/md-disable-bcache.patch
+++ b/debian/patches/features/all/rt/md-disable-bcache.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 11:48:57 +0200
 Subject: md: disable bcache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 It uses anon semaphores
 |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’:
diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
index d130159..8885950 100644
--- a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 6 Apr 2010 16:51:31 +0200
 Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 __raid_run_ops() disables preemption with get_cpu() around the access
 to the raid5_percpu variables. That causes scheduling while atomic
@@ -21,7 +21,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
 
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
-@@ -1918,8 +1918,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_h
  	struct raid5_percpu *percpu;
  	unsigned long cpu;
  
@@ -32,7 +32,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
  	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
  		ops_run_biofill(sh);
  		overlap_clear++;
-@@ -1975,7 +1976,8 @@ static void raid_run_ops(struct stripe_h
+@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_h
  			if (test_and_clear_bit(R5_Overlap, &dev->flags))
  				wake_up(&sh->raid_conf->wait_for_overlap);
  		}
@@ -41,8 +41,8 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
 +	put_cpu_light();
  }
  
- static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6415,6 +6417,7 @@ static int raid5_alloc_percpu(struct r5c
+ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+@@ -6438,6 +6440,7 @@ static int raid5_alloc_percpu(struct r5c
  			       __func__, cpu);
  			break;
  		}
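
get_cpu_light()/put_cpu_light() are likewise provided earlier in the
series; roughly, they pin the task with migrate_disable() instead of
disabling preemption outright, so the raid5_percpu section stays
preemptible on -RT:

#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif
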
diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
index 538e623..4f0ff4e 100644
--- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: mips: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/mips/Kconfig
 +++ b/arch/mips/Kconfig
-@@ -2416,7 +2416,7 @@ config CPU_R4400_WORKAROUNDS
+@@ -2480,7 +2480,7 @@ config MIPS_ASID_BITS_VARIABLE
  #
  config HIGHMEM
  	bool "High Memory Support"
diff --git a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
index 75c09e4..d8662a3 100644
--- a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
+++ b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
@@ -1,7 +1,7 @@
 Subject: mm: rt: Fix generic kmap_atomic for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 19 Sep 2015 10:15:00 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The update to 4.1 brought in the mainline variant of the pagefault
 disable disentangling from preempt count. That introduced a
diff --git a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
index 40ef409..523ebeb 100644
--- a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+++ b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 5 Feb 2016 12:17:14 +0100
 Subject: mm: backing-dev: don't disable IRQs in wb_congested_put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 it triggers:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
index 84095dd..79e7f72 100644
--- a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
+++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
@@ -1,7 +1,7 @@
 Subject: mm: bounce: Use local_irq_save_nort
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 10:33:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 kmap_atomic() is preemptible on RT.
 
diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
index 426f243..7c3bb78 100644
--- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
+++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:51 -0500
 Subject: mm/swap: Convert to percpu locked
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Replace global locks (get_cpu + local_irq_save) with "local_locks()".
 Currently there is one for "rotate" and one for "swap".
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  include/linux/swap.h |    1 +
  mm/compaction.c      |    6 ++++--
  mm/page_alloc.c      |    2 ++
- mm/swap.c            |   39 +++++++++++++++++++++++----------------
- 4 files changed, 30 insertions(+), 18 deletions(-)
+ mm/swap.c            |   38 ++++++++++++++++++++++----------------
+ 4 files changed, 29 insertions(+), 18 deletions(-)
 
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
-@@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -290,6 +290,7 @@ extern unsigned long nr_free_pagecache_p
  
  
  /* linux/mm/swap.c */
@@ -28,8 +28,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern void lru_cache_add_file(struct page *page);
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
-@@ -1414,10 +1414,12 @@ static int compact_zone(struct zone *zon
- 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
+@@ -1585,10 +1585,12 @@ static enum compact_result compact_zone(
+ 				block_start_pfn(cc->migrate_pfn, cc->order);
  
  			if (cc->last_migrated_pfn < current_block_start) {
 -				cpu = get_cpu();
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			}
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -6274,7 +6274,9 @@ static int page_alloc_cpu_notify(struct
+@@ -6590,7 +6590,9 @@ static int page_alloc_cpu_notify(struct
  	int cpu = (unsigned long)hcpu;
  
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -65,47 +65,46 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/hugetlb.h>
  #include <linux/page_idle.h>
  
-@@ -48,6 +49,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
- 
+@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lr
+ #ifdef CONFIG_SMP
+ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+ #endif
 +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
 +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-+
+ 
  /*
   * This path almost never happens for VM activity - pages are normally
-  * freed via pagevecs.  But it gets used by networking.
-@@ -237,11 +241,11 @@ void rotate_reclaimable_page(struct page
+@@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page
  		unsigned long flags;
  
  		get_page(page);
 -		local_irq_save(flags);
 +		local_lock_irqsave(rotate_lock, flags);
  		pvec = this_cpu_ptr(&lru_rotate_pvecs);
- 		if (!pagevec_add(pvec, page))
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
  			pagevec_move_tail(pvec);
 -		local_irq_restore(flags);
 +		local_unlock_irqrestore(rotate_lock, flags);
  	}
  }
  
-@@ -292,12 +296,13 @@ static bool need_activate_page_drain(int
- void activate_page(struct page *page)
+@@ -294,12 +297,13 @@ void activate_page(struct page *page)
  {
+ 	page = compound_head(page);
  	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 -		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 +		struct pagevec *pvec = &get_locked_var(swapvec_lock,
 +						       activate_page_pvecs);
  
  		get_page(page);
- 		if (!pagevec_add(pvec, page))
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
  			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 -		put_cpu_var(activate_page_pvecs);
 +		put_locked_var(swapvec_lock, activate_page_pvecs);
  	}
  }
  
-@@ -323,7 +328,7 @@ void activate_page(struct page *page)
+@@ -326,7 +330,7 @@ void activate_page(struct page *page)
  
  static void __lru_cache_activate_page(struct page *page)
  {
@@ -114,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int i;
  
  	/*
-@@ -345,7 +350,7 @@ static void __lru_cache_activate_page(st
+@@ -348,7 +352,7 @@ static void __lru_cache_activate_page(st
  		}
  	}
  
@@ -123,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -387,13 +392,13 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
  
  static void __lru_cache_add(struct page *page)
  {
@@ -131,15 +130,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
  
  	get_page(page);
- 	if (!pagevec_space(pvec))
+ 	if (!pagevec_add(pvec, page) || PageCompound(page))
  		__pagevec_lru_add(pvec);
- 	pagevec_add(pvec, page);
 -	put_cpu_var(lru_add_pvec);
 +	put_locked_var(swapvec_lock, lru_add_pvec);
  }
  
  /**
-@@ -591,9 +596,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -593,9 +597,9 @@ void lru_add_drain_cpu(int cpu)
  		unsigned long flags;
  
  		/* No harm done if a racing interrupt already did this */
@@ -151,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -625,11 +630,12 @@ void deactivate_file_page(struct page *p
+@@ -627,11 +631,12 @@ void deactivate_file_page(struct page *p
  		return;
  
  	if (likely(get_page_unless_zero(page))) {
@@ -159,14 +157,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		struct pagevec *pvec = &get_locked_var(swapvec_lock,
 +						       lru_deactivate_file_pvecs);
  
- 		if (!pagevec_add(pvec, page))
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
  			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 -		put_cpu_var(lru_deactivate_file_pvecs);
 +		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
  	}
  }
  
-@@ -644,19 +650,20 @@ void deactivate_file_page(struct page *p
+@@ -646,19 +651,20 @@ void deactivate_file_page(struct page *p
  void deactivate_page(struct page *page)
  {
  	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
@@ -175,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +						       lru_deactivate_pvecs);
  
  		get_page(page);
- 		if (!pagevec_add(pvec, page))
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
  			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 -		put_cpu_var(lru_deactivate_pvecs);
 +		put_locked_var(swapvec_lock, lru_deactivate_pvecs);
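
Every conversion in this patch follows the same mechanical pattern; a
minimal sketch of it using the locallock API the series provides (on
!RT these calls collapse back to get_cpu_var()/put_cpu_var()):

DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

static void sketch_lru_cache_add(struct page *page)
{
	/* Take the named per-CPU lock (a sleeping lock on -RT, plain
	 * preempt-off on !RT) instead of an implicit get_cpu_var(). */
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_locked_var(swapvec_lock, lru_add_pvec);
}
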
diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
index 67e7f1a..15c9f01 100644
--- a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
+++ b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:44:03 -0500
 Subject: mm: Allow only slub on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Disable SLAB and SLOB on -RT. Only SLUB is adapted to -RT needs.
 
@@ -14,15 +14,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1717,6 +1717,7 @@ choice
+@@ -1761,6 +1761,7 @@ choice
  
  config SLAB
  	bool "SLAB"
 +	depends on !PREEMPT_RT_FULL
+ 	select HAVE_HARDENED_USERCOPY_ALLOCATOR
  	help
  	  The regular slab allocator that is established and known to work
- 	  well in all environments. It organizes cache hot objects in
-@@ -1735,6 +1736,7 @@ config SLUB
+@@ -1781,6 +1782,7 @@ config SLUB
  config SLOB
  	depends on EXPERT
  	bool "SLOB (Simple Allocator)"
diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch
index 46dcfe6..971486b 100644
--- a/debian/patches/features/all/rt/mm-enable-slub.patch
+++ b/debian/patches/features/all/rt/mm-enable-slub.patch
@@ -1,7 +1,7 @@
 Subject: mm: Enable SLUB for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 25 Oct 2012 10:32:35 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Make SLUB RT aware by converting locks to raw and using free lists to
 move the freeing out of the lock held region.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/slab.h
 +++ b/mm/slab.h
-@@ -415,7 +415,11 @@ static inline void slab_post_alloc_hook(
+@@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1143,7 +1143,7 @@ static noinline int free_debug_processin
+@@ -1145,7 +1145,7 @@ static noinline int free_debug_processin
  	unsigned long uninitialized_var(flags);
  	int ret = 0;
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	slab_lock(page);
  
  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1178,7 +1178,7 @@ static noinline int free_debug_processin
+@@ -1180,7 +1180,7 @@ static noinline int free_debug_processin
  			 bulk_cnt, cnt);
  
  	slab_unlock(page);
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (!ret)
  		slab_fix(s, "Object at 0x%p not freed", object);
  	return ret;
-@@ -1306,6 +1306,12 @@ static inline void dec_slabs_node(struct
+@@ -1308,6 +1308,12 @@ static inline void dec_slabs_node(struct
  
  #endif /* CONFIG_SLUB_DEBUG */
  
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
-@@ -1415,7 +1421,11 @@ static struct page *allocate_slab(struct
+@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
  
  	flags &= gfp_allowed_mask;
  
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1486,7 +1496,11 @@ static struct page *allocate_slab(struct
+@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		local_irq_disable();
  	if (!page)
  		return NULL;
-@@ -1543,6 +1557,16 @@ static void __free_slab(struct kmem_cach
+@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
  	__free_pages(page, order);
  }
  
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
  
-@@ -1574,6 +1598,12 @@ static void free_slab(struct kmem_cache
+@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
  		}
  
  		call_rcu(head, rcu_free_slab);
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else
  		__free_slab(s, page);
  }
-@@ -1681,7 +1711,7 @@ static void *get_partial_node(struct kme
+@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
  	if (!n || !n->nr_partial)
  		return NULL;
  
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
  		void *t;
  
-@@ -1706,7 +1736,7 @@ static void *get_partial_node(struct kme
+@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
  			break;
  
  	}
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return object;
  }
  
-@@ -1952,7 +1982,7 @@ static void deactivate_slab(struct kmem_
+@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	} else {
  		m = M_FULL;
-@@ -1963,7 +1993,7 @@ static void deactivate_slab(struct kmem_
+@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	}
  
-@@ -1998,7 +2028,7 @@ static void deactivate_slab(struct kmem_
+@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
  		goto redo;
  
  	if (lock)
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
-@@ -2030,10 +2060,10 @@ static void unfreeze_partials(struct kme
+@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
  		n2 = get_node(s, page_to_nid(page));
  		if (n != n2) {
  			if (n)
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  
  		do {
-@@ -2062,7 +2092,7 @@ static void unfreeze_partials(struct kme
+@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
  	}
  
  	if (n)
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	while (discard_page) {
  		page = discard_page;
-@@ -2101,14 +2131,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
@@ -202,7 +202,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				oldpage = NULL;
  				pobjects = 0;
  				pages = 0;
-@@ -2180,7 +2217,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
  
  static void flush_all(struct kmem_cache *s)
  {
@@ -225,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2216,10 +2268,10 @@ static unsigned long count_partial(struc
+@@ -2337,10 +2389,10 @@ static unsigned long count_partial(struc
  	unsigned long x = 0;
  	struct page *page;
  
@@ -238,7 +238,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return x;
  }
  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2357,8 +2409,10 @@ static inline void *get_freelist(struct
+@@ -2478,8 +2530,10 @@ static inline void *get_freelist(struct
   * already disabled (which is the case for bulk allocation).
   */
  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -250,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *freelist;
  	struct page *page;
  
-@@ -2418,6 +2472,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2539,6 +2593,13 @@ static void *___slab_alloc(struct kmem_c
  	VM_BUG_ON(!c->page->frozen);
  	c->freelist = get_freepointer(s, freelist);
  	c->tid = next_tid(c->tid);
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return freelist;
  
  new_slab:
-@@ -2449,7 +2510,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2570,7 +2631,7 @@ static void *___slab_alloc(struct kmem_c
  	deactivate_slab(s, page, get_freepointer(s, freelist));
  	c->page = NULL;
  	c->freelist = NULL;
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2461,6 +2522,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2582,6 +2643,7 @@ static void *__slab_alloc(struct kmem_ca
  {
  	void *p;
  	unsigned long flags;
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
-@@ -2472,8 +2534,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2593,8 +2655,9 @@ static void *__slab_alloc(struct kmem_ca
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
  
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return p;
  }
  
-@@ -2659,7 +2722,7 @@ static void __slab_free(struct kmem_cach
+@@ -2780,7 +2843,7 @@ static void __slab_free(struct kmem_cach
  
  	do {
  		if (unlikely(n)) {
@@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			n = NULL;
  		}
  		prior = page->freelist;
-@@ -2691,7 +2754,7 @@ static void __slab_free(struct kmem_cach
+@@ -2812,7 +2875,7 @@ static void __slab_free(struct kmem_cach
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  			}
  		}
-@@ -2733,7 +2796,7 @@ static void __slab_free(struct kmem_cach
+@@ -2854,7 +2917,7 @@ static void __slab_free(struct kmem_cach
  		add_partial(n, page, DEACTIVATE_TO_TAIL);
  		stat(s, FREE_ADD_PARTIAL);
  	}
@@ -319,7 +319,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return;
  
  slab_empty:
-@@ -2748,7 +2811,7 @@ static void __slab_free(struct kmem_cach
+@@ -2869,7 +2932,7 @@ static void __slab_free(struct kmem_cach
  		remove_full(s, n, page);
  	}
  
@@ -328,7 +328,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  }
-@@ -2935,6 +2998,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3074,6 +3137,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			  void **p)
  {
  	struct kmem_cache_cpu *c;
@@ -336,7 +336,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int i;
  
  	/* memcg and kmem_cache debug support */
-@@ -2958,7 +3022,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3097,7 +3161,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			 * of re-populating per CPU c->freelist
  			 */
  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			if (unlikely(!p[i]))
  				goto error;
  
-@@ -2970,6 +3034,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3109,6 +3173,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  	}
  	c->tid = next_tid(c->tid);
  	local_irq_enable();
@@ -353,7 +353,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Clear memory outside IRQ disabled fastpath loop */
  	if (unlikely(flags & __GFP_ZERO)) {
-@@ -3117,7 +3182,7 @@ static void
+@@ -3256,7 +3321,7 @@ static void
  init_kmem_cache_node(struct kmem_cache_node *n)
  {
  	n->nr_partial = 0;
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	INIT_LIST_HEAD(&n->partial);
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_set(&n->nr_slabs, 0);
-@@ -3450,6 +3515,10 @@ static void list_slab_objects(struct kme
+@@ -3600,6 +3665,10 @@ static void list_slab_objects(struct kme
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
@@ -373,7 +373,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *addr = page_address(page);
  	void *p;
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3470,6 +3539,7 @@ static void list_slab_objects(struct kme
+@@ -3620,6 +3689,7 @@ static void list_slab_objects(struct kme
  	slab_unlock(page);
  	kfree(map);
  #endif
@@ -381,7 +381,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -3482,7 +3552,7 @@ static void free_partial(struct kmem_cac
+@@ -3633,7 +3703,7 @@ static void free_partial(struct kmem_cac
  	struct page *page, *h;
  
  	BUG_ON(irqs_disabled());
@@ -390,16 +390,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
  		if (!page->inuse) {
  			remove_partial(n, page);
-@@ -3492,7 +3562,7 @@ static void free_partial(struct kmem_cac
+@@ -3643,7 +3713,7 @@ static void free_partial(struct kmem_cac
  			"Objects remaining in %s on __kmem_cache_shutdown()");
  		}
  	}
 -	spin_unlock_irq(&n->list_lock);
 +	raw_spin_unlock_irq(&n->list_lock);
- }
  
- /*
-@@ -3706,7 +3776,7 @@ int __kmem_cache_shrink(struct kmem_cach
+ 	list_for_each_entry_safe(page, h, &discard, lru)
+ 		discard_slab(s, page);
+@@ -3901,7 +3971,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -408,7 +408,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -3737,7 +3807,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3932,7 +4002,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -417,7 +417,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3913,6 +3983,12 @@ void __init kmem_cache_init(void)
+@@ -4108,6 +4178,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -430,7 +430,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4156,7 +4232,7 @@ static int validate_slab_node(struct kme
+@@ -4354,7 +4430,7 @@ static int validate_slab_node(struct kme
  	struct page *page;
  	unsigned long flags;
  
@@ -439,7 +439,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	list_for_each_entry(page, &n->partial, lru) {
  		validate_slab_slab(s, page, map);
-@@ -4178,7 +4254,7 @@ static int validate_slab_node(struct kme
+@@ -4376,7 +4452,7 @@ static int validate_slab_node(struct kme
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -448,7 +448,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return count;
  }
  
-@@ -4366,12 +4442,12 @@ static int list_locations(struct kmem_ca
+@@ -4564,12 +4640,12 @@ static int list_locations(struct kmem_ca
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
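
The free lists named in the description are added by hunks whose bodies
are not visible in this refresh; a hedged reconstruction of the core
mechanism (per-CPU list plus drain helper, simplified from the -RT
patch):

struct slub_free_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};
static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);

/* Slabs queued while IRQs were off are freed here with IRQs enabled,
 * so __free_slab() may take sleeping locks on -RT. */
static void free_delayed(struct list_head *h)
{
	while (!list_empty(h)) {
		struct page *page = list_first_entry(h, struct page, lru);

		list_del(&page->lru);
		__free_slab(page->slab_cache, page);
	}
}
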
diff --git a/debian/patches/features/all/rt/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch b/debian/patches/features/all/rt/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
new file mode 100644
index 0000000..bc19191
--- /dev/null
+++ b/debian/patches/features/all/rt/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
@@ -0,0 +1,186 @@
+From: Johannes Weiner <hannes at cmpxchg.org>
+Date: Tue, 4 Oct 2016 22:02:08 +0200
+Subject: [PATCH] mm: filemap: don't plant shadow entries without radix tree
+ node
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Upstream commit d3798ae8c6f3767c726403c2ca6ecc317752c9dd
+
+When the underflow checks were added to workingset_node_shadow_dec(),
+they triggered immediately:
+
+  kernel BUG at ./include/linux/swap.h:276!
+  invalid opcode: 0000 [#1] SMP
+  Modules linked in: isofs usb_storage fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_REJECT nf_reject_ipv6
+   soundcore wmi acpi_als pinctrl_sunrisepoint kfifo_buf tpm_tis industrialio acpi_pad pinctrl_intel tpm_tis_core tpm nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_crypt
+  CPU: 0 PID: 20929 Comm: blkid Not tainted 4.8.0-rc8-00087-gbe67d60ba944 #1
+  Hardware name: System manufacturer System Product Name/Z170-K, BIOS 1803 05/06/2016
+  task: ffff8faa93ecd940 task.stack: ffff8faa7f478000
+  RIP: page_cache_tree_insert+0xf1/0x100
+  Call Trace:
+    __add_to_page_cache_locked+0x12e/0x270
+    add_to_page_cache_lru+0x4e/0xe0
+    mpage_readpages+0x112/0x1d0
+    blkdev_readpages+0x1d/0x20
+    __do_page_cache_readahead+0x1ad/0x290
+    force_page_cache_readahead+0xaa/0x100
+    page_cache_sync_readahead+0x3f/0x50
+    generic_file_read_iter+0x5af/0x740
+    blkdev_read_iter+0x35/0x40
+    __vfs_read+0xe1/0x130
+    vfs_read+0x96/0x130
+    SyS_read+0x55/0xc0
+    entry_SYSCALL_64_fastpath+0x13/0x8f
+  Code: 03 00 48 8b 5d d8 65 48 33 1c 25 28 00 00 00 44 89 e8 75 19 48 83 c4 18 5b 41 5c 41 5d 41 5e 5d c3 0f 0b 41 bd ef ff ff ff eb d7 <0f> 0b e8 88 68 ef ff 0f 1f 84 00
+  RIP  page_cache_tree_insert+0xf1/0x100
+
+This is a long-standing bug in the way shadow entries are accounted in
+the radix tree nodes. The shrinker needs to know when radix tree nodes
+contain only shadow entries, no pages, so node->count is split in half
+to count shadows in the upper bits and pages in the lower bits.
+
+Unfortunately, the radix tree implementation doesn't know of this and
+assumes all entries are in node->count. When there is a shadow entry
+directly in root->rnode and the tree is later extended, the radix tree
+implementation will copy that entry into the new node and bump its
+node->count, i.e. increasing the page count bits. Once the shadow gets
+removed and we subtract from the upper counter, node->count underflows
+and triggers the warning. Afterwards, without node->count reaching 0
+again, the radix tree node is leaked.
+
+Limit shadow entries to when we have actual radix tree nodes and can
+count them properly. That means we lose the ability to detect refaults
+from files that had only the first page faulted in at eviction time.
+
+Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check")
+Signed-off-by: Johannes Weiner <hannes at cmpxchg.org>
+Reported-and-tested-by: Linus Torvalds <torvalds at linux-foundation.org>
+Reviewed-by: Jan Kara <jack at suse.cz>
+Cc: Andrew Morton <akpm at linux-foundation.org>
+Cc: stable at vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ include/linux/radix-tree.h |    6 ++---
+ lib/radix-tree.c           |   14 ++-----------
+ mm/filemap.c               |   46 +++++++++++++++++++++++++++++----------------
+ 3 files changed, 36 insertions(+), 30 deletions(-)
+
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct rad
+ 			      struct radix_tree_node *node);
+ void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
+ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+-				struct radix_tree_root *root,
+-				unsigned long index, void *entry);
++void radix_tree_clear_tags(struct radix_tree_root *root,
++			   struct radix_tree_node *node,
++			   void **slot);
+ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+ 			void **results, unsigned long first_index,
+ 			unsigned int max_items);
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tre
+ }
+ EXPORT_SYMBOL(radix_tree_delete);
+ 
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+-			struct radix_tree_root *root,
+-			unsigned long index, void *entry)
++void radix_tree_clear_tags(struct radix_tree_root *root,
++			   struct radix_tree_node *node,
++			   void **slot)
+ {
+-	struct radix_tree_node *node;
+-	void **slot;
+-
+-	__radix_tree_lookup(root, index, &node, &slot);
+-
+ 	if (node) {
+ 		unsigned int tag, offset = get_slot_offset(node, slot);
+ 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_repla
+ 		/* Clear root node tags */
+ 		root->gfp_mask &= __GFP_BITS_MASK;
+ 	}
+-
+-	radix_tree_replace_slot(slot, entry);
+-	return node;
+ }
+ 
+ /**
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct
+ static void page_cache_tree_delete(struct address_space *mapping,
+ 				   struct page *page, void *shadow)
+ {
+-	struct radix_tree_node *node;
+ 	int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
+ 
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 	VM_BUG_ON_PAGE(PageTail(page), page);
+ 	VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+ 
+-	if (shadow) {
+-		mapping->nrexceptional += nr;
+-		/*
+-		 * Make sure the nrexceptional update is committed before
+-		 * the nrpages update so that final truncate racing
+-		 * with reclaim does not see both counters 0 at the
+-		 * same time and miss a shadow entry.
+-		 */
+-		smp_wmb();
+-	}
+-	mapping->nrpages -= nr;
+-
+ 	for (i = 0; i < nr; i++) {
+-		node = radix_tree_replace_clear_tags(&mapping->page_tree,
+-				page->index + i, shadow);
++		struct radix_tree_node *node;
++		void **slot;
++
++		__radix_tree_lookup(&mapping->page_tree, page->index + i,
++				    &node, &slot);
++
++		radix_tree_clear_tags(&mapping->page_tree, node, slot);
++
+ 		if (!node) {
+ 			VM_BUG_ON_PAGE(nr != 1, page);
+-			return;
++			/*
++			 * We need a node to properly account shadow
++			 * entries. Don't plant any without. XXX
++			 */
++			shadow = NULL;
+ 		}
+ 
++		radix_tree_replace_slot(slot, shadow);
++
++		if (!node)
++			break;
++
+ 		workingset_node_pages_dec(node);
+ 		if (shadow)
+ 			workingset_node_shadows_inc(node);
+@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struc
+ 					&node->private_list);
+ 		}
+ 	}
++
++	if (shadow) {
++		mapping->nrexceptional += nr;
++		/*
++		 * Make sure the nrexceptional update is committed before
++		 * the nrpages update so that final truncate racing
++		 * with reclaim does not see both counters 0 at the
++		 * same time and miss a shadow entry.
++		 */
++		smp_wmb();
++	}
++	mapping->nrpages -= nr;
+ }
+ 
+ /*
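
The accounting scheme the changelog describes is not visible in the hunks
themselves: node->count is split so the shrinker can recognise nodes that
hold only shadow entries. A rough sketch of the idea, with illustrative
names and an assumed shift value rather than the exact upstream helpers:

  /*
   * Sketch only: pages live in the low bits of node->count, shadow
   * entries above SHADOW_SHIFT.  The names and the shift value are
   * assumptions for illustration, not the kernel's definitions.
   */
  #define SHADOW_SHIFT	16
  #define PAGES_MASK	((1U << SHADOW_SHIFT) - 1)

  static inline unsigned int node_pages(unsigned int count)
  {
  	return count & PAGES_MASK;
  }

  static inline unsigned int node_shadows(unsigned int count)
  {
  	/* decrementing this half past zero is the reported underflow */
  	return count >> SHADOW_SHIFT;
  }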
diff --git a/debian/patches/features/all/rt/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch b/debian/patches/features/all/rt/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
new file mode 100644
index 0000000..3dff2aa
--- /dev/null
+++ b/debian/patches/features/all/rt/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
@@ -0,0 +1,41 @@
+From: Johannes Weiner <hannes at cmpxchg.org>
+Date: Tue, 4 Oct 2016 16:58:06 +0200
+Subject: [PATCH] mm: filemap: fix mapping->nrpages double accounting in fuse
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Upstream commit 3ddf40e8c31964b744ff10abb48c8e36a83ec6e7
+
+Commit 22f2ac51b6d6 ("mm: workingset: fix crash in shadow node shrinker
+caused by replace_page_cache_page()") switched replace_page_cache() from
+raw radix tree operations to page_cache_tree_insert() but didn't take
+into account that the latter function, unlike the raw radix tree op,
+handles mapping->nrpages.  As a result, that counter is bumped for each
+page replacement rather than balanced out evenly.
+
+The mapping->nrpages counter is used to skip needless radix tree walks
+when invalidating, truncating, syncing inodes without pages, as well as
+statistics for userspace.  Since the error is positive, we'll do more
+page cache tree walks than necessary; we won't miss a necessary one.
+And we'll report more buffer pages to userspace than there are.  The
+error is limited to fuse inodes.
+
+Fixes: 22f2ac51b6d6 ("mm: workingset: fix crash in shadow node shrinker caused by replace_page_cache_page()")
+Signed-off-by: Johannes Weiner <hannes at cmpxchg.org>
+Cc: Andrew Morton <akpm at linux-foundation.org>
+Cc: Miklos Szeredi <miklos at szeredi.hu>
+Cc: stable at vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ mm/filemap.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -633,7 +633,6 @@ int replace_page_cache_page(struct page
+ 		__delete_from_page_cache(old, NULL);
+ 		error = page_cache_tree_insert(mapping, new, NULL);
+ 		BUG_ON(error);
+-		mapping->nrpages++;
+ 
+ 		/*
+ 		 * hugetlb pages do not participate in page cache accounting.
diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
index 5634a43..efe0ed6 100644
--- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:13 -0500
 Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Disable preemption on -RT for the vmstat code. On vanilla the code runs in
 IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
@@ -12,8 +12,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
  include/linux/vmstat.h |    4 ++++
- mm/vmstat.c            |    6 ++++++
- 2 files changed, 10 insertions(+)
+ mm/vmstat.c            |   12 ++++++++++++
+ 2 files changed, 16 insertions(+)
 
 --- a/include/linux/vmstat.h
 +++ b/include/linux/vmstat.h
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline void count_vm_events(enum vm_event_item item, long delta)
 --- a/mm/vmstat.c
 +++ b/mm/vmstat.c
-@@ -226,6 +226,7 @@ void __mod_zone_page_state(struct zone *
+@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *
  	long x;
  	long t;
  
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	x = delta + __this_cpu_read(*p);
  
  	t = __this_cpu_read(pcp->stat_threshold);
-@@ -235,6 +236,7 @@ void __mod_zone_page_state(struct zone *
+@@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *
  		x = 0;
  	}
  	__this_cpu_write(*p, x);
@@ -55,7 +55,23 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(__mod_zone_page_state);
  
-@@ -267,6 +269,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist
+ 	long x;
+ 	long t;
+ 
++	preempt_disable_rt();
+ 	x = delta + __this_cpu_read(*p);
+ 
+ 	t = __this_cpu_read(pcp->stat_threshold);
+@@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist
+ 		x = 0;
+ 	}
+ 	__this_cpu_write(*p, x);
++	preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+ 
+@@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone,
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	s8 v, t;
  
@@ -63,15 +79,31 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	v = __this_cpu_inc_return(*p);
  	t = __this_cpu_read(pcp->stat_threshold);
  	if (unlikely(v > t)) {
-@@ -275,6 +278,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone,
  		zone_page_state_add(v + overstep, zone, item);
  		__this_cpu_write(*p, -overstep);
  	}
 +	preempt_enable_rt();
  }
  
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data
+ 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_inc_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v > t)) {
+@@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data
+ 		node_page_state_add(v + overstep, pgdat, item);
+ 		__this_cpu_write(*p, -overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
  void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -289,6 +293,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone,
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	s8 v, t;
  
@@ -79,11 +111,27 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	v = __this_cpu_dec_return(*p);
  	t = __this_cpu_read(pcp->stat_threshold);
  	if (unlikely(v < - t)) {
-@@ -297,6 +302,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone,
  		zone_page_state_add(v - overstep, zone, item);
  		__this_cpu_write(*p, overstep);
  	}
 +	preempt_enable_rt();
  }
  
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data
+ 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_dec_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v < - t)) {
+@@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data
+ 		node_page_state_add(v - overstep, pgdat, item);
+ 		__this_cpu_write(*p, overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
  void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
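
Every hunk above applies the same bracketing to a per-CPU counter update.
A minimal sketch of the pattern, assuming the RT-patch preempt_*_rt()
primitives (they compile to nothing on !RT kernels):

  #include <linux/percpu.h>
  #include <linux/preempt.h>

  static DEFINE_PER_CPU(long, my_stat);	/* illustrative counter */

  /* On mainline the callers run IRQ-off, so the bracketing is free; on
   * RT it restores the exclusion the now-preemptible section lost. */
  static void mod_my_stat(long delta)
  {
  	preempt_disable_rt();
  	__this_cpu_add(my_stat, delta);	/* racy on RT without the bracket */
  	preempt_enable_rt();
  }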
diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 11bd8d4..b6f1bbe 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
 Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The following trace is triggered when running the LTP OOM test cases:
 
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1848,7 +1848,7 @@ static void drain_all_stock(struct mem_c
+@@ -1824,7 +1824,7 @@ static void drain_all_stock(struct mem_c
  		return;
  	/* Notify other cpus that system-wide "drain" is running */
  	get_online_cpus();
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	for_each_online_cpu(cpu) {
  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  		struct mem_cgroup *memcg;
-@@ -1865,7 +1865,7 @@ static void drain_all_stock(struct mem_c
+@@ -1841,7 +1841,7 @@ static void drain_all_stock(struct mem_c
  				schedule_work_on(cpu, &stock->work);
  		}
  	}
diff --git a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
index 4ed2d41..79472d7 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: mm/memcontrol: Replace local_irq_disable with local locks
 Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There are a few local_irq_disable() sections which then take sleeping locks.
 This patch converts them to local locks.
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /* Whether legacy memory+swap accounting is active */
  static bool do_memsw_account(void)
  {
-@@ -4484,12 +4487,12 @@ static int mem_cgroup_move_account(struc
+@@ -4566,12 +4569,12 @@ static int mem_cgroup_move_account(struc
  
  	ret = 0;
  
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  out_unlock:
  	unlock_page(page);
  out:
-@@ -5339,10 +5342,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5444,10 +5447,10 @@ void mem_cgroup_commit_charge(struct pag
  
  	commit_charge(page, memcg, lrucare);
  
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (do_memsw_account() && PageSwapCache(page)) {
  		swp_entry_t entry = { .val = page_private(page) };
-@@ -5394,14 +5397,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5503,14 +5506,14 @@ static void uncharge_batch(struct mem_cg
  		memcg_oom_recover(memcg);
  	}
  
@@ -75,15 +75,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (!mem_cgroup_is_root(memcg))
  		css_put_many(&memcg->css, nr_pages);
-@@ -5719,6 +5722,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5845,6 +5848,7 @@ void mem_cgroup_swapout(struct page *pag
  {
- 	struct mem_cgroup *memcg;
+ 	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned short oldid;
 +	unsigned long flags;
  
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5747,9 +5751,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5885,12 +5889,16 @@ void mem_cgroup_swapout(struct page *pag
  	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
  	 */
@@ -93,6 +93,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +#endif
  	mem_cgroup_charge_statistics(memcg, page, false, -1);
  	memcg_check_events(memcg, page);
+ 
+ 	if (!mem_cgroup_is_root(memcg))
+ 		css_put(&memcg->css);
 +	local_unlock_irqrestore(event_lock, flags);
  }
  
diff --git a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 5476416..a731bdb 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 5 Jun 2016 08:11:13 +0200
 Subject: [PATCH] mm/memcontrol: mem_cgroup_migrate() - replace another
  local_irq_disable() w. local_lock_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
 Convert it to use the existing local lock (event_lock) like the others.
@@ -15,16 +15,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -5554,10 +5554,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5668,10 +5668,10 @@ void mem_cgroup_migrate(struct page *old
  
  	commit_charge(newpage, memcg, false);
  
--	local_irq_disable();
-+	local_lock_irq(event_lock);
+-	local_irq_save(flags);
++	local_lock_irqsave(event_lock, flags);
  	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
  	memcg_check_events(memcg, newpage);
--	local_irq_enable();
-+	local_unlock_irq(event_lock);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(event_lock, flags);
  }
  
  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
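
Both memcontrol patches above, and the page_alloc patches further down
(pa_lock), follow the same conversion recipe. A minimal sketch, assuming
the RT-patch <linux/locallock.h> API:

  #include <linux/locallock.h>

  static DEFINE_LOCAL_IRQ_LOCK(event_lock);

  /* On !RT this is exactly local_irq_save()/restore(); on RT it takes a
   * per-CPU sleeping lock instead, so the section stays serialised on
   * this CPU while remaining preemptible. */
  static void update_events(void)
  {
  	unsigned long flags;

  	local_lock_irqsave(event_lock, flags);
  	/* per-CPU statistics update, as in mem_cgroup_migrate() */
  	local_unlock_irqrestore(event_lock, flags);
  }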
diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 78b1b37..7fe9f2c 100644
--- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -1,7 +1,7 @@
 Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 27 Sep 2012 11:11:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The plain spinlock, while sufficient, does not update the local_lock
 internals. Use a proper local_lock function instead to ease debugging.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -280,9 +280,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -281,9 +281,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
  
  #ifdef CONFIG_PREEMPT_RT_BASE
  # define cpu_lock_irqsave(cpu, flags)		\
diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
index ae4bfed..2663aa0 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri Jul 3 08:44:37 2009 -0500
 Subject: mm: page_alloc: Reduce lock sections further
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Split out the pages which are to be freed into a separate list and
 call free_pages_bulk() outside of the percpu page allocator locks.
@@ -9,13 +9,13 @@ call free_pages_bulk() outside of the percpu page allocator locks.
 Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- mm/page_alloc.c |   87 +++++++++++++++++++++++++++++++++++++++-----------------
- 1 file changed, 62 insertions(+), 25 deletions(-)
+ mm/page_alloc.c |   94 +++++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 66 insertions(+), 28 deletions(-)
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -827,7 +827,7 @@ static inline int free_pages_check(struc
- }
+@@ -1069,7 +1069,7 @@ static bool bulkfree_pcp_prepare(struct
+ #endif /* CONFIG_DEBUG_VM */
  
  /*
 - * Frees a number of pages from the PCP lists
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Assumes all pages on list are in same zone, and of same order.
   * count is the number of pages to free.
   *
-@@ -838,18 +838,53 @@ static inline int free_pages_check(struc
+@@ -1080,19 +1080,58 @@ static bool bulkfree_pcp_prepare(struct
   * pinned" detection logic.
   */
  static void free_pcppages_bulk(struct zone *zone, int count,
@@ -32,21 +32,23 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 -	int migratetype = 0;
 -	int batch_free = 0;
- 	int to_free = count;
  	unsigned long nr_scanned;
+ 	bool isolated_pageblocks;
 +	unsigned long flags;
 +
 +	spin_lock_irqsave(&zone->lock, flags);
  
 -	spin_lock(&zone->lock);
- 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ 	isolated_pageblocks = has_isolate_pageblock(zone);
+ 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
  	if (nr_scanned)
- 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+ 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
  
 +	while (!list_empty(list)) {
-+		struct page *page = list_first_entry(list, struct page, lru);
++		struct page *page;
 +		int mt;	/* migratetype of the to-be-freed page */
 +
++		page = list_first_entry(list, struct page, lru);
 +		/* must delete as __free_one_page list manipulates */
 +		list_del(&page->lru);
 +
@@ -54,14 +56,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		/* MIGRATE_ISOLATE page should not go to pcplists */
 +		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
 +		/* Pageblock could have been isolated meanwhile */
-+		if (unlikely(has_isolate_pageblock(zone)))
++		if (unlikely(isolated_pageblocks))
 +			mt = get_pageblock_migratetype(page);
 +
++		if (bulkfree_pcp_prepare(page))
++			continue;
++
 +		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 +		trace_mm_page_pcpu_drain(page, 0, mt);
-+		to_free--;
++		count--;
 +	}
-+	WARN_ON(to_free != 0);
++	WARN_ON(count != 0);
 +	spin_unlock_irqrestore(&zone->lock, flags);
 +}
 +
@@ -72,16 +77,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 + * Assumes all pages on list are in same zone, and of same order.
 + * count is the number of pages to free.
 + */
-+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
 +			      struct list_head *dst)
 +{
 +	int migratetype = 0;
 +	int batch_free = 0;
 +
- 	while (to_free) {
+ 	while (count) {
  		struct page *page;
  		struct list_head *list;
-@@ -865,7 +900,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1108,7 +1147,7 @@ static void free_pcppages_bulk(struct zo
  			batch_free++;
  			if (++migratetype == MIGRATE_PCPTYPES)
  				migratetype = 0;
@@ -90,8 +95,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		} while (list_empty(list));
  
  		/* This is the only non-empty list. Free them all. */
-@@ -873,24 +908,12 @@ static void free_pcppages_bulk(struct zo
- 			batch_free = to_free;
+@@ -1116,27 +1155,12 @@ static void free_pcppages_bulk(struct zo
+ 			batch_free = count;
  
  		do {
 -			int mt;	/* migratetype of the to-be-freed page */
@@ -104,19 +109,22 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 -			/* MIGRATE_ISOLATE page should not go to pcplists */
 -			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
 -			/* Pageblock could have been isolated meanwhile */
--			if (unlikely(has_isolate_pageblock(zone)))
+-			if (unlikely(isolated_pageblocks))
 -				mt = get_pageblock_migratetype(page);
 -
+-			if (bulkfree_pcp_prepare(page))
+-				continue;
+-
 -			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 -			trace_mm_page_pcpu_drain(page, 0, mt);
 +			list_add(&page->lru, dst);
- 		} while (--to_free && --batch_free && !list_empty(list));
+ 		} while (--count && --batch_free && !list_empty(list));
  	}
 -	spin_unlock(&zone->lock);
  }
  
  static void free_one_page(struct zone *zone,
-@@ -899,7 +922,9 @@ static void free_one_page(struct zone *z
+@@ -1145,7 +1169,9 @@ static void free_one_page(struct zone *z
  				int migratetype)
  {
  	unsigned long nr_scanned;
@@ -124,10 +132,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	unsigned long flags;
 +
 +	spin_lock_irqsave(&zone->lock, flags);
- 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
  	if (nr_scanned)
- 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -909,7 +934,7 @@ static void free_one_page(struct zone *z
+ 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
+@@ -1155,7 +1181,7 @@ static void free_one_page(struct zone *z
  		migratetype = get_pfnblock_migratetype(page, pfn);
  	}
  	__free_one_page(page, pfn, zone, order, migratetype);
@@ -135,8 +143,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	spin_unlock_irqrestore(&zone->lock, flags);
  }
  
- static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -2028,16 +2053,18 @@ static int rmqueue_bulk(struct zone *zon
+ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+@@ -2232,16 +2258,18 @@ static int rmqueue_bulk(struct zone *zon
  void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  {
  	unsigned long flags;
@@ -156,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  #endif
  
-@@ -2053,16 +2080,21 @@ static void drain_pages_zone(unsigned in
+@@ -2257,16 +2285,21 @@ static void drain_pages_zone(unsigned in
  	unsigned long flags;
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
@@ -180,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2240,8 +2272,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2448,8 +2481,13 @@ void free_hot_cold_page(struct page *pag
  	pcp->count++;
  	if (pcp->count >= pcp->high) {
  		unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 8518924..463a4be 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:37 -0500
 Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
 method into a preemptible, explicit-per-cpu-locks method.
@@ -25,8 +25,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#include <linux/locallock.h>
  #include <linux/page_owner.h>
  #include <linux/kthread.h>
- 
-@@ -275,6 +276,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ #include <linux/memcontrol.h>
+@@ -276,6 +277,18 @@ EXPORT_SYMBOL(nr_node_ids);
  EXPORT_SYMBOL(nr_online_nodes);
  #endif
  
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  int page_group_by_mobility_disabled __read_mostly;
  
  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1070,10 +1083,10 @@ static void __free_pages_ok(struct page
+@@ -1228,10 +1241,10 @@ static void __free_pages_ok(struct page
  		return;
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,8 +57,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	local_unlock_irqrestore(pa_lock, flags);
  }
  
- static void __init __free_pages_boot_core(struct page *page,
-@@ -2017,14 +2030,14 @@ void drain_zone_pages(struct zone *zone,
+ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+@@ -2221,14 +2234,14 @@ void drain_zone_pages(struct zone *zone,
  	unsigned long flags;
  	int to_drain, batch;
  
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  #endif
  
-@@ -2041,7 +2054,7 @@ static void drain_pages_zone(unsigned in
+@@ -2245,7 +2258,7 @@ static void drain_pages_zone(unsigned in
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
  
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	pset = per_cpu_ptr(zone->pageset, cpu);
  
  	pcp = &pset->pcp;
-@@ -2049,7 +2062,7 @@ static void drain_pages_zone(unsigned in
+@@ -2253,7 +2266,7 @@ static void drain_pages_zone(unsigned in
  		free_pcppages_bulk(zone, pcp->count, pcp);
  		pcp->count = 0;
  	}
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2135,8 +2148,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2339,8 +2352,17 @@ void drain_all_pages(struct zone *zone)
  		else
  			cpumask_clear_cpu(cpu, &cpus_with_pcps);
  	}
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  #ifdef CONFIG_HIBERNATION
-@@ -2192,7 +2214,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2400,7 +2422,7 @@ void free_hot_cold_page(struct page *pag
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
  	set_pcppage_migratetype(page, migratetype);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	__count_vm_event(PGFREE);
  
  	/*
-@@ -2223,7 +2245,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2431,7 +2453,7 @@ void free_hot_cold_page(struct page *pag
  	}
  
  out:
@@ -129,28 +129,28 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2358,7 +2380,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2568,7 +2590,7 @@ struct page *buffered_rmqueue(struct zon
  		struct per_cpu_pages *pcp;
  		struct list_head *list;
  
 -		local_irq_save(flags);
 +		local_lock_irqsave(pa_lock, flags);
- 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
- 		list = &pcp->lists[migratetype];
- 		if (list_empty(list)) {
-@@ -2382,7 +2404,7 @@ struct page *buffered_rmqueue(struct zon
+ 		do {
+ 			pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ 			list = &pcp->lists[migratetype];
+@@ -2595,7 +2617,7 @@ struct page *buffered_rmqueue(struct zon
  		 * allocate greater than order-1 page units with __GFP_NOFAIL.
  		 */
  		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 -		spin_lock_irqsave(&zone->lock, flags);
 +		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
  
- 		page = NULL;
- 		if (alloc_flags & ALLOC_HARDER) {
-@@ -2392,11 +2414,13 @@ struct page *buffered_rmqueue(struct zon
- 		}
- 		if (!page)
- 			page = __rmqueue(zone, order, migratetype);
+ 		do {
+ 			page = NULL;
+@@ -2607,22 +2629,24 @@ struct page *buffered_rmqueue(struct zon
+ 			if (!page)
+ 				page = __rmqueue(zone, order, migratetype);
+ 		} while (page && check_new_pages(page, order));
 -		spin_unlock(&zone->lock);
 -		if (!page)
 +		if (!page) {
@@ -162,10 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		spin_unlock(&zone->lock);
  	}
  
- 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -2406,13 +2430,13 @@ struct page *buffered_rmqueue(struct zon
- 
- 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
+ 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
  	zone_statistics(preferred_zone, zone, gfp_flags);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);
@@ -179,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return NULL;
  }
  
-@@ -6239,6 +6263,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6554,6 +6578,7 @@ static int page_alloc_cpu_notify(struct
  void __init page_alloc_init(void)
  {
  	hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -187,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -7163,7 +7188,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7370,7 +7395,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;
  
  	/* avoid races with drain_pages()  */
@@ -196,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7172,7 +7197,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7379,7 +7404,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
diff --git a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
index 42d56e6..e7e6d4a 100644
--- a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
+++ b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
@@ -1,7 +1,7 @@
 From: Luiz Capitulino <lcapitulino at redhat.com>
 Date: Fri, 27 May 2016 15:03:28 +0200
 Subject: [PATCH] mm: perform lru_add_drain_all() remotely
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
 on all CPUs that have non-empty LRU pagevecs and then waiting for
@@ -20,12 +20,12 @@ Signed-off-by: Rik van Riel <riel at redhat.com>
 Signed-off-by: Luiz Capitulino <lcapitulino at redhat.com>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- mm/swap.c |   37 ++++++++++++++++++++++++++++++-------
- 1 file changed, 30 insertions(+), 7 deletions(-)
+ mm/swap.c |   42 ++++++++++++++++++++++++++++++++----------
+ 1 file changed, 32 insertions(+), 10 deletions(-)
 
 --- a/mm/swap.c
 +++ b/mm/swap.c
-@@ -596,9 +596,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -597,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
  		unsigned long flags;
  
  		/* No harm done if a racing interrupt already did this */
@@ -41,40 +41,49 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -666,12 +672,32 @@ void lru_add_drain(void)
+@@ -667,12 +673,15 @@ void lru_add_drain(void)
  	local_unlock_cpu(swapvec_lock);
  }
  
-+
+-static void lru_add_drain_per_cpu(struct work_struct *dummy)
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
-+{
+ {
+-	lru_add_drain();
 +	local_lock_on(swapvec_lock, cpu);
 +	lru_add_drain_cpu(cpu);
 +	local_unlock_on(swapvec_lock, cpu);
-+}
-+
+ }
+ 
+-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 +#else
-+
- static void lru_add_drain_per_cpu(struct work_struct *dummy)
- {
- 	lru_add_drain();
+ 
+ /*
+  * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+@@ -692,6 +701,22 @@ static int __init lru_init(void)
  }
+ early_initcall(lru_init);
  
- static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++static void lru_add_drain_per_cpu(struct work_struct *dummy)
++{
++	lru_add_drain();
++}
++
++static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
 +{
 +	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 +
 +	INIT_WORK(work, lru_add_drain_per_cpu);
-+	schedule_work_on(cpu, work);
++	queue_work_on(cpu, lru_add_drain_wq, work);
 +	cpumask_set_cpu(cpu, has_work);
 +}
 +#endif
- 
++
  void lru_add_drain_all(void)
  {
-@@ -684,21 +710,18 @@ void lru_add_drain_all(void)
+ 	static DEFINE_MUTEX(lock);
+@@ -703,21 +728,18 @@ void lru_add_drain_all(void)
  	cpumask_clear(&has_work);
  
  	for_each_online_cpu(cpu) {
@@ -86,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 -		    need_activate_page_drain(cpu)) {
 -			INIT_WORK(work, lru_add_drain_per_cpu);
--			schedule_work_on(cpu, work);
+-			queue_work_on(cpu, lru_add_drain_wq, work);
 -			cpumask_set_cpu(cpu, &has_work);
 -		}
 +		    need_activate_page_drain(cpu))
diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
index a53c3d5..ff14342 100644
--- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
+++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Tue, 15 May 2012 13:53:56 +0800
 Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use preempt_*_rt instead of local_irq_*_rt, otherwise there will be a
 warning on ARM like below:
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/exec.c
 +++ b/fs/exec.c
-@@ -961,12 +961,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1012,12 +1012,14 @@ static int exec_mmap(struct mm_struct *m
  		}
  	}
  	task_lock(tsk);
diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
index fc09d18..0c784b3 100644
--- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
 Subject: mm, rt: kmap_atomic scheduling
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 In fact, with migrate_disable() existing, one could play games with
 kmap_atomic. You could save/restore the kmap_atomic slots on context
@@ -230,7 +230,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
  
  #include <asm/page.h>
  #include <asm/ptrace.h>
-@@ -1883,6 +1884,12 @@ struct task_struct {
+@@ -1954,6 +1955,12 @@ struct task_struct {
  	int softirq_nestcnt;
  	unsigned int softirqs_raised;
  #endif
diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 6ec588d..5e74603 100644
--- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:34 -0500
 Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The local_irq_save() is not only used to get things done "fast" but
 also to ensure that in case of SG_MITER_ATOMIC we are in "atomic"
diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
index f6b5e78..b8cfcd6 100644
--- a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: mm/vmalloc: Another preempt disable region which sucks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Avoid the preempt disable version of get_cpu_var(). The inner-lock should
 provide enough serialisation.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
-@@ -819,7 +819,7 @@ static void *new_vmap_block(unsigned int
+@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int
  	struct vmap_block *vb;
  	struct vmap_area *va;
  	unsigned long vb_idx;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *vaddr;
  
  	node = numa_node_id();
-@@ -862,11 +862,12 @@ static void *new_vmap_block(unsigned int
+@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int
  	BUG_ON(err);
  	radix_tree_preload_end();
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return vaddr;
  }
-@@ -935,6 +936,7 @@ static void *vb_alloc(unsigned long size
+@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size
  	struct vmap_block *vb;
  	void *vaddr = NULL;
  	unsigned int order;
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	BUG_ON(offset_in_page(size));
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -949,7 +951,8 @@ static void *vb_alloc(unsigned long size
+@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size
  	order = get_order(size);
  
  	rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  		unsigned long pages_off;
  
-@@ -972,7 +975,7 @@ static void *vb_alloc(unsigned long size
+@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size
  		break;
  	}
  
diff --git a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 00932cb..b86d644 100644
--- a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 17:19:44 +0100
 Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 workingset_shadow_nodes is protected by local_irq_disable(). Some users
 use spin_lock_irq().
 Replace the irq-off protection with a local_lock and rename the list to
 __workingset_shadow_nodes so I catch users of it which will be introduced
 later.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  include/linux/swap.h |    4 +++-
- mm/filemap.c         |   11 ++++++++---
+ mm/filemap.c         |   13 +++++++++----
  mm/truncate.c        |    7 +++++--
  mm/workingset.c      |   23 ++++++++++++-----------
- 4 files changed, 28 insertions(+), 17 deletions(-)
+ 4 files changed, 29 insertions(+), 18 deletions(-)
 
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #include <asm/page.h>
  
  struct notifier_block;
-@@ -252,7 +253,8 @@ struct swap_info_struct {
+@@ -243,7 +244,8 @@ struct swap_info_struct {
  void *workingset_eviction(struct address_space *mapping, struct page *page);
  bool workingset_refault(void *shadow);
  void workingset_activation(struct page *page);
@@ -38,18 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
-@@ -169,7 +169,9 @@ static void page_cache_tree_delete(struc
- 	if (!workingset_node_pages(node) &&
- 	    list_empty(&node->private_list)) {
- 		node->private_data = mapping;
--		list_lru_add(&workingset_shadow_nodes, &node->private_list);
-+		local_lock(workingset_shadow_lock);
-+		list_lru_add(&__workingset_shadow_nodes, &node->private_list);
-+		local_unlock(workingset_shadow_lock);
- 	}
- }
- 
-@@ -618,9 +620,12 @@ static int page_cache_tree_insert(struct
+@@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct
  		 * node->private_list is protected by
  		 * mapping->tree_lock.
  		 */
@@ -64,26 +53,39 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  	return 0;
  }
---- a/mm/truncate.c
-+++ b/mm/truncate.c
-@@ -63,9 +63,12 @@ static void clear_exceptional_entry(stru
- 		 * protected by mapping->tree_lock.
- 		 */
- 		if (!workingset_node_shadows(node) &&
--		    !list_empty(&node->private_list))
--			list_lru_del(&workingset_shadow_nodes,
-+		    !list_empty(&node->private_list)) {
+@@ -217,8 +220,10 @@ static void page_cache_tree_delete(struc
+ 		if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
+ 				list_empty(&node->private_list)) {
+ 			node->private_data = mapping;
+-			list_lru_add(&workingset_shadow_nodes,
+-					&node->private_list);
 +			local_lock(workingset_shadow_lock);
-+			list_lru_del(&__workingset_shadow_nodes,
- 					&node->private_list);
++			list_lru_add(&__workingset_shadow_nodes,
++				     &node->private_list);
 +			local_unlock(workingset_shadow_lock);
-+		}
- 		__radix_tree_delete_node(&mapping->page_tree, node);
+ 		}
  	}
+ 
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -62,9 +62,12 @@ static void clear_exceptional_entry(stru
+ 	 * protected by mapping->tree_lock.
+ 	 */
+ 	if (!workingset_node_shadows(node) &&
+-	    !list_empty(&node->private_list))
+-		list_lru_del(&workingset_shadow_nodes,
++	    !list_empty(&node->private_list)) {
++		local_lock(workingset_shadow_lock);
++		list_lru_del(&__workingset_shadow_nodes,
+ 				&node->private_list);
++		local_unlock(workingset_shadow_lock);
++	}
+ 	__radix_tree_delete_node(&mapping->page_tree, node);
  unlock:
+ 	spin_unlock_irq(&mapping->tree_lock);
 --- a/mm/workingset.c
 +++ b/mm/workingset.c
-@@ -335,7 +335,8 @@ void workingset_activation(struct page *
+@@ -334,7 +334,8 @@ void workingset_activation(struct page *
   * point where they would still be useful.
   */
  
@@ -93,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static unsigned long count_shadow_nodes(struct shrinker *shrinker,
  					struct shrink_control *sc)
-@@ -345,9 +346,9 @@ static unsigned long count_shadow_nodes(
+@@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(
  	unsigned long pages;
  
  	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -104,9 +106,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
 +	local_unlock_irq(workingset_shadow_lock);
  
- 	if (memcg_kmem_enabled())
+ 	if (memcg_kmem_enabled()) {
  		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-@@ -440,9 +441,9 @@ static enum lru_status shadow_lru_isolat
+@@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolat
  	spin_unlock(&mapping->tree_lock);
  	ret = LRU_REMOVED_RETRY;
  out:
@@ -118,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_lock(lru_lock);
  	return ret;
  }
-@@ -453,10 +454,10 @@ static unsigned long scan_shadow_nodes(s
+@@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(s
  	unsigned long ret;
  
  	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -132,8 +134,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return ret;
  }
  
-@@ -494,7 +495,7 @@ static int __init workingset_init(void)
- 	printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+@@ -492,7 +493,7 @@ static int __init workingset_init(void)
+ 	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
  	       timestamp_bits, max_order, bucket_order);
  
 -	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
@@ -141,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (ret)
  		goto err;
  	ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -502,7 +503,7 @@ static int __init workingset_init(void)
+@@ -500,7 +501,7 @@ static int __init workingset_init(void)
  		goto err_list_lru;
  	return 0;
  err_list_lru:
diff --git a/debian/patches/features/all/rt/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch b/debian/patches/features/all/rt/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
index d70735b..14b329f 100644
--- a/debian/patches/features/all/rt/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
+++ b/debian/patches/features/all/rt/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
@@ -1,35 +1,165 @@
-From 1fd1b32ad881496d3a3b4caac77965555cc021b0 Mon Sep 17 00:00:00 2001
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 22 Mar 2016 11:16:09 +0100
 Subject: [PATCH] mm/zsmalloc: Use get/put_cpu_light in
  zs_map_object()/zs_unmap_object()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Otherwise, we get a ___might_sleep() splat.
 
+
 Signed-off-by: Mike Galbraith <umgwanakikbuti at gmail.com>
+[bigeasy: replace the bit_spin_lock() with a mutex]
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- mm/zsmalloc.c |    4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
+ mm/zsmalloc.c |   73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 69 insertions(+), 4 deletions(-)
 
 --- a/mm/zsmalloc.c
 +++ b/mm/zsmalloc.c
-@@ -1292,7 +1292,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -71,7 +71,19 @@
+ #define ZS_MAX_ZSPAGE_ORDER 2
+ #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++struct zsmalloc_handle {
++	unsigned long addr;
++	struct mutex lock;
++};
++
++#define ZS_HANDLE_SIZE (sizeof(struct zsmalloc_handle))
++
++#else
++
+ #define ZS_HANDLE_SIZE (sizeof(unsigned long))
++#endif
+ 
+ /*
+  * Object location (<PFN>, <obj_idx>) is encoded as
+@@ -351,9 +363,26 @@ static void destroy_cache(struct zs_pool
+ 
+ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+ {
+-	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++	void *p;
++
++	p = kmem_cache_alloc(pool->handle_cachep,
++			     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++#ifdef CONFIG_PREEMPT_RT_BASE
++	if (p) {
++		struct zsmalloc_handle *zh = p;
++
++		mutex_init(&zh->lock);
++	}
++#endif
++	return (unsigned long)p;
++}
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
++{
++	return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
+ }
++#endif
+ 
+ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+ {
+@@ -373,12 +402,18 @@ static void cache_free_zspage(struct zs_
+ 
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	WRITE_ONCE(zh->addr, obj);
++#else
+ 	/*
+ 	 * lsb of @obj represents handle lock while other bits
+ 	 * represent object value the handle is pointing so
+ 	 * updating shouldn't do store tearing.
+ 	 */
+ 	WRITE_ONCE(*(unsigned long *)handle, obj);
++#endif
+ }
+ 
+ /* zpool driver */
+@@ -902,7 +937,13 @@ static unsigned long location_to_obj(str
+ 
+ static unsigned long handle_to_obj(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return zh->addr;
++#else
+ 	return *(unsigned long *)handle;
++#endif
+ }
+ 
+ static unsigned long obj_to_head(struct page *page, void *obj)
+@@ -916,22 +957,46 @@ static unsigned long obj_to_head(struct
+ 
+ static inline int testpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_is_locked(&zh->lock);
++#else
+ 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static inline int trypin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_trylock(&zh->lock);
++#else
+ 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void pin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_lock(&zh->lock);
++#else
+ 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void unpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_unlock(&zh->lock);
++#else
+ 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void reset_page(struct page *page)
+@@ -1423,7 +1488,7 @@ void *zs_map_object(struct zs_pool *pool
  	class = pool->size_class[class_idx];
- 	off = obj_idx_to_offset(page, obj_idx, class->size);
+ 	off = (class->size * obj_idx) & ~PAGE_MASK;
  
 -	area = &get_cpu_var(zs_map_area);
 +	area = per_cpu_ptr(&zs_map_area, get_cpu_light());
  	area->vm_mm = mm;
  	if (off + class->size <= PAGE_SIZE) {
  		/* this object is contained entirely within a page */
-@@ -1345,7 +1345,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1477,7 +1542,7 @@ void zs_unmap_object(struct zs_pool *poo
  
  		__zs_unmap_object(area, pages, off, class->size);
  	}
 -	put_cpu_var(zs_map_area);
 +	put_cpu_light();
+ 
+ 	migrate_read_unlock(zspage);
  	unpin_tag(handle);
- }
- EXPORT_SYMBOL_GPL(zs_unmap_object);
diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
index fc708ba..4789298 100644
--- a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
+++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
@@ -1,7 +1,7 @@
 Subject: mmci: Remove bogus local_irq_save()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:11:12 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
On !RT the interrupt handler runs with interrupts disabled. On RT it runs
in a thread, so there is no need to disable interrupts at all.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/mmc/host/mmci.c
 +++ b/drivers/mmc/host/mmci.c
-@@ -1155,15 +1155,12 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq,
  	struct sg_mapping_iter *sg_miter = &host->sg_miter;
  	struct variant_data *variant = host->variant;
  	void __iomem *base = host->base;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	do {
  		unsigned int remain, len;
  		char *buffer;
-@@ -1203,8 +1200,6 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq,
  
  	sg_miter_stop(sg_miter);
  
diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
index 1f9687b..cbc72f6 100644
--- a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
+++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
@@ -1,7 +1,7 @@
 Date: Wed, 26 Jun 2013 15:28:11 -0400
 From: Steven Rostedt <rostedt at goodmis.org>
 Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The ntp code for notify_cmos_timer() is called from a hard interrupt
 context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks
@@ -19,60 +19,44 @@ a notifier on boot up for your check and wake up the thread when
 needed. This will be a todo.
 
 Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
-
+[bigeasy: use swork_queue() instead of a helper thread]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- kernel/time/ntp.c |   43 +++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
+ kernel/time/ntp.c |   26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
 
 --- a/kernel/time/ntp.c
 +++ b/kernel/time/ntp.c
-@@ -10,6 +10,7 @@
- #include <linux/workqueue.h>
- #include <linux/hrtimer.h>
- #include <linux/jiffies.h>
-+#include <linux/kthread.h>
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
  #include <linux/math64.h>
- #include <linux/timex.h>
- #include <linux/time.h>
-@@ -568,10 +569,52 @@ static void sync_cmos_clock(struct work_
++#include <linux/swork.h>
+ 
+ #include "ntp_internal.h"
+ #include "timekeeping_internal.h"
+@@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_
  			   &sync_cmos_work, timespec64_to_jiffies(&next));
  }
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT can not call schedule_delayed_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *cmos_delay_thread;
-+static bool do_cmos_delay;
 +
-+static int run_cmos_delay(void *ignore)
++static void run_clock_set_delay(struct swork_event *event)
 +{
-+	while (!kthread_should_stop()) {
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		if (do_cmos_delay) {
-+			do_cmos_delay = false;
-+			queue_delayed_work(system_power_efficient_wq,
-+					   &sync_cmos_work, 0);
-+		}
-+		schedule();
-+	}
-+	__set_current_state(TASK_RUNNING);
-+	return 0;
++	queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
 +}
 +
++static struct swork_event ntp_cmos_swork;
++
 +void ntp_notify_cmos_timer(void)
 +{
-+	do_cmos_delay = true;
-+	/* Make visible before waking up process */
-+	smp_wmb();
-+	wake_up_process(cmos_delay_thread);
++	swork_queue(&ntp_cmos_swork);
 +}
 +
 +static __init int create_cmos_delay_thread(void)
 +{
-+	cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
-+	BUG_ON(!cmos_delay_thread);
++	WARN_ON(swork_get());
++	INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
 +	return 0;
 +}
 +early_initcall(create_cmos_delay_thread);
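swork ("simple work") is an RT-patch-set facility, not mainline: linux/swork.h provides a single kernel thread to which events can be queued from hard interrupt context, where schedule_delayed_work() is not safe on RT. The usage pattern this patch follows, reduced to a sketch with hypothetical names:

#include <linux/swork.h>        /* RT patch set only, not mainline */

static void my_deferred_work(struct swork_event *event)
{
        /* runs in task context; sleeping locks are fine here */
}

static struct swork_event my_event;

static __init int my_swork_init(void)
{
        WARN_ON(swork_get());   /* make sure the swork thread is up */
        INIT_SWORK(&my_event, my_deferred_work);
        return 0;
}
early_initcall(my_swork_init);

/* called from hard interrupt context: */
static void my_irq_path(void)
{
        swork_queue(&my_event); /* just queues; takes no sleeping locks */
}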
diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
index 13cfa49..3264a75 100644
--- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
+++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:51:45 +0200
 Subject: locking: Disable spin on owner for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Drop spin on owner for mutex / rwsem. We are most likely not using it
 but…
diff --git a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
new file mode 100644
index 0000000..07888ec
--- /dev/null
+++ b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -0,0 +1,274 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Sep 2016 17:36:35 +0200
+Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+The seqcount disables preemption on -RT while it is held, which we can't
+remove. Also we don't want the reader to spin for ages if the writer is
+scheduled out. The seqlock, on the other hand, will serialize / sleep on
+the lock while the writer is active.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/seqlock.h    |    9 +++++++++
+ include/net/gen_stats.h    |    9 +++++----
+ include/net/net_seq_lock.h |   15 +++++++++++++++
+ include/net/sch_generic.h  |   21 ++++++++++++++++++---
+ net/core/gen_estimator.c   |    6 +++---
+ net/core/gen_stats.c       |    8 ++++----
+ net/sched/sch_api.c        |    2 +-
+ net/sched/sch_generic.c    |   12 ++++++++++++
+ 8 files changed, 67 insertions(+), 15 deletions(-)
+ create mode 100644 include/net/net_seq_lock.h
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -481,6 +481,15 @@ static inline void write_seqlock(seqlock
+ 	__raw_write_seqcount_begin(&sl->seqcount);
+ }
+ 
++static inline int try_write_seqlock(seqlock_t *sl)
++{
++	if (spin_trylock(&sl->lock)) {
++		__raw_write_seqcount_begin(&sl->seqcount);
++		return 1;
++	}
++	return 0;
++}
++
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+ 	__raw_write_seqcount_end(&sl->seqcount);
+--- a/include/net/gen_stats.h
++++ b/include/net/gen_stats.h
+@@ -5,6 +5,7 @@
+ #include <linux/socket.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/pkt_sched.h>
++#include <net/net_seq_lock.h>
+ 
+ struct gnet_stats_basic_cpu {
+ 	struct gnet_stats_basic_packed bstats;
+@@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct
+ 				 spinlock_t *lock, struct gnet_dump *d,
+ 				 int padattr);
+ 
+-int gnet_stats_copy_basic(const seqcount_t *running,
++int gnet_stats_copy_basic(net_seqlock_t *running,
+ 			  struct gnet_dump *d,
+ 			  struct gnet_stats_basic_cpu __percpu *cpu,
+ 			  struct gnet_stats_basic_packed *b);
+-void __gnet_stats_copy_basic(const seqcount_t *running,
++void __gnet_stats_copy_basic(net_seqlock_t *running,
+ 			     struct gnet_stats_basic_packed *bstats,
+ 			     struct gnet_stats_basic_cpu __percpu *cpu,
+ 			     struct gnet_stats_basic_packed *b);
+@@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_
+ 		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ 		      struct gnet_stats_rate_est64 *rate_est,
+ 		      spinlock_t *stats_lock,
+-		      seqcount_t *running, struct nlattr *opt);
++		      net_seqlock_t *running, struct nlattr *opt);
+ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+ 			struct gnet_stats_rate_est64 *rate_est);
+ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ 			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ 			  struct gnet_stats_rate_est64 *rate_est,
+ 			  spinlock_t *stats_lock,
+-			  seqcount_t *running, struct nlattr *opt);
++			  net_seqlock_t *running, struct nlattr *opt);
+ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+ 			  const struct gnet_stats_rate_est64 *rate_est);
+ #endif
+--- /dev/null
++++ b/include/net/net_seq_lock.h
+@@ -0,0 +1,15 @@
++#ifndef __NET_NET_SEQ_LOCK_H__
++#define __NET_NET_SEQ_LOCK_H__
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define net_seqlock_t			seqlock_t
++# define net_seq_begin(__r)		read_seqbegin(__r)
++# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
++
++#else
++# define net_seqlock_t			seqcount_t
++# define net_seq_begin(__r)		read_seqcount_begin(__r)
++# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
++#endif
++
++#endif
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -10,6 +10,7 @@
+ #include <linux/dynamic_queue_limits.h>
+ #include <net/gen_stats.h>
+ #include <net/rtnetlink.h>
++#include <net/net_seq_lock.h>
+ 
+ struct Qdisc_ops;
+ struct qdisc_walker;
+@@ -78,7 +79,7 @@ struct Qdisc {
+ 	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
+ 	struct sk_buff_head	q;
+ 	struct gnet_stats_basic_packed bstats;
+-	seqcount_t		running;
++	net_seqlock_t		running;
+ 	struct gnet_stats_queue	qstats;
+ 	unsigned long		state;
+ 	struct Qdisc            *next_sched;
+@@ -90,13 +91,22 @@ struct Qdisc {
+ 	spinlock_t		busylock ____cacheline_aligned_in_smp;
+ };
+ 
+-static inline bool qdisc_is_running(const struct Qdisc *qdisc)
++static inline bool qdisc_is_running(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	return spin_is_locked(&qdisc->running.lock) ? true : false;
++#else
+ 	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
++#endif
+ }
+ 
+ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	if (try_write_seqlock(&qdisc->running))
++		return true;
++	return false;
++#else
+ 	if (qdisc_is_running(qdisc))
+ 		return false;
+ 	/* Variant of write_seqcount_begin() telling lockdep a trylock
+@@ -105,11 +115,16 @@ static inline bool qdisc_run_begin(struc
+ 	raw_write_seqcount_begin(&qdisc->running);
+ 	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
+ 	return true;
++#endif
+ }
+ 
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	write_sequnlock(&qdisc->running);
++#else
+ 	write_seqcount_end(&qdisc->running);
++#endif
+ }
+ 
+ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+@@ -300,7 +315,7 @@ static inline spinlock_t *qdisc_root_sle
+ 	return qdisc_lock(root);
+ }
+ 
+-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+ {
+ 	struct Qdisc *root = qdisc_root_sleeping(qdisc);
+ 
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -84,7 +84,7 @@ struct gen_estimator
+ 	struct gnet_stats_basic_packed	*bstats;
+ 	struct gnet_stats_rate_est64	*rate_est;
+ 	spinlock_t		*stats_lock;
+-	seqcount_t		*running;
++	net_seqlock_t		*running;
+ 	int			ewma_log;
+ 	u32			last_packets;
+ 	unsigned long		avpps;
+@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_
+ 		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ 		      struct gnet_stats_rate_est64 *rate_est,
+ 		      spinlock_t *stats_lock,
+-		      seqcount_t *running,
++		      net_seqlock_t *running,
+ 		      struct nlattr *opt)
+ {
+ 	struct gen_estimator *est;
+@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_st
+ 			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ 			  struct gnet_stats_rate_est64 *rate_est,
+ 			  spinlock_t *stats_lock,
+-			  seqcount_t *running, struct nlattr *opt)
++			  net_seqlock_t *running, struct nlattr *opt)
+ {
+ 	gen_kill_estimator(bstats, rate_est);
+ 	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -130,7 +130,7 @@ static void
+ }
+ 
+ void
+-__gnet_stats_copy_basic(const seqcount_t *running,
++__gnet_stats_copy_basic(net_seqlock_t *running,
+ 			struct gnet_stats_basic_packed *bstats,
+ 			struct gnet_stats_basic_cpu __percpu *cpu,
+ 			struct gnet_stats_basic_packed *b)
+@@ -143,10 +143,10 @@ void
+ 	}
+ 	do {
+ 		if (running)
+-			seq = read_seqcount_begin(running);
++			seq = net_seq_begin(running);
+ 		bstats->bytes = b->bytes;
+ 		bstats->packets = b->packets;
+-	} while (running && read_seqcount_retry(running, seq));
++	} while (running && net_seq_retry(running, seq));
+ }
+ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+ 
+@@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+  * if the room in the socket buffer was not sufficient.
+  */
+ int
+-gnet_stats_copy_basic(const seqcount_t *running,
++gnet_stats_copy_basic(net_seqlock_t *running,
+ 		      struct gnet_dump *d,
+ 		      struct gnet_stats_basic_cpu __percpu *cpu,
+ 		      struct gnet_stats_basic_packed *b)
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -975,7 +975,7 @@ qdisc_create(struct net_device *dev, str
+ 			rcu_assign_pointer(sch->stab, stab);
+ 		}
+ 		if (tca[TCA_RATE]) {
+-			seqcount_t *running;
++			net_seqlock_t *running;
+ 
+ 			err = -EOPNOTSUPP;
+ 			if (sch->flags & TCQ_F_MQROOT)
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -426,7 +426,11 @@ struct Qdisc noop_qdisc = {
+ 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+ 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ 	.dev_queue	=	&noop_netdev_queue,
++#ifdef CONFIG_PREEMPT_RT_BASE
++	.running	=	__SEQLOCK_UNLOCKED(noop_qdisc.running),
++#else
+ 	.running	=	SEQCNT_ZERO(noop_qdisc.running),
++#endif
+ 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ };
+ EXPORT_SYMBOL(noop_qdisc);
+@@ -620,9 +624,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
+ 	lockdep_set_class(&sch->busylock,
+ 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++	seqlock_init(&sch->running);
++	lockdep_set_class(&sch->running.seqcount,
++			  dev->qdisc_running_key ?: &qdisc_running_key);
++	lockdep_set_class(&sch->running.lock,
++			  dev->qdisc_running_key ?: &qdisc_running_key);
++#else
+ 	seqcount_init(&sch->running);
+ 	lockdep_set_class(&sch->running,
+ 			  dev->qdisc_running_key ?: &qdisc_running_key);
++#endif
+ 
+ 	sch->ops = ops;
+ 	sch->enqueue = ops->enqueue;
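With the net_seq_* wrappers the reader side keeps one shape in both configurations: on -RT it reads a seqlock, otherwise a plain seqcount. A sketch of how a stats reader ends up looking, modeled on the __gnet_stats_copy_basic() hunk above (the helper name is made up):

static u64 read_bytes(net_seqlock_t *running,
                      const struct gnet_stats_basic_packed *b)
{
        unsigned int seq;
        u64 bytes;

        do {
                seq = net_seq_begin(running);   /* seqlock on -RT, else seqcount */
                bytes = b->bytes;
        } while (net_seq_retry(running, seq));  /* retry if a writer raced */

        return bytes;
}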
diff --git a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
new file mode 100644
index 0000000..8edf16b
--- /dev/null
+++ b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
@@ -0,0 +1,73 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 31 Aug 2016 17:54:09 +0200
+Subject: [PATCH] net: add a lock around icmp_sk()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+It looks like the this_cpu_ptr() access in icmp_sk() is protected with
+local_bh_disable(). To avoid missing serialization on -RT I am adding
+a local lock here. No crash has been observed; this is just a precaution.
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ net/ipv4/icmp.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -77,6 +77,7 @@
+ #include <linux/string.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/snmp.h>
+ #include <net/ip.h>
+ #include <net/route.h>
+@@ -204,6 +205,8 @@ static const struct icmp_control icmp_po
+  *
+  *	On SMP we have one ICMP socket per-cpu.
+  */
++static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
++
+ static struct sock *icmp_sk(struct net *net)
+ {
+ 	return *this_cpu_ptr(net->ipv4.icmp_sk);
+@@ -215,12 +218,14 @@ static inline struct sock *icmp_xmit_loc
+ 
+ 	local_bh_disable();
+ 
++	local_lock(icmp_sk_lock);
+ 	sk = icmp_sk(net);
+ 
+ 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
+ 		/* This can happen if the output path signals a
+ 		 * dst_link_failure() for an outgoing ICMP packet.
+ 		 */
++		local_unlock(icmp_sk_lock);
+ 		local_bh_enable();
+ 		return NULL;
+ 	}
+@@ -230,6 +235,7 @@ static inline struct sock *icmp_xmit_loc
+ static inline void icmp_xmit_unlock(struct sock *sk)
+ {
+ 	spin_unlock_bh(&sk->sk_lock.slock);
++	local_unlock(icmp_sk_lock);
+ }
+ 
+ int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
+@@ -358,6 +364,7 @@ static void icmp_push_reply(struct icmp_
+ 	struct sock *sk;
+ 	struct sk_buff *skb;
+ 
++	local_lock(icmp_sk_lock);
+ 	sk = icmp_sk(dev_net((*rt)->dst.dev));
+ 	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
+ 			   icmp_param->data_len+icmp_param->head_len,
+@@ -380,6 +387,7 @@ static void icmp_push_reply(struct icmp_
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 		ip_push_pending_frames(sk, fl4);
+ 	}
++	local_unlock(icmp_sk_lock);
+ }
+ 
+ /*
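DEFINE_LOCAL_IRQ_LOCK()/local_lock() come from linux/locallock.h in the RT patch set: on -RT they are a per-CPU sleeping lock, on !RT they compile down to the usual preemption/interrupt disabling, so the annotation costs nothing mainline. The general idiom for serializing per-CPU data with it, with hypothetical names:

#include <linux/locallock.h>    /* RT patch set */
#include <linux/percpu.h>

struct my_cache { int val; };

static DEFINE_PER_CPU(struct my_cache, my_cache);
static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

static void my_cache_update(int v)
{
        struct my_cache *c;

        local_lock(my_cache_lock);      /* serializes against this CPU only */
        c = this_cpu_ptr(&my_cache);
        c->val = v;
        local_unlock(my_cache_lock);
}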
diff --git a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
new file mode 100644
index 0000000..bd341e6
--- /dev/null
+++ b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -0,0 +1,94 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 31 Aug 2016 17:21:56 +0200
+Subject: [PATCH] net: add back the missing serialization in
+ ip_send_unicast_reply()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Some time ago Sami Pietikäinen reported a crash on -RT in
+ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire
+(v3.12.8-rt11). Later (v3.18.8) the code was reworked and I dropped the
+patch. As it turns out, that was a mistake.
+I have reports that the same crash is possible with a similar backtrace.
+It seems that vanilla protects access to this_cpu_ptr() via
+local_bh_disable(). This does not work on -RT since we can have
+NET_RX and NET_TX running in parallel on the same CPU.
+This brings back the old locks.
+
+|Unable to handle kernel NULL pointer dereference at virtual address 00000010
+|PC is at __ip_make_skb+0x198/0x3e8
+|[<c04e39d8>] (__ip_make_skb) from [<c04e3ca8>] (ip_push_pending_frames+0x20/0x40)
+|[<c04e3ca8>] (ip_push_pending_frames) from [<c04e3ff0>] (ip_send_unicast_reply+0x210/0x22c)
+|[<c04e3ff0>] (ip_send_unicast_reply) from [<c04fbb54>] (tcp_v4_send_reset+0x190/0x1c0)
+|[<c04fbb54>] (tcp_v4_send_reset) from [<c04fcc1c>] (tcp_v4_do_rcv+0x22c/0x288)
+|[<c04fcc1c>] (tcp_v4_do_rcv) from [<c0474364>] (release_sock+0xb4/0x150)
+|[<c0474364>] (release_sock) from [<c04ed904>] (tcp_close+0x240/0x454)
+|[<c04ed904>] (tcp_close) from [<c0511408>] (inet_release+0x74/0x7c)
+|[<c0511408>] (inet_release) from [<c0470728>] (sock_release+0x30/0xb0)
+|[<c0470728>] (sock_release) from [<c0470abc>] (sock_close+0x1c/0x24)
+|[<c0470abc>] (sock_close) from [<c0115ec4>] (__fput+0xe8/0x20c)
+|[<c0115ec4>] (__fput) from [<c0116050>] (____fput+0x18/0x1c)
+|[<c0116050>] (____fput) from [<c0058138>] (task_work_run+0xa4/0xb8)
+|[<c0058138>] (task_work_run) from [<c0011478>] (do_work_pending+0xd0/0xe4)
+|[<c0011478>] (do_work_pending) from [<c000e740>] (work_pending+0xc/0x20)
+|Code: e3530001 8a000001 e3a00040 ea000011 (e5973010)
+
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ net/ipv4/tcp_ipv4.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -62,6 +62,7 @@
+ #include <linux/init.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ 
+ #include <net/net_namespace.h>
+ #include <net/icmp.h>
+@@ -565,6 +566,7 @@ void tcp_v4_send_check(struct sock *sk,
+ }
+ EXPORT_SYMBOL(tcp_v4_send_check);
+ 
++static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
+ /*
+  *	This routine will send an RST to the other tcp.
+  *
+@@ -692,6 +694,8 @@ static void tcp_v4_send_reset(const stru
+ 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
+ 
+ 	arg.tos = ip_hdr(skb)->tos;
++
++	local_lock(tcp_sk_lock);
+ 	local_bh_disable();
+ 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -701,6 +705,7 @@ static void tcp_v4_send_reset(const stru
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ 	local_bh_enable();
++	local_unlock(tcp_sk_lock);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ out:
+@@ -776,6 +781,7 @@ static void tcp_v4_send_ack(struct net *
+ 	if (oif)
+ 		arg.bound_dev_if = oif;
+ 	arg.tos = tos;
++	local_lock(tcp_sk_lock);
+ 	local_bh_disable();
+ 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -784,6 +790,7 @@ static void tcp_v4_send_ack(struct net *
+ 
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ 	local_bh_enable();
++	local_unlock(tcp_sk_lock);
+ }
+ 
+ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
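This is the same local-lock idiom as in the icmp_sk() patch above: local_bh_disable() no longer implies per-CPU exclusion on -RT, so a local lock is layered on top of it; see the sketch following that patch.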
diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
index a2c12a7..38b4521 100644
--- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 26 Sep 2012 16:21:08 +0200
 Subject: net: Another local_irq_disable/kmalloc headache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
Replace it with a local lock, though that's pretty inefficient :(
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
-@@ -63,6 +63,7 @@
+@@ -64,6 +64,7 @@
  #include <linux/errqueue.h>
  #include <linux/prefetch.h>
  #include <linux/if_vlan.h>
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #include <net/protocol.h>
  #include <net/dst.h>
-@@ -359,6 +360,7 @@ struct napi_alloc_cache {
+@@ -360,6 +361,7 @@ struct napi_alloc_cache {
  
  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
  static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
-@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -367,10 +369,10 @@ static void *__netdev_alloc_frag(unsigne
  	unsigned long flags;
  	void *data;
  
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return data;
  }
  
-@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -438,13 +440,13 @@ struct sk_buff *__netdev_alloc_skb(struc
  	if (sk_memalloc_socks())
  		gfp_mask |= __GFP_MEMALLOC;
  
diff --git a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index c32448f..4852573 100644
--- a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -1,7 +1,7 @@
 Subject: net/core/cpuhotplug: Drain input_pkt_queue lockless
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 9 Oct 2015 09:25:49 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 I can constantly see below error report with 4.1 RT-kernel on TI ARM dra7-evm 
 if I'm trying to unplug cpu1:
@@ -36,7 +36,7 @@ Cc: stable-rt at vger.kernel.org
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -7789,7 +7789,7 @@ static int dev_cpu_callback(struct notif
+@@ -7991,7 +7991,7 @@ static int dev_cpu_callback(struct notif
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
diff --git a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index c362106..562e162 100644
--- a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 15 Jan 2016 16:33:34 +0100
 Subject: net/core: protect users of napi_alloc_cache against
  reentrance
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
On -RT the code running in BH cannot be moved to another CPU, so CPU
local variables remain local. However the code can be preempted
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
-@@ -361,6 +361,7 @@ struct napi_alloc_cache {
+@@ -362,6 +362,7 @@ struct napi_alloc_cache {
  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
  static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
  static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
-@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -391,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
  
  static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  void *napi_alloc_frag(unsigned int fragsz)
-@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -487,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
  struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
  				 gfp_t gfp_mask)
  {
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	len += NET_SKB_PAD + NET_IP_ALIGN;
  
-@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -507,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct
  	if (sk_memalloc_socks())
  		gfp_mask |= __GFP_MEMALLOC;
  
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (unlikely(!data))
  		return NULL;
  
-@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -518,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct
  	}
  
  	/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		skb->pfmemalloc = 1;
  	skb->head_frag = 1;
  
-@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
+@@ -762,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
  
  void __kfree_skb_flush(void)
  {
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* record skb to CPU local list */
  	nc->skb_cache[nc->skb_count++] = skb;
  
-@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
+@@ -793,6 +805,7 @@ static inline void _kfree_skb_defer(stru
  				     nc->skb_cache);
  		nc->skb_count = 0;
  	}
diff --git a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 195535d..a347941 100644
--- a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 30 Mar 2016 13:36:29 +0200
 Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The root-lock is dropped before dev_hard_start_xmit() is invoked and after
 setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
@@ -21,8 +21,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3037,7 +3037,11 @@ static inline int __dev_xmit_skb(struct
- 	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
+@@ -3084,7 +3084,11 @@ static inline int __dev_xmit_skb(struct
+ 	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
 +#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 9a02ee9..9d47e43 100644
--- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -1,7 +1,7 @@
 Subject: net: netfilter: Serialize xt_write_recseq sections on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 11:18:08 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The netfilter code relies only on the implicit semantics of
local_bh_disable() for serializing xt_write_recseq sections. RT breaks
@@ -24,8 +24,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#include <linux/locallock.h>
  #include <uapi/linux/netfilter/x_tables.h>
  
- /**
-@@ -285,6 +286,8 @@ void xt_free_table_info(struct xt_table_
+ /* Test a struct->invflags and a boolean for inequality */
+@@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_
   */
  DECLARE_PER_CPU(seqcount_t, xt_recseq);
  
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* xt_tee_enabled - true if x_tables needs to handle reentrancy
   *
   * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -305,6 +308,9 @@ static inline unsigned int xt_write_recs
+@@ -320,6 +323,9 @@ static inline unsigned int xt_write_recs
  {
  	unsigned int addend;
  
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Low order bit of sequence is set if we already
  	 * called xt_write_recseq_begin().
-@@ -335,6 +341,7 @@ static inline void xt_write_recseq_end(u
+@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(u
  	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
  	smp_wmb();
  	__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
index 724701e..dac699b 100644
--- a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
+++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Mar 2013 18:06:20 +0100
 Subject: net: Add a mutex around devnet_rename_seq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On RT write_seqcount_begin() disables preemption and device_rename()
allocates memory with GFP_KERNEL and later grabs the sysfs_mutex
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -188,6 +188,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPU
  static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
  
  static seqcount_t devnet_rename_seq;
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static inline void dev_base_seq_inc(struct net *net)
  {
-@@ -886,7 +887,8 @@ int netdev_get_name(struct net *net, cha
+@@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, cha
  	strcpy(name, dev->name);
  	rcu_read_unlock();
  	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		goto retry;
  	}
  
-@@ -1155,20 +1157,17 @@ int dev_change_name(struct net_device *d
+@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *d
  	if (dev->flags & IFF_UP)
  		return -EBUSY;
  
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (oldname[0] && !strchr(oldname, '%'))
  		netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1181,11 +1180,12 @@ int dev_change_name(struct net_device *d
+@@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *d
  	if (ret) {
  		memcpy(dev->name, oldname, IFNAMSIZ);
  		dev->name_assign_type = old_assign_type;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	netdev_adjacent_rename_links(dev, oldname);
  
-@@ -1206,7 +1206,8 @@ int dev_change_name(struct net_device *d
+@@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *d
  		/* err >= 0 after dev_alloc_name() or stores the first errno */
  		if (err >= 0) {
  			err = ret;
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			memcpy(dev->name, oldname, IFNAMSIZ);
  			memcpy(oldname, newname, IFNAMSIZ);
  			dev->name_assign_type = old_assign_type;
-@@ -1219,6 +1220,11 @@ int dev_change_name(struct net_device *d
+@@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *d
  	}
  
  	return err;
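The shape of this fix is the common "seqcount guarded by a sleeping lock" idiom: the writer takes a mutex around the seqcount write section, and a reader that detects a retry blocks briefly on that mutex instead of spinning against a possibly preempted writer. A condensed sketch (the -RT details such as the raw seqcount begin/end variants are omitted):

static seqcount_t rename_seq = SEQCNT_ZERO(rename_seq);
static DEFINE_MUTEX(rename_mutex);

static void set_name(struct net_device *dev, const char *newname)
{
        mutex_lock(&rename_mutex);
        write_seqcount_begin(&rename_seq);
        strlcpy(dev->name, newname, IFNAMSIZ);
        write_seqcount_end(&rename_seq);
        mutex_unlock(&rename_mutex);
}

static void get_name(struct net_device *dev, char *name)
{
        unsigned int seq;

retry:
        seq = read_seqcount_begin(&rename_seq);
        strlcpy(name, dev->name, IFNAMSIZ);
        if (read_seqcount_retry(&rename_seq, seq)) {
                mutex_lock(&rename_mutex);      /* wait for the writer... */
                mutex_unlock(&rename_mutex);    /* ...instead of spinning */
                goto retry;
        }
}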
diff --git a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 0f10736..5a5ff07 100644
--- a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 13 Jan 2016 15:55:02 +0100
 Subject: net: move xmit_recursion to per-task variable on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 A softirq on -RT can be preempted. That means one task is in
 __dev_queue_xmit(), gets preempted and another task may enter
@@ -16,73 +16,49 @@ the recursion properly on -RT.
 Cc: stable-rt at vger.kernel.org
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- include/linux/netdevice.h |    9 +++++++++
+ include/linux/netdevice.h |   41 ++++++++++++++++++++++++++++++++++++++++-
  include/linux/sched.h     |    3 +++
- net/core/dev.c            |   41 ++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 50 insertions(+), 3 deletions(-)
+ net/core/dev.c            |    9 +++++----
+ net/core/filter.c         |    6 +++---
+ 4 files changed, 51 insertions(+), 8 deletions(-)
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2396,11 +2396,20 @@ void netdev_freemem(struct net_device *d
+@@ -2409,14 +2409,53 @@ void netdev_freemem(struct net_device *d
  void synchronize_net(void);
  int init_dummy_netdev(struct net_device *dev);
  
+-DECLARE_PER_CPU(int, xmit_recursion);
+ #define XMIT_RECURSION_LIMIT	10
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +static inline int dev_recursion_level(void)
 +{
 +	return current->xmit_recursion;
 +}
 +
-+#else
-+
- DECLARE_PER_CPU(int, xmit_recursion);
- static inline int dev_recursion_level(void)
- {
- 	return this_cpu_read(xmit_recursion);
- }
-+#endif
- 
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1886,6 +1886,9 @@ struct task_struct {
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- 	unsigned long	task_state_change;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	int xmit_recursion;
-+#endif
- 	int pagefault_disabled;
- #ifdef CONFIG_MMU
- 	struct task_struct *oom_reaper_list;
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3098,9 +3098,44 @@ static void skb_update_prio(struct sk_bu
- #define skb_update_prio(skb)
- #endif
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
 +static inline int xmit_rec_read(void)
 +{
-+       return current->xmit_recursion;
++	return current->xmit_recursion;
 +}
 +
 +static inline void xmit_rec_inc(void)
 +{
-+       current->xmit_recursion++;
++	current->xmit_recursion++;
 +}
 +
 +static inline void xmit_rec_dec(void)
 +{
-+       current->xmit_recursion--;
++	current->xmit_recursion--;
 +}
 +
 +#else
 +
- DEFINE_PER_CPU(int, xmit_recursion);
- EXPORT_SYMBOL(xmit_recursion);
++DECLARE_PER_CPU(int, xmit_recursion);
+ 
+ static inline int dev_recursion_level(void)
+ {
+ 	return this_cpu_read(xmit_recursion);
+ }
  
 +static inline int xmit_rec_read(void)
 +{
@@ -100,19 +76,45 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +}
 +#endif
 +
- #define RECURSION_LIMIT 10
+ struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1957,6 +1957,9 @@ struct task_struct {
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ 	unsigned long	task_state_change;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++	int xmit_recursion;
++#endif
+ 	int pagefault_disabled;
+ #ifdef CONFIG_MMU
+ 	struct task_struct *oom_reaper_list;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3147,8 +3147,10 @@ static void skb_update_prio(struct sk_bu
+ #define skb_update_prio(skb)
+ #endif
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DEFINE_PER_CPU(int, xmit_recursion);
+ EXPORT_SYMBOL(xmit_recursion);
++#endif
  
  /**
-@@ -3346,7 +3381,7 @@ static int __dev_queue_xmit(struct sk_bu
+  *	dev_loopback_xmit - loop back @skb
+@@ -3392,8 +3394,7 @@ static int __dev_queue_xmit(struct sk_bu
+ 		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
- 
--			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
-+			if (xmit_rec_read() > RECURSION_LIMIT)
+-			if (unlikely(__this_cpu_read(xmit_recursion) >
+-				     XMIT_RECURSION_LIMIT))
++			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev);
-@@ -3356,9 +3391,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3403,9 +3404,9 @@ static int __dev_queue_xmit(struct sk_bu
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
@@ -124,3 +126,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1592,7 +1592,7 @@ static inline int __bpf_tx_skb(struct ne
+ {
+ 	int ret;
+ 
+-	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
++	if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
+ 		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ 		kfree_skb(skb);
+ 		return -ENETDOWN;
+@@ -1600,9 +1600,9 @@ static inline int __bpf_tx_skb(struct ne
+ 
+ 	skb->dev = dev;
+ 
+-	__this_cpu_inc(xmit_recursion);
++	xmit_rec_inc();
+ 	ret = dev_queue_xmit(skb);
+-	__this_cpu_dec(xmit_recursion);
++	xmit_rec_dec();
+ 
+ 	return ret;
+ }
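After this, callers see one counter abstraction with two backends: a per-task field on -RT (a preempted softirq must not observe another task's count) and the old per-CPU variable otherwise. A caller-side sketch mirroring the filter.c hunk (the function name is made up):

static int xmit_one(struct sk_buff *skb, struct net_device *dev)
{
        int ret;

        if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
                return -ENETDOWN;       /* refuse to recurse any deeper */

        skb->dev = dev;
        xmit_rec_inc();
        ret = dev_queue_xmit(skb);
        xmit_rec_dec();
        return ret;
}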
diff --git a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
index 48a9121..58e31ae 100644
--- a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
+++ b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
@@ -1,7 +1,7 @@
 Subject: net-flip-lock-dep-thingy.patch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 28 Jun 2011 10:59:58 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 =======================================================
 [ INFO: possible circular locking dependency detected ]
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/sock.c
 +++ b/net/core/sock.c
-@@ -2421,12 +2421,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2508,12 +2508,11 @@ void lock_sock_nested(struct sock *sk, i
  	if (sk->sk_lock.owned)
  		__lock_sock(sk);
  	sk->sk_lock.owned = 1;
diff --git a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 8c986a6..6de2074 100644
--- a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 15:39:05 +0100
 Subject: net: provide a way to delegate processing a softirq to
  ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
If NET_RX uses up all of its budget, it moves the following NAPI
invocations into `ksoftirqd`. On -RT it does not do so. Instead it
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -476,6 +476,14 @@ extern void thread_do_softirq(void);
+@@ -487,6 +487,14 @@ extern void thread_do_softirq(void);
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
  extern void __raise_softirq_irqoff(unsigned int nr);
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void raise_softirq_irqoff(unsigned int nr)
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -5211,7 +5211,7 @@ static void net_rx_action(struct softirq
+@@ -5237,7 +5237,7 @@ static void net_rx_action(struct softirq
  	list_splice_tail(&repoll, &list);
  	list_splice(&list, &sd->poll_list);
  	if (!list_empty(&sd->poll_list))
diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 232b932..7dd22b3 100644
--- a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -1,7 +1,7 @@
 From: Marc Kleine-Budde <mkl at pengutronix.de>
 Date: Wed, 5 Mar 2014 00:49:47 +0100
 Subject: net: sched: Use msleep() instead of yield()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
On PREEMPT_RT enabled systems the interrupt handlers run as threads at prio 50
 (by default). If a high priority userspace process tries to shut down a busy
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -894,7 +894,7 @@ void dev_deactivate_many(struct list_hea
+@@ -917,7 +917,7 @@ void dev_deactivate_many(struct list_hea
  	/* Wait for outstanding qdisc_run calls. */
  	list_for_each_entry(dev, head, close_list)
  		while (some_qdisc_is_busy(dev))
diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch
index 4a13a9d..5b3109c 100644
--- a/debian/patches/features/all/rt/net-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: net: Use cpu_chill() instead of cpu_relax()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:10:04 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/kmod.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
-@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expi
  	if (BLOCK_NUM_PKTS(pbd)) {
  		while (atomic_read(&pkc->blk_fill_in_prog)) {
  			/* Waiting for skb_copy_bits to finish... */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	}
  
-@@ -956,7 +957,7 @@ static void prb_retire_current_block(str
+@@ -957,7 +958,7 @@ static void prb_retire_current_block(str
  		if (!(status & TP_STATUS_BLK_TMO)) {
  			while (atomic_read(&pkc->blk_fill_in_prog)) {
  				/* Waiting for skb_copy_bits to finish... */
@@ -50,9 +50,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/llist.h>
 +#include <linux/delay.h>
  
+ #include "rds_single_path.h"
  #include "ib_mr.h"
- 
-@@ -209,7 +210,7 @@ static inline void wait_clean_list_grace
+@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace
  	for_each_online_cpu(cpu) {
  		flag = &per_cpu(clean_list_grace, cpu);
  		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
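cpu_chill() is the RT replacement for cpu_relax() in this kind of retry loop: instead of spinning (and, when the spinner has higher priority than the preempted writer, live-locking), it sleeps for a short period so the writer can make progress. The conversion is mechanical; a sketch with a hypothetical flag:

#include <linux/delay.h>        /* cpu_chill() lives here on -RT */

static void wait_for_copy_done(atomic_t *fill_in_prog)
{
        while (atomic_read(fill_in_prog))
                cpu_chill();    /* was cpu_relax(); that could spin forever
                                 * against a preempted writer on -RT */
}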
diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
index e98a9c0..74d325d 100644
--- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch
+++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
@@ -1,7 +1,7 @@
 Subject: net/wireless: Use WARN_ON_NORT()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 21 Jul 2011 21:05:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The softirq counter is meaningless on RT, so the check triggers a
 false positive.
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/mac80211/rx.c
 +++ b/net/mac80211/rx.c
-@@ -3679,7 +3679,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4064,7 +4064,7 @@ void ieee80211_rx_napi(struct ieee80211_
  	struct ieee80211_supported_band *sband;
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  
 -	WARN_ON_ONCE(softirq_count() == 0);
 +	WARN_ON_ONCE_NONRT(softirq_count() == 0);
  
- 	if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
+ 	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
  		goto drop;
diff --git a/debian/patches/features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch b/debian/patches/features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
index a917a26..5ca1f7c 100644
--- a/debian/patches/features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
+++ b/debian/patches/features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
@@ -1,7 +1,7 @@
 Date: Tue, 27 Oct 2015 07:31:53 -0500
 From: Josh Cartwright <joshc at ni.com>
 Subject: net: Make synchronize_rcu_expedited() conditional on !RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 While the use of synchronize_rcu_expedited() might make
 synchronize_net() "faster", it does so at significant cost on RT
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -7538,7 +7538,7 @@ EXPORT_SYMBOL(free_netdev);
+@@ -7740,7 +7740,7 @@ EXPORT_SYMBOL(free_netdev);
  void synchronize_net(void)
  {
  	might_sleep();
diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
index 8d5dda3..e4e2138 100644
--- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
+++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
 From: Oleg Nesterov <oleg at redhat.com>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On x86_64 we must disable preemption before we enable interrupts
 for stack faults, int3 and debugging, because the current task is using
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -221,6 +221,13 @@ static void exit_to_usermode_loop(struct
+@@ -155,6 +155,13 @@ static void exit_to_usermode_loop(struct
  		if (cached_flags & _TIF_NEED_RESCHED)
  			schedule();
  
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1600,6 +1600,10 @@ struct task_struct {
+@@ -1670,6 +1670,10 @@ struct task_struct {
  	sigset_t blocked, real_blocked;
  	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
  	struct sigpending pending;
diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
index ea10630..78fda03 100644
--- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
+++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
Disable on -RT. If this is invoked from irq context we will have problems
acquiring the sleeping lock.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/panic.c
 +++ b/kernel/panic.c
-@@ -444,9 +444,11 @@ static u64 oops_id;
+@@ -449,9 +449,11 @@ static u64 oops_id;
  
  static int init_oops_id(void)
  {
diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index caefb2a..c16ed75 100644
--- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Make ksoftirqd do RCU quiescent states
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Wed, 5 Oct 2011 11:45:18 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
 to network-based denial-of-service attacks.  This patch therefore
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -341,11 +341,7 @@ static inline int rcu_preempt_depth(void
+@@ -343,11 +343,7 @@ static inline int rcu_preempt_depth(void
  /* Internal to kernel */
  void rcu_init(void);
  void rcu_sched_qs(void);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -254,7 +254,14 @@ void rcu_sched_qs(void)
+@@ -259,7 +259,14 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include "../time/tick-internal.h"
  
  #ifdef CONFIG_RCU_BOOST
-@@ -1338,7 +1339,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1244,7 +1245,7 @@ static void rcu_prepare_kthreads(int cpu
  
  #endif /* #else #ifdef CONFIG_RCU_BOOST */
  
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Check to see if any future RCU-related work will need to be done
-@@ -1355,7 +1356,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1261,7 +1262,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
  	return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
  	       ? 0 : rcu_cpu_has_callbacks(NULL);
  }
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
   * after it.
-@@ -1451,6 +1454,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1357,6 +1360,8 @@ static bool __maybe_unused rcu_try_advan
  	return cbs_ready;
  }
  
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
   * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
-@@ -1496,6 +1501,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1402,6 +1407,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
  	*nextevt = basemono + dj * TICK_NSEC;
  	return 0;
  }
diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
index d515450..17d021e 100644
--- a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
+++ b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
@@ -1,7 +1,7 @@
 Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 01 Dec 2011 00:07:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
The waitqueue is protected by the pci_lock, so we can just avoid
locking the waitqueue lock itself. That prevents the
diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
index 17c9359..451b9a6 100644
--- a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
+++ b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 11:58:17 +0200
 Subject: percpu_ida: Use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
The local_irq_save() + spin_lock() combination does not work that well on -RT
 
diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
index 89b53ed..31324a3 100644
--- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang at windriver.com>
 Date: Wed, 11 Jul 2012 22:05:21 +0000
 Subject: perf: Make swevent hrtimer run in irq instead of softirq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Otherwise we get a deadlock like below:
 
@@ -59,7 +59,7 @@ Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -7261,6 +7261,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8215,6 +8215,7 @@ static void perf_swevent_init_hrtimer(st
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
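
The one line this hunk adds (elided by the context-only refresh above) is
essentially the irqsafe marking; irqsafe is a struct hrtimer field that
only exists with the rt patchset:

    hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    hwc->hrtimer.function = perf_swevent_hrtimer;
    hwc->hrtimer.irqsafe = 1;   /* expire in hard irq, not in softirq */
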
diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
index 27f290f..8329322 100644
--- a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
+++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Frob softirq test
 From: Peter Zijlstra <a.p.zijlstra at chello.nl>
 Date: Sat Aug 13 00:23:17 CEST 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 With RT_FULL we get the below wreckage:
 
@@ -156,7 +156,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
 
 --- a/kernel/rcu/tree_plugin.h
 +++ b/kernel/rcu/tree_plugin.h
-@@ -428,7 +428,7 @@ void rcu_read_unlock_special(struct task
+@@ -426,7 +426,7 @@ void rcu_read_unlock_special(struct task
  	}
  
  	/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
index 83d16fe..22abf3b 100644
--- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
+++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
@@ -1,7 +1,7 @@
 Subject: crypto: Convert crypto notifier chain to SRCU
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 05 Oct 2012 09:03:24 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The crypto notifier deadlocks on RT, though this can be a real deadlock
 on mainline as well due to FIFO fair rwsems.
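
The conversion itself follows the stock SRCU notifier API; a condensed
sketch (crypto_notify() is the real call site, the rest is abridged):

    #include <linux/notifier.h>

    /* was: static BLOCKING_NOTIFIER_HEAD(crypto_chain); */
    static SRCU_NOTIFIER_HEAD(crypto_chain);

    int crypto_notify(unsigned long val, void *v)
    {
        /* SRCU read sides may sleep, which suits -RT's sleeping locks */
        return srcu_notifier_call_chain(&crypto_chain, val, v);
    }
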
diff --git a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
index 6dd643c..867deca 100644
--- a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Grygorii Strashko <Grygorii.Strashko at linaro.org>
 Date: Tue, 21 Jul 2015 19:43:56 +0300
 Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This patch fixes a build error:
   CC      kernel/pid_namespace.o
diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch
index 223da53..8572705 100644
--- a/debian/patches/features/all/rt/ping-sysrq.patch
+++ b/debian/patches/features/all/rt/ping-sysrq.patch
@@ -1,7 +1,7 @@
 Subject: net: sysrq via icmp
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 13:51:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There are (probably rare) situations when a system has crashed and the
 system console becomes unresponsive but the network ICMP layer is still alive.
@@ -61,7 +61,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
  #include <linux/socket.h>
  #include <linux/in.h>
  #include <linux/inet.h>
-@@ -891,6 +892,30 @@ static bool icmp_redirect(struct sk_buff
+@@ -899,6 +900,30 @@ static bool icmp_redirect(struct sk_buff
  }
  
  /*
@@ -92,7 +92,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
   *	Handle ICMP_ECHO ("ping") requests.
   *
   *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -917,6 +942,11 @@ static bool icmp_echo(struct sk_buff *sk
+@@ -925,6 +950,11 @@ static bool icmp_echo(struct sk_buff *sk
  		icmp_param.data_len	   = skb->len;
  		icmp_param.head_len	   = sizeof(struct icmphdr);
  		icmp_reply(&icmp_param, skb);
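
The two dozen lines added ahead of icmp_echo() implement the cookie
check; this sketch is reconstructed from memory of the rt patchset, so
the offsets and the sysctl name should be treated as approximate:

    /* a 57-byte ping whose payload repeats the configured cookie at two
     * fixed offsets triggers the SysRq selected by the following byte */
    static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
    {
        int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
        char *p = skb->data;

        if (!memcmp(&cookie, p + 20, sizeof(cookie)) &&
            !memcmp(&cookie, p + 50, sizeof(cookie)) &&
            p[24] == '-')
            handle_sysrq(p[25]);
    }
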
diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
index d97bd84..c6564c5 100644
--- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
+++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:29:20 -0500
 Subject: posix-timers: Prevent broadcast signals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Posix timers should not send broadcast signals, only kernel-internal
 signals. Prevent it.
diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 610a419..278db99 100644
--- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -1,7 +1,7 @@
 From: John Stultz <johnstul at us.ibm.com>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: posix-timers: Thread posix-cpu-timers on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 posix-cpu-timer code takes non-rt-safe locks in hard irq
 context. Move it to a thread.
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1565,6 +1565,9 @@ struct task_struct {
+@@ -1635,6 +1635,9 @@ struct task_struct {
  
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	const struct cred __rcu *real_cred; /* objective and real subjective task
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1228,6 +1228,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1274,6 +1274,9 @@ static void rt_mutex_init_task(struct ta
   */
  static void posix_cpu_timers_init(struct task_struct *tsk)
  {
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	ret = 0;
  	old_incr = timer->it.cpu.incr;
-@@ -1063,7 +1064,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_i
  	/*
  	 * Now re-arm for the new expiry time.
  	 */
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	arm_timer(timer);
  	unlock_task_sighand(p, &flags);
  
-@@ -1152,13 +1153,13 @@ static inline int fastpath_timer_check(s
+@@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(s
   * already updated our counts.  We need to check if any timers fire now.
   * Interrupts are disabled.
   */
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * The fast path checks that there are no expired thread or thread
-@@ -1212,6 +1213,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_st
  	}
  }
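
The ~190 lines this hunk adds are elided by the refresh; boiled down to a
sketch, the hard-irq path stops doing the expiry work itself:

    static DEFINE_PER_CPU(struct task_struct *, posix_timer_task);

    /* sketch: in hard irq context only wake the per-CPU helper thread;
     * the actual expiry then runs where -RT's sleeping locks are legal */
    void run_posix_cpu_timers(struct task_struct *tsk)
    {
        struct task_struct *kt = __this_cpu_read(posix_timer_task);

        if (kt)
            wake_up_process(kt);
    }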
  
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
index bc50f1a..8985550 100644
--- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: powerpc: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -320,7 +320,7 @@ menu "Kernel options"
+@@ -327,7 +327,7 @@ menu "Kernel options"
  
  config HIGHMEM
  	bool "High memory support"
diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
index c12c4ec..4f6fc57 100644
--- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: powerpc: Use generic rwsem on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the generic code, which uses rtmutex.
 
diff --git a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index 8582014..a90217d 100644
--- a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
 From: Bogdan Purcareata <bogdan.purcareata at freescale.com>
 Date: Fri, 24 Apr 2015 15:53:13 +0000
 Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 While converting the openpic emulation code to use a raw_spinlock_t enables
 guests to run on RT, there's still a performance issue. For interrupts sent in
diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
index 57017f4..dd26b33 100644
--- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 1 Nov 2012 10:14:11 +0100
 Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Implement the powerpc pieces for lazy preempt.
 
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -139,6 +139,7 @@ config PPC
+@@ -141,6 +141,7 @@ config PPC
  	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
  	select GENERIC_STRNCPY_FROM_USER
  	select GENERIC_STRNLEN_USER
@@ -26,16 +26,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	select CLONE_BACKWARDS
 --- a/arch/powerpc/include/asm/thread_info.h
 +++ b/arch/powerpc/include/asm/thread_info.h
-@@ -42,6 +42,8 @@ struct thread_info {
+@@ -43,6 +43,8 @@ struct thread_info {
  	int		cpu;			/* cpu we're on */
  	int		preempt_count;		/* 0 => preemptable,
  						   <0 => BUG */
-+	int		preempt_lazy_count;	 /* 0 => preemptable,
++	int		preempt_lazy_count;	/* 0 => preemptable,
 +						   <0 => BUG */
  	unsigned long	local_flags;		/* private flags for thread */
- 
- 	/* low level flags - has atomic operations done on it */
-@@ -82,8 +84,7 @@ static inline struct thread_info *curren
+ #ifdef CONFIG_LIVEPATCH
+ 	unsigned long *livepatch_sp;
+@@ -88,8 +90,7 @@ static inline struct thread_info *curren
  #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
  #define TIF_SIGPENDING		1	/* signal pending */
  #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TIF_32BIT		4	/* 32 bit binary */
  #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
  #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
-@@ -101,6 +102,8 @@ static inline struct thread_info *curren
+@@ -107,6 +108,8 @@ static inline struct thread_info *curren
  #if defined(CONFIG_PPC64)
  #define TIF_ELF2ABI		18	/* function descriptors must die! */
  #endif
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /* as above, but as bit values */
  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-@@ -119,14 +122,16 @@ static inline struct thread_info *curren
+@@ -125,14 +128,16 @@ static inline struct thread_info *curren
  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
  #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
  #define _TIF_NOHZ		(1<<TIF_NOHZ)
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
 --- a/arch/powerpc/kernel/asm-offsets.c
 +++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -162,6 +162,7 @@ int main(void)
+@@ -156,6 +156,7 @@ int main(void)
  	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
  	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
  	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/arch/powerpc/kernel/entry_32.S
 +++ b/arch/powerpc/kernel/entry_32.S
-@@ -818,7 +818,14 @@ user_exc_return:		/* r10 contains MSR_KE
+@@ -835,7 +835,14 @@ user_exc_return:		/* r10 contains MSR_KE
  	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
  	bne	restore
  	andi.	r8,r8,_TIF_NEED_RESCHED
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	lwz	r3,_MSR(r1)
  	andi.	r0,r3,MSR_EE	/* interrupts off? */
  	beq	restore		/* don't schedule if so */
-@@ -829,11 +836,11 @@ user_exc_return:		/* r10 contains MSR_KE
+@@ -846,11 +853,11 @@ user_exc_return:		/* r10 contains MSR_KE
  	 */
  	bl	trace_hardirqs_off
  #endif
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_TRACE_IRQFLAGS
  	/* And now, to properly rebalance the above, we tell lockdep they
  	 * are being turned back on, which will happen when we return
-@@ -1154,7 +1161,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
  #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
  
  do_work:			/* r10 contains MSR_KERNEL here */
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	beq	do_user_signal
  
  do_resched:			/* r10 contains MSR_KERNEL here */
-@@ -1175,7 +1182,7 @@ do_resched:			/* r10 contains MSR_KERNEL
+@@ -1192,7 +1199,7 @@ do_resched:			/* r10 contains MSR_KERNEL
  	MTMSRD(r10)		/* disable interrupts */
  	CURRENT_THREAD_INFO(r9, r1)
  	lwz	r9,TI_FLAGS(r9)
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	beq	restore_user
 --- a/arch/powerpc/kernel/entry_64.S
 +++ b/arch/powerpc/kernel/entry_64.S
-@@ -644,7 +644,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -657,7 +657,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  	bl	restore_math
  	b	restore
  #endif
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	beq	2f
  	bl	restore_interrupts
  	SCHEDULE_USER
-@@ -706,10 +706,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -719,10 +719,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  
  #ifdef CONFIG_PREEMPT
  	/* Check if we need to preempt */
@@ -163,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	cmpwi	cr1,r8,0
  	ld	r0,SOFTE(r1)
  	cmpdi	r0,0
-@@ -726,7 +734,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -739,7 +747,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  	/* Re-test flags and eventually loop */
  	CURRENT_THREAD_INFO(r9, r1)
  	ld	r4,TI_FLAGS(r9)
diff --git a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
index 9329f21..3b1aaba 100644
--- a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
+++ b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sun, 31 May 2015 14:44:42 -0400
 Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 To fix:
 
diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch
index ac55329..e161db3 100644
--- a/debian/patches/features/all/rt/preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 It has become an obsession to mitigate the determinism vs. throughput
 loss of RT. Looking at the mainline semantics of preemption points
@@ -53,58 +53,20 @@ performance.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- arch/x86/include/asm/preempt.h |   18 +++++++++++++-
- include/linux/preempt.h        |   29 ++++++++++++++++++++++-
- include/linux/sched.h          |   37 ++++++++++++++++++++++++++++++
- include/linux/thread_info.h    |   12 +++++++++
- include/linux/trace_events.h   |    1 
- kernel/Kconfig.preempt         |    6 ++++
- kernel/sched/core.c            |   50 ++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/fair.c            |   16 ++++++-------
- kernel/sched/features.h        |    3 ++
- kernel/sched/sched.h           |    9 +++++++
- kernel/trace/trace.c           |   37 ++++++++++++++++++------------
- kernel/trace/trace.h           |    2 +
- kernel/trace/trace_output.c    |   14 +++++++++--
- 13 files changed, 205 insertions(+), 29 deletions(-)
+ include/linux/preempt.h      |   29 ++++++++++++++++-
+ include/linux/sched.h        |   37 ++++++++++++++++++++++
+ include/linux/thread_info.h  |   12 ++++++-
+ include/linux/trace_events.h |    1 
+ kernel/Kconfig.preempt       |    6 +++
+ kernel/sched/core.c          |   72 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/fair.c          |   16 ++++-----
+ kernel/sched/features.h      |    3 +
+ kernel/sched/sched.h         |    9 +++++
+ kernel/trace/trace.c         |   37 +++++++++++++---------
+ kernel/trace/trace.h         |    2 +
+ kernel/trace/trace_output.c  |   14 +++++++-
+ 12 files changed, 209 insertions(+), 29 deletions(-)
 
---- a/arch/x86/include/asm/preempt.h
-+++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,33 @@ static __always_inline void __preempt_co
-  * a decrement which hits zero means we have no preempt_count and should
-  * reschedule.
-  */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
- }
- 
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+	if (____preempt_count_dec_and_test())
-+		return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+	return false;
-+#endif
-+}
-+
- /*
-  * Returns true when we need to resched and can (barring IRQ state).
-  */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-+			test_thread_flag(TIF_NEED_RESCHED_LAZY));
-+#else
- 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
- 
- #ifdef CONFIG_PREEMPT
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
 @@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
@@ -166,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3009,6 +3009,43 @@ static inline int test_tsk_need_resched(
+@@ -3238,6 +3238,43 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -229,8 +191,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#define tif_need_resched_lazy()	0
 +#endif
  
- #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
- /*
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+ static inline int arch_within_stack_frames(const void * const stack,
 --- a/include/linux/trace_events.h
 +++ b/include/linux/trace_events.h
 @@ -58,6 +58,7 @@ struct trace_entry {
@@ -258,7 +220,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	default PREEMPT_NONE
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -475,6 +475,38 @@ void resched_curr(struct rq *rq)
+@@ -510,6 +510,38 @@ void resched_curr(struct rq *rq)
  		trace_sched_wake_idle_without_ipi(cpu);
  }
  
@@ -297,7 +259,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -2392,6 +2424,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2522,6 +2554,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -307,7 +269,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3180,6 +3215,7 @@ void migrate_disable(void)
+@@ -3356,6 +3391,7 @@ void migrate_disable(void)
  	}
  
  	preempt_disable();
@@ -315,7 +277,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	pin_current_cpu();
  	p->migrate_disable = 1;
  	preempt_enable();
-@@ -3219,6 +3255,7 @@ void migrate_enable(void)
+@@ -3395,6 +3431,7 @@ void migrate_enable(void)
  
  	unpin_current_cpu();
  	preempt_enable();
@@ -323,30 +285,66 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(migrate_enable);
  #endif
-@@ -3358,6 +3395,7 @@ static void __sched notrace __schedule(b
+@@ -3535,6 +3572,7 @@ static void __sched notrace __schedule(b
  
- 	next = pick_next_task(rq, prev);
+ 	next = pick_next_task(rq, prev, cookie);
  	clear_tsk_need_resched(prev);
 +	clear_tsk_need_resched_lazy(prev);
  	clear_preempt_need_resched();
  	rq->clock_skip_update = 0;
  
-@@ -3503,6 +3541,14 @@ asmlinkage __visible void __sched notrac
+@@ -3654,6 +3692,30 @@ static void __sched notrace preempt_sche
+ 	} while (need_resched());
+ }
+ 
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set then we allow being scheduled away, since
++ * it is set by an RT task. Otherwise we try to avoid being scheduled out
++ * as long as the preempt_lazy_count counter is > 0.
++ */
++static __always_inline int preemptible_lazy(void)
++{
++	if (test_thread_flag(TIF_NEED_RESCHED))
++		return 1;
++	if (current_thread_info()->preempt_lazy_count)
++		return 0;
++	return 1;
++}
++
++#else
++
++static inline int preemptible_lazy(void)
++{
++	return 1;
++}
++
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ /*
+  * this is the entry point to schedule() from in-kernel preemption
+@@ -3668,7 +3730,8 @@ asmlinkage __visible void __sched notrac
+ 	 */
+ 	if (likely(!preemptible()))
+ 		return;
+-
++	if (!preemptible_lazy())
++		return;
+ 	preempt_schedule_common();
+ }
+ NOKPROBE_SYMBOL(preempt_schedule);
+@@ -3695,6 +3758,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
  
-+#ifdef CONFIG_PREEMPT_LAZY
-+	/*
-+	 * Check for lazy preemption
-+	 */
-+	if (current_thread_info()->preempt_lazy_count &&
-+	    !test_thread_flag(TIF_NEED_RESCHED))
++	if (!preemptible_lazy())
 +		return;
-+#endif
++
  	do {
- 		preempt_disable_notrace();
  		/*
-@@ -5246,7 +5292,9 @@ void init_idle(struct task_struct *idle,
+ 		 * Because the function tracer can trace preempt_count_sub()
+@@ -5458,7 +5524,9 @@ void init_idle(struct task_struct *idle,
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -359,7 +357,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	 */
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -3318,7 +3318,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3486,7 +3486,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  	ideal_runtime = sched_slice(cfs_rq, curr);
  	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  	if (delta_exec > ideal_runtime) {
@@ -368,7 +366,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		/*
  		 * The current task ran long enough, ensure it doesn't get
  		 * re-elected due to buddy favours.
-@@ -3342,7 +3342,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3510,7 +3510,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  		return;
  
  	if (delta > ideal_runtime)
@@ -377,7 +375,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void
-@@ -3487,7 +3487,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3655,7 +3655,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
@@ -386,7 +384,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		return;
  	}
  	/*
-@@ -3669,7 +3669,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3837,7 +3837,7 @@ static void __account_cfs_rq_runtime(str
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -395,7 +393,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static __always_inline
-@@ -4281,7 +4281,7 @@ static void hrtick_start_fair(struct rq
+@@ -4465,7 +4465,7 @@ static void hrtick_start_fair(struct rq
  
  		if (delta < 0) {
  			if (rq->curr == p)
@@ -404,7 +402,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			return;
  		}
  		hrtick_start(rq, delta);
-@@ -5421,7 +5421,7 @@ static void check_preempt_wakeup(struct
+@@ -5654,7 +5654,7 @@ static void check_preempt_wakeup(struct
  	return;
  
  preempt:
@@ -413,7 +411,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -8172,7 +8172,7 @@ static void task_fork_fair(struct task_s
+@@ -8380,7 +8380,7 @@ static void task_fork_fair(struct task_s
  		 * 'current' within the tree based on its new key value.
  		 */
  		swap(curr->vruntime, se->vruntime);
@@ -422,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -8197,7 +8197,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8404,7 +8404,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
@@ -445,7 +443,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1304,6 +1304,15 @@ extern void init_sched_fair_class(void);
+@@ -1317,6 +1317,15 @@ extern void init_sched_fair_class(void);
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
  
@@ -463,7 +461,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1657,6 +1657,7 @@ tracing_generic_entry_update(struct trac
+@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trac
  	struct task_struct *tsk = current;
  
  	entry->preempt_count		= pc & 0xff;
@@ -471,7 +469,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	entry->pid			= (tsk) ? tsk->pid : 0;
  	entry->flags =
  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1667,7 +1668,8 @@ tracing_generic_entry_update(struct trac
+@@ -1907,7 +1908,8 @@ tracing_generic_entry_update(struct trac
  		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
  		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
  		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -481,7 +479,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
  
  	entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2563,15 +2565,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2894,15 +2896,17 @@ get_total_entries(struct trace_buffer *b
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -508,7 +506,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2597,11 +2601,14 @@ static void print_func_help_header_irq(s
+@@ -2928,11 +2932,14 @@ static void print_func_help_header_irq(s
  	print_event_info(buf, m);
  	seq_puts(m, "#                              _-----=> irqs-off\n"
  		    "#                             / _----=> need-resched\n"
@@ -530,7 +528,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void
 --- a/kernel/trace/trace.h
 +++ b/kernel/trace/trace.h
-@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head {
+@@ -123,6 +123,7 @@ struct kretprobe_trace_entry_head {
   *  NEED_RESCHED	- reschedule is requested
   *  HARDIRQ		- inside an interrupt handler
   *  SOFTIRQ		- inside a softirq handler
@@ -538,7 +536,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   */
  enum trace_flag_type {
  	TRACE_FLAG_IRQS_OFF		= 0x01,
-@@ -126,6 +127,7 @@ enum trace_flag_type {
+@@ -132,6 +133,7 @@ enum trace_flag_type {
  	TRACE_FLAG_SOFTIRQ		= 0x10,
  	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
  	TRACE_FLAG_NMI			= 0x40,
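
Distilled from the hunks above, the core of the policy fits in a few
lines (a sketch, not a verbatim copy of the patch):

    /* fair-class preemption requests use the lazy variant: flag the
     * task, but do not force an immediate reschedule */
    static void resched_curr_lazy(struct rq *rq)
    {
    #ifdef CONFIG_PREEMPT_LAZY
        set_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY);
    #else
        resched_curr(rq);
    #endif
    }

RT-class tasks still go through resched_curr() and TIF_NEED_RESCHED,
which is why preemptible_lazy() above lets TIF_NEED_RESCHED override an
active preempt_lazy_count.
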
diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
index 2a87be8..e908b74 100644
--- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
+++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 24 Jul 2009 12:38:56 +0200
 Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT needs a few preempt_disable/enable points which are not necessary
 otherwise. Implement variants to avoid #ifdeffery.
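
The variants follow the obvious pattern (a sketch of what the description
implies; include/linux/preempt.h in the rt tree has the authoritative
version):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define preempt_disable_rt()       preempt_disable()
    # define preempt_enable_rt()        preempt_enable()
    # define preempt_disable_nort()     barrier()
    # define preempt_enable_nort()      barrier()
    #else
    # define preempt_disable_rt()       barrier()
    # define preempt_enable_rt()        barrier()
    # define preempt_disable_nort()     preempt_disable()
    # define preempt_enable_nort()      preempt_enable()
    #endif
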
diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index a33f56a..cb8960e 100644
--- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add "force_early_printk" boot param to help with debugging
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 02 Sep 2011 14:41:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Gives me an option to screw printk and actually see what the machine
 says.
@@ -16,7 +16,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -276,6 +276,13 @@ asmlinkage void early_printk(const char
+@@ -381,6 +381,13 @@ asmlinkage void early_printk(const char
   */
  static bool __read_mostly printk_killswitch;
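
The boot parameter itself is presumably wired up along these lines
(sketch):

    static int __init force_early_printk_setup(char *str)
    {
        /* route every subsequent printk() through early_printk() */
        printk_killswitch = true;
        return 0;
    }
    early_param("force_early_printk", force_early_printk_setup);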
  
diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch
index f8506a8..cfb3ccd 100644
--- a/debian/patches/features/all/rt/printk-kill.patch
+++ b/debian/patches/features/all/rt/printk-kill.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add a printk kill switch
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 22 Jul 2011 17:58:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Add a printk kill switch. This is used from the (NMI) watchdog to ensure
 that it does not deadlock with the early printk code.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/printk.h
 +++ b/include/linux/printk.h
-@@ -117,9 +117,11 @@ do {						\
+@@ -125,9 +125,11 @@ struct va_format {
  #ifdef CONFIG_EARLY_PRINTK
  extern asmlinkage __printf(1, 2)
  void early_printk(const char *fmt, ...);
@@ -26,12 +26,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +static inline void printk_kill(void) { }
  #endif
  
- typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
+ #ifdef CONFIG_PRINTK_NMI
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -246,6 +246,58 @@ struct printk_log {
+@@ -351,6 +351,58 @@ struct printk_log {
   */
- static DEFINE_RAW_SPINLOCK(logbuf_lock);
+ DEFINE_RAW_SPINLOCK(logbuf_lock);
  
 +#ifdef CONFIG_EARLY_PRINTK
 +struct console *early_console;
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PRINTK
  DECLARE_WAIT_QUEUE_HEAD(log_wait);
  /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1620,6 +1672,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1750,6 +1802,13 @@ asmlinkage int vprintk_emit(int facility
  	/* cpu currently holding logbuf_lock in this function */
  	static unsigned int logbuf_cpu = UINT_MAX;
  
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (level == LOGLEVEL_SCHED) {
  		level = LOGLEVEL_DEFAULT;
  		in_sched = true;
-@@ -1901,26 +1960,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -2023,26 +2082,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
  
  #endif /* CONFIG_PRINTK */
  
diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch
index b149f69..8964936 100644
--- a/debian/patches/features/all/rt/printk-rt-aware.patch
+++ b/debian/patches/features/all/rt/printk-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: printk: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 19 Sep 2012 14:50:37 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Drop the lock before calling the console driver and do not disable
 interrupts while printing to a serial console.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1502,6 +1502,7 @@ static void call_console_drivers(int lev
+@@ -1631,6 +1631,7 @@ static void call_console_drivers(int lev
  	if (!console_drivers)
  		return;
  
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
  			continue;
-@@ -1517,6 +1518,7 @@ static void call_console_drivers(int lev
+@@ -1646,6 +1647,7 @@ static void call_console_drivers(int lev
  		else
  			con->write(con, text, len);
  	}
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1821,13 +1823,23 @@ asmlinkage int vprintk_emit(int facility
+@@ -1960,13 +1962,23 @@ asmlinkage int vprintk_emit(int facility
  
  	/* If called from the scheduler, we can not call up(). */
  	if (!in_sched) {
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			console_unlock();
  		lockdep_on();
  	}
-@@ -2229,11 +2241,16 @@ static void console_cont_flush(char *tex
+@@ -2358,11 +2370,16 @@ static void console_cont_flush(char *tex
  		goto out;
  
  	len = cont_print_text(text, size);
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return;
  out:
  	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2355,13 +2372,17 @@ void console_unlock(void)
+@@ -2486,13 +2503,17 @@ void console_unlock(void)
  		console_idx = log_next(console_idx);
  		console_seq++;
  		console_prev = msg->flags;
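
Condensed, the two call_console_drivers() hunks above bracket the console
loop with a migration guard; the console_unlock() hunks apply the same
idea around the logbuf_lock. A simplified sketch (the real signature also
carries the ext_text/ext_len pair):

    static void call_console_drivers(int level, const char *text, size_t len)
    {
        struct console *con;

        migrate_disable();      /* pin the CPU, but stay preemptible */
        for_each_console(con) {
            /* ... the existing exclusive/CON_ENABLED filtering ... */
            con->write(con, text, len);
        }
        migrate_enable();
    }
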
diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 0305ce9..78c3afa 100644
--- a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 18:21:04 +0200
 Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 As explained by Alexander Fyodorov <halcy at yandex.ru>:
 
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -241,10 +241,7 @@ extern char ___assert_task_state[1 - 2*!
+@@ -243,10 +243,7 @@ extern char ___assert_task_state[1 - 2*!
  				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
  				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
  
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #define task_contributes_to_load(task)	\
  				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
  				 (task->flags & PF_FROZEN) == 0 && \
-@@ -3026,6 +3023,51 @@ static inline int signal_pending_state(l
+@@ -3255,6 +3252,51 @@ static inline int signal_pending_state(l
  	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
  }
  
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_unlock_irq(&task->sighand->siglock);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1317,6 +1317,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1373,6 +1373,18 @@ int migrate_swap(struct task_struct *cur
  	return ret;
  }
  
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -1361,7 +1373,7 @@ unsigned long wait_task_inactive(struct
+@@ -1417,7 +1429,7 @@ unsigned long wait_task_inactive(struct
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				return 0;
  			cpu_relax();
  		}
-@@ -1376,7 +1388,8 @@ unsigned long wait_task_inactive(struct
+@@ -1432,7 +1444,8 @@ unsigned long wait_task_inactive(struct
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
@@ -150,5 +150,5 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		if (!match_state || p->state == match_state ||
 +		    p->saved_state == match_state)
  			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- 		task_rq_unlock(rq, p, &flags);
+ 		task_rq_unlock(rq, p, &rf);
  
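The helper the large sched.h hunk above introduces revolves around -RT's
saved_state field; a condensed sketch:

    /* on -RT a task blocked on a sleeping spinlock stashes its original
     * state in p->saved_state, so state checks must consult both */
    static bool check_task_state(struct task_struct *p, long match_state)
    {
        unsigned long flags;
        bool match = false;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (p->state == match_state || p->saved_state == match_state)
            match = true;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        return match;
    }

The wait_task_inactive() hunks then use this pair of checks in place of
the raw p->state comparison.
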
diff --git a/debian/patches/features/all/rt/radix-tree-rt-aware.patch b/debian/patches/features/all/rt/radix-tree-rt-aware.patch
index 1418b17..8386fc3 100644
--- a/debian/patches/features/all/rt/radix-tree-rt-aware.patch
+++ b/debian/patches/features/all/rt/radix-tree-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:33:18 +0200
 Subject: radix-tree: Make RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Disable radix_tree_preload() on -RT. This function returns with
 preemption disabled, which may cause high latencies and breaks if the
@@ -9,27 +9,33 @@ user tries to grab any locks after invoking it.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- include/linux/radix-tree.h |    7 ++++++-
+ include/linux/radix-tree.h |   12 +++++++++++-
  lib/radix-tree.c           |    5 ++++-
- 2 files changed, 10 insertions(+), 2 deletions(-)
+ 2 files changed, 15 insertions(+), 2 deletions(-)
 
 --- a/include/linux/radix-tree.h
 +++ b/include/linux/radix-tree.h
-@@ -294,8 +294,13 @@ radix_tree_gang_lookup(struct radix_tree
+@@ -289,9 +289,19 @@ unsigned int radix_tree_gang_lookup(stru
  unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
  			void ***results, unsigned long *indices,
  			unsigned long first_index, unsigned int max_items);
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int radix_tree_preload(gfp_t gfp_mask);
- int radix_tree_maybe_preload(gfp_t gfp_mask);
-+#else
++#ifdef CONFIG_PREEMPT_RT_FULL
 +static inline int radix_tree_preload(gfp_t gm) { return 0; }
 +static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
++static inline int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
++{
++	return 0;
++};
++
++#else
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
 +#endif
  void radix_tree_init(void);
  void *radix_tree_tag_set(struct radix_tree_root *root,
  			unsigned long index, unsigned int tag);
-@@ -320,7 +325,7 @@ unsigned long radix_tree_locate_item(str
+@@ -316,7 +326,7 @@ unsigned long radix_tree_locate_item(str
  
  static inline void radix_tree_preload_end(void)
  {
@@ -40,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
 --- a/lib/radix-tree.c
 +++ b/lib/radix-tree.c
-@@ -240,13 +240,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -290,13 +290,14 @@ radix_tree_node_alloc(struct radix_tree_
  		 * succeed in getting a node here (and never reach
  		 * kmem_cache_alloc)
  		 */
@@ -56,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		/*
  		 * Update the allocation stack trace as this is more useful
  		 * for debugging.
-@@ -287,6 +288,7 @@ radix_tree_node_free(struct radix_tree_n
+@@ -336,6 +337,7 @@ radix_tree_node_free(struct radix_tree_n
  	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
  }
  
@@ -64,11 +70,11 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Load up this CPU's radix_tree_node buffer with sufficient objects to
   * ensure that the addition of a single element in the tree cannot fail.  On
-@@ -355,6 +357,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
- 	return 0;
+@@ -455,6 +457,7 @@ int radix_tree_maybe_preload_order(gfp_t
+ 
+ 	return __radix_tree_preload(gfp_mask, nr_nodes);
  }
- EXPORT_SYMBOL(radix_tree_maybe_preload);
 +#endif
  
  /*
-  *	Return the maximum key which can be store into a
+  * The maximum index which can be stored in a radix tree
diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
index 91c837a..439bb80 100644
--- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
+++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: random: Make it work on rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Delegate the random insertion to the forced threaded interrupt
 handler. Store the return IP of the hard interrupt handler in the irq
@@ -12,15 +12,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
  drivers/char/random.c   |   11 +++++------
+ drivers/hv/vmbus_drv.c  |    4 +++-
  include/linux/irqdesc.h |    1 +
  include/linux/random.h  |    2 +-
  kernel/irq/handle.c     |    8 +++++++-
  kernel/irq/manage.c     |    6 ++++++
- 5 files changed, 20 insertions(+), 8 deletions(-)
+ 6 files changed, 23 insertions(+), 9 deletions(-)
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -888,28 +888,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1120,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f
  	return *(ptr + f->reg_idx++);
  }
  
@@ -54,6 +55,26 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	fast_mix(fast_pool);
  	add_interrupt_bench(cycles);
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -761,6 +761,8 @@ static void vmbus_isr(void)
+ 	void *page_addr;
+ 	struct hv_message *msg;
+ 	union hv_synic_event_flags *event;
++	struct pt_regs *regs = get_irq_regs();
++	u64 ip = regs ? instruction_pointer(regs) : 0;
+ 	bool handled = false;
+ 
+ 	page_addr = hv_context.synic_event_page[cpu];
+@@ -808,7 +810,7 @@ static void vmbus_isr(void)
+ 			tasklet_schedule(hv_context.msg_dpc[cpu]);
+ 	}
+ 
+-	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
++	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
+ }
+ 
+ 
 --- a/include/linux/irqdesc.h
 +++ b/include/linux/irqdesc.h
 @@ -64,6 +64,7 @@ struct irq_desc {
@@ -63,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	u64			random_ip;
  	raw_spinlock_t		lock;
  	struct cpumask		*percpu_enabled;
- #ifdef CONFIG_SMP
+ 	const struct cpumask	*percpu_affinity;
 --- a/include/linux/random.h
 +++ b/include/linux/random.h
 @@ -20,7 +20,7 @@ struct random_ready_callback {
@@ -77,31 +98,27 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern int add_random_ready_callback(struct random_ready_callback *rdy);
 --- a/kernel/irq/handle.c
 +++ b/kernel/irq/handle.c
-@@ -134,6 +134,8 @@ void __irq_wake_thread(struct irq_desc *
- 
- irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+@@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(stru
  {
+ 	irqreturn_t retval;
+ 	unsigned int flags = 0;
 +	struct pt_regs *regs = get_irq_regs();
 +	u64 ip = regs ? instruction_pointer(regs) : 0;
- 	irqreturn_t retval = IRQ_NONE;
- 	unsigned int flags = 0, irq = desc->irq_data.irq;
- 	struct irqaction *action;
-@@ -174,7 +176,11 @@ irqreturn_t handle_irq_event_percpu(stru
- 		retval |= res;
- 	}
  
--	add_interrupt_randomness(irq, flags);
+ 	retval = __handle_irq_event_percpu(desc, &flags);
+ 
+-	add_interrupt_randomness(desc->irq_data.irq, flags);
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +	desc->random_ip = ip;
 +#else
-+	add_interrupt_randomness(irq, flags, ip);
++	add_interrupt_randomness(desc->irq_data.irq, flags, ip);
 +#endif
  
  	if (!noirqdebug)
  		note_interrupt(desc, retval);
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -1043,6 +1043,12 @@ static int irq_thread(void *data)
+@@ -1023,6 +1023,12 @@ static int irq_thread(void *data)
  		if (action_ret == IRQ_WAKE_THREAD)
  			irq_wake_secondary(desc, action);
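
The handful of lines this hunk adds to irq_thread() are the deferred
feed; presumably of this shape, matching the desc->random_ip field
introduced above:

    #ifdef CONFIG_PREEMPT_RT_FULL
            migrate_disable();
            add_interrupt_randomness(action->irq, 0, desc->random_ip);
            migrate_enable();
    #endif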
  
diff --git a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
new file mode 100644
index 0000000..3497193
--- /dev/null
+++ b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Sep 2016 11:52:17 +0200
+Subject: rbtree: include rcu.h because we use it
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+Since commit c1adf20052d8 ("Introduce rb_replace_node_rcu()")
+rbtree_augmented.h uses RCU related data structures but does not include
+them. It works as long as it somehow gets included before that and fails
+otherwise.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/rbtree_augmented.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/rbtree_augmented.h
++++ b/include/linux/rbtree_augmented.h
+@@ -26,6 +26,7 @@
+ 
+ #include <linux/compiler.h>
+ #include <linux/rbtree.h>
++#include <linux/rcupdate.h>
+ 
+ /*
+  * Please note - only struct rb_augment_callbacks and the prototypes for
diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
index d5f34f5..6e6e825 100644
--- a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -1,7 +1,7 @@
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Mon, 4 Nov 2013 13:21:10 -0800
 Subject: rcu: Eliminate softirq processing from rcutree
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Running RCU out of softirq is a problem for some workloads that would
 like to manage RCU core processing independently of other softirq work,
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include "tree.h"
  #include "rcu.h"
-@@ -2946,18 +2951,17 @@ static void
+@@ -3041,18 +3046,17 @@ static void
  /*
   * Do RCU core processing for the current CPU.
   */
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Schedule RCU callback invocation.  If the specified type of RCU
   * does not support RCU priority boosting, just do a direct call,
-@@ -2969,18 +2973,105 @@ static void invoke_rcu_callbacks(struct
+@@ -3064,18 +3068,105 @@ static void invoke_rcu_callbacks(struct
  {
  	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
  		return;
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  /*
   * Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4648,7 +4739,6 @@ void __init rcu_init(void)
+@@ -4237,7 +4328,6 @@ void __init rcu_init(void)
  	if (dump_tree)
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
  	__rcu_init_preempt();
@@ -179,7 +179,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 * We don't need protection against CPU-hotplug here because
 --- a/kernel/rcu/tree.h
 +++ b/kernel/rcu/tree.h
-@@ -580,12 +580,10 @@ extern struct rcu_state rcu_bh_state;
+@@ -595,12 +595,10 @@ extern struct rcu_state rcu_bh_state;
  extern struct rcu_state rcu_preempt_state;
  #endif /* #ifdef CONFIG_PREEMPT_RCU */
  
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #ifndef RCU_TREE_NONCORE
  
-@@ -605,10 +603,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -620,10 +618,9 @@ void call_rcu(struct rcu_head *head, rcu
  static void __init __rcu_init_preempt(void);
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -248,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_RCU_NOCB_CPU
  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-@@ -635,15 +627,6 @@ static void rcu_preempt_check_callbacks(
+@@ -633,15 +625,6 @@ static void rcu_preempt_check_callbacks(
  		t->rcu_read_unlock_special.b.need_qs = true;
  }
  
@@ -264,7 +264,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Queue a preemptible-RCU callback for invocation after a grace period.
   */
-@@ -925,6 +908,19 @@ void exit_rcu(void)
+@@ -830,6 +813,19 @@ void exit_rcu(void)
  
  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
  
@@ -284,7 +284,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_RCU_BOOST
  
  #include "../locking/rtmutex_common.h"
-@@ -956,16 +952,6 @@ static void rcu_initiate_boost_trace(str
+@@ -861,16 +857,6 @@ static void rcu_initiate_boost_trace(str
  
  #endif /* #else #ifdef CONFIG_RCU_TRACE */
  
@@ -301,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Carry out RCU priority boosting on the task indicated by ->exp_tasks
   * or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1109,23 +1095,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1014,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
  }
  
  /*
@@ -325,7 +325,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * Is the current CPU running the RCU-callbacks kthread?
   * Caller must have preemption disabled.
   */
-@@ -1179,67 +1148,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1084,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
  	return 0;
  }
  
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Set the per-rcu_node kthread's affinity to cover all CPUs that are
   * served by the rcu_node in question.  The CPU hotplug lock is still
-@@ -1269,26 +1177,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1175,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
  	free_cpumask_var(cm);
  }
  
@@ -420,7 +420,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	rcu_for_each_leaf_node(rcu_state_p, rnp)
  		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
  }
-@@ -1311,11 +1205,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1217,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  }
  
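Summarized, the replacement this patch describes can be sketched as
follows (not the literal patch; the per-CPU names follow the rt patchset,
so treat them as approximate):

    static void invoke_rcu_core(void)
    {
        /* instead of raise_softirq(RCU_SOFTIRQ): hand the work to the
         * per-CPU RCU kthread, which is schedulable and boostable */
        struct task_struct *t;
        unsigned long flags;

        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
        t = __this_cpu_read(rcu_cpu_kthread_task);
        if (t)
            wake_up_process(t);
        local_irq_restore(flags);
    }
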
diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
index ffa194a..635738a 100644
--- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Disable RCU_FAST_NO_HZ on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 13:26:09 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This uses a timer_list timer from the irq disabled guts of the idle
 code. Disable it for now to prevent wreckage.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -610,7 +610,7 @@ config RCU_FANOUT_LEAF
+@@ -613,7 +613,7 @@ config RCU_FANOUT_LEAF
  
  config RCU_FAST_NO_HZ
  	bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
index 53a13e4..eb0d8a6 100644
--- a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Mar 2014 20:19:05 +0100
 Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Since it is no longer invoked from the softirq, people run into OOM more
 often if the priority of the RCU thread is too low. Making boosting
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -494,7 +494,7 @@ config TINY_RCU
+@@ -496,7 +496,7 @@ config TINY_RCU
  
  config RCU_EXPERT
  	bool "Make expert-level adjustments to RCU configuration"
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	help
  	  This option needs to be enabled if you wish to make
  	  expert-level adjustments to RCU configuration.  By default,
-@@ -637,7 +637,7 @@ config TREE_RCU_TRACE
+@@ -640,7 +640,7 @@ config TREE_RCU_TRACE
  config RCU_BOOST
  	bool "Enable RCU priority boosting"
  	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 218144c..5927955 100644
--- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Merge RCU-bh into RCU-preempt
 Date: Wed, 5 Oct 2011 11:59:38 -0700
 From: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The Linux kernel has long RCU-bh read-side critical sections that
 intolerably increase scheduling latency under mainline's RCU-bh rules,
@@ -26,14 +26,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
  include/linux/rcupdate.h |   23 +++++++++++++++++++++++
- include/linux/rcutree.h  |   18 ++++++++++++++++--
- kernel/rcu/tree.c        |   16 ++++++++++++++++
+ include/linux/rcutree.h  |   21 ++++++++++++++++++---
+ kernel/rcu/rcutorture.c  |    7 +++++++
+ kernel/rcu/tree.c        |   24 ++++++++++++++++++++++++
+ kernel/rcu/tree.h        |    2 ++
  kernel/rcu/update.c      |    2 ++
- 4 files changed, 57 insertions(+), 2 deletions(-)
+ 6 files changed, 76 insertions(+), 3 deletions(-)
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -177,6 +177,9 @@ void call_rcu(struct rcu_head *head,
+@@ -179,6 +179,9 @@ void call_rcu(struct rcu_head *head,
  
  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
  
@@ -43,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
   * @head: structure to be used for queueing the RCU updates.
-@@ -200,6 +203,7 @@ void call_rcu(struct rcu_head *head,
+@@ -202,6 +205,7 @@ void call_rcu(struct rcu_head *head,
   */
  void call_rcu_bh(struct rcu_head *head,
  		 rcu_callback_t func);
@@ -51,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -337,7 +341,11 @@ static inline int rcu_preempt_depth(void
+@@ -339,7 +343,11 @@ static inline int rcu_preempt_depth(void
  /* Internal to kernel */
  void rcu_init(void);
  void rcu_sched_qs(void);
@@ -63,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_check_callbacks(int user);
  void rcu_report_dead(unsigned int cpu);
  
-@@ -505,7 +513,14 @@ extern struct lockdep_map rcu_callback_m
+@@ -508,7 +516,14 @@ extern struct lockdep_map rcu_callback_m
  int debug_lockdep_rcu_enabled(void);
  
  int rcu_read_lock_held(void);
@@ -78,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -953,10 +968,14 @@ static inline void rcu_read_unlock(void)
+@@ -906,10 +921,14 @@ static inline void rcu_read_unlock(void)
  static inline void rcu_read_lock_bh(void)
  {
  	local_bh_disable();
@@ -93,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -966,10 +985,14 @@ static inline void rcu_read_lock_bh(void
+@@ -919,10 +938,14 @@ static inline void rcu_read_lock_bh(void
   */
  static inline void rcu_read_unlock_bh(void)
  {
@@ -134,12 +136,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_barrier_sched(void);
  unsigned long get_state_synchronize_rcu(void);
  void cond_synchronize_rcu(unsigned long oldstate);
-@@ -85,12 +93,10 @@ unsigned long rcu_batches_started(void);
- unsigned long rcu_batches_started_bh(void);
+@@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned lon
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ unsigned long rcu_batches_started(void);
+-unsigned long rcu_batches_started_bh(void);
  unsigned long rcu_batches_started_sched(void);
  unsigned long rcu_batches_completed(void);
 -unsigned long rcu_batches_completed_bh(void);
  unsigned long rcu_batches_completed_sched(void);
+ unsigned long rcu_exp_batches_completed(void);
+ unsigned long rcu_exp_batches_completed_sched(void);
  void show_rcu_gp_kthreads(void);
  
  void rcu_force_quiescent_state(void);
@@ -147,24 +154,49 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_sched_force_quiescent_state(void);
  
  void rcu_idle_enter(void);
-@@ -107,6 +113,14 @@ extern int rcu_scheduler_active __read_m
+@@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_m
  
  bool rcu_is_watching(void);
  
 +#ifndef CONFIG_PREEMPT_RT_FULL
 +void rcu_bh_force_quiescent_state(void);
++unsigned long rcu_batches_started_bh(void);
 +unsigned long rcu_batches_completed_bh(void);
 +#else
 +# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
 +# define rcu_batches_completed_bh	rcu_batches_completed
++# define rcu_batches_started_bh		rcu_batches_completed
 +#endif
 +
  void rcu_all_qs(void);
  
- #endif /* __LINUX_RCUTREE_H */
+ /* RCUtree hotplug events */
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops =
+ 	.name		= "rcu"
+ };
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Definitions for rcu_bh torture testing.
+  */
+@@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops
+ 	.name		= "rcu_bh"
+ };
+ 
++#else
++static struct rcu_torture_ops rcu_bh_ops = {
++	.ttype		= INVALID_RCU_FLAVOR,
++};
++#endif
++
+ /*
+  * Don't even think about trying any of these in real life!!!
+  * The names includes "busted", and they really means it!
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -254,6 +254,7 @@ void rcu_sched_qs(void)
+@@ -259,6 +259,7 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
@@ -172,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_bh_qs(void)
  {
  	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -263,6 +264,7 @@ void rcu_bh_qs(void)
+@@ -268,6 +269,7 @@ void rcu_bh_qs(void)
  		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
  	}
  }
@@ -180,7 +212,21 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
  
-@@ -450,6 +452,7 @@ unsigned long rcu_batches_completed_sche
+@@ -448,11 +450,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+ /*
+  * Return the number of RCU BH batches started thus far for debug & stats.
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ unsigned long rcu_batches_started_bh(void)
+ {
+ 	return rcu_bh_state.gpnum;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
++#endif
+ 
+ /*
+  * Return the number of RCU batches completed thus far for debug & stats.
+@@ -472,6 +476,7 @@ unsigned long rcu_batches_completed_sche
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  
@@ -188,7 +234,23 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Return the number of RCU BH batches completed thus far for debug & stats.
   */
-@@ -477,6 +480,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -480,6 +485,7 @@ unsigned long rcu_batches_completed_bh(v
+ 	return rcu_bh_state.completed;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
++#endif
+ 
+ /*
+  * Return the number of RCU expedited batches completed thus far for
+@@ -503,6 +509,7 @@ unsigned long rcu_exp_batches_completed_
+ }
+ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Force a quiescent state.
+  */
+@@ -521,6 +528,13 @@ void rcu_bh_force_quiescent_state(void)
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
@@ -202,7 +264,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Force a quiescent state for RCU-sched.
   */
-@@ -3099,6 +3109,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -571,9 +585,11 @@ void rcutorture_get_gp_data(enum rcutort
+ 	case RCU_FLAVOR:
+ 		rsp = rcu_state_p;
+ 		break;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	case RCU_BH_FLAVOR:
+ 		rsp = &rcu_bh_state;
+ 		break;
++#endif
+ 	case RCU_SCHED_FLAVOR:
+ 		rsp = &rcu_sched_state;
+ 		break;
+@@ -3192,6 +3208,7 @@ void call_rcu_sched(struct rcu_head *hea
  }
  EXPORT_SYMBOL_GPL(call_rcu_sched);
  
@@ -210,7 +284,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Queue an RCU callback for invocation after a quicker grace period.
   */
-@@ -3107,6 +3118,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3200,6 +3217,7 @@ void call_rcu_bh(struct rcu_head *head,
  	__call_rcu(head, func, &rcu_bh_state, -1, 0);
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -218,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Queue an RCU callback for lazy invocation after a grace period.
-@@ -3198,6 +3210,7 @@ void synchronize_sched(void)
+@@ -3291,6 +3309,7 @@ void synchronize_sched(void)
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
  
@@ -226,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
   *
-@@ -3224,6 +3237,7 @@ void synchronize_rcu_bh(void)
+@@ -3317,6 +3336,7 @@ void synchronize_rcu_bh(void)
  		wait_rcu_gp(call_rcu_bh);
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -234,7 +308,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * get_state_synchronize_rcu - Snapshot current RCU state
-@@ -4104,6 +4118,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3695,6 +3715,7 @@ static void _rcu_barrier(struct rcu_stat
  	mutex_unlock(&rsp->barrier_mutex);
  }
  
@@ -242,7 +316,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
-@@ -4112,6 +4127,7 @@ void rcu_barrier_bh(void)
+@@ -3703,6 +3724,7 @@ void rcu_barrier_bh(void)
  	_rcu_barrier(&rcu_bh_state);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -250,6 +324,28 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+@@ -4196,7 +4218,9 @@ void __init rcu_init(void)
+ 
+ 	rcu_bootup_announce();
+ 	rcu_init_geometry();
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	rcu_init_one(&rcu_bh_state);
++#endif
+ 	rcu_init_one(&rcu_sched_state);
+ 	if (dump_tree)
+ 		rcu_dump_rcu_node_tree(&rcu_sched_state);
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -587,7 +587,9 @@ extern struct list_head rcu_struct_flavo
+  */
+ extern struct rcu_state rcu_sched_state;
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern struct rcu_state rcu_bh_state;
++#endif
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+ extern struct rcu_state rcu_preempt_state;
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
 @@ -295,6 +295,7 @@ int rcu_read_lock_held(void)
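
The net effect on the read side, as a minimal sketch of the mapping the
hunks above introduce (assuming CONFIG_PREEMPT_RT_FULL):

	static inline void rcu_read_lock_bh(void)
	{
		local_bh_disable();
		rcu_read_lock();	/* BH readers become RCU-preempt readers */
	}

	static inline void rcu_read_unlock_bh(void)
	{
		rcu_read_unlock();
		local_bh_enable();
	}

On !RT the original rcu_bh machinery stays in place behind the
CONFIG_PREEMPT_RT_FULL conditionals.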
diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 847dc7c..9f1f8bd 100644
--- a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -1,7 +1,7 @@
 From: Tiejun Chen <tiejun.chen at windriver.com>
 Date: Wed, 18 Dec 2013 17:51:49 +0800
 Subject: rcutree/rcu_bh_qs: Disable irq while calling rcu_preempt_qs()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Any caller of the function rcu_preempt_qs() must disable irqs in
 order to protect the assignment to ->rcu_read_unlock_special. In
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -259,7 +259,12 @@ static void rcu_preempt_qs(void);
+@@ -264,7 +264,12 @@ static void rcu_preempt_qs(void);
  
  void rcu_bh_qs(void)
  {
diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 3a2b926..9ee6901 100644
--- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Thu, 28 Jul 2011 11:16:00 +0800
 Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When retry happens, it's likely that the task has been migrated to
 another cpu (unless the unplug failed), but it still dereferences the
diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 4606daa..5d5b801 100644
--- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -1,7 +1,7 @@
 Subject: ARM: Initialize split page table locks for vector page
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Sat, 1 Oct 2011 18:58:13 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Without this patch, ARM cannot use SPLIT_PTLOCK_CPUS if
 PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -319,6 +319,30 @@ unsigned long arch_randomize_brk(struct
+@@ -323,6 +323,30 @@ unsigned long arch_randomize_brk(struct
  }
  
  #ifdef CONFIG_MMU
diff --git a/debian/patches/features/all/rt/relay-fix-timer-madness.patch b/debian/patches/features/all/rt/relay-fix-timer-madness.patch
index 6b23b0d..403be62 100644
--- a/debian/patches/features/all/rt/relay-fix-timer-madness.patch
+++ b/debian/patches/features/all/rt/relay-fix-timer-madness.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:44:07 -0500
 Subject: relay: Fix timer madness
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 remove timer calls (!!!) from deep within the tracing infrastructure.
 This was totally bogus code that can cause lockups and worse.  Poll
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else
  		del_timer_sync(&buf->timer);
  
-@@ -736,15 +741,6 @@ size_t relay_switch_subbuf(struct rchan_
+@@ -767,15 +772,6 @@ size_t relay_switch_subbuf(struct rchan_
  		else
  			buf->early_bytes += buf->chan->subbuf_size -
  					    buf->padding[old_subbuf];
diff --git a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index 8e7345b..bca17a2 100644
--- a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die()
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 11 Sep 2015 21:21:23 +0300
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When running with the RT-kernel (4.1.5-rt5) on TI OMAP dra7-evm and trying
 to do Suspend to RAM, the following backtrace occurs:
diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch
index 2e66acb..c7902cf 100644
--- a/debian/patches/features/all/rt/rt-add-rt-locks.patch
+++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 19:39:56 +0200
 Subject: rt: Add the preempt-rt lock replacement APIs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
 based locking functions for preempt-rt.
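
A caller-side sketch of what this mapping means (my_lock and
update_counters() are made-up names for illustration): code written
against the normal spinlock API compiles unchanged, but on
PREEMPT_RT_FULL the lock is an rt_mutex underneath and the critical
section becomes preemptible:

	static DEFINE_SPINLOCK(my_lock);

	static void update_counters(void)
	{
		spin_lock(&my_lock);	/* RT: forwards to rt_spin_lock() */
		/* critical section: atomic on !RT, preemptible on RT */
		spin_unlock(&my_lock);	/* RT: forwards to rt_spin_unlock() */
	}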
@@ -12,32 +12,32 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/kernel.h            |    4 
  include/linux/locallock.h         |    6 
- include/linux/mutex.h             |   20 +
+ include/linux/mutex.h             |   20 -
  include/linux/mutex_rt.h          |   84 ++++++
  include/linux/rtmutex.h           |   29 +-
  include/linux/rwlock_rt.h         |   99 +++++++
  include/linux/rwlock_types_rt.h   |   33 ++
  include/linux/rwsem.h             |    6 
- include/linux/rwsem_rt.h          |  152 ++++++++++++
+ include/linux/rwsem_rt.h          |  167 ++++++++++++
  include/linux/sched.h             |   19 +
  include/linux/spinlock.h          |   12 
  include/linux/spinlock_api_smp.h  |    4 
- include/linux/spinlock_rt.h       |  173 +++++++++++++
+ include/linux/spinlock_rt.h       |  162 ++++++++++++
  include/linux/spinlock_types.h    |   11 
  include/linux/spinlock_types_rt.h |   48 +++
  kernel/futex.c                    |   10 
  kernel/locking/Makefile           |    9 
- kernel/locking/rt.c               |  476 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c          |  422 +++++++++++++++++++++++++++++++--
- kernel/locking/rtmutex_common.h   |   14 +
+ kernel/locking/rt.c               |  498 ++++++++++++++++++++++++++++++++++++++
+ kernel/locking/rtmutex.c          |  460 +++++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex_common.h   |   14 -
  kernel/locking/spinlock.c         |    7 
  kernel/locking/spinlock_debug.c   |    5 
  kernel/sched/core.c               |    7 
- 23 files changed, 1594 insertions(+), 56 deletions(-)
+ 23 files changed, 1658 insertions(+), 56 deletions(-)
 
 --- a/include/linux/kernel.h
 +++ b/include/linux/kernel.h
-@@ -188,6 +188,9 @@ extern int _cond_resched(void);
+@@ -194,6 +194,9 @@ extern int _cond_resched(void);
   */
  # define might_sleep() \
  	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define sched_annotate_sleep()	(current->task_state_change = 0)
  #else
    static inline void ___might_sleep(const char *file, int line,
-@@ -195,6 +198,7 @@ extern int _cond_resched(void);
+@@ -201,6 +204,7 @@ extern int _cond_resched(void);
    static inline void __might_sleep(const char *file, int line,
  				   int preempt_offset) { }
  # define might_sleep() do { might_resched(); } while (0)
@@ -62,9 +62,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
   */
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+# define spin_lock_local(lock)			rt_spin_lock(lock)
-+# define spin_trylock_local(lock)		rt_spin_trylock(lock)
-+# define spin_unlock_local(lock)		rt_spin_unlock(lock)
++# define spin_lock_local(lock)			rt_spin_lock__no_mg(lock)
++# define spin_trylock_local(lock)		rt_spin_trylock__no_mg(lock)
++# define spin_unlock_local(lock)		rt_spin_unlock__no_mg(lock)
 +#else
  # define spin_lock_local(lock)			spin_lock(lock)
  # define spin_trylock_local(lock)		spin_trylock(lock)
@@ -411,7 +411,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/include/linux/rwsem.h
 +++ b/include/linux/rwsem.h
-@@ -18,6 +18,10 @@
+@@ -19,6 +19,10 @@
  #include <linux/osq_lock.h>
  #endif
  
@@ -422,7 +422,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  struct rw_semaphore;
  
  #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -177,4 +181,6 @@ extern void up_read_non_owner(struct rw_
+@@ -184,4 +188,6 @@ extern void up_read_non_owner(struct rw_
  # define up_read_non_owner(sem)			up_read(sem)
  #endif
  
@@ -431,7 +431,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif /* _LINUX_RWSEM_H */
 --- /dev/null
 +++ b/include/linux/rwsem_rt.h
-@@ -0,0 +1,152 @@
+@@ -0,0 +1,167 @@
 +#ifndef _LINUX_RWSEM_RT_H
 +#define _LINUX_RWSEM_RT_H
 +
@@ -486,8 +486,11 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +} while (0)
 +
 +extern void rt_down_write(struct rw_semaphore *rwsem);
++extern int  rt_down_write_killable(struct rw_semaphore *rwsem);
 +extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
 +extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern int  rt_down_write_killable_nested(struct rw_semaphore *rwsem,
++					  int subclass);
 +extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
 +				      struct lockdep_map *nest);
 +extern void rt__down_read(struct rw_semaphore *rwsem);
@@ -534,6 +537,11 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	rt_down_write(sem);
 +}
 +
++static inline int down_write_killable(struct rw_semaphore *sem)
++{
++	return rt_down_write_killable(sem);
++}
++
 +static inline int down_write_trylock(struct rw_semaphore *sem)
 +{
 +	return rt_down_write_trylock(sem);
@@ -568,6 +576,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +{
 +	rt_down_write_nested(sem, subclass);
 +}
++
++static inline int down_write_killable_nested(struct rw_semaphore *sem,
++					     int subclass)
++{
++	return rt_down_write_killable_nested(sem, subclass);
++}
++
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +static inline void down_write_nest_lock(struct rw_semaphore *sem,
 +		struct rw_semaphore *nest_lock)
@@ -586,7 +601,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -310,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!
+@@ -312,6 +312,11 @@ extern char ___assert_task_state[1 - 2*!
  
  #endif
  
@@ -598,7 +613,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Task command name length */
  #define TASK_COMM_LEN 16
  
-@@ -981,8 +986,18 @@ struct wake_q_head {
+@@ -1009,8 +1014,18 @@ struct wake_q_head {
  	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
  
  extern void wake_q_add(struct wake_q_head *head,
@@ -666,7 +681,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif /* __LINUX_SPINLOCK_API_SMP_H */
 --- /dev/null
 +++ b/include/linux/spinlock_rt.h
-@@ -0,0 +1,173 @@
+@@ -0,0 +1,162 @@
 +#ifndef __LINUX_SPINLOCK_RT_H
 +#define __LINUX_SPINLOCK_RT_H
 +
@@ -687,6 +702,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	__rt_spin_lock_init(slock, #slock, &__key);	\
 +} while (0)
 +
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
++
 +extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
@@ -701,19 +720,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 + * lockdep-less calls, for derived types like rwlock:
 + * (for trylock they can use rt_mutex_trylock() directly.
 + */
++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
 +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
 +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
 +
-+#define spin_lock(lock)				\
-+	do {					\
-+		migrate_disable();		\
-+		rt_spin_lock(lock);		\
-+	} while (0)
++#define spin_lock(lock)			rt_spin_lock(lock)
 +
 +#define spin_lock_bh(lock)			\
 +	do {					\
 +		local_bh_disable();		\
-+		migrate_disable();		\
 +		rt_spin_lock(lock);		\
 +	} while (0)
 +
@@ -724,24 +739,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#define spin_trylock(lock)			\
 +({						\
 +	int __locked;				\
-+	migrate_disable();			\
 +	__locked = spin_do_trylock(lock);	\
-+	if (!__locked)				\
-+		migrate_enable();		\
 +	__locked;				\
 +})
 +
 +#ifdef CONFIG_LOCKDEP
 +# define spin_lock_nested(lock, subclass)		\
 +	do {						\
-+		migrate_disable();			\
 +		rt_spin_lock_nested(lock, subclass);	\
 +	} while (0)
 +
 +#define spin_lock_bh_nested(lock, subclass)		\
 +	do {						\
 +		local_bh_disable();			\
-+		migrate_disable();			\
 +		rt_spin_lock_nested(lock, subclass);	\
 +	} while (0)
 +
@@ -749,7 +759,6 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	do {						 \
 +		typecheck(unsigned long, flags);	 \
 +		flags = 0;				 \
-+		migrate_disable();			 \
 +		rt_spin_lock_nested(lock, subclass);	 \
 +	} while (0)
 +#else
@@ -785,16 +794,11 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +/* FIXME: we need rt_spin_lock_nest_lock */
 +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
 +
-+#define spin_unlock(lock)				\
-+	do {						\
-+		rt_spin_unlock(lock);			\
-+		migrate_enable();			\
-+	} while (0)
++#define spin_unlock(lock)			rt_spin_unlock(lock)
 +
 +#define spin_unlock_bh(lock)				\
 +	do {						\
 +		rt_spin_unlock(lock);			\
-+		migrate_enable();			\
 +		local_bh_enable();			\
 +	} while (0)
 +
@@ -912,7 +916,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1263,6 +1263,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1292,6 +1292,7 @@ static int wake_futex_pi(u32 __user *uad
  	struct futex_pi_state *pi_state = this->pi_state;
  	u32 uninitialized_var(curval), newval;
  	WAKE_Q(wake_q);
@@ -920,7 +924,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	bool deboost;
  	int ret = 0;
  
-@@ -1329,7 +1330,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1358,7 +1359,8 @@ static int wake_futex_pi(u32 __user *uad
  
  	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  
@@ -930,7 +934,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * First unlock HB so the waiter does not spin on it once he got woken
-@@ -1339,6 +1341,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1368,6 +1370,7 @@ static int wake_futex_pi(u32 __user *uad
  	 */
  	spin_unlock(&hb->lock);
  	wake_up_q(&wake_q);
@@ -938,7 +942,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (deboost)
  		rt_mutex_adjust_prio(current);
  
-@@ -2813,10 +2816,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2842,10 +2845,7 @@ static int futex_wait_requeue_pi(u32 __u
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -986,7 +990,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 --- /dev/null
 +++ b/kernel/locking/rt.c
-@@ -0,0 +1,476 @@
+@@ -0,0 +1,498 @@
 +/*
 + * kernel/rt.c
 + *
@@ -1224,7 +1228,6 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +void __lockfunc rt_write_lock(rwlock_t *rwlock)
 +{
 +	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-+	migrate_disable();
 +	__rt_spin_lock(&rwlock->lock);
 +}
 +EXPORT_SYMBOL(rt_write_lock);
@@ -1238,7 +1241,6 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	 * recursive read locks succeed when current owns the lock
 +	 */
 +	if (rt_mutex_owner(lock) != current) {
-+		migrate_disable();
 +		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
 +		__rt_spin_lock(lock);
 +	}
@@ -1349,6 +1351,30 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +}
 +EXPORT_SYMBOL(rt_down_write);
 +
++int rt_down_write_killable(struct rw_semaphore *rwsem)
++{
++	int ret;
++
++	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++	ret = rt_mutex_lock_killable(&rwsem->lock);
++	if (ret)
++		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable);
++
++int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
++{
++	int ret;
++
++	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++	ret = rt_mutex_lock_killable(&rwsem->lock);
++	if (ret)
++		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable_nested);
++
 +void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
 +{
 +	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
@@ -1579,7 +1605,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			/*
  			 * The current top waiter stays enqueued. We
  			 * don't have to change anything in the lock
-@@ -884,6 +918,314 @@ static int try_to_take_rt_mutex(struct r
+@@ -884,6 +918,352 @@ static int try_to_take_rt_mutex(struct r
  	return 1;
  }
  
@@ -1682,7 +1708,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
 +	raw_spin_unlock(&self->pi_lock);
 +
-+	ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++	ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
 +	BUG_ON(ret);
 +
 +	for (;;) {
@@ -1767,8 +1793,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	rt_mutex_adjust_prio(current);
 +}
 +
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
++{
++	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock__no_mg);
++
 +void __lockfunc rt_spin_lock(spinlock_t *lock)
 +{
++	migrate_disable();
 +	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
 +	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 +}
@@ -1776,24 +1810,41 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
 +void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
 +{
++	migrate_disable();
 +	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
 +}
 +EXPORT_SYMBOL(__rt_spin_lock);
 +
++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
++{
++	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock__no_mg);
++
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 +{
++	migrate_disable();
 +	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
 +	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 +}
 +EXPORT_SYMBOL(rt_spin_lock_nested);
 +#endif
 +
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
++{
++	/* NOTE: we always pass in '1' for nested, for simplicity */
++	spin_release(&lock->dep_map, 1, _RET_IP_);
++	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock__no_mg);
++
 +void __lockfunc rt_spin_unlock(spinlock_t *lock)
 +{
 +	/* NOTE: we always pass in '1' for nested, for simplicity */
 +	spin_release(&lock->dep_map, 1, _RET_IP_);
 +	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++	migrate_enable();
 +}
 +EXPORT_SYMBOL(rt_spin_unlock);
 +
@@ -1815,12 +1866,27 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +}
 +EXPORT_SYMBOL(rt_spin_unlock_wait);
 +
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
++{
++	int ret;
++
++	ret = rt_mutex_trylock(&lock->lock);
++	if (ret)
++		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock__no_mg);
++
 +int __lockfunc rt_spin_trylock(spinlock_t *lock)
 +{
-+	int ret = rt_mutex_trylock(&lock->lock);
++	int ret;
 +
++	migrate_disable();
++	ret = rt_mutex_trylock(&lock->lock);
 +	if (ret)
 +		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++	else
++		migrate_enable();
 +	return ret;
 +}
 +EXPORT_SYMBOL(rt_spin_trylock);
@@ -1859,12 +1925,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 +	if (atomic_add_unless(atomic, -1, 1))
 +		return 0;
-+	migrate_disable();
 +	rt_spin_lock(lock);
 +	if (atomic_dec_and_test(atomic))
 +		return 1;
 +	rt_spin_unlock(lock);
-+	migrate_enable();
 +	return 0;
 +}
 +EXPORT_SYMBOL(atomic_dec_and_spin_lock);
@@ -1894,7 +1958,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Task blocks on lock.
   *
-@@ -996,6 +1338,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -996,6 +1376,7 @@ static int task_blocks_on_rt_mutex(struc
   * Called with lock->wait_lock held and interrupts disabled.
   */
  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -1902,7 +1966,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				    struct rt_mutex *lock)
  {
  	struct rt_mutex_waiter *waiter;
-@@ -1024,7 +1367,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1024,7 +1405,10 @@ static void mark_wakeup_next_waiter(stru
  
  	raw_spin_unlock(&current->pi_lock);
  
@@ -1914,7 +1978,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1105,11 +1451,11 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1105,11 +1489,11 @@ void rt_mutex_adjust_pi(struct task_stru
  		return;
  	}
  	next_lock = waiter->lock;
@@ -1927,7 +1991,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
  				   next_lock, NULL, task);
  }
-@@ -1196,9 +1542,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1196,9 +1580,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	unsigned long flags;
  	int ret = 0;
  
@@ -1938,7 +2002,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1292,7 +1636,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1292,7 +1674,8 @@ static inline int rt_mutex_slowtrylock(s
   * Return whether the current task needs to undo a potential priority boosting.
   */
  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -1948,7 +2012,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	unsigned long flags;
  
-@@ -1348,7 +1693,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1348,7 +1731,7 @@ static bool __sched rt_mutex_slowunlock(
  	 *
  	 * Queue the next waiter for wakeup once we release the wait_lock.
  	 */
@@ -1957,7 +2021,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  
-@@ -1405,17 +1750,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1405,17 +1788,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
  static inline void
  rt_mutex_fastunlock(struct rt_mutex *lock,
  		    bool (*slowfn)(struct rt_mutex *lock,
@@ -1980,7 +2044,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/* Undo pi boosting if necessary: */
  		if (deboost)
-@@ -1552,13 +1900,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1552,13 +1938,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
   * required or not.
   */
  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1997,7 +2061,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -1591,13 +1940,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1591,13 +1978,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  void __rt_mutex_init(struct rt_mutex *lock, const char *name)
  {
  	lock->owner = NULL;
@@ -2012,7 +2076,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1612,7 +1960,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1612,7 +1998,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
  void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  				struct task_struct *proxy_owner)
  {
@@ -2021,7 +2085,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	debug_rt_mutex_proxy_lock(lock, proxy_owner);
  	rt_mutex_set_owner(lock, proxy_owner);
  	rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1774,3 +2122,25 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1774,3 +2160,25 @@ int rt_mutex_finish_proxy_lock(struct rt
  
  	return ret;
  }
@@ -2148,7 +2212,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -419,7 +419,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -454,7 +454,7 @@ void wake_q_add(struct wake_q_head *head
  	head->lastp = &node->next;
  }
  
@@ -2157,7 +2221,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	struct wake_q_node *node = head->first;
  
-@@ -436,7 +436,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -471,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
  		 * wake_up_process() implies a wmb() to pair with the queueing
  		 * in wake_q_add() so as not to miss wakeups.
  		 */
diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
index 8cbf28c..852d49c 100644
--- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: rt: Introduce cpu_chill()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side is
 preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #endif /* defined(_LINUX_DELAY_H) */
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1788,6 +1788,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1768,6 +1768,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
  	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
  }
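
A caller-side sketch of the intended conversion (try_grab_resource() is
a made-up stand-in for the retried operation): spinning callers switch
from cpu_relax() to cpu_chill(), which on RT sleeps briefly so that a
preempted updater can run and let the loop terminate:

	while (!try_grab_resource())
		cpu_chill();	/* !RT: cpu_relax(); RT: a short sleep */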
  
diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch
index 6ba7dff..e88399d 100644
--- a/debian/patches/features/all/rt/rt-local-irq-lock.patch
+++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
 Subject: rt: Add local irq locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Introduce locallock. For !RT this maps to preempt_disable()/
 local_irq_disable() so there is not much that changes. For RT this will
diff --git a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
index 461b062..1e8e205 100644
--- a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 8 Feb 2016 16:15:28 +0100
 Subject: rt/locking: Reenable migration across schedule
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We currently disable migration across lock acquisition. That includes the part
 where we block on the lock and schedule out. We cannot disable migration after
@@ -40,8 +40,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		slowfn(lock, do_mig_dis);
  }
  
- static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -989,7 +994,8 @@ static int task_blocks_on_rt_mutex(struc
+ static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
+@@ -990,7 +995,8 @@ static int task_blocks_on_rt_mutex(struc
   * We store the current state under p->pi_lock in p->saved_state and
   * the try_to_wake_up() code handles this accordingly.
   */
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct task_struct *lock_owner, *self = current;
  	struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1033,8 +1039,13 @@ static void  noinline __sched rt_spin_lo
+@@ -1034,8 +1040,13 @@ static void  noinline __sched rt_spin_lo
  
  		debug_rt_mutex_print_deadlock(&waiter);
  
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
-@@ -1105,38 +1116,35 @@ static void  noinline __sched rt_spin_lo
+@@ -1133,38 +1144,35 @@ static int noinline __sched rt_spin_lock
  
  void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
  {
diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch
index f480b28..090fada 100644
--- a/debian/patches/features/all/rt/rt-preempt-base-config.patch
+++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch
@@ -1,7 +1,7 @@
 Subject: rt: Provide PREEMPT_RT_BASE config switch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Jun 2011 12:39:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Introduce PREEMPT_RT_BASE which enables parts of
 PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
index 8ff5e61..4855742 100644
--- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch
+++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
@@ -1,7 +1,7 @@
 Subject: rt: Improve the serial console PASS_LIMIT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Wed Dec 14 13:05:54 CET 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Beyond the warning:
 
diff --git a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 648c2c1..416dc22 100644
--- a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle non enqueued waiters gracefully
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 06 Nov 2015 18:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Yimin debugged that in case of a PI wakeup in progress when
 rt_mutex_start_proxy_lock() calls task_blocks_on_rt_mutex() the latter
diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 32736b6..8c58009 100644
--- a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 Date: Mon, 28 Oct 2013 09:36:37 +0100
 Subject: rtmutex: Add RT aware ww locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 lockdep says:
 | --------------------------------------------------------------------------
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  
  #include "rtmutex_common.h"
  
-@@ -1219,6 +1220,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1296,6 +1297,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
  
  #endif /* PREEMPT_RT_FULL */
  
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  static inline int
  try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  		     struct rt_mutex_waiter *waiter)
-@@ -1473,7 +1508,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1550,7 +1585,8 @@ void rt_mutex_adjust_pi(struct task_stru
  static int __sched
  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
  		    struct hrtimer_sleeper *timeout,
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	int ret = 0;
  
-@@ -1496,6 +1532,12 @@ static int __sched
+@@ -1573,6 +1609,12 @@ static int __sched
  				break;
  		}
  
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irq(&lock->wait_lock);
  
  		debug_rt_mutex_print_deadlock(waiter);
-@@ -1530,13 +1572,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1607,13 +1649,90 @@ static void rt_mutex_handle_deadlock(int
  	}
  }
  
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	struct rt_mutex_waiter waiter;
  	unsigned long flags;
-@@ -1556,6 +1675,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1633,6 +1752,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  
  	/* Try to acquire the lock again: */
  	if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  		return 0;
  	}
-@@ -1570,13 +1691,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1647,13 +1768,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  
  	if (likely(!ret))
  		/* sleep on the mutex */
@@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  	}
  
  	/*
-@@ -1709,31 +1840,36 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1786,31 +1917,36 @@ static bool __sched rt_mutex_slowunlock(
   */
  static inline int
  rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -268,7 +268,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  
  static inline int
-@@ -1780,7 +1916,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1857,7 +1993,7 @@ void __sched rt_mutex_lock(struct rt_mut
  {
  	might_sleep();
  
@@ -277,7 +277,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock);
  
-@@ -1797,7 +1933,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1874,7 +2010,7 @@ int __sched rt_mutex_lock_interruptible(
  {
  	might_sleep();
  
@@ -286,7 +286,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
  
-@@ -1810,7 +1946,7 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1887,7 +2023,7 @@ int rt_mutex_timed_futex_lock(struct rt_
  	might_sleep();
  
  	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
@@ -295,7 +295,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  				       rt_mutex_slowlock);
  }
  
-@@ -1829,7 +1965,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1906,7 +2042,7 @@ int __sched rt_mutex_lock_killable(struc
  {
  	might_sleep();
  
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
  
-@@ -1853,6 +1989,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1930,6 +2066,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
  
  	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
  				       RT_MUTEX_MIN_CHAINWALK,
@@ -312,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  				       rt_mutex_slowlock);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2107,7 +2244,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2184,7 +2321,7 @@ int rt_mutex_finish_proxy_lock(struct rt
  	set_current_state(TASK_INTERRUPTIBLE);
  
  	/* sleep on the mutex */
@@ -321,7 +321,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  
  	if (unlikely(ret))
  		remove_waiter(lock, waiter);
-@@ -2123,24 +2260,88 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2200,24 +2337,88 @@ int rt_mutex_finish_proxy_lock(struct rt
  	return ret;
  }
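
For context, a sketch of the wound/wait usage pattern that the RT-aware
implementation has to preserve (my_ww_class, obj1 and obj2 are
illustrative):

	static DEFINE_WW_CLASS(my_ww_class);

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);
	ret = ww_mutex_lock(&obj1->lock, &ctx);
	if (!ret) {
		ret = ww_mutex_lock(&obj2->lock, &ctx);
		if (ret == -EDEADLK) {
			/* wounded: drop obj1, wait for obj2, then retry */
			ww_mutex_unlock(&obj1->lock);
			ww_mutex_lock_slow(&obj2->lock, &ctx);
			ret = ww_mutex_lock(&obj1->lock, &ctx);
		}
	}
	ww_acquire_done(&ctx);

The -EDEADLK backoff is the path that the ww hooks added to the rtmutex
slowlock above have to handle.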
  
diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
index 37b147c..bbc4fb4 100644
--- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
+++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Avoid include hell
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Include only the required raw types. This avoids pulling in the
 complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
index 9a124f5..abbb539 100644
--- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
+++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle the various new futex race conditions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT opens a few new interesting race conditions in the rtmutex/futex
 combo due to the futex hash bucket lock being a 'sleeping' spinlock and
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1886,6 +1886,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1915,6 +1915,16 @@ static int futex_requeue(u32 __user *uad
  				requeue_pi_wake_futex(this, &key2, hb2);
  				drop_count++;
  				continue;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			} else if (ret) {
  				/*
  				 * rt_mutex_start_proxy_lock() detected a
-@@ -2776,7 +2786,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2805,7 +2815,7 @@ static int futex_wait_requeue_pi(u32 __u
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct rt_mutex_waiter rt_waiter;
  	struct rt_mutex *pi_mutex = NULL;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	union futex_key key2 = FUTEX_KEY_INIT;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -2835,20 +2845,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2864,20 +2874,55 @@ static int futex_wait_requeue_pi(u32 __u
  	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
  	futex_wait_queue_me(hb, &q, to);
  
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -2857,14 +2902,15 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2886,14 +2931,15 @@ static int futex_wait_requeue_pi(u32 __u
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	} else {
  		/*
-@@ -2877,7 +2923,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2906,7 +2952,8 @@ static int futex_wait_requeue_pi(u32 __u
  		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
  		debug_rt_mutex_free_waiter(&rt_waiter);
  
diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
index dcf4e95..f64d98d 100644
--- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch
+++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Add rtmutex_lock_killable()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 09 Jun 2011 11:43:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Add "killable" type to rtmutex. We need this since rtmutex are used as
 "normal" mutexes which do use this type.
diff --git a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
index 76e3507..b9ed0e1 100644
--- a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
+++ b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed 02 Dec 2015 11:34:07 +0100
 Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 A non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
 -RT we don't run softirqs in IRQ context but in thread context, so it is
@@ -19,9 +19,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  int __sched rt_mutex_trylock(struct rt_mutex *lock)
  {
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	if (WARN_ON(in_irq() || in_nmi()))
++	if (WARN_ON_ONCE(in_irq() || in_nmi()))
 +#else
- 	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+ 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 +#endif
  		return 0;
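
Caller-side view, as a sketch (the lock and the tasklet context are
illustrative): on RT a softirq handler runs in thread context, so a
trylock taken there is legitimate and must not trip the debug check:

	/* e.g. in a tasklet; on RT this runs in a kthread */
	if (spin_trylock(&m)) {		/* RT: backed by rt_mutex_trylock() */
		/* ... short critical section ... */
		spin_unlock(&m);
	}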
  
diff --git a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
index 419c788..a22f859 100644
--- a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
+++ b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
@@ -1,6 +1,6 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The RCU header pulls in spinlock.h and fails due to not-yet-defined types:
 
@@ -14,63 +14,145 @@ The RCU header pulls in spinlock.h and fails due not yet defined types:
 | extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
 |                                      ^
 
-This patch moves the only RCU user from the header file into c file so the
-inclusion can be avoided.
+This patch moves the required RCU function from the rcupdate.h header file into
+a new header file which can be included by both users.
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- include/linux/rbtree.h |   11 ++---------
- lib/rbtree.c           |   11 +++++++++++
- 2 files changed, 13 insertions(+), 9 deletions(-)
+ include/linux/rbtree.h             |    2 -
+ include/linux/rcu_assign_pointer.h |   53 +++++++++++++++++++++++++++++++++++++
+ include/linux/rcupdate.h           |   49 ----------------------------------
+ 3 files changed, 55 insertions(+), 49 deletions(-)
 
 --- a/include/linux/rbtree.h
 +++ b/include/linux/rbtree.h
-@@ -31,7 +31,6 @@
+@@ -31,7 +31,7 @@
  
  #include <linux/kernel.h>
  #include <linux/stddef.h>
 -#include <linux/rcupdate.h>
++#include <linux/rcu_assign_pointer.h>
  
  struct rb_node {
  	unsigned long  __rb_parent_color;
-@@ -86,14 +85,8 @@ static inline void rb_link_node(struct r
- 	*rb_link = node;
- }
- 
--static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
--				    struct rb_node **rb_link)
--{
--	node->__rb_parent_color = (unsigned long)parent;
--	node->rb_left = node->rb_right = NULL;
--
--	rcu_assign_pointer(*rb_link, node);
--}
-+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
-+		      struct rb_node **rb_link);
+--- /dev/null
++++ b/include/linux/rcu_assign_pointer.h
+@@ -0,0 +1,53 @@
++#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
++#define __LINUX_RCU_ASSIGN_POINTER_H__
++#include <linux/compiler.h>
++
++/**
++ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
++ * @v: The value to statically initialize with.
++ */
++#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
++
++/**
++ * rcu_assign_pointer() - assign to RCU-protected pointer
++ * @p: pointer to assign to
++ * @v: value to assign (publish)
++ *
++ * Assigns the specified value to the specified RCU-protected
++ * pointer, ensuring that any concurrent RCU readers will see
++ * any prior initialization.
++ *
++ * Inserts memory barriers on architectures that require them
++ * (which is most of them), and also prevents the compiler from
++ * reordering the code that initializes the structure after the pointer
++ * assignment.  More importantly, this call documents which pointers
++ * will be dereferenced by RCU read-side code.
++ *
++ * In some special cases, you may use RCU_INIT_POINTER() instead
++ * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
++ * to the fact that it does not constrain either the CPU or the compiler.
++ * That said, using RCU_INIT_POINTER() when you should have used
++ * rcu_assign_pointer() is a very bad thing that results in
++ * impossible-to-diagnose memory corruption.  So please be careful.
++ * See the RCU_INIT_POINTER() comment header for details.
++ *
++ * Note that rcu_assign_pointer() evaluates each of its arguments only
++ * once, appearances notwithstanding.  One of the "extra" evaluations
++ * is in typeof() and the other visible only to sparse (__CHECKER__),
++ * neither of which actually execute the argument.  As with most cpp
++ * macros, this execute-arguments-only-once property is important, so
++ * please be careful when making changes to rcu_assign_pointer() and the
++ * other macros that it invokes.
++ */
++#define rcu_assign_pointer(p, v)					      \
++({									      \
++	uintptr_t _r_a_p__v = (uintptr_t)(v);				      \
++									      \
++	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)	      \
++		WRITE_ONCE((p), (typeof(p))(_r_a_p__v));		      \
++	else								      \
++		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
++	_r_a_p__v;							      \
++})
++
++#endif
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -46,6 +46,7 @@
+ #include <linux/compiler.h>
+ #include <linux/ktime.h>
+ #include <linux/irqflags.h>
++#include <linux/rcu_assign_pointer.h>
  
- #define rb_entry_safe(ptr, type, member) \
- 	({ typeof(ptr) ____ptr = (ptr); \
---- a/lib/rbtree.c
-+++ b/lib/rbtree.c
-@@ -23,6 +23,7 @@
+ #include <asm/barrier.h>
  
- #include <linux/rbtree_augmented.h>
- #include <linux/export.h>
-+#include <linux/rcupdate.h>
+@@ -628,54 +629,6 @@ static inline void rcu_preempt_sleep_che
+ })
  
- /*
-  * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
-@@ -590,3 +591,13 @@ struct rb_node *rb_first_postorder(const
- 	return rb_left_deepest_node(root->rb_node);
- }
- EXPORT_SYMBOL(rb_first_postorder);
-+
-+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
-+				    struct rb_node **rb_link)
-+{
-+	node->__rb_parent_color = (unsigned long)parent;
-+	node->rb_left = node->rb_right = NULL;
-+
-+	rcu_assign_pointer(*rb_link, node);
-+}
-+EXPORT_SYMBOL(rb_link_node_rcu);
+ /**
+- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+- * @v: The value to statically initialize with.
+- */
+-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+-
+-/**
+- * rcu_assign_pointer() - assign to RCU-protected pointer
+- * @p: pointer to assign to
+- * @v: value to assign (publish)
+- *
+- * Assigns the specified value to the specified RCU-protected
+- * pointer, ensuring that any concurrent RCU readers will see
+- * any prior initialization.
+- *
+- * Inserts memory barriers on architectures that require them
+- * (which is most of them), and also prevents the compiler from
+- * reordering the code that initializes the structure after the pointer
+- * assignment.  More importantly, this call documents which pointers
+- * will be dereferenced by RCU read-side code.
+- *
+- * In some special cases, you may use RCU_INIT_POINTER() instead
+- * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
+- * to the fact that it does not constrain either the CPU or the compiler.
+- * That said, using RCU_INIT_POINTER() when you should have used
+- * rcu_assign_pointer() is a very bad thing that results in
+- * impossible-to-diagnose memory corruption.  So please be careful.
+- * See the RCU_INIT_POINTER() comment header for details.
+- *
+- * Note that rcu_assign_pointer() evaluates each of its arguments only
+- * once, appearances notwithstanding.  One of the "extra" evaluations
+- * is in typeof() and the other visible only to sparse (__CHECKER__),
+- * neither of which actually execute the argument.  As with most cpp
+- * macros, this execute-arguments-only-once property is important, so
+- * please be careful when making changes to rcu_assign_pointer() and the
+- * other macros that it invokes.
+- */
+-#define rcu_assign_pointer(p, v)					      \
+-({									      \
+-	uintptr_t _r_a_p__v = (uintptr_t)(v);				      \
+-									      \
+-	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)	      \
+-		WRITE_ONCE((p), (typeof(p))(_r_a_p__v));		      \
+-	else								      \
+-		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+-	_r_a_p__v;							      \
+-})
+-
+-/**
+  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+  * @p: The pointer to read
+  *
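
For context on the macro being relocated above: the heart of rcu_assign_pointer() is a store-release that publishes a fully initialised object. A minimal user-space analogue in C11 (an illustration under that reading, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conf { int a, b; };
    static _Atomic(struct conf *) gp;   /* the "RCU-protected" pointer */

    static void publish(void)
    {
            struct conf *c = malloc(sizeof(*c));

            if (!c)
                    return;
            c->a = 1;                   /* initialise the object first... */
            c->b = 2;
            /* ...then publish with release ordering: a reader that sees
             * the new pointer also sees the stores above.  This mirrors
             * the smp_store_release() in the hunk. */
            atomic_store_explicit(&gp, c, memory_order_release);
    }

    int main(void)
    {
            struct conf *c;

            publish();
            c = atomic_load_explicit(&gp, memory_order_acquire);
            if (c)
                    printf("%d %d\n", c->a, c->b);  /* stand-in reader */
            return 0;
    }
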
diff --git a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
index 07e8edc..097fe86 100644
--- a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
+++ b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sat, 14 Feb 2015 11:01:16 -0500
 Subject: sas-ata/isci: don't disable interrupts in qc_issue handler
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On 3.14-rt we see the following trace on Canoe Pass for
 SCSI_ISCI "Intel(R) C600 Series Chipset SAS Controller"
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_unlock(ap->lock);
  
  	/* If the device fell off, no sense in issuing commands */
-@@ -255,7 +255,7 @@ static unsigned int sas_ata_qc_issue(str
+@@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(str
  
   out:
  	spin_lock(ap->lock);
diff --git a/debian/patches/features/all/rt/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch b/debian/patches/features/all/rt/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
index dbb3643..1aa7a3e 100644
--- a/debian/patches/features/all/rt/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
+++ b/debian/patches/features/all/rt/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
@@ -1,7 +1,7 @@
 Subject: sc16is7xx: Drop bogus use of IRQF_ONESHOT
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 18 Feb 2016 11:26:12 -0600
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The use of IRQF_ONESHOT when registering an interrupt handler with
 request_irq() is nonsensical.
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/tty/serial/sc16is7xx.c
 +++ b/drivers/tty/serial/sc16is7xx.c
-@@ -1251,7 +1251,7 @@ static int sc16is7xx_probe(struct device
+@@ -1240,7 +1240,7 @@ static int sc16is7xx_probe(struct device
  
  	/* Setup interrupt */
  	ret = devm_request_irq(dev, irq, sc16is7xx_irq,
diff --git a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 7cbd396..b656171 100644
--- a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Juri Lelli <juri.lelli at gmail.com>
 Date: Tue, 13 May 2014 15:30:20 +0200
 Subject: sched/deadline: dl_task_timer has to be irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 As for rt_period_timer, dl_task_timer has to be irqsafe.
 
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
-@@ -694,6 +694,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_
  
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	timer->function = dl_task_timer;
diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch
index 44f22fc..5750520 100644
--- a/debian/patches/features/all/rt/sched-delay-put-task.patch
+++ b/debian/patches/features/all/rt/sched-delay-put-task.patch
@@ -1,7 +1,7 @@
 Subject: sched: Move task_struct cleanup to RCU
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 May 2011 16:59:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 __put_task_struct() does quite a lot of expensive work. We don't want to
 burden random tasks with that.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1865,6 +1865,9 @@ struct task_struct {
+@@ -1936,6 +1936,9 @@ struct task_struct {
  	unsigned int	sequential_io;
  	unsigned int	sequential_io_avg;
  #endif
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long	task_state_change;
  #endif
-@@ -2077,6 +2080,15 @@ extern struct pid *cad_pid;
+@@ -2174,6 +2177,15 @@ extern struct pid *cad_pid;
  extern void free_task(struct task_struct *tsk);
  #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
  
@@ -40,17 +40,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern void __put_task_struct(struct task_struct *t);
  
  static inline void put_task_struct(struct task_struct *t)
-@@ -2084,6 +2096,7 @@ static inline void put_task_struct(struc
+@@ -2181,6 +2193,7 @@ static inline void put_task_struct(struc
  	if (atomic_dec_and_test(&t->usage))
  		__put_task_struct(t);
  }
 +#endif
  
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- extern void task_cputime(struct task_struct *t,
+ struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -253,7 +253,9 @@ static inline void put_signal_struct(str
+@@ -251,7 +251,9 @@ static inline void put_signal_struct(str
  	if (atomic_dec_and_test(&sig->sigcnt))
  		free_signal_struct(sig);
  }
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void __put_task_struct(struct task_struct *tsk)
  {
  	WARN_ON(!tsk->exit_state);
-@@ -270,7 +272,18 @@ void __put_task_struct(struct task_struc
+@@ -268,7 +270,18 @@ void __put_task_struct(struct task_struc
  	if (!profile_handoff_task(tsk))
  		free_task(tsk);
  }
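
The mechanism being applied is the standard call_rcu() deferral. A hedged kernel-style sketch of the shape (the struct and function names here are hypothetical, not the patch's):

    #include <linux/atomic.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            atomic_t usage;
            struct rcu_head rcu;    /* storage for the deferred callback */
    };

    static void obj_free_rcu(struct rcu_head *head)
    {
            struct obj *o = container_of(head, struct obj, rcu);

            /* the expensive teardown now runs from the RCU callback,
             * not in whichever task happened to drop the last reference */
            kfree(o);
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->usage))
                    call_rcu(&o->rcu, obj_free_rcu);
    }
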
diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
index 2a96598..c0de8d6 100644
--- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Carsten reported problems when running:
 
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1029,6 +1029,7 @@ config CFS_BANDWIDTH
+@@ -1054,6 +1054,7 @@ config CFS_BANDWIDTH
  config RT_GROUP_SCHED
  	bool "Group scheduling for SCHED_RR/FIFO"
  	depends on CGROUP_SCHED
diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
index 842f594..cf9ce10 100644
--- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable TTWU_QUEUE on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The queued remote wakeup mechanism can introduce rather large
 latencies if the number of migrated tasks is high. Disable it for RT.
diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
index ffb876c..9004f80 100644
--- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
+++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
 Subject: sched: Limit the number of task migrations per batch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Put an upper limit on the number of tasks which are migrated per batch
 to avoid large latencies.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -128,7 +128,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_fe
   * Number of tasks to iterate in a single balance run.
   * Limited because this is done with IRQs disabled.
   */
diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
index 6d761ff..1163a45 100644
--- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
 Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT changes the rcu_preempt_depth semantics, so we cannot check for it
 in might_sleep().
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -300,6 +300,11 @@ void synchronize_rcu(void);
+@@ -301,6 +301,11 @@ void synchronize_rcu(void);
   * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
   */
  #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #else /* #ifdef CONFIG_PREEMPT_RCU */
  
-@@ -325,6 +330,8 @@ static inline int rcu_preempt_depth(void
+@@ -326,6 +331,8 @@ static inline int rcu_preempt_depth(void
  	return 0;
  }
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Internal to kernel */
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7534,7 +7534,7 @@ void __init sched_init(void)
+@@ -7697,7 +7697,7 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
index 1db54a7..e3729ce 100644
--- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
+++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
 Subject: sched: Move mmdrop to RCU on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Takes sleeping locks and calls into the memory allocator, so nothing
 we want to do in task switch and other atomic contexts.
@@ -9,9 +9,9 @@ we want to do in task switch and oder atomic contexts.
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/mm_types.h |    4 ++++
- include/linux/sched.h    |   12 ++++++++++++
+ include/linux/sched.h    |   11 +++++++++++
  kernel/fork.c            |   13 +++++++++++++
- kernel/sched/core.c      |   18 ++++++++++++++++--
+ kernel/sched/core.c      |   19 +++++++++++++++++--
  4 files changed, 45 insertions(+), 2 deletions(-)
 
 --- a/include/linux/mm_types.h
@@ -22,9 +22,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/uprobes.h>
 +#include <linux/rcupdate.h>
  #include <linux/page-flags-layout.h>
+ #include <linux/workqueue.h>
  #include <asm/page.h>
- #include <asm/mmu.h>
-@@ -502,6 +503,9 @@ struct mm_struct {
+@@ -508,6 +509,9 @@ struct mm_struct {
  	bool tlb_flush_pending;
  #endif
  	struct uprobes_state uprobes_state;
@@ -36,14 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void __user *bd_addr;
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2640,12 +2640,24 @@ extern struct mm_struct * mm_alloc(void)
- 
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
- {
- 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+@@ -2857,6 +2857,17 @@ static inline void mmdrop(struct mm_stru
  		__mmdrop(mm);
  }
  
@@ -58,12 +51,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +# define mmdrop_delayed(mm)	mmdrop(mm)
 +#endif
 +
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
+ static inline bool mmget_not_zero(struct mm_struct *mm)
+ {
+ 	return atomic_inc_not_zero(&mm->mm_users);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -712,6 +712,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -715,6 +715,19 @@ void __mmdrop(struct mm_struct *mm)
  }
  EXPORT_SYMBOL_GPL(__mmdrop);
  
@@ -80,12 +73,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +}
 +#endif
 +
- /*
-  * Decrement the use count and release all resources for an mm.
-  */
+ static inline void __mmput(struct mm_struct *mm)
+ {
+ 	VM_BUG_ON(atomic_read(&mm->mm_users));
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2642,8 +2642,12 @@ static struct rq *finish_task_switch(str
+@@ -2776,8 +2776,12 @@ static struct rq *finish_task_switch(str
  	finish_arch_post_lock_switch();
  
  	fire_sched_in_preempt_notifiers(current);
@@ -99,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (unlikely(prev_state == TASK_DEAD)) {
  		if (prev->sched_class->task_dead)
  			prev->sched_class->task_dead(prev);
-@@ -5299,6 +5303,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5513,6 +5517,8 @@ void sched_setnuma(struct task_struct *p
  #endif /* CONFIG_NUMA_BALANCING */
  
  #ifdef CONFIG_HOTPLUG_CPU
@@ -108,8 +101,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Ensures that the idle task is using init_mm right before its cpu goes
   * offline.
-@@ -5313,7 +5319,11 @@ void idle_task_exit(void)
- 		switch_mm(mm, &init_mm, current);
+@@ -5527,7 +5533,12 @@ void idle_task_exit(void)
+ 		switch_mm_irqs_off(mm, &init_mm, current);
  		finish_arch_post_lock_switch();
  	}
 -	mmdrop(mm);
@@ -118,17 +111,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	 * call mmdrop() nor mmdrop_delayed() from here.
 +	 */
 +	per_cpu(idle_last_mm, smp_processor_id()) = mm;
++
  }
  
  /*
-@@ -5509,6 +5519,10 @@ migration_call(struct notifier_block *nf
- 
- 	case CPU_DEAD:
- 		calc_load_migrate(rq);
-+		if (per_cpu(idle_last_mm, cpu)) {
-+			mmdrop(per_cpu(idle_last_mm, cpu));
-+			per_cpu(idle_last_mm, cpu) = NULL;
-+		}
- 		break;
+@@ -7402,6 +7413,10 @@ int sched_cpu_dying(unsigned int cpu)
+ 	update_max_interval();
+ 	nohz_balance_exit_idle(cpu);
+ 	hrtick_clear(rq);
++	if (per_cpu(idle_last_mm, cpu)) {
++		mmdrop(per_cpu(idle_last_mm, cpu));
++		per_cpu(idle_last_mm, cpu) = NULL;
++	}
+ 	return 0;
+ }
  #endif
- 	}
diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
index 43054ed..077a126 100644
--- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
+++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add saved_state for tasks blocked on sleeping locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 25 Jun 2011 09:21:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Spinlocks are state preserving in !RT. RT changes the state when a
 task gets blocked on a lock. So we need to remember the state before
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1393,6 +1393,7 @@ struct tlbflush_unmap_batch {
+@@ -1459,6 +1459,7 @@ struct tlbflush_unmap_batch {
  
  struct task_struct {
  	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *stack;
  	atomic_t usage;
  	unsigned int flags;	/* per process flags, defined below */
-@@ -2517,6 +2518,7 @@ extern void xtime_update(unsigned long t
+@@ -2649,6 +2650,7 @@ extern void xtime_update(unsigned long t
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   extern void kick_process(struct task_struct *tsk);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1931,8 +1931,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2023,8 +2023,25 @@ try_to_wake_up(struct task_struct *p, un
  	 */
  	smp_mb__before_spinlock();
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	trace_sched_waking(p);
  
-@@ -2061,6 +2078,18 @@ int wake_up_process(struct task_struct *
+@@ -2172,6 +2189,18 @@ int wake_up_process(struct task_struct *
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return try_to_wake_up(p, state, 0);
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1128,6 +1128,7 @@ static inline void finish_lock_switch(st
+@@ -1138,6 +1138,7 @@ static inline void finish_lock_switch(st
  #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
  #define WF_FORK		0x02		/* child wakeup after fork */
  #define WF_MIGRATED	0x4		/* internal use, task got migrated */
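
The saved_state handshake, reduced to a conceptual sketch (helper names hypothetical; the real code runs under p->pi_lock, as the hunks above show):

    /* Before blocking on an rtmutex-based "spinlock": remember what the
     * task was doing, e.g. a TASK_INTERRUPTIBLE sleep it had begun. */
    static void rt_lock_sleep(struct task_struct *p)
    {
            p->saved_state = p->state;
            p->state = TASK_UNINTERRUPTIBLE;
    }

    /* A lock wakeup must restore, not destroy, the remembered state, so
     * the original sleep still completes as if no contention happened. */
    static void rt_lock_wake(struct task_struct *p)
    {
            p->state = p->saved_state;
            p->saved_state = TASK_RUNNING;
    }
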
diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
index 49df8e1..7b8e4d4 100644
--- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
@@ -1,7 +1,7 @@
 Subject: sched: ttwu: Return success when only changing the saved_state value
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Dec 2011 21:42:19 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When a task blocks on a rt lock, it saves the current state in
 p->saved_state, so a lock related wake up will not destroy the
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1938,8 +1938,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2030,8 +2030,10 @@ try_to_wake_up(struct task_struct *p, un
  		 * if the wakeup condition is true.
  		 */
  		if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 38ed6bd..51e5244 100644
--- a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 18 Mar 2013 15:12:49 -0400
 Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 In -rt, most spin_locks() turn into mutexes. One of these spin_lock
 conversions is performed on the workqueue gcwq->lock. When the idle
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3305,8 +3305,10 @@ static void __sched notrace __schedule(b
+@@ -3482,8 +3482,10 @@ static void __sched notrace __schedule(b
  			 * If a worker went to sleep, notify and ask workqueue
  			 * whether it wants to wake up a task to maintain
  			 * concurrency.
diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
index 7402b7a..136b2f7 100644
--- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
@@ -1,39 +1,21 @@
 Subject: scsi/fcoe: Make RT aware.
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Do not disable preemption while taking sleeping locks. All users look safe
 for migrate_disable() only.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- drivers/scsi/fcoe/fcoe.c      |   18 +++++++++---------
+ drivers/scsi/fcoe/fcoe.c      |   16 ++++++++--------
  drivers/scsi/fcoe/fcoe_ctlr.c |    4 ++--
  drivers/scsi/libfc/fc_exch.c  |    4 ++--
- 3 files changed, 13 insertions(+), 13 deletions(-)
+ 3 files changed, 12 insertions(+), 12 deletions(-)
 
 --- a/drivers/scsi/fcoe/fcoe.c
 +++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1286,7 +1286,7 @@ static void fcoe_percpu_thread_destroy(u
- 	struct sk_buff *skb;
- #ifdef CONFIG_SMP
- 	struct fcoe_percpu_s *p0;
--	unsigned targ_cpu = get_cpu();
-+	unsigned targ_cpu = get_cpu_light();
- #endif /* CONFIG_SMP */
- 
- 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
-@@ -1342,7 +1342,7 @@ static void fcoe_percpu_thread_destroy(u
- 			kfree_skb(skb);
- 		spin_unlock_bh(&p->fcoe_rx_list.lock);
- 	}
--	put_cpu();
-+	put_cpu_light();
- #else
- 	/*
- 	 * This a non-SMP scenario where the singular Rx thread is
-@@ -1566,11 +1566,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb,
  static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
  {
  	struct fcoe_percpu_s *fps;
@@ -48,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return rc;
  }
-@@ -1766,11 +1766,11 @@ static inline int fcoe_filter_frames(str
+@@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(str
  		return 0;
  	}
  
@@ -62,7 +44,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return -EINVAL;
  }
  
-@@ -1846,13 +1846,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_bu
+ 	 */
+ 	hp = (struct fcoe_hdr *) skb_network_header(skb);
+ 
+-	stats = per_cpu_ptr(lport->stats, get_cpu());
++	stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ 		if (stats->ErrorFrames < 5)
+ 			printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_bu
  		goto drop;
  
  	if (!fcoe_filter_frames(lport, fp)) {
@@ -80,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -831,7 +831,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
  
  	INIT_LIST_HEAD(&del_list);
  
@@ -89,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
  		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -867,7 +867,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
  				sel_time = fcf->time;
  		}
  	}
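
get_cpu_light()/put_cpu_light() are RT-patch primitives. Assuming their usual definition (a sketch, not quoted from this tree), they pin the task to its CPU with migrate_disable() instead of disabling preemption, which keeps per-CPU accesses stable while the sleeping locks taken in these paths remain legal:

    /* assumed RT definitions, for illustration only */
    #define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()  migrate_enable()
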
diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
index 20c9ff5..654d9ee 100644
--- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll
 From: John Kacur <jkacur at redhat.com>
 Date: Fri, 27 Apr 2012 12:48:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 RT triggers the following:
 
diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
index 2e9f601..8d1e6f1 100644
--- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
+++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
@@ -1,7 +1,7 @@
 Subject: seqlock: Prevent rt starvation
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Feb 2012 12:03:30 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If a low prio writer gets preempted while holding the seqlock write
 locked, a high prio reader spins forever on RT.
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * raw_write_seqcount_barrier - do a seq write barrier
   * @s: pointer to seqcount_t
-@@ -425,10 +435,32 @@ typedef struct {
+@@ -428,10 +438,32 @@ typedef struct {
  /*
   * Read side functions for starting and finalizing a read side section.
   */
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
  {
-@@ -443,36 +475,36 @@ static inline unsigned read_seqretry(con
+@@ -446,36 +478,36 @@ static inline unsigned read_seqretry(con
  static inline void write_seqlock(seqlock_t *sl)
  {
  	spin_lock(&sl->lock);
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	spin_unlock_irq(&sl->lock);
  }
  
-@@ -481,7 +513,7 @@ static inline unsigned long __write_seql
+@@ -484,7 +516,7 @@ static inline unsigned long __write_seql
  	unsigned long flags;
  
  	spin_lock_irqsave(&sl->lock, flags);
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return flags;
  }
  
-@@ -491,7 +523,7 @@ static inline unsigned long __write_seql
+@@ -494,7 +526,7 @@ static inline unsigned long __write_seql
  static inline void
  write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
  {
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/net/dst.h
 +++ b/include/net/dst.h
-@@ -449,7 +449,7 @@ static inline void dst_confirm(struct ds
+@@ -446,7 +446,7 @@ static inline void dst_confirm(struct ds
  static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
  				   struct sk_buff *skb)
  {
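
The starvation fix works by letting a reader that observes an odd (write-locked) sequence block briefly on the writer's lock, so priority inheritance boosts the preempted writer instead of spinning it out. A conceptual sketch of the RT read side (simplified from the variants in the hunks above):

    static inline unsigned rt_read_seqbegin(seqlock_t *sl)
    {
            unsigned ret;

    repeat:
            ret = READ_ONCE(sl->seqcount.sequence);
            if (unlikely(ret & 1)) {
                    /* writer active: take and drop its lock so PI can
                     * boost the writer to completion, then retry */
                    spin_lock(&sl->lock);
                    spin_unlock(&sl->lock);
                    goto repeat;
            }
            return ret;
    }
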
diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
index d378066..475eab9 100644
--- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
+++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
@@ -1,7 +1,7 @@
 Subject: signal: Make __lock_task_sighand() RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 22 Jul 2011 08:07:08 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 local_irq_save() + spin_lock(&sighand->siglock) does not work on
 -RT. Use the nort variants.
diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
index b7cf8aa..177d54d 100644
--- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
 Subject: signal: Revert ptrace preempt magic
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
 than a bandaid around the ptrace design trainwreck. It's not a
diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 3a05125..d1133ea 100644
--- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:56 -0500
 Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 To avoid allocation allow rt tasks to cache one sigqueue struct in
 task struct.
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1589,6 +1589,7 @@ struct task_struct {
+@@ -1659,6 +1659,7 @@ struct task_struct {
  /* signal handlers */
  	struct signal_struct *signal;
  	struct sighand_struct *sighand;
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1352,6 +1352,7 @@ static struct task_struct *copy_process(
+@@ -1399,6 +1399,7 @@ static struct task_struct *copy_process(
  	spin_lock_init(&p->alloc_lock);
  
  	init_sigpending(&p->pending);
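
The caching scheme in brief, as a hedged sketch (helper name hypothetical): the free path parks one struct sigqueue in the task instead of handing it back to the allocator, and the allocation path claims it with cmpxchg(), so signal delivery for RT tasks avoids the allocator in the common case:

    static struct sigqueue *sigqueue_from_cache(struct task_struct *t)
    {
            struct sigqueue *q = READ_ONCE(t->sigqueue_cache);

            /* claim the cached entry atomically; on NULL the caller
             * falls back to kmem_cache_alloc() as before */
            if (q && cmpxchg(&t->sigqueue_cache, q, NULL) == q)
                    return q;
            return NULL;
    }
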
diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
index d10574f..538aa0b 100644
--- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch
+++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 15:38:34 +0200
 Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the rps lock as a raw lock so we can keep irq-off regions. It looks low
 latency. However we can't kfree() from this context, therefore we defer this
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2776,6 +2776,7 @@ struct softnet_data {
+@@ -2794,6 +2794,7 @@ struct softnet_data {
  	unsigned int		dropped;
  	struct sk_buff_head	input_pkt_queue;
  	struct napi_struct	backlog;
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -283,6 +283,7 @@ struct sk_buff_head {
+@@ -284,6 +284,7 @@ struct sk_buff_head {
  
  	__u32		qlen;
  	spinlock_t	lock;
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  };
  
  struct sk_buff;
-@@ -1537,6 +1538,12 @@ static inline void skb_queue_head_init(s
+@@ -1565,6 +1566,12 @@ static inline void skb_queue_head_init(s
  	__skb_queue_head_init(list);
  }
  
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -209,14 +209,14 @@ static inline struct hlist_head *dev_ind
+@@ -211,14 +211,14 @@ static inline struct hlist_head *dev_ind
  static inline void rps_lock(struct softnet_data *sd)
  {
  #ifdef CONFIG_RPS
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  }
  
-@@ -4265,7 +4265,7 @@ static void flush_backlog(void *arg)
+@@ -4322,7 +4322,7 @@ static void flush_backlog(void *arg)
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev == dev) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -4274,10 +4274,13 @@ static void flush_backlog(void *arg)
+@@ -4331,10 +4331,13 @@ static void flush_backlog(void *arg)
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev == dev) {
  			__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static int napi_gro_complete(struct sk_buff *skb)
-@@ -7790,6 +7793,9 @@ static int dev_cpu_callback(struct notif
+@@ -7992,6 +7995,9 @@ static int dev_cpu_callback(struct notif
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return NOTIFY_OK;
  }
-@@ -8091,8 +8097,9 @@ static int __init net_dev_init(void)
+@@ -8293,8 +8299,9 @@ static int __init net_dev_init(void)
  	for_each_possible_cpu(i) {
  		struct softnet_data *sd = &per_cpu(softnet_data, i);
  
diff --git a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
index 304eee3..342c005 100644
--- a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 15 Apr 2015 19:00:47 +0200
 Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1746,7 +1746,7 @@ endchoice
+@@ -1801,7 +1801,7 @@ config SLAB_FREELIST_RANDOM
  
  config SLUB_CPU_PARTIAL
  	default y
diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
index 2251ddf..924d659 100644
--- a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
 Subject: slub: Enable irqs for __GFP_WAIT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
 with __GFP_WAIT can happen before that. So use this as an indicator.
@@ -13,10 +13,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1418,14 +1418,17 @@ static struct page *allocate_slab(struct
- 	gfp_t alloc_gfp;
+@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
  	void *start, *p;
  	int idx, order;
+ 	bool shuffle;
 +	bool enableirqs = false;
  
  	flags &= gfp_allowed_mask;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1496,11 +1499,7 @@ static struct page *allocate_slab(struct
+@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
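
The control flow the hunks implement, in brief: track the decision to enable interrupts in a local bool so the matching re-disable keys off the same condition. A sketch under that reading (function name hypothetical; the RT-specific condition is elided in the context above):

    static void alloc_irq_window(gfp_t flags)
    {
            bool enableirqs = gfpflags_allow_blocking(flags);

            if (enableirqs)
                    local_irq_enable();
            /* ... perform the allocation that may block ... */
            if (enableirqs)
                    local_irq_disable();
    }
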
diff --git a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
index 25fcd21..531ae64 100644
--- a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
+++ b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 15:09:23 +0100
 Subject: snd/pcm: fix snd_pcm_stream_lock*() irqs_disabled() splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Locking functions previously using read_lock_irq()/read_lock_irqsave() were
 changed to local_irq_disable/save(), leading to gripes.  Use nort variants.
diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
index 1a61f55..73d42bf 100644
--- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Disable softirq stacks for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Disable extra stacks for softirqs. We want to preempt softirqs and
 having them on a special IRQ stack does not make this easier.
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/powerpc/kernel/irq.c
 +++ b/arch/powerpc/kernel/irq.c
-@@ -614,6 +614,7 @@ void irq_ctx_init(void)
+@@ -633,6 +633,7 @@ void irq_ctx_init(void)
  	}
  }
  
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void do_softirq_own_stack(void)
  {
  	struct thread_info *curtp, *irqtp;
-@@ -631,6 +632,7 @@ void do_softirq_own_stack(void)
+@@ -650,6 +651,7 @@ void do_softirq_own_stack(void)
  	if (irqtp->flags)
  		set_bits(irqtp->flags, &curtp->flags);
  }
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void fixup_irqs(void)
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -799,6 +799,7 @@ END(native_load_gs_index)
+@@ -817,6 +817,7 @@ END(native_load_gs_index)
  	jmp	2b
  	.previous
  
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Call softirq on interrupt stack. Interrupts are off. */
  ENTRY(do_softirq_own_stack)
  	pushq	%rbp
-@@ -811,6 +812,7 @@ ENTRY(do_softirq_own_stack)
+@@ -829,6 +830,7 @@ ENTRY(do_softirq_own_stack)
  	decl	PER_CPU_VAR(irq_count)
  	ret
  END(do_softirq_own_stack)
@@ -128,15 +128,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
 --- a/arch/x86/kernel/irq_32.c
 +++ b/arch/x86/kernel/irq_32.c
-@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu)
+@@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
  	       cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
  }
  
 +#ifndef CONFIG_PREEMPT_RT_FULL
  void do_softirq_own_stack(void)
  {
- 	struct thread_info *curstk;
-@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
+ 	struct irq_stack *irqstk;
+@@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
  
  	call_on_stack(__do_softirq, isp);
  }
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -458,7 +458,7 @@ struct softirq_action
+@@ -464,7 +464,7 @@ struct softirq_action
  asmlinkage void do_softirq(void);
  asmlinkage void __do_softirq(void);
  
diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
index b1373ee..8fcaafc 100644
--- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Check preemption after reenabling interrupts
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 raise_softirq_irqoff() disables interrupts and wakes the softirq
 daemon, but after reenabling interrupts there is no preemption check,
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return NOTIFY_OK;
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -2264,6 +2264,7 @@ static inline void __netif_reschedule(st
+@@ -2268,6 +2268,7 @@ static void __netif_reschedule(struct Qd
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  void __netif_schedule(struct Qdisc *q)
-@@ -2345,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2349,6 +2350,7 @@ void __dev_kfree_skb_irq(struct sk_buff
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3730,6 +3732,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3777,6 +3779,7 @@ static int enqueue_to_backlog(struct sk_
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -4735,6 +4738,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4795,6 +4798,7 @@ static void net_rps_action_and_irq_enabl
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/* Send pending IPI's to kick RPS processing on remote cpus. */
  		while (remsd) {
-@@ -4748,6 +4752,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4808,6 +4812,7 @@ static void net_rps_action_and_irq_enabl
  	} else
  #endif
  		local_irq_enable();
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4829,6 +4834,7 @@ void __napi_schedule(struct napi_struct
+@@ -4889,6 +4894,7 @@ void __napi_schedule(struct napi_struct
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(__napi_schedule);
  
-@@ -7775,6 +7781,7 @@ static int dev_cpu_callback(struct notif
+@@ -7977,6 +7983,7 @@ static int dev_cpu_callback(struct notif
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
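
Every hunk in this file adds the same single line after interrupts are re-enabled. Assuming the usual RT definition of the helper (a sketch, not quoted from this tree):

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_check_resched_rt()	preempt_check_resched()
    #else
    # define preempt_check_resched_rt()	barrier()
    #endif

    	/* the pattern at each call site: */
    	local_irq_restore(flags);
    	preempt_check_resched_rt();	/* let the woken softirqd run */
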
diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch
index c6a9374..5325fcf 100644
--- a/debian/patches/features/all/rt/softirq-split-locks.patch
+++ b/debian/patches/features/all/rt/softirq-split-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 04 Oct 2012 14:20:47 +0100
 Subject: softirq: Split softirq locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The 3.x RT series removed the split softirq implementation in favour
 of pushing softirq processing into the context of the thread which
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif /* _LINUX_BH_H */
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -455,10 +455,11 @@ struct softirq_action
+@@ -461,10 +461,11 @@ struct softirq_action
  	void	(*action)(struct softirq_action *);
  };
  
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void do_softirq_own_stack(void);
  #else
  static inline void do_softirq_own_stack(void)
-@@ -466,6 +467,9 @@ static inline void do_softirq_own_stack(
+@@ -472,6 +473,9 @@ static inline void do_softirq_own_stack(
  	__do_softirq();
  }
  #endif
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
-@@ -473,6 +477,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -479,6 +483,7 @@ extern void __raise_softirq_irqoff(unsig
  
  extern void raise_softirq_irqoff(unsigned int nr);
  extern void raise_softirq(unsigned int nr);
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  
-@@ -630,6 +635,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -636,6 +641,12 @@ void tasklet_hrtimer_cancel(struct taskl
  	tasklet_kill(&ttimer->tasklet);
  }
  
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Are we in NMI context?
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1868,6 +1868,8 @@ struct task_struct {
+@@ -1939,6 +1939,8 @@ struct task_struct {
  #endif
  #ifdef CONFIG_PREEMPT_RT_BASE
  	struct rcu_head put_rcu;
@@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long	task_state_change;
-@@ -2136,6 +2138,7 @@ extern void thread_group_cputime_adjuste
+@@ -2236,6 +2238,7 @@ extern void thread_group_cputime_adjuste
  /*
   * Per process flags
   */
@@ -786,7 +786,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	.thread_comm		= "ksoftirqd/%u",
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -866,14 +866,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -879,14 +879,7 @@ static bool can_stop_idle_tick(int cpu,
  		return false;
  
  	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -804,7 +804,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3801,11 +3801,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3848,11 +3848,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
  
diff --git a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 8a95606..93efda0 100644
--- a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 16:34:17 +0100
 Subject: softirq: split timer softirqs out of ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
 timer wakeups which cannot happen in hardirq context. The prio has been
diff --git a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 76f0b5a..9f57519 100644
--- a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Allen Pais <allen.pais at oracle.com>
 Date: Fri, 13 Dec 2013 09:44:41 +0530
 Subject: sparc64: use generic rwsem spinlocks rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Signed-off-by: Allen Pais <allen.pais at oracle.com>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/sparc/Kconfig
 +++ b/arch/sparc/Kconfig
-@@ -184,12 +184,10 @@ config NR_CPUS
+@@ -187,12 +187,10 @@ config NR_CPUS
  source kernel/Kconfig.hz
  
  config RWSEM_GENERIC_SPINLOCK
diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
index b9b3380..fa4440f 100644
--- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
+++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
@@ -1,7 +1,7 @@
 Subject: spinlock: Split the lock types header
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 19:34:01 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Split raw_spinlock into its own file and the remaining spinlock_t into
 its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch
index a7d7b5b..b3a45c3 100644
--- a/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ b/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Fri, 2 May 2014 13:13:22 +0200
 Subject: stomp-machine: create lg_global_trylock_relax() primitive
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Create lg_global_trylock_relax() for use by the stopper thread when it cannot
 schedule, to deal with stop_cpus_lock, which is now an lglock.
@@ -32,14 +32,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #define lglock spinlock
 --- a/include/linux/spinlock_rt.h
 +++ b/include/linux/spinlock_rt.h
-@@ -34,6 +34,7 @@ extern int atomic_dec_and_spin_lock(atom
-  */
+@@ -40,6 +40,7 @@ extern int atomic_dec_and_spin_lock(atom
+ extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
  extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
  extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
 +extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
  
- #define spin_lock(lock)				\
- 	do {					\
+ #define spin_lock(lock)			rt_spin_lock(lock)
+ 
 --- a/kernel/locking/lglock.c
 +++ b/kernel/locking/lglock.c
 @@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +#endif
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1151,6 +1151,11 @@ void __lockfunc rt_spin_unlock_wait(spin
+@@ -1215,6 +1215,11 @@ void __lockfunc rt_spin_unlock_wait(spin
  }
  EXPORT_SYMBOL(rt_spin_unlock_wait);
  
@@ -82,6 +82,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	return rt_mutex_trylock(lock);
 +}
 +
- int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
  {
- 	int ret = rt_mutex_trylock(&lock->lock);
+ 	int ret;
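
As a side note, the primitive boils down to a trylock-and-chill idiom. A
minimal sketch of that idiom follows (illustration only, not part of the
patch; the helper name is made up, and cpu_chill() is the RT-specific
back-off that sleeps for a tick instead of spinning, so the lock holder
can run):

#include <linux/spinlock.h>
#include <linux/delay.h>	/* cpu_chill() on PREEMPT_RT_FULL */

/*
 * Hypothetical helper: take a lock without ever blocking on the
 * rtmutex slow path.  While the trylock fails, cpu_chill() puts the
 * caller to sleep briefly so the current owner can make progress.
 */
static void demo_trylock_relax(spinlock_t *l)
{
	while (!spin_trylock(l))
		cpu_chill();
}
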
diff --git a/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
index 4513522..c86da7f 100644
--- a/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ b/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Fri, 2 May 2014 13:13:34 +0200
 Subject: stomp-machine: use lg_global_trylock_relax() to deal with stop_cpus_lock lglock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If the stop machinery is called from an inactive CPU we cannot use
 lg_global_lock(), because some other stomp machine invocation might be
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/stop_machine.c
 +++ b/kernel/stop_machine.c
-@@ -313,18 +313,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
+@@ -321,18 +321,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
  
  static bool queue_stop_cpus_work(const struct cpumask *cpumask,
  				 cpu_stop_fn_t fn, void *arg,
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	for_each_cpu(cpu, cpumask) {
  		work = &per_cpu(cpu_stopper.stop_work, cpu);
  		work->fn = fn;
-@@ -344,7 +347,7 @@ static int __stop_cpus(const struct cpum
+@@ -352,7 +355,7 @@ static int __stop_cpus(const struct cpum
  	struct cpu_stop_done done;
  
  	cpu_stop_init_done(&done, cpumask_weight(cpumask));
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return -ENOENT;
  	wait_for_completion(&done.completion);
  	return done.ret;
-@@ -532,6 +535,8 @@ static int __init cpu_stop_init(void)
+@@ -540,6 +543,8 @@ static int __init cpu_stop_init(void)
  		INIT_LIST_HEAD(&stopper->works);
  	}
  
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
  	stop_machine_unpark(raw_smp_processor_id());
  	stop_machine_initialized = true;
-@@ -626,7 +631,7 @@ int stop_machine_from_inactive_cpu(cpu_s
+@@ -634,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
  	set_state(&msdata, MULTI_STOP_PREPARE);
  	cpu_stop_init_done(&done, num_active_cpus());
  	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
diff --git a/debian/patches/features/all/rt/stop-machine-raw-lock.patch b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
index 90440ed..a38ecd4 100644
--- a/debian/patches/features/all/rt/stop-machine-raw-lock.patch
+++ b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
@@ -1,7 +1,7 @@
 Subject: stop_machine: Use raw spinlocks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 11:01:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use raw-locks in stomp_machine() to allow locking in irq-off regions.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/stop_machine.c
 +++ b/kernel/stop_machine.c
-@@ -36,7 +36,7 @@ struct cpu_stop_done {
+@@ -37,7 +37,7 @@ struct cpu_stop_done {
  struct cpu_stopper {
  	struct task_struct	*thread;
  
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	bool			enabled;	/* is this stopper enabled? */
  	struct list_head	works;		/* list of pending works */
  
-@@ -82,14 +82,14 @@ static bool cpu_stop_queue_work(unsigned
+@@ -83,14 +83,14 @@ static bool cpu_stop_queue_work(unsigned
  	unsigned long flags;
  	bool enabled;
  
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return enabled;
  }
  
-@@ -224,8 +224,8 @@ static int cpu_stop_queue_two_works(int
+@@ -232,8 +232,8 @@ static int cpu_stop_queue_two_works(int
  	int err;
  
  	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	err = -ENOENT;
  	if (!stopper1->enabled || !stopper2->enabled)
-@@ -235,8 +235,8 @@ static int cpu_stop_queue_two_works(int
+@@ -243,8 +243,8 @@ static int cpu_stop_queue_two_works(int
  	__cpu_stop_queue_work(stopper1, work1);
  	__cpu_stop_queue_work(stopper2, work2);
  unlock:
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
  
  	return err;
-@@ -425,9 +425,9 @@ static int cpu_stop_should_run(unsigned
+@@ -433,9 +433,9 @@ static int cpu_stop_should_run(unsigned
  	unsigned long flags;
  	int run;
  
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return run;
  }
  
-@@ -438,13 +438,13 @@ static void cpu_stopper_thread(unsigned
+@@ -446,13 +446,13 @@ static void cpu_stopper_thread(unsigned
  
  repeat:
  	work = NULL;
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (work) {
  		cpu_stop_fn_t fn = work->fn;
-@@ -528,7 +528,7 @@ static int __init cpu_stop_init(void)
+@@ -536,7 +536,7 @@ static int __init cpu_stop_init(void)
  	for_each_possible_cpu(cpu) {
  		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
  
diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index 92c5cd0..6da0f92 100644
--- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:27 -0500
 Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Instead of playing with non-preemption, introduce explicit
 startup serialization. This is more robust and cleaner as
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/stop_machine.c
 +++ b/kernel/stop_machine.c
-@@ -452,6 +452,16 @@ static void cpu_stopper_thread(unsigned
+@@ -460,6 +460,16 @@ static void cpu_stopper_thread(unsigned
  		struct cpu_stop_done *done = work->done;
  		int ret;
  
diff --git a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 30d27f4..e8a1194 100644
--- a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 16:05:28 +0100
 Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/sunrpc/svc_xprt.c
 +++ b/net/sunrpc/svc_xprt.c
-@@ -342,7 +342,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
  		goto out;
  	}
  
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
  
  	atomic_long_inc(&pool->sp_stats.packets);
-@@ -378,7 +378,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
  
  		atomic_long_inc(&pool->sp_stats.threads_woken);
  		wake_up_process(rqstp->rq_task);
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  	}
  	rcu_read_unlock();
-@@ -399,7 +399,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
  		goto redo_search;
  	}
  	rqstp = NULL;
diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
index 0908e68..81e621f 100644
--- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
+++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 15 Jul 2010 10:29:00 +0200
 Subject: suspend: Prevent might sleep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 timekeeping suspend/resume calls read_persistent_clock() which takes
 rtc_lock. That results in might-sleep warnings because at that point
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/kernel.h
 +++ b/include/linux/kernel.h
-@@ -484,6 +484,7 @@ extern enum system_states {
+@@ -491,6 +491,7 @@ extern enum system_states {
  	SYSTEM_HALT,
  	SYSTEM_POWER_OFF,
  	SYSTEM_RESTART,
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TAINT_PROPRIETARY_MODULE	0
 --- a/kernel/power/hibernate.c
 +++ b/kernel/power/hibernate.c
-@@ -285,6 +285,8 @@ static int create_image(int platform_mod
+@@ -286,6 +286,8 @@ static int create_image(int platform_mod
  
  	local_irq_disable();
  
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	error = syscore_suspend();
  	if (error) {
  		printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -314,6 +316,7 @@ static int create_image(int platform_mod
+@@ -315,6 +317,7 @@ static int create_image(int platform_mod
  	syscore_resume();
  
   Enable_irqs:
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_enable();
  
   Enable_cpus:
-@@ -438,6 +441,7 @@ static int resume_target_kernel(bool pla
+@@ -444,6 +447,7 @@ static int resume_target_kernel(bool pla
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	error = syscore_suspend();
  	if (error)
-@@ -471,6 +475,7 @@ static int resume_target_kernel(bool pla
+@@ -477,6 +481,7 @@ static int resume_target_kernel(bool pla
  	syscore_resume();
  
   Enable_irqs:
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_enable();
  
   Enable_cpus:
-@@ -556,6 +561,7 @@ int hibernation_platform_enter(void)
+@@ -562,6 +567,7 @@ int hibernation_platform_enter(void)
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	syscore_suspend();
  	if (pm_wakeup_pending()) {
  		error = -EAGAIN;
-@@ -568,6 +574,7 @@ int hibernation_platform_enter(void)
+@@ -574,6 +580,7 @@ int hibernation_platform_enter(void)
  
   Power_up:
  	syscore_resume();
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   Enable_cpus:
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -359,6 +359,8 @@ static int suspend_enter(suspend_state_t
+@@ -361,6 +361,8 @@ static int suspend_enter(suspend_state_t
  	arch_suspend_disable_irqs();
  	BUG_ON(!irqs_disabled());
  
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	error = syscore_suspend();
  	if (!error) {
  		*wakeup = pm_wakeup_pending();
-@@ -375,6 +377,8 @@ static int suspend_enter(suspend_state_t
+@@ -377,6 +379,8 @@ static int suspend_enter(suspend_state_t
  		syscore_resume();
  	}
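
Condensed, the idea is to mark the deep-suspend window so the RT
might-sleep check can stand down while interrupts are legitimately off.
A rough sketch, assuming the patch adds a SYSTEM_SUSPEND state and
toggles it around the syscore calls (function name hypothetical):

#include <linux/kernel.h>
#include <linux/syscore_ops.h>

/* Hypothetical, heavily condensed suspend path. */
static int demo_suspend_enter(void)
{
	int error;

	local_irq_disable();
	system_state = SYSTEM_SUSPEND;	/* silence might-sleep splats */

	error = syscore_suspend();
	if (!error)
		syscore_resume();

	system_state = SYSTEM_RUNNING;
	local_irq_enable();
	return error;
}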
  
diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
index 7a061ba..e9f40ed 100644
--- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch
+++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
 Subject: sysfs: Add /sys/kernel/realtime entry
 From: Clark Williams <williams at redhat.com>
 Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Add a /sys/kernel entry to indicate that the kernel is a
 realtime kernel.
diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 7bfb528..f173f64 100644
--- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -1,7 +1,7 @@
 Subject: tasklet: Prevent tasklets from going into infinite spin in RT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Tue Nov 29 20:18:22 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
 and spinlocks turn into mutexes. But this can cause issues with
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -494,8 +494,9 @@ static inline struct task_struct *this_c
+@@ -500,8 +500,9 @@ static inline struct task_struct *this_c
       to be executed on some cpu at least once after this.
     * If the tasklet is already scheduled, but its execution is still not
       started, it will be executed only once.
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
     * Tasklet is strictly serialized wrt itself, but not
       wrt another tasklets. If client needs some intertask synchronization,
       he makes it with spinlocks.
-@@ -520,27 +521,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -526,27 +527,36 @@ struct tasklet_struct name = { NULL, 0,
  enum
  {
  	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define tasklet_unlock_wait(t) do { } while (0)
  #define tasklet_unlock(t) do { } while (0)
  #endif
-@@ -589,12 +599,7 @@ static inline void tasklet_disable(struc
+@@ -595,12 +605,7 @@ static inline void tasklet_disable(struc
  	smp_mb();
  }
  
diff --git a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
index 435f640..a6bc28d 100644
--- a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <wagi at monom.org>
 Date: Tue, 17 Feb 2015 09:37:44 +0100
 Subject: thermal: Defer thermal wakeups to threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 On RT the spin lock in pkg_temp_thermal_platform_thermal_notify will
 call schedule() while we run in irq context.
diff --git a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
index 3c63609..9eee0c4 100644
--- a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 Subject: tick/broadcast: Make broadcast hrtimer irqsafe
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:47:10 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Otherwise we end up with the following:
 
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/time/tick-broadcast-hrtimer.c
 +++ b/kernel/time/tick-broadcast-hrtimer.c
-@@ -106,5 +106,6 @@ void tick_setup_hrtimer_broadcast(void)
+@@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
  {
  	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  	bctimer.function = bc_handler;
diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
index cb8bc13..8b07fb0 100644
--- a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
+++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
@@ -1,7 +1,7 @@
 Subject: timekeeping: Split jiffies seqlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Feb 2013 22:36:59 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
 it can be taken in atomic context on RT.
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti
  		return;
  
- 	/* Reevalute with jiffies_lock held */
+ 	/* Reevaluate with jiffies_lock held */
 -	write_seqlock(&jiffies_lock);
 +	raw_spin_lock(&jiffies_lock);
 +	write_seqcount_begin(&jiffies_seq);
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (rcu_needs_cpu(basemono, &next_rcu) ||
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
-@@ -2319,8 +2319,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2325,8 +2325,10 @@ EXPORT_SYMBOL(hardpps);
   */
  void xtime_update(unsigned long ticks)
  {
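
For reference, a self-contained sketch of the split-lock pattern (demo
names, not the kernel's symbols): the writer serializes on a raw
spinlock and bumps a seqcount, while readers spin only on the seqcount
and therefore never block, which is what makes the read side RT-safe:

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_jiffies_lock);
static seqcount_t demo_jiffies_seq = SEQCNT_ZERO(demo_jiffies_seq);
static u64 demo_jiffies64;

static void demo_do_tick(void)		/* writer, atomic context */
{
	raw_spin_lock(&demo_jiffies_lock);
	write_seqcount_begin(&demo_jiffies_seq);
	demo_jiffies64++;
	write_seqcount_end(&demo_jiffies_seq);
	raw_spin_unlock(&demo_jiffies_lock);
}

static u64 demo_get_jiffies64(void)	/* reader, lock-free */
{
	unsigned int seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&demo_jiffies_seq);
		ret = demo_jiffies64;
	} while (read_seqcount_retry(&demo_jiffies_seq, seq));

	return ret;
}
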
diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index 98b2527..c39caa8 100644
--- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 21 Aug 2009 11:56:45 +0200
 Subject: timer: delay waking softirqs from the jiffy tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 People were complaining about broken balancing with the recent -rt
 series.
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1456,13 +1456,13 @@ void update_process_times(int user_tick)
+@@ -1627,13 +1627,13 @@ void update_process_times(int user_tick)
  
  	/* Note: this timer irq context must be accounted for as well. */
  	account_process_tick(p, user_tick);
diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
index c6dfff8..b25e8c0 100644
--- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
+++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
@@ -1,7 +1,7 @@
 Subject: timer-fd: Prevent live lock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 25 Jan 2012 11:08:40 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 If hrtimer_try_to_cancel() requires a retry, then depending on the
 priority setting the retry loop might prevent timer callback completion
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/timerfd.c
 +++ b/fs/timerfd.c
-@@ -450,7 +450,10 @@ static int do_timerfd_settime(int ufd, i
+@@ -460,7 +460,10 @@ static int do_timerfd_settime(int ufd, i
  				break;
  		}
  		spin_unlock_irq(&ctx->wqh.lock);
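
In outline, the fix replaces the busy retry with a cancel-and-wait
idiom, roughly as sketched below (helper name hypothetical;
hrtimer_wait_for_timer() is the RT-series primitive that blocks until a
running callback finishes):

#include <linux/hrtimer.h>

/*
 * Hypothetical helper: hrtimer_try_to_cancel() returns -1 while the
 * callback runs.  Instead of retrying at high priority - which can
 * starve the softirq thread running the callback and livelock - sleep
 * until the handler is done, then try again.
 */
static void demo_cancel_hrtimer_sync(struct hrtimer *t)
{
	while (hrtimer_try_to_cancel(t) < 0)
		hrtimer_wait_for_timer(t);
}
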
diff --git a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
new file mode 100644
index 0000000..5ad3f0f
--- /dev/null
+++ b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
@@ -0,0 +1,181 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 13 Jul 2016 18:22:23 +0200
+Subject: [PATCH] timer: make the base lock raw
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
+
+The part where the base lock is held got more predictable / shorter after the
+timer rework. One reason is the lack of re-cascading.
+That means the lock can be made raw and held in IRQ context.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/time/timer.c |   48 ++++++++++++++++++++++++------------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -193,7 +193,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #endif
+ 
+ struct timer_base {
+-	spinlock_t		lock;
++	raw_spinlock_t		lock;
+ 	struct timer_list	*running_timer;
+ 	unsigned long		clk;
+ 	unsigned long		next_expiry;
+@@ -947,10 +947,10 @@ static struct timer_base *lock_timer_bas
+ 
+ 		if (!(tf & TIMER_MIGRATING)) {
+ 			base = get_timer_base(tf);
+-			spin_lock_irqsave(&base->lock, *flags);
++			raw_spin_lock_irqsave(&base->lock, *flags);
+ 			if (timer->flags == tf)
+ 				return base;
+-			spin_unlock_irqrestore(&base->lock, *flags);
++			raw_spin_unlock_irqrestore(&base->lock, *flags);
+ 		}
+ 		cpu_relax();
+ 	}
+@@ -1017,9 +1017,9 @@ static inline int
+ 			/* See the comment in lock_timer_base() */
+ 			timer->flags |= TIMER_MIGRATING;
+ 
+-			spin_unlock(&base->lock);
++			raw_spin_unlock(&base->lock);
+ 			base = new_base;
+-			spin_lock(&base->lock);
++			raw_spin_lock(&base->lock);
+ 			WRITE_ONCE(timer->flags,
+ 				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
+ 		}
+@@ -1040,7 +1040,7 @@ static inline int
+ 	}
+ 
+ out_unlock:
+-	spin_unlock_irqrestore(&base->lock, flags);
++	raw_spin_unlock_irqrestore(&base->lock, flags);
+ 
+ 	return ret;
+ }
+@@ -1134,16 +1134,16 @@ void add_timer_on(struct timer_list *tim
+ 	if (base != new_base) {
+ 		timer->flags |= TIMER_MIGRATING;
+ 
+-		spin_unlock(&base->lock);
++		raw_spin_unlock(&base->lock);
+ 		base = new_base;
+-		spin_lock(&base->lock);
++		raw_spin_lock(&base->lock);
+ 		WRITE_ONCE(timer->flags,
+ 			   (timer->flags & ~TIMER_BASEMASK) | cpu);
+ 	}
+ 
+ 	debug_activate(timer, timer->expires);
+ 	internal_add_timer(base, timer);
+-	spin_unlock_irqrestore(&base->lock, flags);
++	raw_spin_unlock_irqrestore(&base->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+ 
+@@ -1170,7 +1170,7 @@ int del_timer(struct timer_list *timer)
+ 	if (timer_pending(timer)) {
+ 		base = lock_timer_base(timer, &flags);
+ 		ret = detach_if_pending(timer, base, true);
+-		spin_unlock_irqrestore(&base->lock, flags);
++		raw_spin_unlock_irqrestore(&base->lock, flags);
+ 	}
+ 
+ 	return ret;
+@@ -1198,7 +1198,7 @@ int try_to_del_timer_sync(struct timer_l
+ 		timer_stats_timer_clear_start_info(timer);
+ 		ret = detach_if_pending(timer, base, true);
+ 	}
+-	spin_unlock_irqrestore(&base->lock, flags);
++	raw_spin_unlock_irqrestore(&base->lock, flags);
+ 
+ 	return ret;
+ }
+@@ -1330,13 +1330,13 @@ static void expire_timers(struct timer_b
+ 		data = timer->data;
+ 
+ 		if (timer->flags & TIMER_IRQSAFE) {
+-			spin_unlock(&base->lock);
++			raw_spin_unlock(&base->lock);
+ 			call_timer_fn(timer, fn, data);
+-			spin_lock(&base->lock);
++			raw_spin_lock(&base->lock);
+ 		} else {
+-			spin_unlock_irq(&base->lock);
++			raw_spin_unlock_irq(&base->lock);
+ 			call_timer_fn(timer, fn, data);
+-			spin_lock_irq(&base->lock);
++			raw_spin_lock_irq(&base->lock);
+ 		}
+ 	}
+ }
+@@ -1505,7 +1505,7 @@ u64 get_next_timer_interrupt(unsigned lo
+ 	if (cpu_is_offline(smp_processor_id()))
+ 		return expires;
+ 
+-	spin_lock(&base->lock);
++	raw_spin_lock(&base->lock);
+ 	nextevt = __next_timer_interrupt(base);
+ 	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
+ 	base->next_expiry = nextevt;
+@@ -1529,7 +1529,7 @@ u64 get_next_timer_interrupt(unsigned lo
+ 		if ((expires - basem) > TICK_NSEC)
+ 			base->is_idle = true;
+ 	}
+-	spin_unlock(&base->lock);
++	raw_spin_unlock(&base->lock);
+ 
+ 	return cmp_next_hrtimer_event(basem, expires);
+ }
+@@ -1616,7 +1616,7 @@ static inline void __run_timers(struct t
+ 	if (!time_after_eq(jiffies, base->clk))
+ 		return;
+ 
+-	spin_lock_irq(&base->lock);
++	raw_spin_lock_irq(&base->lock);
+ 
+ 	while (time_after_eq(jiffies, base->clk)) {
+ 
+@@ -1627,7 +1627,7 @@ static inline void __run_timers(struct t
+ 			expire_timers(base, heads + levels);
+ 	}
+ 	base->running_timer = NULL;
+-	spin_unlock_irq(&base->lock);
++	raw_spin_unlock_irq(&base->lock);
+ }
+ 
+ /*
+@@ -1822,16 +1822,16 @@ int timers_dead_cpu(unsigned int cpu)
+ 		 * The caller is globally serialized and nobody else
+ 		 * takes two locks at once, deadlock is not possible.
+ 		 */
+-		spin_lock_irq(&new_base->lock);
+-		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++		raw_spin_lock_irq(&new_base->lock);
++		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ 
+ 		BUG_ON(old_base->running_timer);
+ 
+ 		for (i = 0; i < WHEEL_SIZE; i++)
+ 			migrate_timer_list(new_base, old_base->vectors + i);
+ 
+-		spin_unlock(&old_base->lock);
+-		spin_unlock_irq(&new_base->lock);
++		raw_spin_unlock(&old_base->lock);
++		raw_spin_unlock_irq(&new_base->lock);
+ 		put_cpu_ptr(&timer_bases);
+ 	}
+ 	return 0;
+@@ -1847,7 +1847,7 @@ static void __init init_timer_cpu(int cp
+ 	for (i = 0; i < NR_BASES; i++) {
+ 		base = per_cpu_ptr(&timer_bases[i], cpu);
+ 		base->cpu = cpu;
+-		spin_lock_init(&base->lock);
++		raw_spin_lock_init(&base->lock);
+ 		base->clk = jiffies;
+ 	}
+ }
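
For context, a minimal sketch (not part of the patch) of what the type
change buys: on PREEMPT_RT a spinlock_t turns into a sleeping rtmutex,
while raw_spinlock_t keeps real IRQ-off spinning semantics, so only the
latter may be held where the timer code now takes it, i.e. in hard IRQ
context:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_base_lock);	/* demo lock, not the kernel's */

/* Safe to call from hard IRQ context, even on PREEMPT_RT_FULL. */
static void demo_irq_path(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_base_lock, flags);
	/* short, bounded critical section */
	raw_spin_unlock_irqrestore(&demo_base_lock, flags);
}
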
diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
index 3b41116..a47b3d0 100644
--- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
+++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: timers: Prepare for full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When softirqs can be preempted we need to make sure that cancelling
 the timer from the active thread can not deadlock vs. a running timer
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/timer.h |    2 +-
  kernel/sched/core.c   |    9 +++++++--
- kernel/time/timer.c   |   41 ++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 46 insertions(+), 6 deletions(-)
+ kernel/time/timer.c   |   44 ++++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 48 insertions(+), 7 deletions(-)
 
 --- a/include/linux/timer.h
 +++ b/include/linux/timer.h
-@@ -225,7 +225,7 @@ extern void add_timer(struct timer_list
+@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list
  
  extern int try_to_del_timer_sync(struct timer_list *timer);
  
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define del_timer_sync(t)		del_timer(t)
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -490,11 +490,14 @@ void resched_cpu(int cpu)
+@@ -525,11 +525,14 @@ void resched_cpu(int cpu)
   */
  int get_nohz_timer_target(void)
  {
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	rcu_read_lock();
  	for_each_domain(cpu, sd) {
-@@ -510,6 +513,8 @@ int get_nohz_timer_target(void)
+@@ -548,6 +551,8 @@ int get_nohz_timer_target(void)
  		cpu = housekeeping_any_cpu();
  unlock:
  	rcu_read_unlock();
@@ -57,17 +57,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -80,6 +80,9 @@ struct tvec_root {
- struct tvec_base {
- 	spinlock_t lock;
- 	struct timer_list *running_timer;
+@@ -195,6 +195,9 @@ EXPORT_SYMBOL(jiffies_64);
+ struct timer_base {
+ 	raw_spinlock_t		lock;
+ 	struct timer_list	*running_timer;
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	wait_queue_head_t wait_for_running_timer;
++	struct swait_queue_head	wait_for_running_timer;
 +#endif
- 	unsigned long timer_jiffies;
- 	unsigned long next_timer;
- 	unsigned long active_timers;
-@@ -1006,6 +1009,33 @@ void add_timer_on(struct timer_list *tim
+ 	unsigned long		clk;
+ 	unsigned long		next_expiry;
+ 	unsigned int		cpu;
+@@ -1147,6 +1150,33 @@ void add_timer_on(struct timer_list *tim
  }
  EXPORT_SYMBOL_GPL(add_timer_on);
  
@@ -77,18 +77,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 + */
 +static void wait_for_running_timer(struct timer_list *timer)
 +{
-+	struct tvec_base *base;
++	struct timer_base *base;
 +	u32 tf = timer->flags;
 +
 +	if (tf & TIMER_MIGRATING)
 +		return;
 +
-+	base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
-+	wait_event(base->wait_for_running_timer,
++	base = get_timer_base(tf);
++	swait_event(base->wait_for_running_timer,
 +		   base->running_timer != timer);
 +}
 +
-+# define wakeup_timer_waiters(b)	wake_up(&(b)->wait_for_running_timer)
++# define wakeup_timer_waiters(b)	swake_up_all(&(b)->wait_for_running_timer)
 +#else
 +static inline void wait_for_running_timer(struct timer_list *timer)
 +{
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * del_timer - deactive a timer.
   * @timer: the timer to be deactivated
-@@ -1063,7 +1093,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1204,7 +1234,7 @@ int try_to_del_timer_sync(struct timer_l
  }
  EXPORT_SYMBOL(try_to_del_timer_sync);
  
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * del_timer_sync - deactivate a timer and wait for the handler to finish.
   * @timer: the timer to be deactivated
-@@ -1123,7 +1153,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1264,7 +1294,7 @@ int del_timer_sync(struct timer_list *ti
  		int ret = try_to_del_timer_sync(timer);
  		if (ret >= 0)
  			return ret;
@@ -119,32 +119,41 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  EXPORT_SYMBOL(del_timer_sync);
-@@ -1248,15 +1278,17 @@ static inline void __run_timers(struct t
- 			if (irqsafe) {
- 				spin_unlock(&base->lock);
- 				call_timer_fn(timer, fn, data);
-+				base->running_timer = NULL;
- 				spin_lock(&base->lock);
- 			} else {
- 				spin_unlock_irq(&base->lock);
- 				call_timer_fn(timer, fn, data);
-+				base->running_timer = NULL;
- 				spin_lock_irq(&base->lock);
- 			}
+@@ -1329,13 +1359,16 @@ static void expire_timers(struct timer_b
+ 		fn = timer->function;
+ 		data = timer->data;
+ 
+-		if (timer->flags & TIMER_IRQSAFE) {
++		if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
++		    timer->flags & TIMER_IRQSAFE) {
+ 			raw_spin_unlock(&base->lock);
+ 			call_timer_fn(timer, fn, data);
++			base->running_timer = NULL;
+ 			raw_spin_lock(&base->lock);
+ 		} else {
+ 			raw_spin_unlock_irq(&base->lock);
+ 			call_timer_fn(timer, fn, data);
++			base->running_timer = NULL;
+ 			raw_spin_lock_irq(&base->lock);
  		}
  	}
+@@ -1626,8 +1659,8 @@ static inline void __run_timers(struct t
+ 		while (levels--)
+ 			expire_timers(base, heads + levels);
+ 	}
 -	base->running_timer = NULL;
+ 	raw_spin_unlock_irq(&base->lock);
 +	wakeup_timer_waiters(base);
- 	spin_unlock_irq(&base->lock);
  }
  
-@@ -1656,6 +1688,9 @@ static void __init init_timer_cpu(int cp
- 
- 	base->cpu = cpu;
- 	spin_lock_init(&base->lock);
+ /*
+@@ -1849,6 +1882,9 @@ static void __init init_timer_cpu(int cp
+ 		base->cpu = cpu;
+ 		raw_spin_lock_init(&base->lock);
+ 		base->clk = jiffies;
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	init_waitqueue_head(&base->wait_for_running_timer);
++		init_swait_queue_head(&base->wait_for_running_timer);
 +#endif
+ 	}
+ }
  
- 	base->timer_jiffies = jiffies;
- 	base->next_timer = base->timer_jiffies;
diff --git a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
index ef0eb26..372ff2e 100644
--- a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
+++ b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
@@ -2,7 +2,7 @@ From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 5 Jan 2016 10:21:59 +0100
 Subject: trace/latency-hist: Consider new argument when probing the
  sched_switch tracer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The sched_switch tracer has got a new argument. Fix the latency tracer
 accordingly.
diff --git a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
index f876e33..377fef4 100644
--- a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
@@ -1,7 +1,7 @@
 Subject: trace: Use rcuidle version for preemptoff_hist trace point
 From: Yang Shi <yang.shi at windriver.com>
 Date: Tue, 23 Feb 2016 13:23:23 -0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When running -rt kernel with both PREEMPT_OFF_HIST and LOCKDEP enabled,
 the below error is reported:
diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
index aa0dabf..b09d0a0 100644
--- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 29 Sep 2011 12:24:30 -0500
 Subject: tracing: Account for preempt off in preempt_schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 The preempt_schedule() uses the preempt_disable_notrace() version
 because it can cause infinite recursion by the function tracer as
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3347,7 +3347,16 @@ asmlinkage __visible void __sched notrac
+@@ -3553,7 +3553,16 @@ asmlinkage __visible void __sched notrac
  		 * an infinite recursion.
  		 */
  		prev_ctx = exception_enter();
@@ -44,4 +44,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		stop_critical_timings();
  		exception_exit(prev_ctx);
  
- 		preempt_enable_no_resched_notrace();
+ 		preempt_latency_stop(1);
diff --git a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 4b3ece2..ab2eac7 100644
--- a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -1,8 +1,7 @@
-From 08552bb6e497a6f37a31884083cdd2c046d0f674 Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Apr 2016 16:55:02 +0200
 Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 An oops with irqs off (panic() from irqsafe hrtimer like the watchdog
 timer) will lead to a lockdep warning on each invocation and as such
@@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3092,10 +3092,8 @@ void serial8250_console_write(struct uar
+@@ -3110,10 +3110,8 @@ void serial8250_console_write(struct uar
  
  	serial8250_rpm_get(up);
  
diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 5843a95..138c42a 100644
--- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
 Subject: net: Remove preemption disabling in netif_rx()
 From: Priyanka Jain <Priyanka.Jain at freescale.com>
 Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 1) enqueue_to_backlog() (called from netif_rx) should be
    bound to a particular CPU. This can be achieved by
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3790,7 +3790,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3803,7 +3803,7 @@ static int netif_rx_internal(struct sk_b
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3800,13 +3800,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3813,13 +3813,13 @@ static int netif_rx_internal(struct sk_b
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
index 1f87c21..f19ab3e 100644
--- a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
+++ b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 8 Nov 2013 17:34:54 +0100
 Subject: usb: Use _nort in giveback function
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
 context") I see
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/usb/core/hcd.c
 +++ b/drivers/usb/core/hcd.c
-@@ -1759,9 +1759,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1760,9 +1760,9 @@ static void __usb_hcd_giveback_urb(struc
  	 * and no one may trigger the above deadlock situation when
  	 * running complete() in tasklet.
  	 */
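
A short sketch of the _nort idea, under the assumed RT semantics (demo
function; the variants behave as the plain IRQ-off primitives on non-RT
kernels and compile to no-ops on PREEMPT_RT_FULL, where the surrounding
locks already serialize):

#include <linux/interrupt.h>

/* Hypothetical giveback-style path using the _nort variants. */
static void demo_giveback_path(void)
{
	unsigned long flags;

	local_irq_save_nort(flags);	/* no-op on PREEMPT_RT_FULL */
	/* the completion handler may take sleeping locks on RT */
	local_irq_restore_nort(flags);
}
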
diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
index af372a6..67ac182 100644
--- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 23:06:05 +0200
 Subject: core: Do not disable interrupts on RT in kernel/users.c
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use the local_irq_*_nort variants to reduce latencies in RT. The code
 is serialized by the locks. No need to disable interrupts.
diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
index 965f877..b8a2083 100644
--- a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 28 Oct 2013 12:19:57 +0100
 Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 |  CC      init/main.o
 |In file included from include/linux/mmzone.h:9:0,
diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
index 17b0edf..91b3703 100644
--- a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 01 Jul 2013 11:02:42 +0200
 Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 An Intel i7 system regularly detected rcu_preempt stalls after the kernel
 was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include "workqueue_internal.h"
  
-@@ -1303,7 +1304,7 @@ static int try_to_grab_pending(struct wo
+@@ -1277,7 +1278,7 @@ static int try_to_grab_pending(struct wo
  	local_unlock_irqrestore(pendingb_lock, *flags);
  	if (work_is_canceling(work))
  		return -ENOENT;
diff --git a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
index 41f2da5..5ec042f 100644
--- a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
+++ b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <daniel.wagner at bmw-carit.de>
 Date: Fri, 11 Jul 2014 15:26:11 +0200
 Subject: work-simple: Simple work queue implementation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Provides a framework for enqueuing callbacks from irq context in a
 PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
index 46431e8..37c714d 100644
--- a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
+++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
@@ -22,7 +22,7 @@ Cc: Jens Axboe <axboe at kernel.dk>
 Cc: Linus Torvalds <torvalds at linux-foundation.org>
 Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 ---
  kernel/sched/core.c         |   81 ++++++++------------------------------------
@@ -32,7 +32,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1626,10 +1626,6 @@ static inline void ttwu_activate(struct
+@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
  {
  	activate_task(rq, p, en_flags);
  	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -43,7 +43,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  }
  
  /*
-@@ -2032,53 +2028,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2143,53 +2139,6 @@ try_to_wake_up(struct task_struct *p, un
  }
  
  /**
@@ -54,7 +54,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
 - * ensure that this_rq() is locked, @p is bound to this_rq() and not
 - * the current task.
 - */
--static void try_to_wake_up_local(struct task_struct *p)
+-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 -{
 -	struct rq *rq = task_rq(p);
 -
@@ -71,11 +71,11 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
 -		 * disabled avoiding further scheduler activity on it and we've
 -		 * not yet picked a replacement task.
 -		 */
--		lockdep_unpin_lock(&rq->lock);
+-		lockdep_unpin_lock(&rq->lock, cookie);
 -		raw_spin_unlock(&rq->lock);
 -		raw_spin_lock(&p->pi_lock);
 -		raw_spin_lock(&rq->lock);
--		lockdep_pin_lock(&rq->lock);
+-		lockdep_repin_lock(&rq->lock, cookie);
 -	}
 -
 -	if (!(p->state & TASK_NORMAL))
@@ -86,7 +86,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
 -	if (!task_on_rq_queued(p))
 -		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 -
--	ttwu_do_wakeup(rq, p, 0);
+-	ttwu_do_wakeup(rq, p, 0, cookie);
 -	if (schedstat_enabled())
 -		ttwu_stat(p, smp_processor_id(), 0);
 -out:
@@ -97,7 +97,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -3322,21 +3271,6 @@ static void __sched notrace __schedule(b
+@@ -3499,21 +3448,6 @@ static void __sched notrace __schedule(b
  		} else {
  			deactivate_task(rq, prev, DEQUEUE_SLEEP);
  			prev->on_rq = 0;
@@ -114,12 +114,12 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
 -
 -				to_wakeup = wq_worker_sleeping(prev);
 -				if (to_wakeup)
--					try_to_wake_up_local(to_wakeup);
+-					try_to_wake_up_local(to_wakeup, cookie);
 -			}
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -3369,6 +3303,14 @@ static inline void sched_submit_work(str
+@@ -3546,6 +3480,14 @@ static inline void sched_submit_work(str
  {
  	if (!tsk->state || tsk_is_pi_blocked(tsk))
  		return;
@@ -134,7 +134,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -3377,6 +3319,12 @@ static inline void sched_submit_work(str
+@@ -3554,6 +3496,12 @@ static inline void sched_submit_work(str
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -147,7 +147,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  asmlinkage __visible void __sched schedule(void)
  {
  	struct task_struct *tsk = current;
-@@ -3387,6 +3335,7 @@ asmlinkage __visible void __sched schedu
+@@ -3564,6 +3512,7 @@ asmlinkage __visible void __sched schedu
  		__schedule(false);
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
@@ -157,7 +157,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
-@@ -867,43 +867,32 @@ static void wake_up_worker(struct worker
+@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker
  }
  
  /**
@@ -212,7 +212,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  	struct worker_pool *pool;
  
  	/*
-@@ -912,13 +901,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(s
  	 * checking NOT_RUNNING.
  	 */
  	if (worker->flags & WORKER_NOT_RUNNING)
@@ -232,7 +232,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.
  
  	/*
  	 * The counterpart of the following dec_and_test, implied mb,
-@@ -932,9 +923,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(s
  	 * lock is safe.
  	 */
  	if (atomic_dec_and_test(&pool->nr_running) &&
diff --git a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
index d21d639..d210003 100644
--- a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
+++ b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Prevent deadlock/stall on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Austin reported an XFS deadlock/stall on RT where scheduled work never
 gets executed and tasks wait for each other forever.
@@ -44,7 +44,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3417,9 +3417,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
+@@ -3594,9 +3594,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
  
  static inline void sched_submit_work(struct task_struct *tsk)
  {
@@ -55,7 +55,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  	/*
  	 * If a worker went to sleep, notify and ask workqueue whether
  	 * it wants to wake up a task to maintain concurrency.
-@@ -3427,6 +3426,10 @@ static inline void sched_submit_work(str
+@@ -3604,6 +3603,10 @@ static inline void sched_submit_work(str
  	if (tsk->flags & PF_WQ_WORKER)
  		wq_worker_sleeping(tsk);
  
@@ -112,7 +112,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  #ifdef CONFIG_DEBUG_OBJECTS_WORK
  
  static struct debug_obj_descr work_debug_descr;
-@@ -860,10 +890,16 @@ static struct worker *first_idle_worker(
+@@ -834,10 +864,16 @@ static struct worker *first_idle_worker(
   */
  static void wake_up_worker(struct worker_pool *pool)
  {
@@ -130,7 +130,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  /**
-@@ -892,7 +928,7 @@ void wq_worker_running(struct task_struc
+@@ -866,7 +902,7 @@ void wq_worker_running(struct task_struc
   */
  void wq_worker_sleeping(struct task_struct *task)
  {
@@ -139,7 +139,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  	struct worker_pool *pool;
  
  	/*
-@@ -909,26 +945,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -883,26 +919,18 @@ void wq_worker_sleeping(struct task_stru
  		return;
  
  	worker->sleeping = 1;
@@ -169,7 +169,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  /**
-@@ -1655,7 +1683,9 @@ static void worker_enter_idle(struct wor
+@@ -1629,7 +1657,9 @@ static void worker_enter_idle(struct wor
  	worker->last_active = jiffies;
  
  	/* idle_list is LIFO */
@@ -179,7 +179,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  
  	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
  		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1688,7 +1718,9 @@ static void worker_leave_idle(struct wor
+@@ -1662,7 +1692,9 @@ static void worker_leave_idle(struct wor
  		return;
  	worker_clr_flags(worker, WORKER_IDLE);
  	pool->nr_idle--;
@@ -189,7 +189,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  static struct worker *alloc_worker(int node)
-@@ -1854,7 +1886,9 @@ static void destroy_worker(struct worker
+@@ -1828,7 +1860,9 @@ static void destroy_worker(struct worker
  	pool->nr_workers--;
  	pool->nr_idle--;
  
diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch
index 73389da..1b27e68 100644
--- a/debian/patches/features/all/rt/workqueue-use-locallock.patch
+++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Use local irq lock instead of irq disable regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:42:26 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Use a local_irq_lock as a replacement for irq off regions. We keep the
 semantics of irq-off with regard to the pool->lock and remain preemptible.
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static int worker_thread(void *__worker);
  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  
-@@ -1127,9 +1130,9 @@ static void put_pwq_unlocked(struct pool
+@@ -1101,9 +1104,9 @@ static void put_pwq_unlocked(struct pool
  		 * As both pwqs and pools are RCU protected, the
  		 * following lock operations are safe.
  		 */
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  
-@@ -1233,7 +1236,7 @@ static int try_to_grab_pending(struct wo
+@@ -1207,7 +1210,7 @@ static int try_to_grab_pending(struct wo
  	struct worker_pool *pool;
  	struct pool_workqueue *pwq;
  
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* try to steal the timer if it exists */
  	if (is_dwork) {
-@@ -1297,7 +1300,7 @@ static int try_to_grab_pending(struct wo
+@@ -1271,7 +1274,7 @@ static int try_to_grab_pending(struct wo
  	spin_unlock(&pool->lock);
  fail:
  	rcu_read_unlock();
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (work_is_canceling(work))
  		return -ENOENT;
  	cpu_relax();
-@@ -1402,7 +1405,7 @@ static void __queue_work(int cpu, struct
+@@ -1376,7 +1379,7 @@ static void __queue_work(int cpu, struct
  	 * queued or lose PENDING.  Grabbing PENDING and queueing should
  	 * happen with IRQ disabled.
  	 */
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	debug_work_activate(work);
  
-@@ -1508,14 +1511,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1482,14 +1485,14 @@ bool queue_work_on(int cpu, struct workq
  	bool ret = false;
  	unsigned long flags;
  
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  EXPORT_SYMBOL(queue_work_on);
-@@ -1582,14 +1585,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1556,14 +1559,14 @@ bool queue_delayed_work_on(int cpu, stru
  	unsigned long flags;
  
  	/* read the comment in __queue_work() */
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1624,7 +1627,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1598,7 +1601,7 @@ bool mod_delayed_work_on(int cpu, struct
  
  	if (likely(ret >= 0)) {
  		__queue_delayed_work(cpu, wq, dwork, delay);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2942,7 +2945,7 @@ static bool __cancel_work_timer(struct w
+@@ -2916,7 +2919,7 @@ static bool __cancel_work_timer(struct w
  
  	/* tell other tasks trying to grab @work to back off */
  	mark_work_canceling(work);
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	flush_work(work);
  	clear_work_data(work);
-@@ -2997,10 +3000,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2971,10 +2974,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
   */
  bool flush_delayed_work(struct delayed_work *dwork)
  {
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return flush_work(&dwork->work);
  }
  EXPORT_SYMBOL(flush_delayed_work);
-@@ -3035,7 +3038,7 @@ bool cancel_delayed_work(struct delayed_
+@@ -3009,7 +3012,7 @@ bool cancel_delayed_work(struct delayed_
  
  	set_work_pool_and_clear_pending(&dwork->work,
  					get_work_pool_id(&dwork->work));
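
For reference, the substitution this patch makes follows the standard -rt local-lock pattern. A minimal sketch, assuming the locallock.h primitives from the -rt tree (the lock name and the abbreviated queue_work_on() body are taken from the hunks above; this is not the literal patch text):

/*
 * Sketch, not the literal hunks.  On !RT local_lock_irqsave() falls
 * back to local_irq_save(); on RT it takes a per-CPU sleeping lock,
 * so the section still serializes against other local users of
 * pendingb_lock but stays preemptible.
 */
#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_lock_irqsave(pendingb_lock, flags);	/* was: local_irq_save() */

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_unlock_irqrestore(pendingb_lock, flags);	/* was: local_irq_restore() */
	return ret;
}
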
diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch
index 2fbbcb5..61354a2 100644
--- a/debian/patches/features/all/rt/workqueue-use-rcu.patch
+++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Use normal rcu
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 24 Jul 2013 15:26:54 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 There is no need for sched_rcu. The undocumented reason why sched_rcu
 is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * If the pwq needs to be used beyond the locking in effect, the caller is
   * responsible for guaranteeing that the pwq stays online.
   *
-@@ -574,7 +574,7 @@ static int worker_pool_assign_id(struct
+@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct
   * @wq: the target workqueue
   * @node: the node ID
   *
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * read locked.
   * If the pwq needs to be used beyond the locking in effect, the caller is
   * responsible for guaranteeing that the pwq stays online.
-@@ -718,8 +718,8 @@ static struct pool_workqueue *get_work_p
+@@ -692,8 +692,8 @@ static struct pool_workqueue *get_work_p
   * @work: the work item of interest
   *
   * Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   *
   * All fields of the returned pool are accessible as long as the above
   * mentioned locking is in effect.  If the returned pool needs to be used
-@@ -1124,7 +1124,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1098,7 +1098,7 @@ static void put_pwq_unlocked(struct pool
  {
  	if (pwq) {
  		/*
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		 * following lock operations are safe.
  		 */
  		spin_lock_irq(&pwq->pool->lock);
-@@ -1252,6 +1252,7 @@ static int try_to_grab_pending(struct wo
+@@ -1226,6 +1226,7 @@ static int try_to_grab_pending(struct wo
  	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
  		return 0;
  
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * The queueing is in progress, or it is already queued. Try to
  	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1290,10 +1291,12 @@ static int try_to_grab_pending(struct wo
+@@ -1264,10 +1265,12 @@ static int try_to_grab_pending(struct wo
  		set_work_pool_and_keep_pending(work, pool->id);
  
  		spin_unlock(&pool->lock);
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_restore(*flags);
  	if (work_is_canceling(work))
  		return -ENOENT;
-@@ -1407,6 +1410,7 @@ static void __queue_work(int cpu, struct
+@@ -1381,6 +1384,7 @@ static void __queue_work(int cpu, struct
  	if (unlikely(wq->flags & __WQ_DRAINING) &&
  	    WARN_ON_ONCE(!is_chained_work(wq)))
  		return;
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  retry:
  	if (req_cpu == WORK_CPU_UNBOUND)
  		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1463,10 +1467,8 @@ static void __queue_work(int cpu, struct
+@@ -1437,10 +1441,8 @@ static void __queue_work(int cpu, struct
  	/* pwq determined, queue */
  	trace_workqueue_queue_work(req_cpu, pwq, work);
  
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	pwq->nr_in_flight[pwq->work_color]++;
  	work_flags = work_color_to_flags(pwq->work_color);
-@@ -1484,7 +1486,9 @@ static void __queue_work(int cpu, struct
+@@ -1458,7 +1460,9 @@ static void __queue_work(int cpu, struct
  
  	insert_work(pwq, work, worklist, work_flags);
  
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -2811,14 +2815,14 @@ static bool start_flush_work(struct work
+@@ -2785,14 +2789,14 @@ static bool start_flush_work(struct work
  
  	might_sleep();
  
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/* see the comment in try_to_grab_pending() with the same code */
  	pwq = get_work_pwq(work);
  	if (pwq) {
-@@ -2847,10 +2851,11 @@ static bool start_flush_work(struct work
+@@ -2821,10 +2825,11 @@ static bool start_flush_work(struct work
  	else
  		lock_map_acquire_read(&pwq->wq->lockdep_map);
  	lock_map_release(&pwq->wq->lockdep_map);
@@ -208,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return false;
  }
  
-@@ -3259,7 +3264,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3233,7 +3238,7 @@ static void rcu_free_pool(struct rcu_hea
   * put_unbound_pool - put a worker_pool
   * @pool: worker_pool to put
   *
@@ -217,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * safe manner.  get_unbound_pool() calls this function on its failure path
   * and this function should be able to release pools which went through,
   * successfully or not, init_worker_pool().
-@@ -3313,8 +3318,8 @@ static void put_unbound_pool(struct work
+@@ -3287,8 +3292,8 @@ static void put_unbound_pool(struct work
  	del_timer_sync(&pool->idle_timer);
  	del_timer_sync(&pool->mayday_timer);
  
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -3421,14 +3426,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3395,14 +3400,14 @@ static void pwq_unbound_release_workfn(s
  	put_unbound_pool(pool);
  	mutex_unlock(&wq_pool_mutex);
  
@@ -245,7 +245,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -4078,7 +4083,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4052,7 +4057,7 @@ void destroy_workqueue(struct workqueue_
  		 * The base ref is never dropped on per-cpu pwqs.  Directly
  		 * schedule RCU free.
  		 */
@@ -254,7 +254,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else {
  		/*
  		 * We're the sole accessor of @wq at this point.  Directly
-@@ -4171,7 +4176,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4145,7 +4150,8 @@ bool workqueue_congested(int cpu, struct
  	struct pool_workqueue *pwq;
  	bool ret;
  
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (cpu == WORK_CPU_UNBOUND)
  		cpu = smp_processor_id();
-@@ -4182,7 +4188,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4156,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
  		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
  
  	ret = !list_empty(&pwq->delayed_works);
@@ -274,7 +274,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return ret;
  }
-@@ -4208,15 +4215,15 @@ unsigned int work_busy(struct work_struc
+@@ -4182,15 +4189,15 @@ unsigned int work_busy(struct work_struc
  	if (work_pending(work))
  		ret |= WORK_BUSY_PENDING;
  
@@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return ret;
  }
-@@ -4405,7 +4412,7 @@ void show_workqueue_state(void)
+@@ -4379,7 +4386,7 @@ void show_workqueue_state(void)
  	unsigned long flags;
  	int pi;
  
@@ -303,7 +303,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	pr_info("Showing busy workqueues and worker pools:\n");
  
-@@ -4458,7 +4465,7 @@ void show_workqueue_state(void)
+@@ -4432,7 +4439,7 @@ void show_workqueue_state(void)
  		spin_unlock_irqrestore(&pool->lock, flags);
  	}
  
@@ -312,7 +312,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -4819,16 +4826,16 @@ bool freeze_workqueues_busy(void)
+@@ -4770,16 +4777,16 @@ bool freeze_workqueues_busy(void)
  		 * nr_active is monotonically decreasing.  It's safe
  		 * to peek without lock.
  		 */
@@ -332,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  out_unlock:
  	mutex_unlock(&wq_pool_mutex);
-@@ -5018,7 +5025,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4969,7 +4976,8 @@ static ssize_t wq_pool_ids_show(struct d
  	const char *delim = "";
  	int node, written = 0;
  
@@ -342,7 +342,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	for_each_node(node) {
  		written += scnprintf(buf + written, PAGE_SIZE - written,
  				     "%s%d:%d", delim, node,
-@@ -5026,7 +5034,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4977,7 +4985,8 @@ static ssize_t wq_pool_ids_show(struct d
  		delim = " ";
  	}
  	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
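
The resulting read-side pattern, sketched with a hypothetical helper (lookup_pwq() is made up for illustration; get_work_pool(), get_work_pwq() and pool->lock are the real workqueue.c names):

/*
 * Illustrative only.  Plain RCU replaces sched-RCU, so the read
 * section no longer implies disabled preemption and the pool lock
 * (a sleeping lock on RT) may nest inside it.  Teardown paths switch
 * from the _sched RCU variants to call_rcu() accordingly.
 */
static struct pool_workqueue *lookup_pwq(struct work_struct *work)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq = NULL;

	rcu_read_lock();			/* was: rcu_read_lock_sched() */
	pool = get_work_pool(work);
	if (pool) {
		spin_lock_irq(&pool->lock);
		pwq = get_work_pwq(work);	/* stable while pool->lock is held */
		spin_unlock_irq(&pool->lock);
	}
	rcu_read_unlock();			/* was: rcu_read_unlock_sched() */
	return pwq;
}
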
diff --git a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
index 6e5fb6a..aca872f 100644
--- a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
+++ b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 2 Nov 2014 08:31:37 +0100
 Subject: x86: UV: raw_spinlock conversion
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Shrug.  Lots of hobbyists have a beast in their basement, right?
 
@@ -9,12 +9,10 @@ Shrug.  Lots of hobbyists have a beast in their basement, right?
 Signed-off-by: Mike Galbraith <mgalbraith at suse.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- arch/x86/include/asm/uv/uv_bau.h   |   14 +++++++-------
- arch/x86/include/asm/uv/uv_hub.h   |    2 +-
- arch/x86/kernel/apic/x2apic_uv_x.c |    2 +-
- arch/x86/platform/uv/tlb_uv.c      |   26 +++++++++++++-------------
- arch/x86/platform/uv/uv_time.c     |   21 +++++++++++++--------
- 5 files changed, 35 insertions(+), 30 deletions(-)
+ arch/x86/include/asm/uv/uv_bau.h |   14 +++++++-------
+ arch/x86/platform/uv/tlb_uv.c    |   26 +++++++++++++-------------
+ arch/x86/platform/uv/uv_time.c   |   21 +++++++++++++--------
+ 3 files changed, 33 insertions(+), 28 deletions(-)
 
 --- a/arch/x86/include/asm/uv/uv_bau.h
 +++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -51,31 +49,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return 1;
  }
  
---- a/arch/x86/include/asm/uv/uv_hub.h
-+++ b/arch/x86/include/asm/uv/uv_hub.h
-@@ -492,7 +492,7 @@ struct uv_blade_info {
- 	unsigned short	nr_online_cpus;
- 	unsigned short	pnode;
- 	short		memory_nid;
--	spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
-+	raw_spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
- 	unsigned long	nmi_count;	/* obsolete, see uv_hub_nmi */
- };
- extern struct uv_blade_info *uv_blade_info;
---- a/arch/x86/kernel/apic/x2apic_uv_x.c
-+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -950,7 +950,7 @@ void __init uv_system_init(void)
- 			uv_blade_info[blade].pnode = pnode;
- 			uv_blade_info[blade].nr_possible_cpus = 0;
- 			uv_blade_info[blade].nr_online_cpus = 0;
--			spin_lock_init(&uv_blade_info[blade].nmi_lock);
-+			raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
- 			min_pnode = min(pnode, min_pnode);
- 			max_pnode = max(pnode, max_pnode);
- 			blade++;
 --- a/arch/x86/platform/uv/tlb_uv.c
 +++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -714,9 +714,9 @@ static void destination_plugged(struct b
+@@ -729,9 +729,9 @@ static void destination_plugged(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -87,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -736,9 +736,9 @@ static void destination_timeout(struct b
+@@ -751,9 +751,9 @@ static void destination_timeout(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -99,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -759,7 +759,7 @@ static void disable_for_period(struct ba
+@@ -774,7 +774,7 @@ static void disable_for_period(struct ba
  	cycles_t tm1;
  
  	hmaster = bcp->uvhub_master;
@@ -108,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (!bcp->baudisabled) {
  		stat->s_bau_disabled++;
  		tm1 = get_cycles();
-@@ -772,7 +772,7 @@ static void disable_for_period(struct ba
+@@ -787,7 +787,7 @@ static void disable_for_period(struct ba
  			}
  		}
  	}
@@ -117,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t t
+@@ -850,7 +850,7 @@ static void record_send_stats(cycles_t t
   */
  static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
  {
@@ -126,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	atomic_t *v;
  
  	v = &hmaster->active_descriptor_count;
-@@ -968,7 +968,7 @@ static int check_enable(struct bau_contr
+@@ -983,7 +983,7 @@ static int check_enable(struct bau_contr
  	struct bau_control *hmaster;
  
  	hmaster = bcp->uvhub_master;
@@ -135,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
  		stat->s_bau_reenabled++;
  		for_each_present_cpu(tcpu) {
-@@ -980,10 +980,10 @@ static int check_enable(struct bau_contr
+@@ -995,10 +995,10 @@ static int check_enable(struct bau_contr
  				tbcp->period_giveups = 0;
  			}
  		}
@@ -148,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return -1;
  }
  
-@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables
+@@ -1916,9 +1916,9 @@ static void __init init_per_cpu_tunables
  		bcp->cong_reps			= congested_reps;
  		bcp->disabled_period =		sec_2_cycles(disabled_period);
  		bcp->giveup_limit =		giveup_limit;
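
The shape of the conversion, sketched with hypothetical names (bau_control_example and example_quiesce() are illustrations only; the real fields and call sites are in the hunks above). On RT a spinlock_t turns into a sleeping rtmutex, which must not be taken from the BAU interrupt paths, hence the raw, always-spinning variants:

/* Hypothetical names; the patch applies this pattern to the locks
 * declared in uv_bau.h and taken in tlb_uv.c and uv_time.c. */
struct bau_control_example {
	raw_spinlock_t	disable_lock;		/* was: spinlock_t */
};

static void example_quiesce(struct bau_control_example *hmaster)
{
	raw_spin_lock(&hmaster->disable_lock);	/* was: spin_lock() */
	/* ... flip BAU disable state with the lock held ... */
	raw_spin_unlock(&hmaster->disable_lock);	/* was: spin_unlock() */
}
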
diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
index 3a48d7d..9d368e0 100644
--- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: x86: crypto: Reduce preempt disabled regions
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Restrict the preempt disabled regions to the actual floating point
 operations and enable preemption for the administrative actions.
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/crypto/aesni-intel_glue.c
 +++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -383,14 +383,14 @@ static int ecb_encrypt(struct blkcipher_
+@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_
  	err = blkcipher_walk_virt(desc, &walk);
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return err;
  }
-@@ -407,14 +407,14 @@ static int ecb_decrypt(struct blkcipher_
+@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_
  	err = blkcipher_walk_virt(desc, &walk);
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return err;
  }
-@@ -431,14 +431,14 @@ static int cbc_encrypt(struct blkcipher_
+@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_
  	err = blkcipher_walk_virt(desc, &walk);
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return err;
  }
-@@ -455,14 +455,14 @@ static int cbc_decrypt(struct blkcipher_
+@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_
  	err = blkcipher_walk_virt(desc, &walk);
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return err;
  }
-@@ -514,18 +514,20 @@ static int ctr_crypt(struct blkcipher_de
+@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_de
  	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
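
A sketch of the converted loop, abbreviated from the ecb_encrypt() hunks above: the FPU section now covers a single chunk of the walk instead of the whole traversal, so preemption is re-enabled while blkcipher_walk_done() may fault in the next pages.

	/* Sketch only; kernel_fpu_begin()/end() move inside the loop. */
	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();		/* was: once before the loop */
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();		/* was: once after the loop */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}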
  
diff --git a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
index 91a5935..855722e 100644
--- a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 17:09:55 +0100
 Subject: x86/highmem: Add a "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 This is a copy from kmap_atomic_prot().
 
diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
index 65e83c0..26aeabf 100644
--- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
+++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:27 -0500
 Subject: x86/ioapic: Do not unmask io_apic when interrupt is in progress
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 With threaded interrupts we might see an interrupt in progress on
 migration. Do not unmask it when this is the case.
@@ -16,7 +16,7 @@ xXx
 
 --- a/arch/x86/kernel/apic/io_apic.c
 +++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st
+@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(st
  static inline bool ioapic_irqd_mask(struct irq_data *data)
  {
  	/* If we are moving the irq we need to mask it */
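
The hunk above grows ioapic_irqd_mask() by one line. A hedged reconstruction of the new condition, consistent with the description (the function body is paraphrased from mainline, not copied from the patch):

static inline bool ioapic_irqd_mask(struct irq_data *data)
{
	/* If we are moving the irq we need to mask it */
	if (unlikely(irqd_is_setaffinity_pending(data) &&
		     !irqd_irq_inprogress(data))) {	/* in-progress check added */
		mask_ioapic_irq(data);
		return true;
	}
	return false;
}
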
diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
index bc940ca..125e8fa 100644
--- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: x86: kvm Require const tsc for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Non constant TSC is a nightmare on bare metal already, but with
 virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -5850,6 +5850,13 @@ int kvm_arch_init(void *opaque)
+@@ -5865,6 +5865,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
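
The hunk adds a handful of lines to kvm_arch_init(); roughly the following guard, reconstructed from the patch description (treat it as a sketch, not the literal hunk):

#ifdef CONFIG_PREEMPT_RT_FULL
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
		return -EOPNOTSUPP;	/* refuse to init KVM on RT without constant TSC */
	}
#endif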
  
diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
index 56f6299..25be110 100644
--- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
+++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 13 Dec 2010 16:33:39 +0100
 Subject: x86: Convert mce timer to hrtimer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 mce_timer is started in atomic contexts of cpu bringup. This results
 in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
@@ -35,7 +35,7 @@ fold in:
  
  #include <asm/processor.h>
  #include <asm/traps.h>
-@@ -1240,7 +1241,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1291,7 +1292,7 @@ void mce_log_therm_throt_event(__u64 sta
  static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
  
  static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -44,7 +44,7 @@ fold in:
  
  static unsigned long mce_adjust_timer_default(unsigned long interval)
  {
-@@ -1249,32 +1250,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1300,32 +1301,18 @@ static unsigned long mce_adjust_timer_de
  
  static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
  
@@ -58,7 +58,7 @@ fold in:
 -
 -	if (timer_pending(t)) {
 -		if (time_before(when, t->expires))
--			mod_timer_pinned(t, when);
+-			mod_timer(t, when);
 -	} else {
 -		t->expires = round_jiffies(when);
 -		add_timer_on(t, smp_processor_id());
@@ -83,7 +83,7 @@ fold in:
  	iv = __this_cpu_read(mce_next_interval);
  
  	if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1297,7 +1284,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1348,7 +1335,7 @@ static void mce_timer_fn(unsigned long d
  
  done:
  	__this_cpu_write(mce_next_interval, iv);
@@ -92,7 +92,7 @@ fold in:
  }
  
  /*
-@@ -1305,7 +1292,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1356,7 +1343,7 @@ static void mce_timer_fn(unsigned long d
   */
  void mce_timer_kick(unsigned long interval)
  {
@@ -101,7 +101,7 @@ fold in:
  	unsigned long iv = __this_cpu_read(mce_next_interval);
  
  	__restart_timer(t, interval);
-@@ -1320,7 +1307,7 @@ static void mce_timer_delete_all(void)
+@@ -1371,7 +1358,7 @@ static void mce_timer_delete_all(void)
  	int cpu;
  
  	for_each_online_cpu(cpu)
@@ -110,7 +110,7 @@ fold in:
  }
  
  static void mce_do_trigger(struct work_struct *work)
-@@ -1654,7 +1641,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1717,7 +1704,7 @@ static void __mcheck_cpu_clear_vendor(st
  	}
  }
  
@@ -119,7 +119,7 @@ fold in:
  {
  	unsigned long iv = check_interval * HZ;
  
-@@ -1663,16 +1650,17 @@ static void mce_start_timer(unsigned int
+@@ -1726,16 +1713,17 @@ static void mce_start_timer(unsigned int
  
  	per_cpu(mce_next_interval, cpu) = iv;
  
@@ -135,13 +135,13 @@ fold in:
 +	struct hrtimer *t = this_cpu_ptr(&mce_timer);
  	unsigned int cpu = smp_processor_id();
  
--	setup_timer(t, mce_timer_fn, cpu);
+-	setup_pinned_timer(t, mce_timer_fn, cpu);
 +	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +	t->function = mce_timer_fn;
  	mce_start_timer(cpu, t);
  }
  
-@@ -2393,6 +2381,8 @@ static void mce_disable_cpu(void *h)
+@@ -2459,6 +2447,8 @@ static void mce_disable_cpu(void *h)
  	if (!mce_available(raw_cpu_ptr(&cpu_info)))
  		return;
  
@@ -150,15 +150,15 @@ fold in:
  	if (!(action & CPU_TASKS_FROZEN))
  		cmci_clear();
  
-@@ -2415,6 +2405,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2481,6 +2471,7 @@ static void mce_reenable_cpu(void *h)
  		if (b->init)
- 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ 			wrmsrl(msr_ops.ctl(i), b->ctl);
  	}
 +	__mcheck_cpu_init_timer();
  }
  
  /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2422,7 +2413,6 @@ static int
+@@ -2488,7 +2479,6 @@ static int
  mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
  {
  	unsigned int cpu = (unsigned long)hcpu;
@@ -166,7 +166,7 @@ fold in:
  
  	switch (action & ~CPU_TASKS_FROZEN) {
  	case CPU_ONLINE:
-@@ -2442,11 +2432,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2508,11 +2498,9 @@ mce_cpu_callback(struct notifier_block *
  		break;
  	case CPU_DOWN_PREPARE:
  		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
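
A condensed sketch of the conversion (the authoritative hunks are above; __restart_timer() is the patch's own re-arm helper, visible in the mce_timer_kick() hunk):

/* Condensed sketch, not the literal patch. */
static DEFINE_PER_CPU(struct hrtimer, mce_timer);	/* was: struct timer_list */

static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
{
	unsigned long iv = __this_cpu_read(mce_next_interval);

	/* ... poll the banks and adjust iv, as before ... */

	__this_cpu_write(mce_next_interval, iv);
	__restart_timer(timer, iv);	/* patch helper: re-arms via the hrtimer API */
	return HRTIMER_NORESTART;
}

static void __mcheck_cpu_init_timer(void)
{
	struct hrtimer *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = mce_timer_fn;
	mce_start_timer(cpu, t);
}
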
diff --git a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 4718236..5535b2e 100644
--- a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -1,7 +1,7 @@
 Subject: x86/mce: use swait queue for mce wakeups
 From: Steven Rostedt <rostedt at goodmis.org>
 Date:	Fri, 27 Feb 2015 15:20:37 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 We had a customer report a lockup on a 3.0-rt kernel that had the
 following backtrace:
@@ -69,7 +69,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  
  #include <asm/processor.h>
  #include <asm/traps.h>
-@@ -1317,6 +1318,56 @@ static void mce_do_trigger(struct work_s
+@@ -1368,6 +1369,56 @@ static void mce_do_trigger(struct work_s
  
  static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  
@@ -126,7 +126,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  /*
   * Notify the user(s) about new machine check events.
   * Can be called from interrupt context, but not from machine check/NMI
-@@ -1324,19 +1375,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1375,19 +1426,8 @@ static DECLARE_WORK(mce_trigger_work, mc
   */
  int mce_notify_irq(void)
  {
@@ -147,7 +147,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  		return 1;
  	}
  	return 0;
-@@ -2473,6 +2513,10 @@ static __init int mcheck_init_device(voi
+@@ -2539,6 +2579,10 @@ static __init int mcheck_init_device(voi
  		goto err_out;
  	}
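
A hedged sketch of the resulting wakeup path (mce_notify_work() stands for the deferral helper the patch introduces; the swait head replaces the regular wait queue because a raw swait wakeup is safe from MCE context while a sleeping wait-queue lock is not):

/* Sketch; helper naming follows the patch, details are in the hunks. */
static DECLARE_SWAIT_QUEUE_HEAD(mce_chrdev_wait);	/* was: DECLARE_WAIT_QUEUE_HEAD() */

int mce_notify_irq(void)
{
	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* Defer the wakeup and /dev/mcelog trigger to a context
		 * that may take sleeping locks; the actual swake_up()
		 * happens there. */
		mce_notify_work();
		return 1;
	}
	return 0;
}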
  
diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch
index e3ea814..437c9af 100644
--- a/debian/patches/features/all/rt/x86-preempt-lazy.patch
+++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
 Subject: x86: Support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Implement the x86 pieces for lazy preempt.
 
@@ -11,9 +11,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  arch/x86/entry/common.c            |    4 ++--
  arch/x86/entry/entry_32.S          |   16 ++++++++++++++++
  arch/x86/entry/entry_64.S          |   16 ++++++++++++++++
- arch/x86/include/asm/thread_info.h |    6 ++++++
+ arch/x86/include/asm/preempt.h     |   31 ++++++++++++++++++++++++++++++-
+ arch/x86/include/asm/thread_info.h |   10 ++++++++++
  arch/x86/kernel/asm-offsets.c      |    2 ++
- 6 files changed, 43 insertions(+), 2 deletions(-)
+ 7 files changed, 77 insertions(+), 3 deletions(-)
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
@@ -27,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	select ANON_INODES
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -202,7 +202,7 @@ long syscall_trace_enter(struct pt_regs
+@@ -136,7 +136,7 @@ static long syscall_trace_enter(struct p
  
  #define EXIT_TO_USERMODE_LOOP_FLAGS				\
  	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
@@ -36,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
  {
-@@ -218,7 +218,7 @@ static void exit_to_usermode_loop(struct
+@@ -152,7 +152,7 @@ static void exit_to_usermode_loop(struct
  		/* We have work to do. */
  		local_irq_enable();
  
@@ -47,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -278,8 +278,24 @@ END(ret_from_exception)
+@@ -271,8 +271,24 @@ END(ret_from_exception)
  ENTRY(resume_kernel)
  	DISABLE_INTERRUPTS(CLBR_ANY)
  need_resched:
@@ -74,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	call	preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -511,7 +511,23 @@ GLOBAL(retint_user)
+@@ -512,7 +512,23 @@ GLOBAL(retint_user)
  	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
  	jnc	1f
  0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
@@ -98,18 +99,79 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	call	preempt_schedule_irq
  	jmp	0b
  1:
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -79,17 +79,46 @@ static __always_inline void __preempt_co
+  * a decrement which hits zero means we have no preempt_count and should
+  * reschedule.
+  */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+ 
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++	if (____preempt_count_dec_and_test())
++		return true;
++#ifdef CONFIG_PREEMPT_LAZY
++	if (current_thread_info()->preempt_lazy_count)
++		return false;
++	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++	return false;
++#endif
++}
++
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++	u32 tmp;
++
++	tmp = raw_cpu_read_4(__preempt_count);
++	if (tmp == preempt_offset)
++		return true;
++
++	/* preempt count == 0 ? */
++	tmp &= ~PREEMPT_NEED_RESCHED;
++	if (tmp)
++		return false;
++	if (current_thread_info()->preempt_lazy_count)
++		return false;
++	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+ 
+ #ifdef CONFIG_PREEMPT
 --- a/arch/x86/include/asm/thread_info.h
 +++ b/arch/x86/include/asm/thread_info.h
-@@ -58,6 +58,8 @@ struct thread_info {
+@@ -57,6 +57,8 @@ struct thread_info {
+ 	__u32			flags;		/* low level flags */
  	__u32			status;		/* thread synchronous flags */
  	__u32			cpu;		/* current CPU */
- 	mm_segment_t		addr_limit;
 +	int			preempt_lazy_count;	/* 0 => lazy preemptable
-+							  <0 => BUG */
- 	unsigned int		sig_on_uaccess_error:1;
- 	unsigned int		uaccess_err:1;	/* uaccess failed */
++							   <0 => BUG */
  };
-@@ -95,6 +97,7 @@ struct thread_info {
+ 
+ #define INIT_THREAD_INFO(tsk)			\
+@@ -73,6 +75,10 @@ struct thread_info {
+ 
+ #include <asm/asm-offsets.h>
+ 
++#define GET_THREAD_INFO(reg) \
++	_ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++	_ASM_SUB $(THREAD_SIZE),reg ;
++
+ #endif
+ 
+ /*
+@@ -91,6 +97,7 @@ struct thread_info {
  #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
  #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
  #define TIF_SECCOMP		8	/* secure computing */
@@ -117,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
  #define TIF_UPROBE		12	/* breakpointed or singlestepping */
  #define TIF_NOTSC		16	/* TSC is not accessible in userland */
-@@ -119,6 +122,7 @@ struct thread_info {
+@@ -115,6 +122,7 @@ struct thread_info {
  #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
  #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -125,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
  #define _TIF_UPROBE		(1 << TIF_UPROBE)
  #define _TIF_NOTSC		(1 << TIF_NOTSC)
-@@ -155,6 +159,8 @@ struct thread_info {
+@@ -151,6 +159,8 @@ struct thread_info {
  #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
  #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
  
@@ -136,15 +198,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
 --- a/arch/x86/kernel/asm-offsets.c
 +++ b/arch/x86/kernel/asm-offsets.c
-@@ -32,6 +32,7 @@ void common(void) {
+@@ -31,6 +31,7 @@ void common(void) {
+ 	BLANK();
  	OFFSET(TI_flags, thread_info, flags);
  	OFFSET(TI_status, thread_info, status);
- 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 +	OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
  
  	BLANK();
- 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -85,4 +86,5 @@ void common(void) {
+ 	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
+@@ -88,4 +89,5 @@ void common(void) {
  
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
index 8b38c9b..efa8ef5 100644
--- a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at linaro.org>
 Date: Thu, 10 Dec 2015 10:58:51 -0800
 Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 When running some ptrace single step tests on x86-32 machine, the below problem
 is triggered:
diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
index 3b1f82e..42433b1 100644
--- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 16 Dec 2010 14:25:18 +0100
 Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 CPU bringup calls into the random pool to initialize the stack
 canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
index 974be0c..06c5941 100644
--- a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 02:21:32 +0200
 Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz
 
 Simplifies the separation of anon_rw_semaphores and rw_semaphores for
 -rt.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -230,8 +230,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -231,8 +231,11 @@ config ARCH_MAY_HAVE_PC_FDC
  	def_bool y
  	depends on ISA_DMA_API
  
diff --git a/debian/patches/series-rt b/debian/patches/series-rt
index 1870d9e..0523783 100644
--- a/debian/patches/series-rt
+++ b/debian/patches/series-rt
@@ -1,25 +1,23 @@
 ###########################################################
 # DELTA against a known Linus release
 ###########################################################
+features/all/rt/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
+features/all/rt/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
 
 ############################################################
 # UPSTREAM changes queued
 ############################################################
-features/all/rt/ARM-imx-always-use-TWD-on-IMX6Q.patch
 
 ############################################################
 # UPSTREAM FIXES, patches pending
 ############################################################
+features/all/rt/timer-make-the-base-lock-raw.patch
+features/all/rt/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
 
 ############################################################
 # Stuff broken upstream, patches submitted
 ############################################################
-features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
-features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
 features/all/rt/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
-features/all/rt/crypto-ccp-remove-rwlocks_types.h.patch
-features/all/rt/infiniband-ulp-ipoib-remove-pkey_mutex.patch
-features/all/rt/sched-preempt-Fix-preempt_count-manipulations.patch
 
 # Those two should vanish soon (not use PIT during bootup)
 features/all/rt/at91_dont_enable_disable_clock.patch
@@ -34,6 +32,12 @@ features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-inva
 # Stuff broken upstream, need to be sent
 ############################################################
 features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+features/all/rt/fs-dcache-include-wait.h.patch
+features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
+features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
+features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+features/all/rt/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
 
 # Wants a different fix for upstream
 
@@ -74,7 +78,6 @@ features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
 ############################################################
 # Stuff which should go upstream ASAP
 ############################################################
-features/all/rt/trace-correct-off-by-one-while-recording-the-trace-e.patch
 
 # SCHED BLOCK/WQ
 features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -211,10 +214,8 @@ features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
 features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
 features/all/rt/genirq-disable-irqpoll-on-rt.patch
 features/all/rt/genirq-force-threading.patch
-features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
 
 # DRIVERS NET
-features/all/rt/drivers-net-fix-livelock-issues.patch
 features/all/rt/drivers-net-vortex-fix-locking-issues.patch
 
 # MM PAGE_ALLOC
@@ -262,9 +263,7 @@ features/all/rt/relay-fix-timer-madness.patch
 
 # TIMERS
 features/all/rt/timers-prepare-for-full-preemption.patch
-features/all/rt/timers-preempt-rt-support.patch
 features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
-features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch
 
 # HRTIMERS
 features/all/rt/hrtimers-prepare-full-preemption.patch
@@ -272,7 +271,6 @@ features/all/rt/hrtimer-enfore-64byte-alignment.patch
 features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
 features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
 features/all/rt/timer-fd-avoid-live-lock.patch
-features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
 features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
 
 # POSIX-CPU-TIMERS
@@ -318,17 +316,15 @@ features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
 features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
 features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
 features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
-features/all/rt/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
 
-# RAID5
-features/all/rt/md-raid5-percpu-handling-rt-aware.patch
-#
-features/all/rt/i915_compile_fix.patch
+# compile fix due to rtmutex locks
+features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
+features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
 
 # FUTEX/RTMUTEX
 features/all/rt/rtmutex-futex-prepare-rt.patch
 features/all/rt/futex-requeue-pi-fix.patch
-features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
 
 # RTMUTEX
 features/all/rt/pid.h-include-atomic.h.patch
@@ -338,7 +334,7 @@ features/all/rt/spinlock-types-separate-raw.patch
 features/all/rt/rtmutex-avoid-include-hell.patch
 features/all/rt/rtmutex_dont_include_rcu.patch
 features/all/rt/rt-add-rt-locks.patch
-features/all/rt/rtmutex-Use-chainwalking-control-enum.patch
+features/all/rt/kernel-futex-don-t-deboost-too-early.patch
 features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
 features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
 
@@ -347,12 +343,10 @@ features/all/rt/peter_zijlstra-frob-rcu.patch
 features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
 features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
 features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
-features/all/rt/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
-features/all/rt/rcu-disable-more-spots-of-rcu_bh.patch
 
 # LGLOCKS - lovely
 features/all/rt/lglocks-rt.patch
-features/all/rt/lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unlock.patch
+features/all/rt/lockinglglocks_Use_preempt_enabledisable_nort.patch
 
 # STOP machine (depend on lglock & rtmutex)
 features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -369,6 +363,8 @@ features/all/rt/wait.h-include-atomic.h.patch
 features/all/rt/work-simple-Simple-work-queue-implemenation.patch
 features/all/rt/completion-use-simple-wait-queues.patch
 features/all/rt/fs-aio-simple-simple-work.patch
+features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
 
 # FS
 features/all/rt/fs-namespace-preemption-fix.patch
@@ -392,6 +388,7 @@ features/all/rt/block-mq-use-cpu_light.patch
 features/all/rt/block-mq-drop-preempt-disable.patch
 features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
 features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch
+features/all/rt/md-raid5-percpu-handling-rt-aware.patch
 
 # CPU CHILL
 features/all/rt/rt-introduce-cpu-chill.patch
@@ -407,6 +404,7 @@ features/all/rt/block-use-cpu-chill.patch
 # FS LIVELOCK PREVENTION
 features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
 features/all/rt/net-use-cpu-chill.patch
+features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
 
 # WORKQUEUE more fixes
 features/all/rt/workqueue-use-rcu.patch
@@ -424,6 +422,9 @@ features/all/rt/debugobjects-rt.patch
 # JUMPLABEL
 features/all/rt/jump-label-rt.patch
 
+# SEQLOCKS
+features/all/rt/seqlock-prevent-rt-starvation.patch
+
 # NETWORKING
 features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
 features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
@@ -432,9 +433,9 @@ features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
 features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
 features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
 features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
-
-# NETWORK livelock fix
-features/all/rt/net-tx-action-avoid-livelock-on-rt.patch
+features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
+features/all/rt/net-add-a-lock-around-icmp_sk.patch
 
 # NETWORK DEBUGGING AID
 features/all/rt/ping-sysrq.patch
@@ -510,9 +511,6 @@ features/all/rt/cpumask-disable-offstack-on-rt.patch
 # RANDOM
 features/all/rt/random-make-it-work-on-rt.patch
 
-# SEQLOCKS
-features/all/rt/seqlock-prevent-rt-starvation.patch
-
 # HOTPLUG
 features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
 features/all/rt/cpu-rt-rework-cpu-down.patch
@@ -522,7 +520,6 @@ features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
 features/all/rt/cpu_down_move_migrate_enable_back.patch
 features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
 
-features/all/rt/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
 features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
 
 # SCSCI QLA2xxx
@@ -553,19 +550,15 @@ features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
 
 # PREEMPT LAZY
 features/all/rt/preempt-lazy-support.patch
-features/all/rt/preempt-lazy-check-preempt_schedule.patch
 features/all/rt/x86-preempt-lazy.patch
 features/all/rt/arm-preempt-lazy-support.patch
-features/all/rt/arm-lazy-preempt-correct-resched-condition.patch
 features/all/rt/powerpc-preempt-lazy-support.patch
 features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
-features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
 
 # LEDS
 features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
 
 # DRIVERS
-features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch
 features/all/rt/mmci-remove-bogus-irq-save.patch
 features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
 features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git


