[linux] 02/02: [rt] Update to 4.11.5-rt1 and reenable

debian-kernel at lists.debian.org
Sun Jun 18 17:16:10 UTC 2017


This is an automated email from the git hooks/post-receive script.

benh pushed a commit to branch sid
in repository linux.

commit b39658b2cf27f0b9e79a39fd16392a4db2f85165
Author: Ben Hutchings <ben at decadent.org.uk>
Date:   Sun Jun 18 18:14:20 2017 +0100

    [rt] Update to 4.11.5-rt1 and reenable
---
 debian/changelog                                   |   1 +
 debian/config/defines                              |   2 +-
 .../0001-futex-Avoid-freeing-an-active-timer.patch |   9 +-
 ...eanup-variable-names-for-futex_top_waiter.patch |  18 +-
 ...topology-Remove-cpus_allowed-manipulation.patch |  54 ++
 ...t-Pin-init-task-to-the-boot-CPU-initially.patch |  75 ++
 ...x-Deboost-before-waking-up-the-top-waiter.patch |  23 +-
 ...-Fix-early-boot-preempt-assumption-in-__s.patch |  63 ++
 .../rt/0002-arm-Adjust-system_state-check.patch    |  38 +
 ...mall-and-harmless-looking-inconsistencies.patch |  13 +-
 ...-Use-smp_store_release-in-mark_wake_futex.patch |   4 +-
 ...ex-deadline-Fix-a-PI-crash-for-deadline-t.patch |  35 +-
 .../0002-workqueue-Provide-work_on_cpu_safe.patch  |  85 +++
 .../rt/0003-arm64-Adjust-system_state-check.patch  |  39 ++
 ...rify-mark_wake_futex-memory-barrier-usage.patch |   9 +-
 ...3-futex-Remove-rt_mutex_deadlock_account_.patch |  22 +-
 ...-salinfo-Replace-racy-task-affinity-logic.patch | 130 ++++
 ...ine-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch |   9 +-
 .../rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch  |   9 +-
 ...mutex-Provide-futex-specific-rt_mutex-API.patch |  22 +-
 ...n-hwperf-Replace-racy-task-affinity-logic.patch |  77 +++
 .../features/all/rt/0004-rtmutex-Clean-up.patch    |  27 +-
 .../0004-x86-smp-Adjust-system_state-check.patch   |  35 +
 .../all/rt/0005-futex-Change-locking-rules.patch   |  36 +-
 .../rt/0005-metag-Adjust-system_state-check.patch  |  37 +
 ...mp-Replace-open-coded-task-affinity-logic.patch |  90 +++
 ...5-sched-rtmutex-Refactor-rt_mutex_setprio.patch |  44 +-
 .../all/rt/0006-futex-Cleanup-refcounting.patch    |  12 +-
 .../0006-powerpc-Adjust-system_state-check.patch   |  40 ++
 ...hed-tracing-Update-trace_sched_pi_setprio.patch |   9 +-
 ...rc-sysfs-Replace-racy-task-affinity-logic.patch | 119 ++++
 .../rt/0007-ACPI-Adjust-system_state-check.patch   |  39 ++
 ...sor-Fix-error-handling-in-__acpi_processo.patch |  46 ++
 ...ework-inconsistent-rt_mutex-futex_q-state.patch |   8 +-
 ...0007-rtmutex-Fix-PI-chain-order-integrity.patch |  21 +-
 ...rocessor-Replace-racy-task-affinity-logic.patch | 194 ++++++
 ...rt_mutex_futex_unlock-out-from-under-hb-l.patch |  40 +-
 .../all/rt/0008-mm-Adjust-system_state-check.patch |  43 ++
 .../0008-rtmutex-Fix-more-prio-comparisons.patch   |  17 +-
 ...req-ia64-Replace-racy-task-affinity-logic.patch | 210 ++++++
 ...-cpufreq-pasemi-Adjust-system_state-check.patch |  39 ++
 ...x-rt_mutex-Introduce-rt_mutex_init_waiter.patch |   8 +-
 ...g-preempt-count-leak-in-rt_mutex_futex_un.patch |   9 +-
 ...ufreq-sh-Replace-racy-task-affinity-logic.patch | 121 ++++
 ...tex-Restructure-rt_mutex_finish_proxy_loc.patch |  10 +-
 ...010-iommu-vt-d-Adjust-system_state-checks.patch |  48 ++
 ...parc-us3-Replace-racy-task-affinity-logic.patch | 125 ++++
 ...k-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch |  16 +-
 .../rt/0012-async-Adjust-system_state-checks.patch |  62 ++
 ...arc-us2e-Replace-racy-task-affinity-logic.patch | 130 ++++
 .../0012-futex-Futex_unlock_pi-determinism.patch   |   8 +-
 ...rypto-N2-Replace-racy-task-affinity-logic.patch |  96 +++
 .../0013-extable-Adjust-system_state-checks.patch  |  37 +
 ...-hb-lock-before-enqueueing-on-the-rtmutex.patch |  12 +-
 .../0014-printk-Adjust-system_state-checks.patch   |  36 +
 ...0015-mm-vmscan-Adjust-system_state-checks.patch |  40 ++
 ...16-init-Introduce-SYSTEM_SCHEDULING-state.patch |  61 ++
 ...Enable-might_sleep-and-smp_processor_id-c.patch |  75 ++
 ...irq-in-translation-section-permission-fau.patch |   6 +-
 ...UFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch |  44 ++
 ...CK-printk-drop-the-logbuf_lock-more-often.patch |  37 +-
 ...64-downgrade-preempt_disable-d-region-to-.patch |   8 +-
 ...lapic-mark-LAPIC-timer-handler-as-irqsafe.patch |   4 +-
 ...NFSv4-replace-seqcount_t-with-a-seqlock_t.patch |  10 +-
 ...om-invalidate-batched-entropy-after-crng-.patch | 162 +++++
 ...mers-Don-t-wake-ktimersoftd-on-every-tick.patch | 218 ------
 ...vert-acpi_gbl_hardware-lock-back-to-a-raw.patch |   4 +-
 .../features/all/rt/add_migrate_disable.patch      | 256 +++++++
 .../rt/arch-arm64-Add-lazy-preempt-support.patch   |  60 +-
 ...t-remove-irq-handler-when-clock-is-unused.patch |   2 +-
 ...-at91-tclib-default-to-tclib-timer-for-rt.patch |   2 +-
 .../all/rt/arm-convert-boot-lock-to-raw.patch      |  10 +-
 .../all/rt/arm-enable-highmem-for-rt.patch         |   2 +-
 .../all/rt/arm-highmem-flush-tlb-on-unmap.patch    |   2 +-
 .../rt/arm-include-definition-for-cpumask_t.patch  |   2 +-
 ...arm-kprobe-replace-patch_lock-to-raw-lock.patch |   2 +-
 .../features/all/rt/arm-preempt-lazy-support.patch |   8 +-
 .../features/all/rt/arm-unwind-use_raw_lock.patch  |   2 +-
 .../rt/arm64-xen--Make-XEN-depend-on-non-rt.patch  |   4 +-
 .../all/rt/at91_dont_enable_disable_clock.patch    |   2 +-
 .../all/rt/ata-disable-interrupts-if-non-rt.patch  |  18 +-
 .../features/all/rt/block-blk-mq-use-swait.patch   |  32 +-
 .../block-mq-don-t-complete-requests-via-IPI.patch |  38 +-
 .../all/rt/block-mq-drop-preempt-disable.patch     |  12 +-
 .../features/all/rt/block-mq-use-cpu_light.patch   |   4 +-
 .../block-shorten-interrupt-disabled-regions.patch |  12 +-
 .../features/all/rt/block-use-cpu-chill.patch      |  24 +-
 .../all/rt/bug-rt-dependend-variants.patch         |   2 +-
 ...ps-scheduling-while-atomic-in-cgroup-code.patch |  14 +-
 .../cgroups-use-simple-wait-in-css_release.patch   |  26 +-
 ...-random-don-t-print-that-the-init-is-done.patch | 167 +++++
 ...-drivers-timer-atmel-pit-fix-double-free_.patch |   2 +-
 ...clocksource-tclib-allow-higher-clockrates.patch |   2 +-
 .../all/rt/completion-use-simple-wait-queues.patch |  64 +-
 .../all/rt/cond-resched-lock-rt-tweak.patch        |   4 +-
 .../features/all/rt/cond-resched-softirq-rt.patch  |   8 +-
 ...n_proc-Protect-send_msg-with-a-local-lock.patch |   2 +-
 ...g-Document-why-PREEMPT_RT-uses-a-spinlock.patch |   4 +-
 ...ke-hotplug-lock-a-sleeping-spinlock-on-rt.patch |  14 +-
 .../features/all/rt/cpu-rt-rework-cpu-down.patch   |  46 +-
 ...l-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch |  14 +-
 .../all/rt/cpu_down_move_migrate_enable_back.patch |  12 +-
 ...req-drop-K8-s-driver-from-beeing-selected.patch |   4 +-
 .../all/rt/cpumask-disable-offstack-on-rt.patch    |   6 +-
 ...t-Convert-callback_lock-to-raw_spinlock_t.patch |  44 +-
 ...educe-preempt-disabled-regions-more-algos.patch |   6 +-
 .../patches/features/all/rt/debugobjects-rt.patch  |   4 +-
 .../all/rt/delayacct-use-raw_spinlocks.patch       |  82 +++
 .../patches/features/all/rt/dm-make-rt-aware.patch |   4 +-
 ...ck-zram-Replace-bit-spinlocks-with-rtmute.patch |  37 +-
 .../rt/drivers-net-8139-disable-irq-nosync.patch   |   4 +-
 .../rt/drivers-net-vortex-fix-locking-issues.patch |   4 +-
 ...ers-random-reduce-preempt-disabled-region.patch |   6 +-
 .../all/rt/drivers-tty-fix-omap-lock-crap.patch    |   6 +-
 .../rt/drivers-tty-pl011-irq-disable-madness.patch |   6 +-
 ...m-Don-t-disable-preemption-in-zcomp_strea.patch |  22 +-
 ...15-drop-trace_i915_gem_ring_dispatch-onrt.patch |   4 +-
 .../rt/drm-i915-init-spinlock-properly-on-RT.patch |  27 +
 ...ock_irq()_in_intel_pipe_update_startend().patch |   2 +-
 ...empt_disableenable_rt()_where_recommended.patch |   6 +-
 .../features/all/rt/epoll-use-get-cpu-light.patch  |   2 +-
 .../all/rt/fs-aio-simple-simple-work.patch         |   4 +-
 .../features/all/rt/fs-block-rt-support.patch      |   4 +-
 .../features/all/rt/fs-dcache-include-wait.h.patch |  24 -
 .../rt/fs-dcache-init-in_lookup_hashtable.patch    |   4 +-
 .../fs-dcache-use-cpu-chill-in-trylock-loops.patch |  10 +-
 ...ache-use-swait_queue-instead-of-waitqueue.patch |  20 +-
 .../all/rt/fs-jbd-replace-bh_state-lock.patch      |   2 +-
 ...bd2-pull-your-plug-when-waiting-for-space.patch |   2 +-
 .../all/rt/fs-namespace-preemption-fix.patch       |   4 +-
 .../fs-nfs-turn-rmdir_sem-into-a-semaphore.patch   |  10 +-
 .../all/rt/fs-ntfs-disable-interrupt-non-rt.patch  |   6 +-
 .../rt/fs-replace-bh_uptodate_lock-for-rt.patch    |  20 +-
 .../all/rt/ftrace-Fix-trace-header-alignment.patch |   6 +-
 .../all/rt/ftrace-migrate-disable-tracing.patch    |  12 +-
 ...e-lock-unlock-symetry-versus-pi_lock-and-.patch |   4 +-
 .../features/all/rt/futex-requeue-pi-fix.patch     |   6 +-
 ...-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch |   6 +-
 ...tex-rtmutex-Cure-RT-double-blocking-issue.patch |   8 +-
 ...round-migrate_disable-enable-in-different.patch |   6 +-
 .../all/rt/genirq-disable-irqpoll-on-rt.patch      |   6 +-
 ...ot-invoke-the-affinity-callback-via-a-wor.patch |  31 +-
 .../features/all/rt/genirq-force-threading.patch   |   8 +-
 ...pdate-irq_set_irqchip_state-documentation.patch |   4 +-
 .../rt/gpu_don_t_check_for_the_lock_owner.patch    |  33 -
 ...-set_cpus_allowed_ptr-in-sync_unplug_thre.patch |   4 +-
 .../all/rt/hotplug-light-get-online-cpus.patch     |  85 ++-
 ...lug-sync_unplug-no-27-5cn-27-in-task-name.patch |   4 +-
 .../all/rt/hotplug-use-migrate-disable.patch       |  12 +-
 ...-Move-schedule_work-call-to-helper-thread.patch |   2 +-
 .../all/rt/hrtimer-enfore-64byte-alignment.patch   |   4 +-
 ...up-hrtimer-callback-changes-for-preempt-r.patch |  49 +-
 .../all/rt/hrtimers-prepare-full-preemption.patch  |  40 +-
 ...warning-from-i915-when-running-on-PREEMPT.patch |   6 +-
 .../all/rt/ide-use-nort-local-irq-variants.patch   |  10 +-
 .../all/rt/idr-use-local-lock-for-protection.patch | 124 ----
 .../rt/infiniband-mellanox-ib-use-nort-irq.patch   |   2 +-
 .../all/rt/inpt-gameport-use-local-irq-nort.patch  |   2 +-
 .../rt/introduce_migrate_disable_cpu_light.patch   | 281 --------
 .../all/rt/iommu-amd--Use-WARN_ON_NORT.patch       |   6 +-
 ...don-t-disable-preempt-around-this_cpu_ptr.patch |  12 +-
 ...don-t-disable-preemption-while-accessing-.patch |   8 +-
 .../all/rt/ipc-sem-rework-semaphore-wakeups.patch  |  70 --
 ...-softirq-processing-in-irq-thread-context.patch |   6 +-
 ...irqwork-Move-irq-safe-work-to-irq-context.patch |   6 +-
 ...qwork-push_most_work_into_softirq_context.patch |  10 +-
 debian/patches/features/all/rt/jump-label-rt.patch |   4 +-
 .../all/rt/kconfig-disable-a-few-options-rt.patch  |   4 +-
 .../features/all/rt/kconfig-preempt-rt-full.patch  |   2 +-
 .../kernel-SRCU-provide-a-static-initializer.patch |   6 +-
 ...fix-cpu-down-problem-if-kthread-s-cpu-is-.patch |  14 +-
 ...plug-restore-original-cpu-mask-oncpu-down.patch |  20 +-
 ...ate_disable-do-fastpath-in-atomic-irqs-of.patch |  34 -
 ...-mark-perf_cpu_context-s-timer-as-irqsafe.patch |   4 +-
 ...tk-Don-t-try-to-print-from-IRQ-NMI-region.patch |   6 +-
 ...d-Provide-a-pointer-to-the-valid-CPU-mask.patch | 761 +++++++++++++++++++++
 ...d-move-stack-kprobe-clean-up-to-__put_tas.patch |   6 +-
 .../rt/kernel-softirq-unlock-with-irqs-on.patch    |   2 +-
 .../features/all/rt/kgb-serial-hackaround.patch    |  35 +-
 debian/patches/features/all/rt/latency-hist.patch  |  89 +--
 .../latency_hist-update-sched_wakeup-probe.patch   |   2 +-
 .../all/rt/latencyhist-disable-jump-labels.patch   |   4 +-
 .../leds-trigger-disable-CPU-trigger-on-RT.patch   |   2 +-
 .../rt/list_bl-fixup-bogus-lockdep-warning.patch   |   2 +-
 .../list_bl.h-make-list-head-locking-RT-safe.patch |   2 +-
 .../all/rt/local-irq-rt-depending-variants.patch   |   2 +-
 .../all/rt/locallock-add-local_lock_on.patch       |   2 +-
 debian/patches/features/all/rt/localversion.patch  |   4 +-
 ...-compilation-error-for-CONFIG_MODULES-and.patch |   6 +-
 .../rt/lockdep-Fix-per-cpu-static-objects.patch    |  10 +-
 ...dle-statically-initialized-PER_CPU-locks-.patch |  26 +-
 .../rt/lockdep-no-softirq-accounting-on-rt.patch   |   6 +-
 ...ftest-fix-warnings-due-to-missing-PREEMPT.patch |   2 +-
 ...-do-hardirq-context-test-for-raw-spinlock.patch |   2 +-
 ...ktorture-Do-NOT-include-rwlock.h-directly.patch |   2 +-
 ...cpu-rwsem-use-swait-for-the-wating-writer.patch |  73 --
 .../features/all/rt/md-disable-bcache.patch        |   2 +-
 .../all/rt/md-raid5-percpu-handling-rt-aware.patch |  14 +-
 .../all/rt/mips-disable-highmem-on-rt.patch        |   4 +-
 .../mm--rt--Fix-generic-kmap_atomic-for-RT.patch   |   2 +-
 ...dev-don-t-disable-IRQs-in-wb_congested_pu.patch |   4 +-
 .../all/rt/mm-bounce-local-irq-save-nort.patch     |   2 +-
 .../all/rt/mm-convert-swap-to-percpu-locked.patch  |  43 +-
 .../features/all/rt/mm-disable-sloub-rt.patch      |   6 +-
 .../patches/features/all/rt/mm-enable-slub.patch   |  88 +--
 .../features/all/rt/mm-make-vmstat-rt-aware.patch  |   2 +-
 ...ol-Don-t-call-schedule_work_on-in-preempt.patch |   6 +-
 .../all/rt/mm-memcontrol-do_not_disable_irq.patch  |  18 +-
 ...ol-mem_cgroup_migrate-replace-another-loc.patch |   4 +-
 ...m-page-alloc-use-local-lock-on-target-cpu.patch |   4 +-
 ...m-page_alloc-reduce-lock-sections-further.patch |  20 +-
 .../mm-page_alloc-rt-friendly-per-cpu-pages.patch  | 135 ++--
 .../rt/mm-perform-lru_add_drain_all-remotely.patch |  42 +-
 .../all/rt/mm-protect-activate-switch-mm.patch     |  10 +-
 .../all/rt/mm-rt-kmap-atomic-scheduling.patch      |  30 +-
 .../mm-scatterlist-dont-disable-irqs-on-RT.patch   |  30 +-
 .../all/rt/mm-vmalloc-use-get-cpu-light.patch      |  12 +-
 ...et-do-not-protect-workingset_shadow_nodes.patch | 182 +++--
 ...smalloc_copy_with_get_cpu_var_and_locking.patch |  18 +-
 .../all/rt/mmci-remove-bogus-irq-save.patch        |   6 +-
 .../all/rt/move_sched_delayed_work_to_helper.patch |   2 +-
 .../features/all/rt/mutex-no-spin-on-rt.patch      |   6 +-
 ...napi_schedule_irqoff-disable-interrupts-o.patch |  12 +-
 .../net-Qdisc-use-a-seqlock-instead-seqcount.patch |  45 +-
 .../all/rt/net-add-a-lock-around-icmp_sk.patch     |  55 +-
 ...k-the-missing-serialization-in-ip_send_un.patch |  21 +-
 ...r-local-irq-disable-alloc-atomic-headache.patch |  12 +-
 ...cpuhotplug-drain-input_pkt_queue-lockless.patch |   4 +-
 ...otect-users-of-napi_alloc_cache-against-r.patch |  22 +-
 ...move-explicit-do_softirq-from-busy_poll_s.patch |  28 +
 ...ays-take-qdisc-s-busylock-in-__dev_xmit_s.patch |   4 +-
 ...-iptable-xt-write-recseq-begin-rt-fallout.patch |  12 +-
 .../rt/net-make-devnet_rename_seq-a-mutex.patch    |  14 +-
 ...xmit_recursion-to-per-task-variable-on-RT.patch |  24 +-
 .../all/rt/net-prevent-abba-deadlock.patch         |   4 +-
 ...-a-way-to-delegate-processing-a-softirq-t.patch |  10 +-
 ...ev_deactivate_many-use-msleep-1-instead-o.patch |   2 +-
 .../features/all/rt/net-use-cpu-chill.patch        |   6 +-
 .../features/all/rt/net-wireless-warn-nort.patch   |   4 +-
 .../all/rt/net_disable_NET_RX_BUSY_POLL.patch      |  29 +
 .../features/all/rt/oleg-signal-rt-fix.patch       |  24 +-
 .../all/rt/panic-disable-random-on-rt.patch        |   4 +-
 ...troduce-rcu-bh-qs-where-safe-from-softirq.patch |  20 +-
 .../rt/pci-access-use-__wake_up_all_locked.patch   |  26 -
 .../features/all/rt/percpu_ida-use-locklocks.patch |  20 +-
 .../all/rt/perf-make-swevent-hrtimer-irqsafe.patch |   4 +-
 .../features/all/rt/peter_zijlstra-frob-rcu.patch  |   4 +-
 .../features/all/rt/peterz-percpu-rwsem-rt.patch   |   2 +-
 .../features/all/rt/peterz-srcu-crypto-chain.patch |   4 +-
 .../features/all/rt/pid.h-include-atomic.h.patch   |   4 +-
 .../pinctrl-qcom-Use-raw-spinlock-variants.patch   | 253 -------
 debian/patches/features/all/rt/ping-sysrq.patch    |  53 +-
 .../all/rt/posix-timers-no-broadcast.patch         |   6 +-
 ...osix-timers-thread-posix-cpu-timers-on-rt.patch | 215 +++---
 .../all/rt/power-disable-highmem-on-rt.patch       |   4 +-
 .../all/rt/power-use-generic-rwsem-on-rt.patch     |   2 +-
 ...-Disable-in-kernel-MPIC-emulation-for-PRE.patch |   2 +-
 .../all/rt/powerpc-preempt-lazy-support.patch      |  36 +-
 ...-device-init.c-adapt-to-completions-using.patch |   2 +-
 .../features/all/rt/preempt-lazy-support.patch     | 158 +++--
 .../features/all/rt/preempt-nort-rt-variants.patch |   6 +-
 ...intk-27-boot-param-to-help-with-debugging.patch |   4 +-
 debian/patches/features/all/rt/printk-kill.patch   |  45 +-
 .../patches/features/all/rt/printk-rt-aware.patch  |  51 +-
 .../ptrace-fix-ptrace-vs-tasklist_lock-race.patch  |  40 +-
 .../all/rt/radix-tree-use-local-locks.patch        |  90 ++-
 .../random-avoid-preempt_disable-ed-section.patch  |  69 +-
 .../all/rt/random-make-it-work-on-rt.patch         |  16 +-
 .../rbtree-include-rcu.h-because-we-use-it.patch   |  15 +-
 ...Eliminate-softirq-processing-from-rcutree.patch |  42 +-
 .../all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch  |   4 +-
 ...e-rcu_normal_after_boot-by-default-for-RT.patch |   4 +-
 .../all/rt/rcu-make-RCU_BOOST-default-on-RT.patch  |   4 +-
 .../rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch |  54 +-
 ...rcu-update-make-RCU_EXPEDITE_BOOT-default.patch |  61 --
 ..._bh_qs-disable-irq-while-calling-rcu_pree.patch |   4 +-
 ...-migrate_disable-race-with-cpu-hotplug-3f.patch |   4 +-
 ...t_full-arm-coredump-fails-for-cpu-3e-3d-4.patch |   4 +-
 ...ping-function-called-from-invalid-context.patch |  20 +-
 .../patches/features/all/rt/rt-add-rt-locks.patch  | 283 +++++---
 .../rt/rt-drop_mutex_disable_on_not_debug.patch    |  33 -
 .../features/all/rt/rt-introduce-cpu-chill.patch   |   6 +-
 .../features/all/rt/rt-local-irq-lock.patch        |   2 +-
 ...cking-Reenable-migration-accross-schedule.patch |  10 +-
 .../features/all/rt/rt-preempt-base-config.patch   |   2 +-
 .../features/all/rt/rt-serial-warn-fix.patch       |   2 +-
 ...x--Handle-non-enqueued-waiters-gracefully.patch |   4 +-
 .../all/rt/rtmutex-Make-lock_killable-work.patch   |   6 +-
 .../all/rt/rtmutex-Provide-locked-slowpath.patch   |  27 +-
 .../rt/rtmutex-Provide-rt_mutex_lock_state.patch   |   8 +-
 .../rt/rtmutex-add-a-first-shot-of-ww_mutex.patch  | 126 ++--
 .../all/rt/rtmutex-avoid-include-hell.patch        |   2 +-
 .../features/all/rt/rtmutex-futex-prepare-rt.patch |  30 +-
 .../features/all/rt/rtmutex-lock-killable.patch    |   4 +-
 .../all/rt/rtmutex-trylock-is-okay-on-RT.patch     |   4 +-
 .../features/all/rt/rtmutex_dont_include_rcu.patch |   6 +-
 .../rwsem-rt-Lift-single-reader-restriction.patch  |  16 +-
 .../rt/rxrpc-remove-unused-static-variables.patch  |   2 +-
 ...i-dont-t-disable-interrupts-in-qc_issue-h.patch |   2 +-
 ...nt-task-state-corruption-by-spurious-lock.patch |  78 +++
 .../features/all/rt/sched-Remove-TASK_ALL.patch    |  30 +
 ...-deadline-dl_task_timer-has-to-be-irqsafe.patch |   6 +-
 .../features/all/rt/sched-delay-put-task.patch     |  31 +-
 .../rt/sched-disable-rt-group-sched-on-rt.patch    |   4 +-
 .../features/all/rt/sched-disable-ttwu-queue.patch |   4 +-
 .../features/all/rt/sched-limit-nr-migrate.patch   |   4 +-
 ...ched-might-sleep-do-not-account-rcu-depth.patch |   8 +-
 .../features/all/rt/sched-mmdrop-delayed.patch     |  30 +-
 .../features/all/rt/sched-rt-mutex-wakeup.patch    |  33 +-
 ...hed-ttwu-ensure-success-return-is-correct.patch |   4 +-
 ...ueue-Only-wake-up-idle-workers-if-not-blo.patch |   4 +-
 .../features/all/rt/scsi-fcoe-rt-aware.patch       |  16 +-
 ...ping-function-called-from-invalid-context.patch |   2 +-
 .../all/rt/seqlock-prevent-rt-starvation.patch     |  29 +-
 .../all/rt/signal-fix-up-rcu-wreckage.patch        |   6 +-
 .../rt/signal-revert-ptrace-preempt-magic.patch    |   4 +-
 ...low-rt-tasks-to-cache-one-sigqueue-struct.patch |  55 +-
 .../features/all/rt/skbufhead-raw-lock.patch       |  24 +-
 .../all/rt/slub-disable-SLUB_CPU_PARTIAL.patch     |   4 +-
 .../all/rt/slub-enable-irqs-for-no-wait.patch      |  10 +-
 ...-snd_pcm_stream_lock-irqs_disabled-splats.patch |  10 +-
 .../rt/softirq-disable-softirq-stacks-for-rt.patch |   8 +-
 .../features/all/rt/softirq-preempt-fix-3-re.patch |  28 +-
 .../features/all/rt/softirq-split-locks.patch      |  46 +-
 ...irq-split-timer-softirqs-out-of-ksoftirqd.patch |   2 +-
 .../softirq-wake-the-timer-softirq-if-needed.patch |  22 +-
 .../sparc64-use-generic-rwsem-spinlocks-rt.patch   |   4 +-
 .../all/rt/spinlock-types-separate-raw.patch       |   2 +-
 .../features/all/rt/stop-machine-raw-lock.patch    |   2 +-
 ...ne-convert-stop_machine_run-to-PREEMPT_RT.patch |   2 +-
 ...ake-svc_xprt_do_enqueue-use-get_cpu_light.patch |   2 +-
 .../rt/suspend-prevernt-might-sleep-splats.patch   |  20 +-
 .../features/all/rt/sysfs-realtime-entry.patch     |   2 +-
 ...klets-from-going-into-infinite-spin-in-rt.patch |   8 +-
 .../thermal-Defer-thermal-wakups-to-threads.patch  |  99 ++-
 .../rt/tick-broadcast--Make-hrtimer-irqsafe.patch  |   2 +-
 .../all/rt/timekeeping-split-jiffies-lock.patch    |  20 +-
 ...delay-waking-softirqs-from-the-jiffy-tick.patch |   8 +-
 .../features/all/rt/timer-fd-avoid-live-lock.patch |   2 +-
 ...rtimer-check-properly-for-a-running-timer.patch |   4 +-
 .../all/rt/timer-make-the-base-lock-raw.patch      |  34 +-
 ...mers-Don-t-wake-ktimersoftd-on-every-tick.patch | 229 -------
 .../rt/timers-prepare-for-full-preemption.patch    |  36 +-
 ...cy-hist-Consider-new-argument-when-probin.patch |   2 +-
 ...e_version_for_preemptoff_hist_trace_point.patch |   8 +-
 ...count-for-preempt-off-in-preempt_schedule.patch |   4 +-
 ...l-8250-don-t-take-the-trylock-during-oops.patch |   4 +-
 ...t-remove-preemption-disabling-in-netif_rx.patch |   7 +-
 .../all/rt/usb-use-_nort-in-giveback.patch         |   2 +-
 .../features/all/rt/user-use-local-irq-nort.patch  |   4 +-
 .../features/all/rt/wait.h-include-atomic.h.patch  |   6 +-
 ...ue-work-around-irqsafe-timer-optimization.patch |   4 +-
 ...rk-simple-Simple-work-queue-implemenation.patch |   6 +-
 .../all/rt/workqueue-distangle-from-rq-lock.patch  |  51 +-
 .../all/rt/workqueue-prevent-deadlock-stall.patch  |  16 +-
 .../features/all/rt/workqueue-use-locallock.patch  |  44 +-
 .../features/all/rt/workqueue-use-rcu.patch        |  52 +-
 .../all/rt/x86-UV-raw_spinlock-conversion.patch    |  33 +-
 ...t-rid-of-warning-acpi_ioapic_lock-defined.patch |  44 --
 ...86-crypto-reduce-preempt-disabled-regions.patch |  51 +-
 .../x86-highmem-add-a-already-used-pte-check.patch |   2 +-
 .../all/rt/x86-io-apic-migra-no-unmask.patch       |   4 +-
 .../all/rt/x86-kvm-require-const-tsc-for-rt.patch  |   4 +-
 .../features/all/rt/x86-mce-timer-hrtimer.patch    | 132 ++--
 .../x86-mce-use-swait-queue-for-mce-wakeups.patch  |  10 +-
 .../rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch   |  39 --
 .../patches/features/all/rt/x86-preempt-lazy.patch |  40 +-
 ...x86-signal-delay-calling-signals-on-32bit.patch |   2 +-
 .../all/rt/x86-stackprot-no-random-on-rt.patch     |   2 +-
 .../all/rt/x86-use-gen-rwsem-spinlocks-rt.patch    |   4 +-
 debian/patches/series-rt                           | 111 ++-
 371 files changed, 6871 insertions(+), 4248 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 7d525ac..75e73b0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -20,6 +20,7 @@ linux (4.11.6-1) UNRELEASED; urgency=medium
   * Set ABI to 1
   * debian/rules.real: Include rules.defs before using architecture variables
     (Closes: #862842)
+  * [rt] Update to 4.11.5-rt1 and reenable
 
  -- Ben Hutchings <ben at decadent.org.uk>  Tue, 06 Jun 2017 20:43:11 +0100
 
diff --git a/debian/config/defines b/debian/config/defines
index 32218b5..7bae064 100644
--- a/debian/config/defines
+++ b/debian/config/defines
@@ -93,7 +93,7 @@ debug-info: true
 signed-modules: false
 
 [featureset-rt_base]
-enabled: false
+enabled: true
 
 [description]
 part-long-up: This kernel is not suitable for SMP (multi-processor,
diff --git a/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
index 98a92ef..171dd5e 100644
--- a/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
+++ b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
@@ -1,9 +1,8 @@
+From 97181f9bd57405b879403763284537e27d46963d Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 10 Apr 2017 18:03:36 +0200
-Subject: [PATCH] futex: Avoid freeing an active timer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 97181f9bd57405b879403763284537e27d46963d
+Subject: [PATCH 1/4] futex: Avoid freeing an active timer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Alexander reported a hrtimer debug_object splat:
 
@@ -39,7 +38,7 @@ Signed-off-by: Ingo Molnar <mingo at kernel.org>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2734,8 +2734,10 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2736,8 +2736,10 @@ static int futex_lock_pi(u32 __user *uad
  out_put_key:
  	put_futex_key(&q.key);
  out:
diff --git a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
index 487de6e..4c58bbd 100644
--- a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
+++ b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:48 +0100
 Subject: [PATCH] futex: Cleanup variable names for futex_top_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 499f5aca2cdd5e958b27e2655e7e7f82524f46b1
 
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1120,14 +1120,14 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1122,14 +1122,14 @@ static int attach_to_pi_owner(u32 uval,
  static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
  			   union futex_key *key, struct futex_pi_state **ps)
  {
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * We are the first waiter - try to look up the owner based on
-@@ -1174,7 +1174,7 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1176,7 +1176,7 @@ static int futex_lock_pi_atomic(u32 __us
  				struct task_struct *task, int set_waiters)
  {
  	u32 uval, newval, vpid = task_pid_vnr(task);
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int ret;
  
  	/*
-@@ -1200,9 +1200,9 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1202,9 +1202,9 @@ static int futex_lock_pi_atomic(u32 __us
  	 * Lookup existing state first. If it exists, try to attach to
  	 * its pi_state.
  	 */
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * No waiter and user TID is 0. We are here because the
-@@ -1292,11 +1292,11 @@ static void mark_wake_futex(struct wake_
+@@ -1294,11 +1294,11 @@ static void mark_wake_futex(struct wake_
  	q->lock_ptr = NULL;
  }
  
@@ -78,9 +78,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 -	struct futex_pi_state *pi_state = this->pi_state;
 +	struct futex_pi_state *pi_state = top_waiter->pi_state;
  	u32 uninitialized_var(curval), newval;
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
  	bool deboost;
-@@ -1317,11 +1317,11 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1319,11 +1319,11 @@ static int wake_futex_pi(u32 __user *uad
  
  	/*
  	 * It is possible that the next waiter (the one that brought
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * We pass it to the next owner. The WAITERS bit is always
-@@ -2631,7 +2631,7 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2633,7 +2633,7 @@ static int futex_unlock_pi(u32 __user *u
  	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
  	union futex_key key = FUTEX_KEY_INIT;
  	struct futex_hash_bucket *hb;
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int ret;
  
  retry:
-@@ -2655,9 +2655,9 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2657,9 +2657,9 @@ static int futex_unlock_pi(u32 __user *u
  	 * all and we at least want to know if user space fiddled
  	 * with the futex value instead of blindly unlocking.
  	 */
diff --git a/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch b/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
new file mode 100644
index 0000000..d192b76
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
@@ -0,0 +1,54 @@
+From 048c9b954e20396e0c45ee778466994d1be2e612 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:27 +0200
+Subject: [PATCH 01/13] ia64/topology: Remove cpus_allowed manipulation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The CPU hotplug callback fiddles with the cpus_allowed pointer to pin the
+calling thread on the plugged CPU. That's already guaranteed by the hotplug
+core code.
+
+Remove it.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: linux-ia64 at vger.kernel.org
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.174518069@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/ia64/kernel/topology.c |    6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/arch/ia64/kernel/topology.c
++++ b/arch/ia64/kernel/topology.c
+@@ -355,18 +355,12 @@ static int cache_add_dev(unsigned int cp
+ 	unsigned long i, j;
+ 	struct cache_info *this_object;
+ 	int retval = 0;
+-	cpumask_t oldmask;
+ 
+ 	if (all_cpu_cache_info[cpu].kobj.parent)
+ 		return 0;
+ 
+-	oldmask = current->cpus_allowed;
+-	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-	if (unlikely(retval))
+-		return retval;
+ 
+ 	retval = cpu_cache_sysfs_init(cpu);
+-	set_cpus_allowed_ptr(current, &oldmask);
+ 	if (unlikely(retval < 0))
+ 		return retval;
+ 
diff --git a/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch b/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
new file mode 100644
index 0000000..aecafe3
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
@@ -0,0 +1,75 @@
+From 8fb12156b8db61af3d49f3e5e104568494581d1f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:32 +0200
+Subject: [PATCH 01/17] init: Pin init task to the boot CPU, initially
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+Some of the boot code in init_kernel_freeable() which runs before SMP
+bringup assumes (rightfully) that it runs on the boot CPU and therefore can
+use smp_processor_id() in preemptible context.
+
+That works so far because the smp_processor_id() check starts to be
+effective after smp bringup. That's just wrong. Starting with SMP bringup
+and the ability to move threads around, smp_processor_id() in preemptible
+context is broken.
+
+Aside of that it does not make sense to allow init to run on all CPUs
+before sched_smp_init() has been run.
+
+Pin the init to the boot CPU so the existing code can continue to use
+smp_processor_id() without triggering the checks when the enabling of those
+checks starts earlier.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184734.943149935@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ init/main.c |   17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -389,6 +389,7 @@ static __initdata DECLARE_COMPLETION(kth
+ 
+ static noinline void __ref rest_init(void)
+ {
++	struct task_struct *tsk;
+ 	int pid;
+ 
+ 	rcu_scheduler_starting();
+@@ -397,7 +398,17 @@ static noinline void __ref rest_init(voi
+ 	 * the init task will end up wanting to create kthreads, which, if
+ 	 * we schedule it before we create kthreadd, will OOPS.
+ 	 */
+-	kernel_thread(kernel_init, NULL, CLONE_FS);
++	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
++	/*
++	 * Pin init on the boot CPU. Task migration is not properly working
++	 * until sched_init_smp() has been run. It will set the allowed
++	 * CPUs for init to the non isolated CPUs.
++	 */
++	rcu_read_lock();
++	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
++	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
++	rcu_read_unlock();
++
+ 	numa_default_policy();
+ 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+ 	rcu_read_lock();
+@@ -1011,10 +1022,6 @@ static noinline void __init kernel_init_
+ 	 * init can allocate pages on any node
+ 	 */
+ 	set_mems_allowed(node_states[N_MEMORY]);
+-	/*
+-	 * init can run on any cpu.
+-	 */
+-	set_cpus_allowed_ptr(current, cpu_all_mask);
+ 
+ 	cad_pid = task_pid(current);
+ 
diff --git a/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
index 2c2eb87..c88ea6d 100644
--- a/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
+++ b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
@@ -1,9 +1,8 @@
+From 2a1c6029940675abb2217b590512dbf691867ec4 Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:07 +0100
-Subject: [PATCH] rtmutex: Deboost before waking up the top waiter
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 2a1c6029940675abb2217b590512dbf691867ec4
+Subject: [PATCH 1/9] rtmutex: Deboost before waking up the top waiter
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We should deboost before waking the high-priority task, such that we
 don't run two tasks with the same "state" (priority, deadline,
@@ -45,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1458,10 +1458,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1460,10 +1460,7 @@ static int wake_futex_pi(u32 __user *uad
  out_unlock:
  	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  
@@ -59,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -371,24 +371,6 @@ static void __rt_mutex_adjust_prio(struc
+@@ -373,24 +373,6 @@ static void __rt_mutex_adjust_prio(struc
  }
  
  /*
@@ -84,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Deadlock detection is conditional:
   *
   * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
-@@ -1049,6 +1031,7 @@ static void mark_wakeup_next_waiter(stru
+@@ -1051,6 +1033,7 @@ static void mark_wakeup_next_waiter(stru
  	 * lock->wait_lock.
  	 */
  	rt_mutex_dequeue_pi(current, waiter);
@@ -92,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * As we are waking up the top waiter, and the waiter stays
-@@ -1391,6 +1374,16 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1393,6 +1376,16 @@ static bool __sched rt_mutex_slowunlock(
  	 */
  	mark_wakeup_next_waiter(wake_q, lock);
  
@@ -109,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  
  	/* check PI boosting */
-@@ -1440,6 +1433,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1442,6 +1435,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
  	return slowfn(lock);
  }
  
@@ -128,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline void
  rt_mutex_fastunlock(struct rt_mutex *lock,
  		    bool (*slowfn)(struct rt_mutex *lock,
-@@ -1453,11 +1458,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1455,11 +1460,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  
  	deboost = slowfn(lock, &wake_q);
  
@@ -141,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -1570,6 +1571,13 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1572,6 +1573,13 @@ bool __sched __rt_mutex_futex_unlock(str
  	}
  
  	mark_wakeup_next_waiter(wake_q, lock);
@@ -155,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return true; /* deboost and wakeups */
  }
  
-@@ -1582,10 +1590,7 @@ void __sched rt_mutex_futex_unlock(struc
+@@ -1584,10 +1592,7 @@ void __sched rt_mutex_futex_unlock(struc
  	deboost = __rt_mutex_futex_unlock(lock, &wake_q);
  	raw_spin_unlock_irq(&lock->wait_lock);
  
diff --git a/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch b/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
new file mode 100644
index 0000000..3a4f7b4
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
@@ -0,0 +1,63 @@
+From 45aea321678856687927c53972321ebfab77759a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Wed, 24 May 2017 08:52:02 +0200
+Subject: [PATCH] sched/clock: Fix early boot preempt assumption in
+ __set_sched_clock_stable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The more strict early boot preemption warnings found that
+__set_sched_clock_stable() was incorrectly assuming we'd still be
+running on a single CPU:
+
+  BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1
+  caller is debug_smp_processor_id+0x1c/0x1e
+  CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.12.0-rc2-00108-g1c3c5ea #1
+  Call Trace:
+   dump_stack+0x110/0x192
+   check_preemption_disabled+0x10c/0x128
+   ? set_debug_rodata+0x25/0x25
+   debug_smp_processor_id+0x1c/0x1e
+   sched_clock_init_late+0x27/0x87
+  [...]
+
+Fix it by disabling IRQs.
+
+Reported-by: kernel test robot <xiaolong.ye at intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: lkp at 01.org
+Cc: tipbuild at zytor.com
+Link: http://lkml.kernel.org/r/20170524065202.v25vyu7pvba5mhpd@hirez.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/sched/clock.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/clock.c
++++ b/kernel/sched/clock.c
+@@ -126,12 +126,19 @@ int sched_clock_stable(void)
+ 
+ static void __set_sched_clock_stable(void)
+ {
+-	struct sched_clock_data *scd = this_scd();
++	struct sched_clock_data *scd;
+ 
+ 	/*
++	 * Since we're still unstable and the tick is already running, we have
++	 * to disable IRQs in order to get a consistent scd->tick* reading.
++	 */
++	local_irq_disable();
++	scd = this_scd();
++	/*
+ 	 * Attempt to make the (initial) unstable->stable transition continuous.
+ 	 */
+ 	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
++	local_irq_enable();
+ 
+ 	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
+ 			scd->tick_gtod, __gtod_offset,
diff --git a/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch
new file mode 100644
index 0000000..32bf4c4
--- /dev/null
+++ b/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch
@@ -0,0 +1,38 @@
+From 5976a66913a8bf42465d96776fd37fb5631edc19 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:33 +0200
+Subject: [PATCH 02/17] arm: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in ipi_cpu_stop() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Russell King <linux at armlinux.org.uk>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: linux-arm-kernel at lists.infradead.org
+Link: http://lkml.kernel.org/r/20170516184735.020718977@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ arch/arm/kernel/smp.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
+  */
+ static void ipi_cpu_stop(unsigned int cpu)
+ {
+-	if (system_state == SYSTEM_BOOTING ||
+-	    system_state == SYSTEM_RUNNING) {
++	if (system_state <= SYSTEM_RUNNING) {
+ 		raw_spin_lock(&stop_lock);
+ 		pr_crit("CPU%u: stopping\n", cpu);
+ 		dump_stack();
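
For reference, the check above relies on the system_states enum being ordered. A minimal sketch of the layout assumed here (the SYSTEM_SCHEDULING entry is the one introduced by 0016-init-Introduce-SYSTEM_SCHEDULING-state.patch later in this series; shown for illustration only, not part of this patch):

    enum system_states {
            SYSTEM_BOOTING,
            SYSTEM_SCHEDULING,      /* new: scheduler is up, boot not finished */
            SYSTEM_RUNNING,
            SYSTEM_HALT,
            SYSTEM_POWER_OFF,
            SYSTEM_RESTART,
    };

Because the states are ordered, "system_state <= SYSTEM_RUNNING" remains true through boot, early scheduling and normal operation, and only becomes false once a shutdown state is entered.
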
diff --git a/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
index 6bc8aba..c2efd94 100644
--- a/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
+++ b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
@@ -1,9 +1,8 @@
+From 94ffac5d847cfd790bb37b7cef1cad803743985e Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 7 Apr 2017 09:04:07 +0200
-Subject: [PATCH] futex: Fix small (and harmless looking) inconsistencies
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 94ffac5d847cfd790bb37b7cef1cad803743985e
+Subject: [PATCH 2/4] futex: Fix small (and harmless looking) inconsistencies
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 During (post-commit) review Darren spotted a few minor things. One
 (harmless AFAICT) type inconsistency and a comment that wasn't as
@@ -23,7 +22,7 @@ Signed-off-by: Ingo Molnar <mingo at kernel.org>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1023,7 +1023,8 @@ static int attach_to_pi_state(u32 __user
+@@ -1025,7 +1025,8 @@ static int attach_to_pi_state(u32 __user
  			      struct futex_pi_state **ps)
  {
  	pid_t pid = uval & FUTEX_TID_MASK;
@@ -33,7 +32,7 @@ Signed-off-by: Ingo Molnar <mingo at kernel.org>
  
  	/*
  	 * Userspace might have messed up non-PI and PI futexes [3]
-@@ -1439,6 +1440,11 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1441,6 +1442,11 @@ static int wake_futex_pi(u32 __user *uad
  	if (ret)
  		goto out_unlock;
  
@@ -45,7 +44,7 @@ Signed-off-by: Ingo Molnar <mingo at kernel.org>
  	raw_spin_lock(&pi_state->owner->pi_lock);
  	WARN_ON(list_empty(&pi_state->list));
  	list_del_init(&pi_state->list);
-@@ -1450,9 +1456,6 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1452,9 +1458,6 @@ static int wake_futex_pi(u32 __user *uad
  	pi_state->owner = new_owner;
  	raw_spin_unlock(&new_owner->pi_lock);
  
diff --git a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
index b11b94c..9720454 100644
--- a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
+++ b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:49 +0100
 Subject: [PATCH] futex: Use smp_store_release() in mark_wake_futex()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit  1b367ece0d7e696cab1c8501bab282cc6a538b3f
 
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1288,8 +1288,7 @@ static void mark_wake_futex(struct wake_
+@@ -1290,8 +1290,7 @@ static void mark_wake_futex(struct wake_
  	 * memory barrier is required here to prevent the following
  	 * store to lock_ptr from getting ahead of the plist_del.
  	 */
diff --git a/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
index b751d5b..d234319 100644
--- a/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
+++ b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
@@ -1,9 +1,8 @@
+From e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22 Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:08 +0100
-Subject: [PATCH] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22
+Subject: [PATCH 2/9] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 A crash happened while I was playing with deadline PI rtmutex.
 
@@ -61,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/init_task.h
 +++ b/include/linux/init_task.h
-@@ -164,6 +164,7 @@ extern struct task_group root_task_group
+@@ -181,6 +181,7 @@ extern struct cred init_cred;
  #ifdef CONFIG_RT_MUTEXES
  # define INIT_RT_MUTEXES(tsk)						\
  	.pi_waiters = RB_ROOT,						\
@@ -71,18 +70,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define INIT_RT_MUTEXES(tsk)
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1723,6 +1723,8 @@ struct task_struct {
- 	/* PI waiters blocked on a rt_mutex held by this task */
- 	struct rb_root pi_waiters;
- 	struct rb_node *pi_waiters_leftmost;
+@@ -779,6 +779,8 @@ struct task_struct {
+ 	/* PI waiters blocked on a rt_mutex held by this task: */
+ 	struct rb_root			pi_waiters;
+ 	struct rb_node			*pi_waiters_leftmost;
 +	/* Updated under owner's pi_lock and rq lock */
-+	struct task_struct	*pi_top_task;
- 	/* Deadlock detection and priority inheritance handling */
- 	struct rt_mutex_waiter *pi_blocked_on;
++	struct task_struct		*pi_top_task;
+ 	/* Deadlock detection and priority inheritance handling: */
+ 	struct rt_mutex_waiter		*pi_blocked_on;
  #endif
 --- a/include/linux/sched/rt.h
 +++ b/include/linux/sched/rt.h
-@@ -19,6 +19,7 @@ static inline int rt_task(struct task_st
+@@ -21,6 +21,7 @@ static inline int rt_task(struct task_st
  extern int rt_mutex_getprio(struct task_struct *p);
  extern void rt_mutex_setprio(struct task_struct *p, int prio);
  extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
@@ -92,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1417,6 +1417,7 @@ static void rt_mutex_init_task(struct ta
+@@ -1438,6 +1438,7 @@ static void rt_mutex_init_task(struct ta
  #ifdef CONFIG_RT_MUTEXES
  	p->pi_waiters = RB_ROOT;
  	p->pi_waiters_leftmost = NULL;
@@ -102,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -321,6 +321,19 @@ rt_mutex_dequeue_pi(struct task_struct *
+@@ -323,6 +323,19 @@ rt_mutex_dequeue_pi(struct task_struct *
  }
  
  /*
@@ -122,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Calculate task priority from the waiter tree priority
   *
   * Return task->normal_prio when the waiter tree is empty or when
-@@ -335,12 +348,12 @@ int rt_mutex_getprio(struct task_struct
+@@ -337,12 +350,12 @@ int rt_mutex_getprio(struct task_struct
  		   task->normal_prio);
  }
  
@@ -139,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -349,12 +362,12 @@ struct task_struct *rt_mutex_get_top_tas
+@@ -351,12 +364,12 @@ struct task_struct *rt_mutex_get_top_tas
   */
  int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
  {
@@ -158,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3669,6 +3669,8 @@ void rt_mutex_setprio(struct task_struct
+@@ -3712,6 +3712,8 @@ void rt_mutex_setprio(struct task_struct
  		goto out_unlock;
  	}
  
diff --git a/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch b/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
new file mode 100644
index 0000000..a9e089d
--- /dev/null
+++ b/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
@@ -0,0 +1,85 @@
+From 0e8d6a9336b487a1dd6f1991ff376e669d4c87c6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:28 +0200
+Subject: [PATCH 02/13] workqueue: Provide work_on_cpu_safe()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+work_on_cpu() is not protected against CPU hotplug. For code which requires
+to be either executed on an online CPU or to fail if the CPU is not
+available the callsite would have to protect against CPU hotplug.
+
+Provide a function which does get/put_online_cpus() around the call to
+work_on_cpu() and fails the call with -ENODEV if the target CPU is not
+online.
+
+Preparatory patch to convert several racy task affinity manipulations.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Tejun Heo <tj at kernel.org>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.262610721@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/workqueue.h |    5 +++++
+ kernel/workqueue.c        |   23 +++++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -608,8 +608,13 @@ static inline long work_on_cpu(int cpu,
+ {
+ 	return fn(arg);
+ }
++static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++{
++	return fn(arg);
++}
+ #else
+ long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4735,6 +4735,29 @@ long work_on_cpu(int cpu, long (*fn)(voi
+ 	return wfc.ret;
+ }
+ EXPORT_SYMBOL_GPL(work_on_cpu);
++
++/**
++ * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * @cpu: the cpu to run on
++ * @fn:  the function to run
++ * @arg: the function argument
++ *
++ * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
++ * any locks which would prevent @fn from completing.
++ *
++ * Return: The value @fn returns.
++ */
++long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++{
++	long ret = -ENODEV;
++
++	get_online_cpus();
++	if (cpu_online(cpu))
++		ret = work_on_cpu(cpu, fn, arg);
++	put_online_cpus();
++	return ret;
++}
++EXPORT_SYMBOL_GPL(work_on_cpu_safe);
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
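
As a usage illustration (hypothetical caller, not part of this patch): the "Replace racy task affinity logic" patches later in this series convert code that temporarily rewrote the calling task's CPU mask into calls of this shape, which fail cleanly if the target CPU is offline:

    /* Hypothetical example: run a function on @cpu without racing hotplug. */
    static long query_cpu(void *arg)
    {
            /* Runs in a kworker bound to @cpu, so smp_processor_id() is
             * stable here without touching current->cpus_allowed. */
            return smp_processor_id();
    }

    static long example(int cpu)
    {
            long ret = work_on_cpu_safe(cpu, query_cpu, NULL);

            if (ret == -ENODEV)
                    pr_warn("CPU%d is offline\n", cpu);
            return ret;
    }
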
diff --git a/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch
new file mode 100644
index 0000000..1567152
--- /dev/null
+++ b/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch
@@ -0,0 +1,39 @@
+From ef284f5ca5f102bf855e599305c0c16d6e844635 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:34 +0200
+Subject: [PATCH 03/17] arm64: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in smp_send_stop() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Mark Rutland <mark.rutland at arm.com>
+Acked-by: Catalin Marinas <catalin.marinas at arm.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: Will Deacon <will.deacon at arm.com>
+Link: http://lkml.kernel.org/r/20170516184735.112589728@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ arch/arm64/kernel/smp.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -915,8 +915,7 @@ void smp_send_stop(void)
+ 		cpumask_copy(&mask, cpu_online_mask);
+ 		cpumask_clear_cpu(smp_processor_id(), &mask);
+ 
+-		if (system_state == SYSTEM_BOOTING ||
+-		    system_state == SYSTEM_RUNNING)
++		if (system_state <= SYSTEM_RUNNING)
+ 			pr_crit("SMP: stopping secondary CPUs\n");
+ 		smp_cross_call(&mask, IPI_CPU_STOP);
+ 	}
diff --git a/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
index d6b8d4b..276b7d3 100644
--- a/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
+++ b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
@@ -1,9 +1,8 @@
+From 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd Mon Sep 17 00:00:00 2001
 From: "Darren Hart (VMware)" <dvhart at infradead.org>
 Date: Fri, 14 Apr 2017 15:31:38 -0700
-Subject: [PATCH] futex: Clarify mark_wake_futex memory barrier usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd
+Subject: [PATCH 3/4] futex: Clarify mark_wake_futex memory barrier usage
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Clarify the scenario described in mark_wake_futex requiring the
 smp_store_release(). Update the comment to explicitly refer to the
@@ -20,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1378,10 +1378,11 @@ static void mark_wake_futex(struct wake_
+@@ -1380,10 +1380,11 @@ static void mark_wake_futex(struct wake_
  	wake_q_add(wake_q, p);
  	__unqueue_futex(q);
  	/*
diff --git a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
index b957a33..1cdf2e0 100644
--- a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
+++ b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:50 +0100
 Subject: [PATCH] futex: Remove rt_mutex_deadlock_account_*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit fffa954fb528963c2fb7b0c0084eb77e2be7ab52
 
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex-debug.c
 +++ b/kernel/locking/rtmutex-debug.c
-@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex
+@@ -174,12 +174,3 @@ void debug_rt_mutex_init(struct rt_mutex
  	lock->name = name;
  }
  
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -936,8 +936,6 @@ static int try_to_take_rt_mutex(struct r
+@@ -938,8 +938,6 @@ static int try_to_take_rt_mutex(struct r
  	 */
  	rt_mutex_set_owner(lock, task);
  
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return 1;
  }
  
-@@ -1340,8 +1338,6 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1342,8 +1340,6 @@ static bool __sched rt_mutex_slowunlock(
  
  	debug_rt_mutex_unlock(lock);
  
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/*
  	 * We must be careful here if the fast path is enabled. If we
  	 * have no waiters queued we cannot set owner to NULL here
-@@ -1407,11 +1403,10 @@ rt_mutex_fastlock(struct rt_mutex *lock,
+@@ -1409,11 +1405,10 @@ rt_mutex_fastlock(struct rt_mutex *lock,
  				struct hrtimer_sleeper *timeout,
  				enum rtmutex_chainwalk chwalk))
  {
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static inline int
-@@ -1423,21 +1418,19 @@ rt_mutex_timed_fastlock(struct rt_mutex
+@@ -1425,21 +1420,19 @@ rt_mutex_timed_fastlock(struct rt_mutex
  				      enum rtmutex_chainwalk chwalk))
  {
  	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
@@ -115,10 +115,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return slowfn(lock);
  }
  
-@@ -1447,19 +1440,18 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1449,19 +1442,18 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  				   struct wake_q_head *wqh))
  {
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
 +	bool deboost;
  
 -	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /**
-@@ -1570,10 +1562,9 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1572,10 +1564,9 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
  				   struct wake_q_head *wqh)
  {
@@ -156,7 +156,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return rt_mutex_slowunlock(lock, wqh);
  }
  
-@@ -1631,7 +1622,6 @@ void rt_mutex_init_proxy_locked(struct r
+@@ -1637,7 +1628,6 @@ void rt_mutex_init_proxy_locked(struct r
  	__rt_mutex_init(lock, NULL);
  	debug_rt_mutex_proxy_lock(lock, proxy_owner);
  	rt_mutex_set_owner(lock, proxy_owner);
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /**
-@@ -1647,7 +1637,6 @@ void rt_mutex_proxy_unlock(struct rt_mut
+@@ -1657,7 +1647,6 @@ void rt_mutex_proxy_unlock(struct rt_mut
  {
  	debug_rt_mutex_proxy_unlock(lock);
  	rt_mutex_set_owner(lock, NULL);
diff --git a/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..812746a
--- /dev/null
+++ b/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,130 @@
+From 67cb85fdcee7fbc61c09c00360d1a4ae37641db4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:29 +0200
+Subject: [PATCH 03/13] ia64/salinfo: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+Some of the file operations in /proc/sal require running code on the
+requested CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the
+original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe(), which guarantees to run the code on
+the requested CPU or to fail if the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: linux-ia64 at vger.kernel.org
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.341863457@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/ia64/kernel/salinfo.c |   31 ++++++++++++-------------------
+ 1 file changed, 12 insertions(+), 19 deletions(-)
+
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -179,14 +179,14 @@ struct salinfo_platform_oemdata_parms {
+ 	const u8 *efi_guid;
+ 	u8 **oemdata;
+ 	u64 *oemdata_size;
+-	int ret;
+ };
+ 
+-static void
++static long
+ salinfo_platform_oemdata_cpu(void *context)
+ {
+ 	struct salinfo_platform_oemdata_parms *parms = context;
+-	parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
++
++	return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
+ }
+ 
+ static void
+@@ -380,16 +380,7 @@ salinfo_log_release(struct inode *inode,
+ 	return 0;
+ }
+ 
+-static void
+-call_on_cpu(int cpu, void (*fn)(void *), void *arg)
+-{
+-	cpumask_t save_cpus_allowed = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-	(*fn)(arg);
+-	set_cpus_allowed_ptr(current, &save_cpus_allowed);
+-}
+-
+-static void
++static long
+ salinfo_log_read_cpu(void *context)
+ {
+ 	struct salinfo_data *data = context;
+@@ -399,6 +390,7 @@ salinfo_log_read_cpu(void *context)
+ 	/* Clear corrected errors as they are read from SAL */
+ 	if (rh->severity == sal_log_severity_corrected)
+ 		ia64_sal_clear_state_info(data->type);
++	return 0;
+ }
+ 
+ static void
+@@ -430,7 +422,7 @@ salinfo_log_new_read(int cpu, struct sal
+ 	spin_unlock_irqrestore(&data_saved_lock, flags);
+ 
+ 	if (!data->saved_num)
+-		call_on_cpu(cpu, salinfo_log_read_cpu, data);
++		work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
+ 	if (!data->log_size) {
+ 		data->state = STATE_NO_DATA;
+ 		cpumask_clear_cpu(cpu, &data->cpu_event);
+@@ -459,11 +451,13 @@ salinfo_log_read(struct file *file, char
+ 	return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
+ }
+ 
+-static void
++static long
+ salinfo_log_clear_cpu(void *context)
+ {
+ 	struct salinfo_data *data = context;
++
+ 	ia64_sal_clear_state_info(data->type);
++	return 0;
+ }
+ 
+ static int
+@@ -486,7 +480,7 @@ salinfo_log_clear(struct salinfo_data *d
+ 	rh = (sal_log_record_header_t *)(data->log_buffer);
+ 	/* Corrected errors have already been cleared from SAL */
+ 	if (rh->severity != sal_log_severity_corrected)
+-		call_on_cpu(cpu, salinfo_log_clear_cpu, data);
++		work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
+ 	/* clearing a record may make a new record visible */
+ 	salinfo_log_new_read(cpu, data);
+ 	if (data->state == STATE_LOG_RECORD) {
+@@ -531,9 +525,8 @@ salinfo_log_write(struct file *file, con
+ 				.oemdata = &data->oemdata,
+ 				.oemdata_size = &data->oemdata_size
+ 			};
+-			call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms);
+-			if (parms.ret)
+-				count = parms.ret;
++			count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
++						 &parms);
+ 		} else
+ 			data->oemdata_size = 0;
+ 	} else
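
The race described in the changelog is easiest to see as a timeline; a
sketch of the removed call_on_cpu() pattern (illustrative, not verbatim
from any one file):

    /*
     *  user task T                        concurrent actor
     *  -----------                        ----------------
     *  saved = T->cpus_allowed;
     *  set_cpus_allowed_ptr(T, cpumask_of(cpu));
     *                                     sched_setaffinity(T, new_mask);
     *                                     (or: cpu goes offline and T is
     *                                      migrated elsewhere)
     *  fn(arg);                  <-- may now run on the wrong CPU
     *  set_cpus_allowed_ptr(T, &saved);
     *                            <-- silently discards new_mask
     */

work_on_cpu_safe() closes both windows: the work item is queued on a
per-CPU kworker, and get_online_cpus() keeps the CPU from going away while
it runs.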
diff --git a/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
index 641205f..cba5c56 100644
--- a/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
+++ b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
@@ -1,10 +1,9 @@
+From 85e2d4f992868ad78dc8bb2c077b652fcfb3661a Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:09 +0100
-Subject: [PATCH] sched/deadline/rtmutex: Dont miss the
+Subject: [PATCH 3/9] sched/deadline/rtmutex: Dont miss the
  dl_runtime/dl_period update
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 85e2d4f992868ad78dc8bb2c077b652fcfb3661a
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Currently dl tasks will actually return at the very beginning
 of rt_mutex_adjust_prio_chain() in !detect_deadlock cases:
@@ -43,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -603,7 +603,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -605,7 +605,7 @@ static int rt_mutex_adjust_prio_chain(st
  	 * enabled we continue, but stop the requeueing in the chain
  	 * walk.
  	 */
diff --git a/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
index 28e8f86..e8568e5 100644
--- a/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
+++ b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
@@ -1,9 +1,8 @@
+From 59cd42c29618c45cd3c56da43402b14f611888dd Mon Sep 17 00:00:00 2001
 From: "Darren Hart (VMware)" <dvhart at infradead.org>
 Date: Fri, 14 Apr 2017 15:46:08 -0700
-Subject: [PATCH] MAINTAINERS: Add FUTEX SUBSYSTEM
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 59cd42c29618c45cd3c56da43402b14f611888dd
+Subject: [PATCH 4/4] MAINTAINERS: Add FUTEX SUBSYSTEM
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Add a MAINTAINERS block for the FUTEX SUBSYSTEM which includes the core
 kernel code, include headers, testing code, and Documentation. Excludes
@@ -24,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
-@@ -5196,6 +5196,23 @@ F:	fs/fuse/
+@@ -5420,6 +5420,23 @@ F:	fs/fuse/
  F:	include/uapi/linux/fuse.h
  F:	Documentation/filesystems/fuse.txt
  
diff --git a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
index d827d78..ef04066 100644
--- a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
+++ b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:51 +0100
 Subject: [PATCH] futex,rt_mutex: Provide futex specific rt_mutex API
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 5293c2efda37775346885c7e924d4ef7018ea60b
 
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -914,7 +914,7 @@ void exit_pi_state_list(struct task_stru
+@@ -916,7 +916,7 @@ void exit_pi_state_list(struct task_stru
  		pi_state->owner = NULL;
  		raw_spin_unlock_irq(&curr->pi_lock);
  
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		spin_unlock(&hb->lock);
  
-@@ -1362,20 +1362,18 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1364,20 +1364,18 @@ static int wake_futex_pi(u32 __user *uad
  	pi_state->owner = new_owner;
  	raw_spin_unlock(&new_owner->pi_lock);
  
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return 0;
  }
-@@ -2251,7 +2249,7 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2253,7 +2251,7 @@ static int fixup_owner(u32 __user *uaddr
  		 * task acquired the rt_mutex after we removed ourself from the
  		 * rt_mutex waiters list.
  		 */
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			locked = 1;
  			goto out;
  		}
-@@ -2566,7 +2564,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2568,7 +2566,7 @@ static int futex_lock_pi(u32 __user *uad
  	if (!trylock) {
  		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
  	} else {
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		/* Fixup the trylock return value: */
  		ret = ret ? 0 : -EWOULDBLOCK;
  	}
-@@ -2589,7 +2587,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2591,7 +2589,7 @@ static int futex_lock_pi(u32 __user *uad
  	 * it and return the fault to userspace.
  	 */
  	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* Unqueue and drop the lock */
  	unqueue_me_pi(&q);
-@@ -2896,7 +2894,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2898,7 +2896,7 @@ static int futex_wait_requeue_pi(u32 __u
  			spin_lock(q.lock_ptr);
  			ret = fixup_pi_state_owner(uaddr2, &q, current);
  			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			/*
  			 * Drop the reference to the pi state which
  			 * the requeue_pi() code acquired for us.
-@@ -2936,7 +2934,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2938,7 +2936,7 @@ static int futex_wait_requeue_pi(u32 __u
  		 * userspace.
  		 */
  		if (ret && rt_mutex_owner(pi_mutex) == current)
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		unqueue_me_pi(&q);
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1486,15 +1486,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
+@@ -1488,15 +1488,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
  
  /*
   * Futex variant with full deadlock detection.
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /**
-@@ -1553,19 +1561,38 @@ void __sched rt_mutex_unlock(struct rt_m
+@@ -1555,19 +1563,38 @@ void __sched rt_mutex_unlock(struct rt_m
  EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  
  /**
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
 -	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 -		return false;
-+	WAKE_Q(wake_q);
++	DEFINE_WAKE_Q(wake_q);
 +	bool deboost;
  
 -	return rt_mutex_slowunlock(lock, wqh);
diff --git a/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..e0b3249
--- /dev/null
+++ b/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,77 @@
+From 9feb42ac88b516e378b9782e82b651ca5bed95c4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 6 Apr 2017 14:56:18 +0200
+Subject: [PATCH 04/13] ia64/sn/hwperf: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+sn_hwperf_op_cpu(), which is invoked from an ioctl, requires running code
+on the requested CPU. This is achieved by temporarily setting the affinity
+of the calling user space thread to the requested CPU and resetting it to
+the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe(), which guarantees to run the code on
+the requested CPU or to fail if the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: linux-ia64 at vger.kernel.org
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704122251450.2548@nanos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/ia64/sn/kernel/sn2/sn_hwperf.c |   17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
++++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+@@ -598,12 +598,17 @@ static void sn_hwperf_call_sal(void *inf
+ 	op_info->ret = r;
+ }
+ 
++static long sn_hwperf_call_sal_work(void *info)
++{
++	sn_hwperf_call_sal(info);
++	return 0;
++}
++
+ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
+ {
+ 	u32 cpu;
+ 	u32 use_ipi;
+ 	int r = 0;
+-	cpumask_t save_allowed;
+ 	
+ 	cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
+ 	use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
+@@ -629,13 +634,9 @@ static int sn_hwperf_op_cpu(struct sn_hw
+ 			/* use an interprocessor interrupt to call SAL */
+ 			smp_call_function_single(cpu, sn_hwperf_call_sal,
+ 				op_info, 1);
+-		}
+-		else {
+-			/* migrate the task before calling SAL */ 
+-			save_allowed = current->cpus_allowed;
+-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-			sn_hwperf_call_sal(op_info);
+-			set_cpus_allowed_ptr(current, &save_allowed);
++		} else {
++			/* Call on the target CPU */
++			work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info);
+ 		}
+ 	}
+ 	r = op_info->ret;
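
The small wrapper added above is a recurring pattern in this series: one
helper is shared between an IPI path and the workqueue path, which expect
different signatures. A generic sketch (do_op() is hypothetical):

    /* smp_call_function_single() expects void (*)(void *)... */
    static void do_op(void *info)
    {
            /* ... the actual work ... */
    }

    /* ...while work_on_cpu_safe() expects long (*)(void *). */
    static long do_op_work(void *info)
    {
            do_op(info);
            return 0;
    }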
diff --git a/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
index df75a04..ae6ea1f 100644
--- a/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
+++ b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
@@ -1,9 +1,8 @@
+From aa2bfe55366552cb7e93e8709d66e698d79ccc47 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:10 +0100
-Subject: [PATCH] rtmutex: Clean up
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit aa2bfe55366552cb7e93e8709d66e698d79ccc47
+Subject: [PATCH 4/9] rtmutex: Clean up
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Previous patches changed the meaning of the return value of
 rt_mutex_slowunlock(); update comments and code to reflect this.
@@ -26,16 +25,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1392,7 +1392,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1394,7 +1394,7 @@ static int wake_futex_pi(u32 __user *uad
  {
  	u32 uninitialized_var(curval), newval;
  	struct task_struct *new_owner;
 -	bool deboost = false;
 +	bool postunlock = false;
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
  	int ret = 0;
  
-@@ -1453,12 +1453,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1455,12 +1455,13 @@ static int wake_futex_pi(u32 __user *uad
  	/*
  	 * We've updated the uservalue, this unlock cannot fail.
  	 */
@@ -53,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1328,7 +1328,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(s
  
  /*
   * Slow path to release a rt-mutex.
@@ -63,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   */
  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
  					struct wake_q_head *wake_q)
-@@ -1399,8 +1400,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(
  
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  
@@ -73,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1447,15 +1447,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
  }
  
  /*
@@ -92,10 +91,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static inline void
-@@ -1464,14 +1463,12 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  				   struct wake_q_head *wqh))
  {
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
 -	bool deboost;
  
  	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
@@ -109,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -1591,19 +1588,20 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(str
  	 */
  	preempt_disable();
  
@@ -119,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
  {
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
 -	bool deboost;
 +	bool postunlock;
  
diff --git a/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
new file mode 100644
index 0000000..792544b
--- /dev/null
+++ b/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
@@ -0,0 +1,35 @@
+From 719b3680d1f789c1e3054e3fcb26bfff07c3c623 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:35 +0200
+Subject: [PATCH 04/17] x86/smp: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in announce_cpu() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/20170516184735.191715856@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ arch/x86/kernel/smpboot.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -863,7 +863,7 @@ static void announce_cpu(int cpu, int ap
+ 	if (cpu == 1)
+ 		printk(KERN_INFO "x86: Booting SMP configuration:\n");
+ 
+-	if (system_state == SYSTEM_BOOTING) {
++	if (system_state < SYSTEM_RUNNING) {
+ 		if (node != current_node) {
+ 			if (current_node > (-1))
+ 				pr_cont("\n");
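
Note that the converted predicates are deliberately not all the same:
smp_send_stop()-style callers keep the inclusive form, while announce_cpu()
only cares about the boot phase. Under the extended enum sketched earlier,
the two read as:

    /* "still coming up, not fully running yet" - announce_cpu() */
    if (system_state < SYSTEM_RUNNING)
            ...

    /* "not shutting down" - smp_send_stop() and friends */
    if (system_state <= SYSTEM_RUNNING)
            ...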
diff --git a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
index c750000..eaf05bb 100644
--- a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
+++ b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:52 +0100
 Subject: [PATCH] futex: Change locking rules
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 734009e96d1983ad739e5b656e03430b3660c913
 
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -971,6 +971,39 @@ void exit_pi_state_list(struct task_stru
+@@ -973,6 +973,39 @@ void exit_pi_state_list(struct task_stru
   *
   * [10] There is no transient state which leaves owner and user space
   *	TID out of sync.
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   */
  
  /*
-@@ -978,10 +1011,12 @@ void exit_pi_state_list(struct task_stru
+@@ -980,10 +1013,12 @@ void exit_pi_state_list(struct task_stru
   * the pi_state against the user space value. If correct, attach to
   * it.
   */
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * Userspace might have messed up non-PI and PI futexes [3]
-@@ -989,9 +1024,34 @@ static int attach_to_pi_state(u32 uval,
+@@ -991,9 +1026,34 @@ static int attach_to_pi_state(u32 uval,
  	if (unlikely(!pi_state))
  		return -EINVAL;
  
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 * Handle the owner died case:
  	 */
  	if (uval & FUTEX_OWNER_DIED) {
-@@ -1006,11 +1066,11 @@ static int attach_to_pi_state(u32 uval,
+@@ -1008,11 +1068,11 @@ static int attach_to_pi_state(u32 uval,
  			 * is not 0. Inconsistent state. [5]
  			 */
  			if (pid)
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  
  		/*
-@@ -1022,14 +1082,14 @@ static int attach_to_pi_state(u32 uval,
+@@ -1024,14 +1084,14 @@ static int attach_to_pi_state(u32 uval,
  		 * Take a ref on the state and return success. [6]
  		 */
  		if (!pid)
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	/*
-@@ -1038,11 +1098,29 @@ static int attach_to_pi_state(u32 uval,
+@@ -1040,11 +1100,29 @@ static int attach_to_pi_state(u32 uval,
  	 * user space TID. [9/10]
  	 */
  	if (pid != task_pid_vnr(pi_state->owner))
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -1093,6 +1171,9 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1095,6 +1173,9 @@ static int attach_to_pi_owner(u32 uval,
  
  	/*
  	 * No existing pi state. First waiter. [2]
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
  	pi_state = alloc_pi_state();
  
-@@ -1117,7 +1198,8 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1119,7 +1200,8 @@ static int attach_to_pi_owner(u32 uval,
  	return 0;
  }
  
@@ -210,7 +210,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			   union futex_key *key, struct futex_pi_state **ps)
  {
  	struct futex_q *top_waiter = futex_top_waiter(hb, key);
-@@ -1127,7 +1209,7 @@ static int lookup_pi_state(u32 uval, str
+@@ -1129,7 +1211,7 @@ static int lookup_pi_state(u32 uval, str
  	 * attach to the pi_state when the validation succeeds.
  	 */
  	if (top_waiter)
@@ -219,7 +219,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * We are the first waiter - try to look up the owner based on
-@@ -1146,7 +1228,7 @@ static int lock_pi_update_atomic(u32 __u
+@@ -1148,7 +1230,7 @@ static int lock_pi_update_atomic(u32 __u
  	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
  		return -EFAULT;
  
@@ -228,7 +228,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return curval != uval ? -EAGAIN : 0;
  }
  
-@@ -1202,7 +1284,7 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1204,7 +1286,7 @@ static int futex_lock_pi_atomic(u32 __us
  	 */
  	top_waiter = futex_top_waiter(hb, key);
  	if (top_waiter)
@@ -237,7 +237,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * No waiter and user TID is 0. We are here because the
-@@ -1334,6 +1416,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1336,6 +1418,7 @@ static int wake_futex_pi(u32 __user *uad
  
  	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
  		ret = -EFAULT;
@@ -245,7 +245,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	} else if (curval != uval) {
  		/*
  		 * If a unconditional UNLOCK_PI operation (user space did not
-@@ -1346,6 +1429,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1348,6 +1431,7 @@ static int wake_futex_pi(u32 __user *uad
  		else
  			ret = -EINVAL;
  	}
@@ -253,7 +253,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (ret) {
  		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  		return ret;
-@@ -1821,7 +1905,7 @@ static int futex_requeue(u32 __user *uad
+@@ -1823,7 +1907,7 @@ static int futex_requeue(u32 __user *uad
  			 * If that call succeeds then we have pi_state and an
  			 * initial refcount on it.
  			 */
@@ -262,7 +262,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  
  		switch (ret) {
-@@ -2120,10 +2204,13 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2122,10 +2206,13 @@ static int fixup_pi_state_owner(u32 __us
  {
  	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
  	struct futex_pi_state *pi_state = q->pi_state;
@@ -277,7 +277,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* Owner died? */
  	if (!pi_state->owner)
  		newtid |= FUTEX_OWNER_DIED;
-@@ -2139,11 +2226,10 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2141,11 +2228,10 @@ static int fixup_pi_state_owner(u32 __us
  	 * because we can fault here. Imagine swapped out pages or a fork
  	 * that marked all the anonymous memory readonly for cow.
  	 *
@@ -293,7 +293,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
  retry:
  	if (get_futex_value_locked(&uval, uaddr))
-@@ -2164,47 +2250,60 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2166,47 +2252,60 @@ static int fixup_pi_state_owner(u32 __us
  	 * itself.
  	 */
  	if (pi_state->owner != NULL) {
diff --git a/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch
new file mode 100644
index 0000000..0e57c9c
--- /dev/null
+++ b/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch
@@ -0,0 +1,37 @@
+From dcd2e4734b428709984e2fa35ebbd6cccc246d47 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:36 +0200
+Subject: [PATCH 05/17] metag: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in stop_this_cpu() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: James Hogan <james.hogan at imgtec.com>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.283420315@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ arch/metag/kernel/smp.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/metag/kernel/smp.c
++++ b/arch/metag/kernel/smp.c
+@@ -567,8 +567,7 @@ static void stop_this_cpu(void *data)
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 
+-	if (system_state == SYSTEM_BOOTING ||
+-	    system_state == SYSTEM_RUNNING) {
++	if (system_state <= SYSTEM_RUNNING) {
+ 		spin_lock(&stop_lock);
+ 		pr_crit("CPU%u: stopping\n", cpu);
+ 		dump_stack();
diff --git a/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch b/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
new file mode 100644
index 0000000..720cdc9
--- /dev/null
+++ b/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
@@ -0,0 +1,90 @@
+From 6d11b87d55eb75007a3721c2de5938f5bbf607fb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:31 +0200
+Subject: [PATCH 05/13] powerpc/smp: Replace open coded task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+Init task invokes smp_ops->setup_cpu() from smp_cpus_done(). Init task can
+run on any online CPU at this point, but the setup_cpu() callback must be
+invoked on the boot CPU. This is achieved by temporarily setting the
+affinity of the calling thread to the boot CPU and resetting it to the
+original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+That's actually not a problem in this context as neither CPU hotplug nor
+affinity settings can happen, but the access to task_struct::cpus_allowed
+is about to be restricted.
+
+Replace it with a call to work_on_cpu_safe(), which achieves the same result.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: Paul Mackerras <paulus at samba.org>
+Cc: linuxppc-dev at lists.ozlabs.org
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.518053336@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/powerpc/kernel/smp.c |   26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -787,24 +787,21 @@ static struct sched_domain_topology_leve
+ 	{ NULL, },
+ };
+ 
+-void __init smp_cpus_done(unsigned int max_cpus)
++static __init long smp_setup_cpu_workfn(void *data __always_unused)
+ {
+-	cpumask_var_t old_mask;
++	smp_ops->setup_cpu(boot_cpuid);
++	return 0;
++}
+ 
+-	/* We want the setup_cpu() here to be called from CPU 0, but our
+-	 * init thread may have been "borrowed" by another CPU in the meantime
+-	 * se we pin us down to CPU 0 for a short while
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++	/*
++	 * We want the setup_cpu() here to be called on the boot CPU, but
++	 * init might run on any CPU, so make sure it's invoked on the boot
++	 * CPU.
+ 	 */
+-	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+-	cpumask_copy(old_mask, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
+-	
+ 	if (smp_ops && smp_ops->setup_cpu)
+-		smp_ops->setup_cpu(boot_cpuid);
+-
+-	set_cpus_allowed_ptr(current, old_mask);
+-
+-	free_cpumask_var(old_mask);
++		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);
+ 
+ 	if (smp_ops && smp_ops->bringup_done)
+ 		smp_ops->bringup_done();
+@@ -812,7 +809,6 @@ void __init smp_cpus_done(unsigned int m
+ 	dump_numa_cpu_topology();
+ 
+ 	set_sched_topology(powerpc_topology);
+-
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
diff --git a/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
index c83a42b..4487c6f 100644
--- a/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
+++ b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
@@ -1,9 +1,8 @@
+From acd58620e415aee4a43a808d7d2fd87259ee0001 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:11 +0100
-Subject: [PATCH] sched/rtmutex: Refactor rt_mutex_setprio()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit acd58620e415aee4a43a808d7d2fd87259ee0001
+Subject: [PATCH 5/9] sched/rtmutex: Refactor rt_mutex_setprio()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 With the introduction of SCHED_DEADLINE the whole notion that priority
 is a single number is gone, therefore the @prio argument to
@@ -36,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched/rt.h
 +++ b/include/linux/sched/rt.h
-@@ -16,28 +16,20 @@ static inline int rt_task(struct task_st
+@@ -18,28 +18,20 @@ static inline int rt_task(struct task_st
  }
  
  #ifdef CONFIG_RT_MUTEXES
@@ -75,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return NULL;
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -320,67 +320,16 @@ rt_mutex_dequeue_pi(struct task_struct *
+@@ -322,67 +322,16 @@ rt_mutex_dequeue_pi(struct task_struct *
  	RB_CLEAR_NODE(&waiter->pi_tree_entry);
  }
  
@@ -149,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -740,7 +689,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -742,7 +691,7 @@ static int rt_mutex_adjust_prio_chain(st
  		 */
  		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
  		rt_mutex_enqueue_pi(task, waiter);
@@ -158,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	} else if (prerequeue_top_waiter == waiter) {
  		/*
-@@ -756,7 +705,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -758,7 +707,7 @@ static int rt_mutex_adjust_prio_chain(st
  		rt_mutex_dequeue_pi(task, waiter);
  		waiter = rt_mutex_top_waiter(lock);
  		rt_mutex_enqueue_pi(task, waiter);
@@ -167,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else {
  		/*
  		 * Nothing changed. No need to do any priority
-@@ -964,7 +913,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -966,7 +915,7 @@ static int task_blocks_on_rt_mutex(struc
  		return -EDEADLK;
  
  	raw_spin_lock(&task->pi_lock);
@@ -176,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	waiter->task = task;
  	waiter->lock = lock;
  	waiter->prio = task->prio;
-@@ -986,7 +935,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -988,7 +937,7 @@ static int task_blocks_on_rt_mutex(struc
  		rt_mutex_dequeue_pi(owner, top_waiter);
  		rt_mutex_enqueue_pi(owner, waiter);
  
@@ -185,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		if (owner->pi_blocked_on)
  			chain_walk = 1;
  	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
-@@ -1038,13 +987,14 @@ static void mark_wakeup_next_waiter(stru
+@@ -1040,13 +989,14 @@ static void mark_wakeup_next_waiter(stru
  	waiter = rt_mutex_top_waiter(lock);
  
  	/*
@@ -205,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * As we are waking up the top waiter, and the waiter stays
-@@ -1056,9 +1006,19 @@ static void mark_wakeup_next_waiter(stru
+@@ -1058,9 +1008,19 @@ static void mark_wakeup_next_waiter(stru
  	 */
  	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
  
@@ -227,7 +226,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1093,7 +1053,7 @@ static void remove_waiter(struct rt_mute
+@@ -1095,7 +1055,7 @@ static void remove_waiter(struct rt_mute
  	if (rt_mutex_has_waiters(lock))
  		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
  
@@ -236,7 +235,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Store the lock on which owner is blocked or NULL */
  	next_lock = task_blocked_on_lock(owner);
-@@ -1132,8 +1092,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1134,8 +1094,7 @@ void rt_mutex_adjust_pi(struct task_stru
  	raw_spin_lock_irqsave(&task->pi_lock, flags);
  
  	waiter = task->pi_blocked_on;
@@ -246,7 +245,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
  		return;
  	}
-@@ -1387,17 +1346,6 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1389,17 +1348,6 @@ static bool __sched rt_mutex_slowunlock(
  	 * Queue the next waiter for wakeup once we release the wait_lock.
  	 */
  	mark_wakeup_next_waiter(wake_q, lock);
@@ -266,7 +265,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return true; /* call rt_mutex_postunlock() */
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3629,10 +3629,25 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3671,10 +3671,25 @@ EXPORT_SYMBOL(default_wake_function);
  
  #ifdef CONFIG_RT_MUTEXES
  
@@ -294,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   *
   * This function changes the 'effective' priority of a task. It does
   * not touch ->normal_prio like __setscheduler().
-@@ -3640,16 +3655,40 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3682,17 +3697,41 @@ EXPORT_SYMBOL(default_wake_function);
   * Used by the rt_mutex code to implement priority inheritance
   * logic. Call site only calls if the priority of the task changed.
   */
@@ -318,6 +317,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		return;
  
  	rq = __task_rq_lock(p, &rf);
+ 	update_rq_clock(rq);
 +	/*
 +	 * Set under pi_lock && rq->lock, such that the value can be used under
 +	 * either lock.
@@ -338,7 +338,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * Idle task boosting is a nono in general. There is one
-@@ -3669,9 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+@@ -3712,9 +3751,7 @@ void rt_mutex_setprio(struct task_struct
  		goto out_unlock;
  	}
  
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	oldprio = p->prio;
  
  	if (oldprio == prio)
-@@ -3695,7 +3732,6 @@ void rt_mutex_setprio(struct task_struct
+@@ -3738,7 +3775,6 @@ void rt_mutex_setprio(struct task_struct
  	 *          running task
  	 */
  	if (dl_prio(prio)) {
@@ -357,7 +357,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		if (!dl_prio(p->normal_prio) ||
  		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
  			p->dl.dl_boosted = 1;
-@@ -3732,6 +3768,11 @@ void rt_mutex_setprio(struct task_struct
+@@ -3776,6 +3812,11 @@ void rt_mutex_setprio(struct task_struct
  	balance_callback(rq);
  	preempt_enable();
  }
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  void set_user_nice(struct task_struct *p, long nice)
-@@ -3976,10 +4017,9 @@ static void __setscheduler(struct rq *rq
+@@ -4022,10 +4063,9 @@ static void __setscheduler(struct rq *rq
  	 * Keep a potential priority boosting if called from
  	 * sched_setscheduler().
  	 */
@@ -382,7 +382,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (dl_prio(p->prio))
  		p->sched_class = &dl_sched_class;
-@@ -4266,7 +4306,7 @@ static int __sched_setscheduler(struct t
+@@ -4312,7 +4352,7 @@ static int __sched_setscheduler(struct t
  		 * the runqueue. This will be done when the task deboost
  		 * itself.
  		 */
diff --git a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
index ddd37fa..3193905 100644
--- a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
+++ b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:53 +0100
 Subject: [PATCH] futex: Cleanup refcounting
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit bf92cf3a5100f5a0d5f9834787b130159397cb22
 
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -800,7 +800,7 @@ static int refill_pi_state_cache(void)
+@@ -802,7 +802,7 @@ static int refill_pi_state_cache(void)
  	return 0;
  }
  
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct futex_pi_state *pi_state = current->pi_state_cache;
  
-@@ -810,6 +810,11 @@ static struct futex_pi_state * alloc_pi_
+@@ -812,6 +812,11 @@ static struct futex_pi_state * alloc_pi_
  	return pi_state;
  }
  
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Drops a reference to the pi_state object and frees or caches it
   * when the last reference is gone.
-@@ -854,7 +859,7 @@ static void put_pi_state(struct futex_pi
+@@ -856,7 +861,7 @@ static void put_pi_state(struct futex_pi
   * Look up the task based on what TID userspace gave us.
   * We dont trust it.
   */
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct task_struct *p;
  
-@@ -1101,7 +1106,7 @@ static int attach_to_pi_state(u32 __user
+@@ -1103,7 +1108,7 @@ static int attach_to_pi_state(u32 __user
  		goto out_einval;
  
  out_attach:
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  	*ps = pi_state;
  	return 0;
-@@ -1988,7 +1993,7 @@ static int futex_requeue(u32 __user *uad
+@@ -1990,7 +1995,7 @@ static int futex_requeue(u32 __user *uad
  			 * refcount on the pi_state and store the pointer in
  			 * the futex_q object of the waiter.
  			 */
diff --git a/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch
new file mode 100644
index 0000000..7a1087e
--- /dev/null
+++ b/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch
@@ -0,0 +1,40 @@
+From a8fcfc1917681ba1ccc23a429543a67aad8bfd00 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:37 +0200
+Subject: [PATCH 06/17] powerpc: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in smp_generic_cpu_bootable() to handle the
+extra states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Paul Mackerras <paulus at samba.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: linuxppc-dev at lists.ozlabs.org
+Link: http://lkml.kernel.org/r/20170516184735.359536998@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ arch/powerpc/kernel/smp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -98,7 +98,7 @@ int smp_generic_cpu_bootable(unsigned in
+ 	/* Special case - we inhibit secondary thread startup
+ 	 * during boot if the user requests it.
+ 	 */
+-	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
++	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+ 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ 			return 0;
+ 		if (smt_enabled_at_boot
diff --git a/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
index 7fd765b..ba0bfe1 100644
--- a/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
+++ b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
@@ -1,9 +1,8 @@
+From b91473ff6e979c0028f02f90e40c844959c736d8 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:12 +0100
-Subject: [PATCH] sched,tracing: Update trace_sched_pi_setprio()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit b91473ff6e979c0028f02f90e40c844959c736d8
+Subject: [PATCH 6/9] sched,tracing: Update trace_sched_pi_setprio()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Pass the PI donor task, instead of a numerical priority.
 
@@ -98,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3708,7 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+@@ -3751,7 +3751,7 @@ void rt_mutex_setprio(struct task_struct
  		goto out_unlock;
  	}
  
diff --git a/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..3fadb94
--- /dev/null
+++ b/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,119 @@
+From ea875ec94eafb858990f3fe9528501f983105653 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 13 Apr 2017 10:17:07 +0200
+Subject: [PATCH 06/13] sparc/sysfs: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The mmustat_enable sysfs file accessor functions must run code on the
+target CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the
+original affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread, resulting in
+code executing on the wrong CPU and overwriting the new affinity setting.
+
+Replace it by using work_on_cpu(), which guarantees to run the code on the
+requested CPU.
+
+Protection against CPU hotplug is not required as the open sysfs file
+already prevents the removal from the CPU offline callback. Using the
+hotplug-protected version would actually be wrong because it would deadlock
+against a CPU hotplug operation on the CPU associated with the sysfs file
+in progress.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: David S. Miller <davem at davemloft.net>
+Cc: fenghua.yu at intel.com
+Cc: tony.luck at intel.com
+Cc: herbert at gondor.apana.org.au
+Cc: rjw at rjwysocki.net
+Cc: peterz at infradead.org
+Cc: benh at kernel.crashing.org
+Cc: bigeasy at linutronix.de
+Cc: jiangshanlai at gmail.com
+Cc: sparclinux at vger.kernel.org
+Cc: viresh.kumar at linaro.org
+Cc: mpe at ellerman.id.au
+Cc: tj at kernel.org
+Cc: lenb at kernel.org
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131001270.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/sparc/kernel/sysfs.c |   39 +++++++++++----------------------------
+ 1 file changed, 11 insertions(+), 28 deletions(-)
+
+--- a/arch/sparc/kernel/sysfs.c
++++ b/arch/sparc/kernel/sysfs.c
+@@ -98,27 +98,7 @@ static struct attribute_group mmu_stat_g
+ 	.name = "mmu_stats",
+ };
+ 
+-/* XXX convert to rusty's on_one_cpu */
+-static unsigned long run_on_cpu(unsigned long cpu,
+-			        unsigned long (*func)(unsigned long),
+-				unsigned long arg)
+-{
+-	cpumask_t old_affinity;
+-	unsigned long ret;
+-
+-	cpumask_copy(&old_affinity, &current->cpus_allowed);
+-	/* should return -EINVAL to userspace */
+-	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+-		return 0;
+-
+-	ret = func(arg);
+-
+-	set_cpus_allowed_ptr(current, &old_affinity);
+-
+-	return ret;
+-}
+-
+-static unsigned long read_mmustat_enable(unsigned long junk)
++static long read_mmustat_enable(void *data __maybe_unused)
+ {
+ 	unsigned long ra = 0;
+ 
+@@ -127,11 +107,11 @@ static unsigned long read_mmustat_enable
+ 	return ra != 0;
+ }
+ 
+-static unsigned long write_mmustat_enable(unsigned long val)
++static long write_mmustat_enable(void *data)
+ {
+-	unsigned long ra, orig_ra;
++	unsigned long ra, orig_ra, *val = data;
+ 
+-	if (val)
++	if (*val)
+ 		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
+ 	else
+ 		ra = 0UL;
+@@ -142,7 +122,8 @@ static unsigned long write_mmustat_enabl
+ static ssize_t show_mmustat_enable(struct device *s,
+ 				struct device_attribute *attr, char *buf)
+ {
+-	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
++	long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
++
+ 	return sprintf(buf, "%lx\n", val);
+ }
+ 
+@@ -150,13 +131,15 @@ static ssize_t store_mmustat_enable(stru
+ 			struct device_attribute *attr, const char *buf,
+ 			size_t count)
+ {
+-	unsigned long val, err;
+-	int ret = sscanf(buf, "%lu", &val);
++	unsigned long val;
++	long err;
++	int ret;
+ 
++	ret = sscanf(buf, "%lu", &val);
+ 	if (ret != 1)
+ 		return -EINVAL;
+ 
+-	err = run_on_cpu(s->id, write_mmustat_enable, val);
++	err = work_on_cpu(s->id, write_mmustat_enable, &val);
+ 	if (err)
+ 		return -EIO;
+ 
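
The deadlock argument in the changelog is worth spelling out; a
hypothetical interleaving if the hotplug-protected work_on_cpu_safe() were
used here:

    /*
     *  CPU hotplug (offlining cpu N)      write to cpuN/.../mmustat_enable
     *  -----------------------------      --------------------------------
     *  hotplug lock taken
     *  -> waits for the open sysfs
     *     file to be released             store_mmustat_enable()
     *                                     -> work_on_cpu_safe()
     *                                     -> get_online_cpus() blocks on
     *                                        the hotplug lock
     *
     *  Neither side can make progress; plain work_on_cpu() avoids this,
     *  and the open sysfs file already pins the removal path anyway.
     */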
diff --git a/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch
new file mode 100644
index 0000000..e38e86e
--- /dev/null
+++ b/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch
@@ -0,0 +1,39 @@
+From 9762b33dc31c67e34b36ba4e787e64084b3136ff Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:38 +0200
+Subject: [PATCH 07/17] ACPI: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Make the decision whether a PCI root is hotplugged depend on SYSTEM_RUNNING
+instead of !SYSTEM_BOOTING. It makes no sense to cover states greater than
+SYSTEM_RUNNING as there are no hotplug events on reboot and poweroff.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Len Brown <lenb at kernel.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Rafael J. Wysocki <rjw at rjwysocki.net>
+Link: http://lkml.kernel.org/r/20170516184735.446455652@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ drivers/acpi/pci_root.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -523,7 +523,7 @@ static int acpi_pci_root_add(struct acpi
+ 	struct acpi_pci_root *root;
+ 	acpi_handle handle = device->handle;
+ 	int no_aspm = 0;
+-	bool hotadd = system_state != SYSTEM_BOOTING;
++	bool hotadd = system_state == SYSTEM_RUNNING;
+ 
+ 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ 	if (!root)
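For reference, the new comparison only works because this series inserts an intermediate state between BOOTING and RUNNING; a sketch of the resulting ordering, abridged from the series' change to include/linux/kernel.h:

    enum system_states {
            SYSTEM_BOOTING,
            SYSTEM_SCHEDULING,      /* new: scheduler up, boot not finished */
            SYSTEM_RUNNING,
            SYSTEM_HALT,
            SYSTEM_POWER_OFF,
            SYSTEM_RESTART,
    };

With SYSTEM_SCHEDULING present, "system_state != SYSTEM_BOOTING" would already be true during early boot and misreport a cold-plugged PCI root as hot-added; "system_state == SYSTEM_RUNNING" does not.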
diff --git a/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch b/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
new file mode 100644
index 0000000..cf7c935
--- /dev/null
+++ b/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
@@ -0,0 +1,46 @@
+From a5cbdf693a60d5b86d4d21dfedd90f17754eb273 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:33 +0200
+Subject: [PATCH 07/13] ACPI/processor: Fix error handling in
+ __acpi_processor_start()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+When acpi_install_notify_handler() fails, the cooling device stays
+registered and the sysfs files created via acpi_pss_perf_init() are
+leaked and the function returns success.
+
+Undo acpi_pss_perf_init() and return a proper error code.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: linux-acpi at vger.kernel.org
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.695499645@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/acpi/processor_driver.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -251,6 +251,9 @@ static int __acpi_processor_start(struct
+ 	if (ACPI_SUCCESS(status))
+ 		return 0;
+ 
++	result = -ENODEV;
++	acpi_pss_perf_exit(pr, device);
++
+ err_power_exit:
+ 	acpi_processor_power_exit(pr);
+ 	return result;
diff --git a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
index a305e4c..724046c 100644
--- a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
+++ b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:54 +0100
 Subject: [PATCH] futex: Rework inconsistent rt_mutex/futex_q state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 73d786bd043ebc855f349c81ea805f6b11cbf2aa
 
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1402,12 +1402,19 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1404,12 +1404,19 @@ static int wake_futex_pi(u32 __user *uad
  	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
  
  	/*
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * We pass it to the next owner. The WAITERS bit is always
-@@ -2330,7 +2337,6 @@ static long futex_wait_restart(struct re
+@@ -2332,7 +2339,6 @@ static long futex_wait_restart(struct re
   */
  static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
  {
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int ret = 0;
  
  	if (locked) {
-@@ -2344,43 +2350,15 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2346,43 +2352,15 @@ static int fixup_owner(u32 __user *uaddr
  	}
  
  	/*
diff --git a/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
index 445e9df..6875fb7 100644
--- a/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
+++ b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
@@ -1,9 +1,8 @@
+From e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:13 +0100
-Subject: [PATCH] rtmutex: Fix PI chain order integrity
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc
+Subject: [PATCH 7/9] rtmutex: Fix PI chain order integrity
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 rt_mutex_waiter::prio is a copy of task_struct::prio which is updated
 during the PI chain walk, such that the PI chain order isn't messed up
@@ -38,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -236,8 +236,7 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+@@ -238,8 +238,7 @@ rt_mutex_waiter_less(struct rt_mutex_wai
  	 * then right waiter has a dl_prio() too.
  	 */
  	if (dl_prio(left->prio))
@@ -48,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return 0;
  }
-@@ -648,7 +647,26 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -650,7 +649,26 @@ static int rt_mutex_adjust_prio_chain(st
  
  	/* [7] Requeue the waiter in the lock waiter tree. */
  	rt_mutex_dequeue(lock, waiter);
@@ -75,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	rt_mutex_enqueue(lock, waiter);
  
  	/* [8] Release the task */
-@@ -775,6 +793,8 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -777,6 +795,8 @@ static int rt_mutex_adjust_prio_chain(st
  static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  				struct rt_mutex_waiter *waiter)
  {
@@ -84,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Before testing whether we can acquire @lock, we set the
  	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
-@@ -900,6 +920,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -902,6 +922,8 @@ static int task_blocks_on_rt_mutex(struc
  	struct rt_mutex *next_lock;
  	int chain_walk = 0, res;
  
@@ -93,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Early deadlock detection. We really don't want the task to
  	 * enqueue on itself just to untangle the mess later. It's not
-@@ -917,6 +939,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -919,6 +941,7 @@ static int task_blocks_on_rt_mutex(struc
  	waiter->task = task;
  	waiter->lock = lock;
  	waiter->prio = task->prio;
@@ -101,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Get the top priority waiter on the lock */
  	if (rt_mutex_has_waiters(lock))
-@@ -1034,6 +1057,8 @@ static void remove_waiter(struct rt_mute
+@@ -1036,6 +1059,8 @@ static void remove_waiter(struct rt_mute
  	struct task_struct *owner = rt_mutex_owner(lock);
  	struct rt_mutex *next_lock;
  
@@ -112,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	current->pi_blocked_on = NULL;
 --- a/kernel/locking/rtmutex_common.h
 +++ b/kernel/locking/rtmutex_common.h
-@@ -33,6 +33,7 @@ struct rt_mutex_waiter {
+@@ -34,6 +34,7 @@ struct rt_mutex_waiter {
  	struct rt_mutex		*deadlock_lock;
  #endif
  	int prio;
diff --git a/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..04b040c
--- /dev/null
+++ b/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,194 @@
+From 8153f9ac43897f9f4786b30badc134fcc1a4fb11 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:34 +0200
+Subject: [PATCH 08/13] ACPI/processor: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+acpi_processor_get_throttling() requires invoking the getter function on
+the target CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the
+original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+acpi_processor_get_throttling() is invoked in two ways:
+
+1) The CPU online callback, which is already running on the target CPU and
+   obviously protected against hotplug and not affected by affinity
+   settings.
+
+2) The ACPI driver probe function, which is not protected against hotplug
+   during modprobe.
+
+Switch it over to work_on_cpu() and protect the probe function against CPU
+hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: linux-acpi at vger.kernel.org
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.785920903@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/acpi/processor_driver.c     |    7 +++-
+ drivers/acpi/processor_throttling.c |   62 ++++++++++++++++++++----------------
+ 2 files changed, 42 insertions(+), 27 deletions(-)
+
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -262,11 +262,16 @@ static int __acpi_processor_start(struct
+ static int acpi_processor_start(struct device *dev)
+ {
+ 	struct acpi_device *device = ACPI_COMPANION(dev);
++	int ret;
+ 
+ 	if (!device)
+ 		return -ENODEV;
+ 
+-	return __acpi_processor_start(device);
++	/* Protect against concurrent CPU hotplug operations */
++	get_online_cpus();
++	ret = __acpi_processor_start(device);
++	put_online_cpus();
++	return ret;
+ }
+ 
+ static int acpi_processor_stop(struct device *dev)
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
+ #define THROTTLING_POSTCHANGE      (2)
+ 
+ static int acpi_processor_get_throttling(struct acpi_processor *pr);
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+-						int state, bool force);
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++					   int state, bool force, bool direct);
+ 
+ static int acpi_processor_update_tsd_coord(void)
+ {
+@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling
+ 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ 				"Invalid throttling state, reset\n"));
+ 			state = 0;
+-			ret = acpi_processor_set_throttling(pr, state, true);
++			ret = __acpi_processor_set_throttling(pr, state, true,
++							      true);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling
+ 	return 0;
+ }
+ 
+-static int acpi_processor_get_throttling(struct acpi_processor *pr)
++static long __acpi_processor_get_throttling(void *data)
+ {
+-	cpumask_var_t saved_mask;
+-	int ret;
++	struct acpi_processor *pr = data;
++
++	return pr->throttling.acpi_processor_get_throttling(pr);
++}
+ 
++static int acpi_processor_get_throttling(struct acpi_processor *pr)
++{
+ 	if (!pr)
+ 		return -EINVAL;
+ 
+ 	if (!pr->flags.throttling)
+ 		return -ENODEV;
+ 
+-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+-		return -ENOMEM;
+-
+ 	/*
+-	 * Migrate task to the cpu pointed by pr.
++	 * This is either called from the CPU hotplug callback of
++	 * processor_driver or via the ACPI probe function. In the latter
++	 * case the CPU is not guaranteed to be online. Both call sites are
++	 * protected against CPU hotplug.
+ 	 */
+-	cpumask_copy(saved_mask, &current->cpus_allowed);
+-	/* FIXME: use work_on_cpu() */
+-	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+-		/* Can't migrate to the target pr->id CPU. Exit */
+-		free_cpumask_var(saved_mask);
++	if (!cpu_online(pr->id))
+ 		return -ENODEV;
+-	}
+-	ret = pr->throttling.acpi_processor_get_throttling(pr);
+-	/* restore the previous state */
+-	set_cpus_allowed_ptr(current, saved_mask);
+-	free_cpumask_var(saved_mask);
+ 
+-	return ret;
++	return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
+ }
+ 
+ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn
+ 			arg->target_state, arg->force);
+ }
+ 
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+-						int state, bool force)
++static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
++{
++	if (direct)
++		return fn(arg);
++	return work_on_cpu(cpu, fn, arg);
++}
++
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++					   int state, bool force, bool direct)
+ {
+ 	int ret = 0;
+ 	unsigned int i;
+@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct
+ 		arg.pr = pr;
+ 		arg.target_state = state;
+ 		arg.force = force;
+-		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
++		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
++				  direct);
+ 	} else {
+ 		/*
+ 		 * When the T-state coordination is SW_ALL or HW_ALL,
+@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct
+ 			arg.pr = match_pr;
+ 			arg.target_state = state;
+ 			arg.force = force;
+-			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+-				&arg);
++			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
++					  &arg, direct);
+ 		}
+ 	}
+ 	/*
+@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct
+ 	return ret;
+ }
+ 
++int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
++				  bool force)
++{
++	return __acpi_processor_set_throttling(pr, state, force, false);
++}
++
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+ {
+ 	int result = 0;
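Note that work_on_cpu() is only sound while the target CPU cannot go away underneath it, which is why the probe path above brackets the call with get_online_cpus(). A sketch of that caller-side discipline, with a hypothetical callback fn; not code from the patch:

    #include <linux/cpu.h>
    #include <linux/workqueue.h>

    static long run_bound(int cpu, long (*fn)(void *), void *arg)
    {
            long ret;

            get_online_cpus();              /* block CPU hotplug */
            if (cpu_online(cpu))
                    ret = work_on_cpu(cpu, fn, arg);
            else
                    ret = -ENODEV;
            put_online_cpus();
            return ret;
    }

The direct-call variant the patch adds (call_on_cpu() with direct=true) covers the CPU online callback, which already runs on the target CPU, so queueing work there would be pointless and unsafe while the CPU is still coming up.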
diff --git a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
index 8eab01d..8b10d4f 100644
--- a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
+++ b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:55 +0100
 Subject: [PATCH] futex: Pull rt_mutex_futex_unlock() out from under hb->lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 16ffa12d742534d4ff73e8b3a4e81c1de39196f0
 
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -919,10 +919,12 @@ void exit_pi_state_list(struct task_stru
+@@ -921,10 +921,12 @@ void exit_pi_state_list(struct task_stru
  		pi_state->owner = NULL;
  		raw_spin_unlock_irq(&curr->pi_lock);
  
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		raw_spin_lock_irq(&curr->pi_lock);
  	}
  	raw_spin_unlock_irq(&curr->pi_lock);
-@@ -1035,6 +1037,11 @@ static int attach_to_pi_state(u32 __user
+@@ -1037,6 +1039,11 @@ static int attach_to_pi_state(u32 __user
  	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
  	 * which in turn means that futex_lock_pi() still has a reference on
  	 * our pi_state.
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
  	WARN_ON(!atomic_read(&pi_state->refcount));
  
-@@ -1378,48 +1385,40 @@ static void mark_wake_futex(struct wake_
+@@ -1380,48 +1387,40 @@ static void mark_wake_futex(struct wake_
  	smp_store_release(&q->lock_ptr, NULL);
  }
  
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	u32 uninitialized_var(curval), newval;
 +	struct task_struct *new_owner;
 +	bool deboost = false;
- 	WAKE_Q(wake_q);
+ 	DEFINE_WAKE_Q(wake_q);
 -	bool deboost;
  	int ret = 0;
  
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
  	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
  
-@@ -1442,10 +1441,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1444,10 +1443,8 @@ static int wake_futex_pi(u32 __user *uad
  			ret = -EINVAL;
  	}
  
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	raw_spin_lock(&pi_state->owner->pi_lock);
  	WARN_ON(list_empty(&pi_state->list));
-@@ -1463,15 +1460,15 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1465,15 +1462,15 @@ static int wake_futex_pi(u32 __user *uad
  	 */
  	deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
  
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -2230,7 +2227,8 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2232,7 +2229,8 @@ static int fixup_pi_state_owner(u32 __us
  	/*
  	 * We are here either because we stole the rtmutex from the
  	 * previous highest priority waiter or we are the highest priority
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 * We have to replace the newowner TID in the user space variable.
  	 * This must be atomic as we have to preserve the owner died bit here.
  	 *
-@@ -2247,7 +2245,7 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2249,7 +2247,7 @@ static int fixup_pi_state_owner(u32 __us
  	if (get_futex_value_locked(&uval, uaddr))
  		goto handle_fault;
  
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		newval = (uval & FUTEX_OWNER_DIED) | newtid;
  
  		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-@@ -2343,6 +2341,10 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2345,6 +2343,10 @@ static int fixup_owner(u32 __user *uaddr
  		/*
  		 * Got the lock. We might not be the anticipated owner if we
  		 * did a lock-steal - fix up the PI-state in that case:
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 */
  		if (q->pi_state->owner != current)
  			ret = fixup_pi_state_owner(uaddr, q, current);
-@@ -2582,6 +2584,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2584,6 +2586,7 @@ static int futex_lock_pi(u32 __user *uad
  			 ktime_t *time, int trylock)
  {
  	struct hrtimer_sleeper timeout, *to = NULL;
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct futex_hash_bucket *hb;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -2668,12 +2671,19 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2670,12 +2673,19 @@ static int futex_lock_pi(u32 __user *uad
  	 * If fixup_owner() faulted and was unable to handle the fault, unlock
  	 * it and return the fault to userspace.
  	 */
@@ -235,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	goto out_put_key;
  
  out_unlock_put_key:
-@@ -2736,10 +2746,36 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2738,10 +2748,36 @@ static int futex_unlock_pi(u32 __user *u
  	 */
  	top_waiter = futex_top_waiter(hb, &key);
  	if (top_waiter) {
@@ -275,7 +275,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 */
  		if (!ret)
  			goto out_putkey;
-@@ -2754,7 +2790,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2756,7 +2792,6 @@ static int futex_unlock_pi(u32 __user *u
  		 * setting the FUTEX_WAITERS bit. Try again.
  		 */
  		if (ret == -EAGAIN) {
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			put_futex_key(&key);
  			goto retry;
  		}
-@@ -2762,7 +2797,7 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2764,7 +2799,7 @@ static int futex_unlock_pi(u32 __user *u
  		 * wake_futex_pi has detected invalid state. Tell user
  		 * space.
  		 */
@@ -292,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	/*
-@@ -2772,8 +2807,10 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2774,8 +2809,10 @@ static int futex_unlock_pi(u32 __user *u
  	 * preserve the WAITERS bit not the OWNER_DIED one. We are the
  	 * owner.
  	 */
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * If uval has changed, let user space handle it.
-@@ -2787,7 +2824,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2789,7 +2826,6 @@ static int futex_unlock_pi(u32 __user *u
  	return ret;
  
  pi_faulted:
@@ -312,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	put_futex_key(&key);
  
  	ret = fault_in_user_writeable(uaddr);
-@@ -2891,6 +2927,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2893,6 +2929,7 @@ static int futex_wait_requeue_pi(u32 __u
  				 u32 __user *uaddr2)
  {
  	struct hrtimer_sleeper timeout, *to = NULL;
@@ -320,7 +320,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct rt_mutex_waiter rt_waiter;
  	struct futex_hash_bucket *hb;
  	union futex_key key2 = FUTEX_KEY_INIT;
-@@ -2975,8 +3012,10 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2977,8 +3014,10 @@ static int futex_wait_requeue_pi(u32 __u
  		if (q.pi_state && (q.pi_state->owner != current)) {
  			spin_lock(q.lock_ptr);
  			ret = fixup_pi_state_owner(uaddr2, &q, current);
@@ -333,7 +333,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			/*
  			 * Drop the reference to the pi state which
  			 * the requeue_pi() code acquired for us.
-@@ -3015,13 +3054,20 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3017,13 +3056,20 @@ static int futex_wait_requeue_pi(u32 __u
  		 * the fault, unlock the rt_mutex and return the fault to
  		 * userspace.
  		 */
diff --git a/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch
new file mode 100644
index 0000000..4b0ae88
--- /dev/null
+++ b/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch
@@ -0,0 +1,43 @@
+From 8cdde385c7a33afbe13fd71351da0968540fa566 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:39 +0200
+Subject: [PATCH 08/17] mm: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+get_nid_for_pfn() checks for system_state == BOOTING to decide whether to
+use early_pfn_to_nid() when CONFIG_DEFERRED_STRUCT_PAGE_INIT=y.
+
+That check is dubious, because the switch to state RUNNING happens way after
+page_alloc_init_late() has been invoked.
+
+Change the check to system_state < SYSTEM_RUNNING so it covers the new
+intermediate states as well.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Mel Gorman <mgorman at techsingularity.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.528279534@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ drivers/base/node.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -377,7 +377,7 @@ static int __ref get_nid_for_pfn(unsigne
+ 	if (!pfn_valid_within(pfn))
+ 		return -1;
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+-	if (system_state == SYSTEM_BOOTING)
++	if (system_state < SYSTEM_RUNNING)
+ 		return early_pfn_to_nid(pfn);
+ #endif
+ 	page = pfn_to_page(pfn);
diff --git a/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
index 4cdcd6d..959c633 100644
--- a/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
+++ b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
@@ -1,9 +1,8 @@
+From 19830e55247cddb3f46f1bf60b8e245593491bea Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:14 +0100
-Subject: [PATCH] rtmutex: Fix more prio comparisons
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit 19830e55247cddb3f46f1bf60b8e245593491bea
+Subject: [PATCH 8/9] rtmutex: Fix more prio comparisons
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There was a pure ->prio comparison left in try_to_wake_rt_mutex(),
 convert it to use rt_mutex_waiter_less(), noting that greater-or-equal
@@ -33,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -222,6 +222,12 @@ static inline bool unlock_rt_mutex_safe(
+@@ -224,6 +224,12 @@ static inline bool unlock_rt_mutex_safe(
  }
  #endif
  
@@ -46,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline int
  rt_mutex_waiter_less(struct rt_mutex_waiter *left,
  		     struct rt_mutex_waiter *right)
-@@ -241,6 +247,25 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+@@ -243,6 +249,25 @@ rt_mutex_waiter_less(struct rt_mutex_wai
  	return 0;
  }
  
@@ -72,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void
  rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
  {
-@@ -551,7 +576,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -553,7 +578,7 @@ static int rt_mutex_adjust_prio_chain(st
  	 * enabled we continue, but stop the requeueing in the chain
  	 * walk.
  	 */
@@ -81,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		if (!detect_deadlock)
  			goto out_unlock_pi;
  		else
-@@ -854,7 +879,8 @@ static int try_to_take_rt_mutex(struct r
+@@ -856,7 +881,8 @@ static int try_to_take_rt_mutex(struct r
  			 * the top waiter priority (kernel view),
  			 * @task lost.
  			 */
@@ -91,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				return 0;
  
  			/*
-@@ -1117,7 +1143,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1119,7 +1145,7 @@ void rt_mutex_adjust_pi(struct task_stru
  	raw_spin_lock_irqsave(&task->pi_lock, flags);
  
  	waiter = task->pi_blocked_on;
diff --git a/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..df3dfdc
--- /dev/null
+++ b/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,210 @@
+From 38f05ed04beb276f780fcd2b5c0b78c76d0b3c0c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:55:03 +0200
+Subject: [PATCH 09/13] cpufreq/ia64: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The get() and target() callbacks must run on the affected CPU. This is
+achieved by temporarily setting the affinity of the calling thread to the
+requested CPU and resetting it to the original affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread, resulting in
+code executing on the wrong CPU and overwriting the new affinity setting.
+
+Replace it with work_on_cpu(). All call paths which invoke the callbacks are
+already protected against CPU hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: linux-pm at vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704122231100.2548@nanos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/cpufreq/ia64-acpi-cpufreq.c |   92 +++++++++++++++---------------------
+ 1 file changed, 39 insertions(+), 53 deletions(-)
+
+--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
++++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
+@@ -34,6 +34,11 @@ struct cpufreq_acpi_io {
+ 	unsigned int				resume;
+ };
+ 
++struct cpufreq_acpi_req {
++	unsigned int		cpu;
++	unsigned int		state;
++};
++
+ static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];
+ 
+ static struct cpufreq_driver acpi_cpufreq_driver;
+@@ -83,8 +88,7 @@ processor_get_pstate (
+ static unsigned
+ extract_clock (
+ 	struct cpufreq_acpi_io *data,
+-	unsigned value,
+-	unsigned int cpu)
++	unsigned value)
+ {
+ 	unsigned long i;
+ 
+@@ -98,60 +102,43 @@ extract_clock (
+ }
+ 
+ 
+-static unsigned int
++static long
+ processor_get_freq (
+-	struct cpufreq_acpi_io	*data,
+-	unsigned int		cpu)
++	void *arg)
+ {
+-	int			ret = 0;
+-	u32			value = 0;
+-	cpumask_t		saved_mask;
+-	unsigned long 		clock_freq;
++	struct cpufreq_acpi_req *req = arg;
++	unsigned int		cpu = req->cpu;
++	struct cpufreq_acpi_io	*data = acpi_io_data[cpu];
++	u32			value;
++	int			ret;
+ 
+ 	pr_debug("processor_get_freq\n");
+-
+-	saved_mask = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ 	if (smp_processor_id() != cpu)
+-		goto migrate_end;
++		return -EAGAIN;
+ 
+ 	/* processor_get_pstate gets the instantaneous frequency */
+ 	ret = processor_get_pstate(&value);
+-
+ 	if (ret) {
+-		set_cpus_allowed_ptr(current, &saved_mask);
+ 		pr_warn("get performance failed with error %d\n", ret);
+-		ret = 0;
+-		goto migrate_end;
++		return ret;
+ 	}
+-	clock_freq = extract_clock(data, value, cpu);
+-	ret = (clock_freq*1000);
+-
+-migrate_end:
+-	set_cpus_allowed_ptr(current, &saved_mask);
+-	return ret;
++	return 1000 * extract_clock(data, value);
+ }
+ 
+ 
+-static int
++static long
+ processor_set_freq (
+-	struct cpufreq_acpi_io	*data,
+-	struct cpufreq_policy   *policy,
+-	int			state)
++	void *arg)
+ {
+-	int			ret = 0;
+-	u32			value = 0;
+-	cpumask_t		saved_mask;
+-	int			retval;
++	struct cpufreq_acpi_req *req = arg;
++	unsigned int		cpu = req->cpu;
++	struct cpufreq_acpi_io	*data = acpi_io_data[cpu];
++	int			ret, state = req->state;
++	u32			value;
+ 
+ 	pr_debug("processor_set_freq\n");
+-
+-	saved_mask = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
+-	if (smp_processor_id() != policy->cpu) {
+-		retval = -EAGAIN;
+-		goto migrate_end;
+-	}
++	if (smp_processor_id() != cpu)
++		return -EAGAIN;
+ 
+ 	if (state == data->acpi_data.state) {
+ 		if (unlikely(data->resume)) {
+@@ -159,8 +146,7 @@ processor_set_freq (
+ 			data->resume = 0;
+ 		} else {
+ 			pr_debug("Already at target state (P%d)\n", state);
+-			retval = 0;
+-			goto migrate_end;
++			return 0;
+ 		}
+ 	}
+ 
+@@ -171,7 +157,6 @@ processor_set_freq (
+ 	 * First we write the target state's 'control' value to the
+ 	 * control_register.
+ 	 */
+-
+ 	value = (u32) data->acpi_data.states[state].control;
+ 
+ 	pr_debug("Transitioning to state: 0x%08x\n", value);
+@@ -179,17 +164,11 @@ processor_set_freq (
+ 	ret = processor_set_pstate(value);
+ 	if (ret) {
+ 		pr_warn("Transition failed with error %d\n", ret);
+-		retval = -ENODEV;
+-		goto migrate_end;
++		return -ENODEV;
+ 	}
+ 
+ 	data->acpi_data.state = state;
+-
+-	retval = 0;
+-
+-migrate_end:
+-	set_cpus_allowed_ptr(current, &saved_mask);
+-	return (retval);
++	return 0;
+ }
+ 
+ 
+@@ -197,11 +176,13 @@ static unsigned int
+ acpi_cpufreq_get (
+ 	unsigned int		cpu)
+ {
+-	struct cpufreq_acpi_io *data = acpi_io_data[cpu];
++	struct cpufreq_acpi_req req;
++	long ret;
+ 
+-	pr_debug("acpi_cpufreq_get\n");
++	req.cpu = cpu;
++	ret = work_on_cpu(cpu, processor_get_freq, &req);
+ 
+-	return processor_get_freq(data, cpu);
++	return ret > 0 ? (unsigned int) ret : 0;
+ }
+ 
+ 
+@@ -210,7 +191,12 @@ acpi_cpufreq_target (
+ 	struct cpufreq_policy   *policy,
+ 	unsigned int index)
+ {
+-	return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
++	struct cpufreq_acpi_req req;
++
++	req.cpu = policy->cpu;
++	req.state = index;
++
++	return work_on_cpu(req.cpu, processor_set_freq, &req);
+ }
+ 
+ static int
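The ia64 conversion also shows the calling convention work_on_cpu() imposes: arguments travel in a caller-stack struct, and the result is folded into the long return value, negative errno on failure. A sketch of that convention, using the hypothetical names freq_req and get_freq_fn:

    struct freq_req {
            unsigned int cpu;               /* target CPU */
    };

    static long get_freq_fn(void *arg)
    {
            struct freq_req *req = arg;

            if (smp_processor_id() != req->cpu)
                    return -EAGAIN;         /* defensive check */
            return 1000000;                 /* frequency in Hz, say */
    }

    static unsigned int get_freq(unsigned int cpu)
    {
            struct freq_req req = { .cpu = cpu };
            long ret = work_on_cpu(cpu, get_freq_fn, &req);

            return ret > 0 ? (unsigned int)ret : 0; /* 0 on error */
    }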
diff --git a/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
new file mode 100644
index 0000000..1f02514
--- /dev/null
+++ b/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
@@ -0,0 +1,39 @@
+From d04e31a23c3c828456cb5613f391ce4ac4e5765f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:40 +0200
+Subject: [PATCH 09/17] cpufreq/pasemi: Adjust system_state check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in pas_cpufreq_cpu_exit() to handle the extra
+states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Rafael J. Wysocki <rjw at rjwysocki.net>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: linuxppc-dev at lists.ozlabs.org
+Link: http://lkml.kernel.org/r/20170516184735.620023128@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ drivers/cpufreq/pasemi-cpufreq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -226,7 +226,7 @@ static int pas_cpufreq_cpu_exit(struct c
+ 	 * We don't support CPU hotplug. Don't unmap after the system
+ 	 * has already made it to a running state.
+ 	 */
+-	if (system_state != SYSTEM_BOOTING)
++	if (system_state >= SYSTEM_RUNNING)
+ 		return 0;
+ 
+ 	if (sdcasr_mapbase)
diff --git a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
index 0a12332..62afc78 100644
--- a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
+++ b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:56 +0100
 Subject: [PATCH] futex,rt_mutex: Introduce rt_mutex_init_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 50809358dd7199aa7ce232f6877dd09ec30ef374
 
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2954,10 +2954,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2956,10 +2956,7 @@ static int futex_wait_requeue_pi(u32 __u
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (unlikely(ret != 0))
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1151,6 +1151,14 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1153,6 +1153,14 @@ void rt_mutex_adjust_pi(struct task_stru
  				   next_lock, NULL, task);
  }
  
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /**
   * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
   * @lock:		 the rt_mutex to take
-@@ -1233,9 +1241,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1235,9 +1243,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	unsigned long flags;
  	int ret = 0;
  
diff --git a/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
index 91f7ae7..5b09b18 100644
--- a/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
+++ b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
@@ -1,10 +1,9 @@
+From def34eaae5ce04b324e48e1bfac873091d945213 Mon Sep 17 00:00:00 2001
 From: Mike Galbraith <efault at gmx.de>
 Date: Wed, 5 Apr 2017 10:08:27 +0200
-Subject: [PATCH] rtmutex: Plug preempt count leak in
+Subject: [PATCH 9/9] rtmutex: Plug preempt count leak in
  rt_mutex_futex_unlock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Upstream commit def34eaae5ce04b324e48e1bfac873091d945213
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 mark_wakeup_next_waiter() already disables preemption, doing so again
 leaves us with an unpaired preempt_disable().
@@ -22,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1579,13 +1579,13 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1581,13 +1581,13 @@ bool __sched __rt_mutex_futex_unlock(str
  		return false; /* done */
  	}
  
diff --git a/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..c52f354
--- /dev/null
+++ b/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,121 @@
+From 205dcc1ecbc566cbc20acf246e68de3b080b3ecf Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:36 +0200
+Subject: [PATCH 10/13] cpufreq/sh: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The target() callback must run on the affected CPU. This is achieved by
+temporarily setting the affinity of the calling thread to the requested CPU
+and resetting it to the original affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread, resulting in
+code executing on the wrong CPU.
+
+Replace it with work_on_cpu(). All call paths which invoke the callbacks are
+already protected against CPU hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: linux-pm at vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.958216363@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/cpufreq/sh-cpufreq.c |   45 +++++++++++++++++++++++++------------------
+ 1 file changed, 27 insertions(+), 18 deletions(-)
+
+--- a/drivers/cpufreq/sh-cpufreq.c
++++ b/drivers/cpufreq/sh-cpufreq.c
+@@ -30,54 +30,63 @@
+ 
+ static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+ 
++struct cpufreq_target {
++	struct cpufreq_policy	*policy;
++	unsigned int		freq;
++};
++
+ static unsigned int sh_cpufreq_get(unsigned int cpu)
+ {
+ 	return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+ }
+ 
+-/*
+- * Here we notify other drivers of the proposed change and the final change.
+- */
+-static int sh_cpufreq_target(struct cpufreq_policy *policy,
+-			     unsigned int target_freq,
+-			     unsigned int relation)
++static long __sh_cpufreq_target(void *arg)
+ {
+-	unsigned int cpu = policy->cpu;
++	struct cpufreq_target *target = arg;
++	struct cpufreq_policy *policy = target->policy;
++	int cpu = policy->cpu;
+ 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+-	cpumask_t cpus_allowed;
+ 	struct cpufreq_freqs freqs;
+ 	struct device *dev;
+ 	long freq;
+ 
+-	cpus_allowed = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+-	BUG_ON(smp_processor_id() != cpu);
++	if (smp_processor_id() != cpu)
++		return -ENODEV;
+ 
+ 	dev = get_cpu_device(cpu);
+ 
+ 	/* Convert target_freq from kHz to Hz */
+-	freq = clk_round_rate(cpuclk, target_freq * 1000);
++	freq = clk_round_rate(cpuclk, target->freq * 1000);
+ 
+ 	if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ 		return -EINVAL;
+ 
+-	dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
++	dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
+ 
+ 	freqs.old	= sh_cpufreq_get(cpu);
+ 	freqs.new	= (freq + 500) / 1000;
+ 	freqs.flags	= 0;
+ 
+-	cpufreq_freq_transition_begin(policy, &freqs);
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
++	cpufreq_freq_transition_begin(target->policy, &freqs);
+ 	clk_set_rate(cpuclk, freq);
+-	cpufreq_freq_transition_end(policy, &freqs, 0);
++	cpufreq_freq_transition_end(target->policy, &freqs, 0);
+ 
+ 	dev_dbg(dev, "set frequency %lu Hz\n", freq);
+-
+ 	return 0;
+ }
+ 
++/*
++ * Here we notify other drivers of the proposed change and the final change.
++ */
++static int sh_cpufreq_target(struct cpufreq_policy *policy,
++			     unsigned int target_freq,
++			     unsigned int relation)
++{
++	struct cpufreq_target data = { .policy = policy, .freq = target_freq };
++
++	return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
++}
++
+ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+ {
+ 	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
diff --git a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
index 8581b4a..2387996 100644
--- a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
+++ b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:57 +0100
 Subject: [PATCH] futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 38d589f2fd08f1296aea3ce62bebd185125c6d81
 
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -3030,10 +3030,13 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3032,10 +3032,13 @@ static int futex_wait_requeue_pi(u32 __u
  		 */
  		WARN_ON(!q.pi_state);
  		pi_mutex = &q.pi_state->pi_mutex;
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 * haven't already.
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1743,21 +1743,23 @@ struct task_struct *rt_mutex_next_owner(
+@@ -1753,21 +1753,23 @@ struct task_struct *rt_mutex_next_owner(
  }
  
  /**
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			       struct hrtimer_sleeper *to,
  			       struct rt_mutex_waiter *waiter)
  {
-@@ -1770,9 +1772,6 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1780,9 +1782,6 @@ int rt_mutex_finish_proxy_lock(struct rt
  	/* sleep on the mutex */
  	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
  
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/*
  	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
  	 * have to fix that up.
-@@ -1783,3 +1782,42 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1793,3 +1792,42 @@ int rt_mutex_finish_proxy_lock(struct rt
  
  	return ret;
  }
diff --git a/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
new file mode 100644
index 0000000..1b52e1f
--- /dev/null
+++ b/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
@@ -0,0 +1,48 @@
+From b608fe356fe8328665445a26ec75dfac918c8c5d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:41 +0200
+Subject: [PATCH 10/17] iommu/vt-d: Adjust system_state checks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state checks in dmar_parse_one_atsr() and
+dmar_iommu_notify_scope_dev() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Joerg Roedel <joro at 8bytes.org>
+Cc: David Woodhouse <dwmw2 at infradead.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mark Rutland <mark.rutland at arm.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Cc: iommu at lists.linux-foundation.org
+Link: http://lkml.kernel.org/r/20170516184735.712365947@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ drivers/iommu/intel-iommu.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4310,7 +4310,7 @@ int dmar_parse_one_atsr(struct acpi_dmar
+ 	struct acpi_dmar_atsr *atsr;
+ 	struct dmar_atsr_unit *atsru;
+ 
+-	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
++	if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ 		return 0;
+ 
+ 	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+@@ -4560,7 +4560,7 @@ int dmar_iommu_notify_scope_dev(struct d
+ 	struct acpi_dmar_atsr *atsr;
+ 	struct acpi_dmar_reserved_memory *rmrr;
+ 
+-	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
++	if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
+ 		return 0;
+ 
+ 	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
diff --git a/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..4aa4b90
--- /dev/null
+++ b/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,125 @@
+From 9fe24c4e92d3963d92d7d383e28ed098bd5689d8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:37 +0200
+Subject: [PATCH 11/13] cpufreq/sparc-us3: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The access to the safari config register in the CPU frequency functions
+must be executed on the target CPU. This is achieved by temporarily setting
+the affinity of the calling user space thread to the requested CPU and
+resetting it to the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it with a straightforward smp function call.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: linux-pm at vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/20170412201043.047558840@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/cpufreq/sparc-us3-cpufreq.c |   46 ++++++++++++------------------------
+ 1 file changed, 16 insertions(+), 30 deletions(-)
+
+--- a/drivers/cpufreq/sparc-us3-cpufreq.c
++++ b/drivers/cpufreq/sparc-us3-cpufreq.c
+@@ -35,22 +35,28 @@ static struct us3_freq_percpu_info *us3_
+ #define SAFARI_CFG_DIV_32	0x0000000080000000UL
+ #define SAFARI_CFG_DIV_MASK	0x00000000C0000000UL
+ 
+-static unsigned long read_safari_cfg(void)
++static void read_safari_cfg(void *arg)
+ {
+-	unsigned long ret;
++	unsigned long ret, *val = arg;
+ 
+ 	__asm__ __volatile__("ldxa	[%%g0] %1, %0"
+ 			     : "=&r" (ret)
+ 			     : "i" (ASI_SAFARI_CONFIG));
+-	return ret;
++	*val = ret;
+ }
+ 
+-static void write_safari_cfg(unsigned long val)
++static void update_safari_cfg(void *arg)
+ {
++	unsigned long reg, *new_bits = arg;
++
++	read_safari_cfg(&reg);
++	reg &= ~SAFARI_CFG_DIV_MASK;
++	reg |= *new_bits;
++
+ 	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
+ 			     "membar	#Sync"
+ 			     : /* no outputs */
+-			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
++			     : "r" (reg), "i" (ASI_SAFARI_CONFIG)
+ 			     : "memory");
+ }
+ 
+@@ -78,29 +84,17 @@ static unsigned long get_current_freq(un
+ 
+ static unsigned int us3_freq_get(unsigned int cpu)
+ {
+-	cpumask_t cpus_allowed;
+ 	unsigned long reg;
+-	unsigned int ret;
+-
+-	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+-	reg = read_safari_cfg();
+-	ret = get_current_freq(cpu, reg);
+ 
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+-	return ret;
++	if (smp_call_function_single(cpu, read_safari_cfg, &reg, 1))
++		return 0;
++	return get_current_freq(cpu, reg);
+ }
+ 
+ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
+ {
+ 	unsigned int cpu = policy->cpu;
+-	unsigned long new_bits, new_freq, reg;
+-	cpumask_t cpus_allowed;
+-
+-	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
++	unsigned long new_bits, new_freq;
+ 
+ 	new_freq = sparc64_get_clock_tick(cpu) / 1000;
+ 	switch (index) {
+@@ -121,15 +115,7 @@ static int us3_freq_target(struct cpufre
+ 		BUG();
+ 	}
+ 
+-	reg = read_safari_cfg();
+-
+-	reg &= ~SAFARI_CFG_DIV_MASK;
+-	reg |= new_bits;
+-	write_safari_cfg(reg);
+-
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+-	return 0;
++	return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
+ }
+ 
+ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
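Here the series uses an IPI instead of a workqueue: the Safari register access is short and must not sleep, so smp_call_function_single() fits. A minimal sketch of that variant, with a hypothetical read_cfg() helper:

    #include <linux/smp.h>

    /* Runs in IPI context on the target CPU; must not sleep. */
    static void read_cfg(void *arg)
    {
            unsigned long *val = arg;

            *val = 42;      /* stands in for the privileged register read */
    }

    static int read_cfg_on(int cpu, unsigned long *val)
    {
            /* wait=1: only return after read_cfg() ran on @cpu. */
            return smp_call_function_single(cpu, read_cfg, val, 1);
    }

smp_call_function_single() fails with -ENXIO for an offline CPU, which is why us3_freq_get() above maps a failure to a reported frequency of 0.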
diff --git a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
index 4ba2d46..50e8ea9 100644
--- a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
+++ b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:58 +0100
 Subject: [PATCH] futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit cfafcd117da0216520568c195cb2f6cd1980c4bb
 
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2097,20 +2097,7 @@ queue_unlock(struct futex_hash_bucket *h
+@@ -2099,20 +2099,7 @@ queue_unlock(struct futex_hash_bucket *h
  	hb_waiters_dec(hb);
  }
  
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	int prio;
  
-@@ -2127,6 +2114,24 @@ static inline void queue_me(struct futex
+@@ -2129,6 +2116,24 @@ static inline void queue_me(struct futex
  	plist_node_init(&q->list, prio);
  	plist_add(&q->list, &hb->chain);
  	q->task = current;
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_unlock(&hb->lock);
  }
  
-@@ -2585,6 +2590,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2587,6 +2592,7 @@ static int futex_lock_pi(u32 __user *uad
  {
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct futex_pi_state *pi_state = NULL;
@@ -145,7 +145,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct futex_hash_bucket *hb;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -2637,25 +2643,52 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2639,25 +2645,52 @@ static int futex_lock_pi(u32 __user *uad
  		}
  	}
  
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1491,19 +1491,6 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1493,19 +1493,6 @@ int __sched rt_mutex_lock_interruptible(
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
  
  /*
@@ -228,7 +228,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * Futex variant, must not use fastpath.
   */
  int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-@@ -1772,12 +1759,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1782,12 +1769,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	/* sleep on the mutex */
  	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
  
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	raw_spin_unlock_irq(&lock->wait_lock);
  
  	return ret;
-@@ -1817,6 +1798,13 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1827,6 +1808,13 @@ bool rt_mutex_cleanup_proxy_lock(struct
  		fixup_rt_mutex_waiters(lock);
  		cleanup = true;
  	}
diff --git a/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch
new file mode 100644
index 0000000..a3ab8ed
--- /dev/null
+++ b/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch
@@ -0,0 +1,62 @@
+From b4def42724594cd399cfee365221f5b38639711d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:43 +0200
+Subject: [PATCH 12/17] async: Adjust system_state checks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in async_run_entry_fn() and
+async_synchronize_cookie_domain() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Arjan van de Ven <arjan at linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.865155020@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/async.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -114,14 +114,14 @@ static void async_run_entry_fn(struct wo
+ 	ktime_t uninitialized_var(calltime), delta, rettime;
+ 
+ 	/* 1) run (and print duration) */
+-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
++	if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ 		pr_debug("calling  %lli_%pF @ %i\n",
+ 			(long long)entry->cookie,
+ 			entry->func, task_pid_nr(current));
+ 		calltime = ktime_get();
+ 	}
+ 	entry->func(entry->data, entry->cookie);
+-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
++	if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ 		rettime = ktime_get();
+ 		delta = ktime_sub(rettime, calltime);
+ 		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
+@@ -284,14 +284,14 @@ void async_synchronize_cookie_domain(asy
+ {
+ 	ktime_t uninitialized_var(starttime), delta, endtime;
+ 
+-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
++	if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ 		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
+ 		starttime = ktime_get();
+ 	}
+ 
+ 	wait_event(async_done, lowest_in_progress(domain) >= cookie);
+ 
+-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
++	if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ 		endtime = ktime_get();
+ 		delta = ktime_sub(endtime, starttime);
+ 
diff --git a/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..4326566
--- /dev/null
+++ b/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,130 @@
+From 12699ac53a2e5fbd1fd7c164b11685d55c8aa28b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 13 Apr 2017 10:22:43 +0200
+Subject: [PATCH 12/13] cpufreq/sparc-us2e: Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The access to the HBIRD_ESTAR_MODE register in the cpu frequency control
+functions must happen on the target CPU. This is achieved by temporarily
+setting the affinity of the calling user space thread to the requested CPU
+and reset it to the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it with a straightforward smp function call.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: Herbert Xu <herbert at gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: linux-pm at vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: "David S. Miller" <davem at davemloft.net>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131020280.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/cpufreq/sparc-us2e-cpufreq.c |   45 ++++++++++++++++-------------------
+ 1 file changed, 21 insertions(+), 24 deletions(-)
+
+--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
++++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
+@@ -118,10 +118,6 @@ static void us2e_transition(unsigned lon
+ 			    unsigned long clock_tick,
+ 			    unsigned long old_divisor, unsigned long divisor)
+ {
+-	unsigned long flags;
+-
+-	local_irq_save(flags);
+-
+ 	estar &= ~ESTAR_MODE_DIV_MASK;
+ 
+ 	/* This is based upon the state transition diagram in the IIe manual.  */
+@@ -152,8 +148,6 @@ static void us2e_transition(unsigned lon
+ 	} else {
+ 		BUG();
+ 	}
+-
+-	local_irq_restore(flags);
+ }
+ 
+ static unsigned long index_to_estar_mode(unsigned int index)
+@@ -229,48 +223,51 @@ static unsigned long estar_to_divisor(un
+ 	return ret;
+ }
+ 
++static void __us2e_freq_get(void *arg)
++{
++	unsigned long *estar = arg;
++
++	*estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
++}
++
+ static unsigned int us2e_freq_get(unsigned int cpu)
+ {
+-	cpumask_t cpus_allowed;
+ 	unsigned long clock_tick, estar;
+ 
+-	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+ 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+-	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+-
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
++	if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
++		return 0;
+ 
+ 	return clock_tick / estar_to_divisor(estar);
+ }
+ 
+-static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
++static void __us2e_freq_target(void *arg)
+ {
+-	unsigned int cpu = policy->cpu;
++	unsigned int cpu = smp_processor_id();
++	unsigned int *index = arg;
+ 	unsigned long new_bits, new_freq;
+ 	unsigned long clock_tick, divisor, old_divisor, estar;
+-	cpumask_t cpus_allowed;
+-
+-	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ 
+ 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+-	new_bits = index_to_estar_mode(index);
+-	divisor = index_to_divisor(index);
++	new_bits = index_to_estar_mode(*index);
++	divisor = index_to_divisor(*index);
+ 	new_freq /= divisor;
+ 
+ 	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+ 
+ 	old_divisor = estar_to_divisor(estar);
+ 
+-	if (old_divisor != divisor)
++	if (old_divisor != divisor) {
+ 		us2e_transition(estar, new_bits, clock_tick * 1000,
+ 				old_divisor, divisor);
++	}
++}
+ 
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
++static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
++{
++	unsigned int cpu = policy->cpu;
+ 
+-	return 0;
++	return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
+ }
+ 
+ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
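
The pattern above replaces the cpus_allowed save/restore dance with a
synchronous cross-call: the register access runs on the target CPU in
interrupt context, and an offline CPU makes the call fail instead of the
code silently running elsewhere. A hedged sketch of the shape, where
read_target_reg() is a made-up stand-in for read_hbreg():

    #include <linux/smp.h>

    static void __read_on_cpu(void *arg)
    {
        unsigned long *val = arg;

        /* Runs with interrupts disabled on the target CPU, so it must
         * stay short and must not sleep. */
        *val = read_target_reg();
    }

    static unsigned long read_reg_on(unsigned int cpu)
    {
        unsigned long val;

        /* wait=1 blocks until the callback has run; a non-zero return
         * means the CPU was offline and nothing was executed. */
        if (smp_call_function_single(cpu, __read_on_cpu, &val, 1))
            return 0;

        return val;
    }
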
diff --git a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
index b2a8bf3..1e2c49d 100644
--- a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
+++ b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:59 +0100
 Subject: [PATCH] futex: Futex_unlock_pi() determinism
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit bebe5b514345f09be2c15e414d076b02ecb9cce8
 
@@ -36,8 +36,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1396,15 +1396,10 @@ static int wake_futex_pi(u32 __user *uad
- 	WAKE_Q(wake_q);
+@@ -1398,15 +1398,10 @@ static int wake_futex_pi(u32 __user *uad
+ 	DEFINE_WAKE_Q(wake_q);
  	int ret = 0;
  
 -	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 *
  		 * When this happens, give up our locks and try again, giving
  		 * the futex_lock_pi() instance time to complete, either by
-@@ -2792,15 +2787,18 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2794,15 +2789,18 @@ static int futex_unlock_pi(u32 __user *u
  		if (pi_state->owner != current)
  			goto out_unlock;
  
diff --git a/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 0000000..794f273
--- /dev/null
+++ b/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,96 @@
+From 73810a069120aa831debb4d967310ab900f628ad Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 13 Apr 2017 10:20:23 +0200
+Subject: [PATCH 13/13] crypto: N2 - Replace racy task affinity logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+spu_queue_register() needs to invoke setup functions on a particular
+CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the
+original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread, resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe(), which guarantees that the code runs
+on the requested CPU or fails if the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Herbert Xu <herbert at gondor.apana.org.au>
+Acked-by: "David S. Miller" <davem at davemloft.net>
+Cc: Fenghua Yu <fenghua.yu at intel.com>
+Cc: Tony Luck <tony.luck at intel.com>
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy at linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai at gmail.com>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: linux-crypto at vger.kernel.org
+Cc: Michael Ellerman <mpe at ellerman.id.au>
+Cc: Tejun Heo <tj at kernel.org>
+Cc: Len Brown <lenb at kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131019420.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ drivers/crypto/n2_core.c |   31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -65,6 +65,11 @@ struct spu_queue {
+ 	struct list_head	list;
+ };
+ 
++struct spu_qreg {
++	struct spu_queue	*queue;
++	unsigned long		type;
++};
++
+ static struct spu_queue **cpu_to_cwq;
+ static struct spu_queue **cpu_to_mau;
+ 
+@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
+ 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ }
+ 
+-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
++static long spu_queue_register_workfn(void *arg)
+ {
+-	cpumask_var_t old_allowed;
++	struct spu_qreg *qr = arg;
++	struct spu_queue *p = qr->queue;
++	unsigned long q_type = qr->type;
+ 	unsigned long hv_ret;
+ 
+-	if (cpumask_empty(&p->sharing))
+-		return -EINVAL;
+-
+-	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+-		return -ENOMEM;
+-
+-	cpumask_copy(old_allowed, &current->cpus_allowed);
+-
+-	set_cpus_allowed_ptr(current, &p->sharing);
+-
+ 	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+ 				 CWQ_NUM_ENTRIES, &p->qhandle);
+ 	if (!hv_ret)
+ 		sun4v_ncs_sethead_marker(p->qhandle, 0);
+ 
+-	set_cpus_allowed_ptr(current, old_allowed);
++	return hv_ret ? -EINVAL : 0;
++}
+ 
+-	free_cpumask_var(old_allowed);
++static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
++{
++	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
++	struct spu_qreg qr = { .queue = p, .type = q_type };
+ 
+-	return (hv_ret ? -EINVAL : 0);
++	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
+ }
+ 
+ static int spu_queue_setup(struct spu_queue *p)
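
Unlike an smp_call_function_single() callback, the function handed to
work_on_cpu_safe() runs in a kworker and may sleep, which suits the
hypervisor call above. A sketch of the pattern, with struct my_ctx and
setup_hw_queue() as hypothetical stand-ins for the driver specifics:

    #include <linux/cpumask.h>
    #include <linux/workqueue.h>

    struct my_ctx { unsigned long type; };      /* hypothetical */

    static long setup_on_cpu_workfn(void *arg)
    {
        struct my_ctx *ctx = arg;

        /* May block; runs pinned to the CPU chosen below. */
        return setup_hw_queue(ctx) ? -EINVAL : 0;
    }

    static long setup_queue(struct my_ctx *ctx, const struct cpumask *mask)
    {
        unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
            return -EINVAL;

        /* Fails instead of running elsewhere if the CPU goes away. */
        return work_on_cpu_safe(cpu, setup_on_cpu_workfn, ctx);
    }
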
diff --git a/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch
new file mode 100644
index 0000000..562d63a
--- /dev/null
+++ b/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch
@@ -0,0 +1,37 @@
+From 0594729c24d846889408a07057b5cc9e8d931419 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:44 +0200
+Subject: [PATCH 13/17] extable: Adjust system_state checks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in core_kernel_text() to handle the extra
+states, i.e. to cover init text up to the point where the system switches
+to state RUNNING.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/20170516184735.949992741@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/extable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -75,7 +75,7 @@ int core_kernel_text(unsigned long addr)
+ 	    addr < (unsigned long)_etext)
+ 		return 1;
+ 
+-	if (system_state == SYSTEM_BOOTING &&
++	if (system_state < SYSTEM_RUNNING &&
+ 	    init_kernel_text(addr))
+ 		return 1;
+ 	return 0;
diff --git a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
index 1ea9960..9b5c128 100644
--- a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
+++ b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:36:00 +0100
 Subject: [PATCH] futex: Drop hb->lock before enqueueing on the rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit 56222b212e8edb1cf51f5dd73ff645809b082b40
 
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2652,20 +2652,33 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2654,20 +2654,33 @@ static int futex_lock_pi(u32 __user *uad
  		goto no_block;
  	}
  
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (unlikely(to))
  		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
-@@ -2678,6 +2691,9 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2680,6 +2693,9 @@ static int futex_lock_pi(u32 __user *uad
  	 * first acquire the hb->lock before removing the lock from the
  	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
  	 * wait lists consistent.
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 */
  	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
  		ret = 0;
-@@ -2789,10 +2805,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2791,10 +2807,6 @@ static int futex_unlock_pi(u32 __user *u
  
  		get_pi_state(pi_state);
  		/*
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 * wake_futex_pi() must observe a state consistent with what we
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1659,31 +1659,14 @@ void rt_mutex_proxy_unlock(struct rt_mut
+@@ -1669,31 +1669,14 @@ void rt_mutex_proxy_unlock(struct rt_mut
  	rt_mutex_set_owner(lock, NULL);
  }
  
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* We enforce deadlock detection for futexes */
  	ret = task_blocks_on_rt_mutex(lock, waiter, task,
-@@ -1702,12 +1685,36 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1712,12 +1695,36 @@ int rt_mutex_start_proxy_lock(struct rt_
  	if (unlikely(ret))
  		remove_waiter(lock, waiter);
  
diff --git a/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch
new file mode 100644
index 0000000..8cbedb7
--- /dev/null
+++ b/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch
@@ -0,0 +1,36 @@
+From ff48cd26fc4889b9deb5f9333d3c61746e450b7f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:45 +0200
+Subject: [PATCH 14/17] printk: Adjust system_state checks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in boot_delay_msec() to handle the extra
+states.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/20170516184736.027534895@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/printk/printk.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1176,7 +1176,7 @@ static void boot_delay_msec(int level)
+ 	unsigned long long k;
+ 	unsigned long timeout;
+ 
+-	if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
++	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
+ 		|| suppress_message_printing(level)) {
+ 		return;
+ 	}
diff --git a/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
new file mode 100644
index 0000000..80cf671
--- /dev/null
+++ b/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
@@ -0,0 +1,40 @@
+From c6202adf3a0969514299cf10ff07376a84ad09bb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:46 +0200
+Subject: [PATCH 15/17] mm/vmscan: Adjust system_state checks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in kswapd_run() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Acked-by: Vlastimil Babka <vbabka at suse.cz>
+Cc: Andrew Morton <akpm at linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Johannes Weiner <hannes at cmpxchg.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Mel Gorman <mgorman at techsingularity.net>
+Cc: Michal Hocko <mhocko at suse.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/20170516184736.119158930@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ mm/vmscan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3654,7 +3654,7 @@ int kswapd_run(int nid)
+ 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ 	if (IS_ERR(pgdat->kswapd)) {
+ 		/* failure at boot is fatal */
+-		BUG_ON(system_state == SYSTEM_BOOTING);
++		BUG_ON(system_state < SYSTEM_RUNNING);
+ 		pr_err("Failed to start kswapd on node %d\n", nid);
+ 		ret = PTR_ERR(pgdat->kswapd);
+ 		pgdat->kswapd = NULL;
diff --git a/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch b/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
new file mode 100644
index 0000000..51e443a
--- /dev/null
+++ b/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
@@ -0,0 +1,61 @@
+From 69a78ff226fe0241ab6cb9dd961667be477e3cf7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:47 +0200
+Subject: [PATCH 16/17] init: Introduce SYSTEM_SCHEDULING state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+might_sleep() debugging and smp_processor_id() debugging should be active
+right after the scheduler starts working. The init task can invoke
+smp_processor_id() from preemptible context as it is pinned on the boot cpu
+until sched_smp_init() removes the pinning and lets it schedule on all
+non-isolated cpus.
+
+Add a new state which allows those checks to be enabled earlier and add it to
+the xen do_poweroff() function.
+
+No functional change.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky at oracle.com>
+Acked-by: Mark Rutland <mark.rutland at arm.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Juergen Gross <jgross at suse.com>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184736.196214622@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ drivers/xen/manage.c   |    1 +
+ include/linux/kernel.h |    6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -190,6 +190,7 @@ static void do_poweroff(void)
+ {
+ 	switch (system_state) {
+ 	case SYSTEM_BOOTING:
++	case SYSTEM_SCHEDULING:
+ 		orderly_poweroff(true);
+ 		break;
+ 	case SYSTEM_RUNNING:
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -488,9 +488,13 @@ extern int root_mountflags;
+ 
+ extern bool early_boot_irqs_disabled;
+ 
+-/* Values used for system_state */
++/*
++ * Values used for system_state. Ordering of the states must not be changed
++ * as code checks for <, <=, >, >= STATE.
++ */
+ extern enum system_states {
+ 	SYSTEM_BOOTING,
++	SYSTEM_SCHEDULING,
+ 	SYSTEM_RUNNING,
+ 	SYSTEM_HALT,
+ 	SYSTEM_POWER_OFF,
diff --git a/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch b/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
new file mode 100644
index 0000000..6c44c4a
--- /dev/null
+++ b/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
@@ -0,0 +1,75 @@
+From 1c3c5eab171590f86edd8d31389d61dd1efe3037 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 16 May 2017 20:42:48 +0200
+Subject: [PATCH 17/17] sched/core: Enable might_sleep() and smp_processor_id()
+ checks early
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+might_sleep() and smp_processor_id() checks are enabled after the boot
+process is done. That hides bugs in the SMP bringup and driver
+initialization code.
+
+Enable it right when the scheduler starts working, i.e. when the init task
+and kthreadd have been created and right before the idle task enables
+preemption.
+
+Tested-by: Mark Rutland <mark.rutland at arm.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Mark Rutland <mark.rutland at arm.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Steven Rostedt <rostedt at goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184736.272225698@linutronix.de
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ init/main.c            |   10 ++++++++++
+ kernel/sched/core.c    |    4 +++-
+ lib/smp_processor_id.c |    2 +-
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -414,6 +414,16 @@ static noinline void __ref rest_init(voi
+ 	rcu_read_lock();
+ 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+ 	rcu_read_unlock();
++
++	/*
++	 * Enable might_sleep() and smp_processor_id() checks.
++	 * They cannot be enabled earlier because with CONFIG_PREEMPT=y
++	 * kernel_thread() would trigger might_sleep() splats. With
++	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
++	 * already, but it's stuck on the kthreadd_done completion.
++	 */
++	system_state = SYSTEM_SCHEDULING;
++
+ 	complete(&kthreadd_done);
+ 
+ 	/*
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6223,8 +6223,10 @@ void ___might_sleep(const char *file, in
+ 
+ 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+ 	     !is_idle_task(current)) ||
+-	    system_state != SYSTEM_RUNNING || oops_in_progress)
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
+ 		return;
++
+ 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ 		return;
+ 	prev_jiffy = jiffies;
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -28,7 +28,7 @@ notrace static unsigned int check_preemp
+ 	/*
+ 	 * It is valid to assume CPU-locality during early bootup:
+ 	 */
+-	if (system_state != SYSTEM_RUNNING)
++	if (system_state < SYSTEM_SCHEDULING)
+ 		goto out;
+ 
+ 	/*
diff --git a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index fa9d875..d5d7447 100644
--- a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
 From: "Yadi.hu" <yadi.hu at windriver.com>
 Date: Wed, 10 Dec 2014 10:32:09 +0800
 Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Probably happens on all ARM, with
 CONFIG_PREEMPT_RT_FULL
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/arm/mm/fault.c
 +++ b/arch/arm/mm/fault.c
-@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr,
+@@ -431,6 +431,9 @@ do_translation_fault(unsigned long addr,
  	if (addr < TASK_SIZE)
  		return do_page_fault(addr, fsr, regs);
  
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (user_mode(regs))
  		goto bad_area;
  
-@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr,
+@@ -498,6 +501,9 @@ do_translation_fault(unsigned long addr,
  static int
  do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  {
diff --git a/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch b/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
new file mode 100644
index 0000000..3b6ec06
--- /dev/null
+++ b/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
@@ -0,0 +1,44 @@
+From 5ffb5cace8448c787c9f44e16a7b12f8c2866848 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Tue, 4 Apr 2017 17:43:55 +0200
+Subject: [PATCH] CPUFREQ: Loongson2: drop set_cpus_allowed_ptr()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+It is a pure mystery to me why we need to be on a specific CPU while
+looking up a value in an array.
+My best shot at this is that before commit d4019f0a92ab ("cpufreq: move
+freq change notifications to cpufreq core") it was required to invoke
+cpufreq_notify_transition() on a specific CPU.
+
+Since it looks like a waste, remove it.
+
+Cc: "Rafael J. Wysocki" <rjw at rjwysocki.net>
+Cc: Viresh Kumar <viresh.kumar at linaro.org>
+Cc: linux-pm at vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/cpufreq/loongson2_cpufreq.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/cpufreq/loongson2_cpufreq.c
++++ b/drivers/cpufreq/loongson2_cpufreq.c
+@@ -51,19 +51,12 @@ static int loongson2_cpu_freq_notifier(s
+ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ 				     unsigned int index)
+ {
+-	unsigned int cpu = policy->cpu;
+-	cpumask_t cpus_allowed;
+ 	unsigned int freq;
+ 
+-	cpus_allowed = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+ 	freq =
+ 	    ((cpu_clock_freq / 1000) *
+ 	     loongson2_clockmod_table[index].driver_data) / 8;
+ 
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+ 	/* setting the cpu frequency */
+ 	clk_set_rate(policy->clk, freq * 1000);
+ 
diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
index f9b3eff..d765863 100644
--- a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -1,31 +1,31 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 21 Mar 2013 19:01:05 +0100
 Subject: printk: Drop the logbuf_lock more often
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The lock is held with irqs off. The latency drops by 500us+ on my ARM
 boxes with a "full" buffer after executing "dmesg" on the shell.
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- kernel/printk/printk.c |   26 ++++++++++++++++++++++++++
- 1 file changed, 26 insertions(+)
+ kernel/printk/printk.c |   27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1396,6 +1396,7 @@ static int syslog_print_all(char __user
+@@ -1409,6 +1409,8 @@ static int syslog_print_all(char __user
  {
  	char *text;
  	int len = 0;
 +	int attempts = 0;
++	int num_msg;
  
  	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
  	if (!text)
-@@ -1407,6 +1408,14 @@ static int syslog_print_all(char __user
+@@ -1420,6 +1422,14 @@ static int syslog_print_all(char __user
  		u64 seq;
  		u32 idx;
- 		enum log_flags prev;
-+		int num_msg;
+ 
 +try_again:
 +		attempts++;
 +		if (attempts > 10) {
@@ -33,44 +33,45 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +			goto out;
 +		}
 +		num_msg = 0;
- 
++
  		/*
  		 * Find first record that fits, including all following records,
-@@ -1422,6 +1431,14 @@ static int syslog_print_all(char __user
- 			prev = msg->flags;
+ 		 * into the user-provided buffer for this dump.
+@@ -1432,6 +1442,14 @@ static int syslog_print_all(char __user
+ 			len += msg_print_text(msg, true, NULL, 0);
  			idx = log_next(idx);
  			seq++;
 +			num_msg++;
 +			if (num_msg > 5) {
 +				num_msg = 0;
-+				raw_spin_unlock_irq(&logbuf_lock);
-+				raw_spin_lock_irq(&logbuf_lock);
++				logbuf_unlock_irq();
++				logbuf_lock_irq();
 +				if (clear_seq < log_first_seq)
 +					goto try_again;
 +			}
  		}
  
  		/* move first record forward until length fits into the buffer */
-@@ -1435,6 +1452,14 @@ static int syslog_print_all(char __user
- 			prev = msg->flags;
+@@ -1443,6 +1461,14 @@ static int syslog_print_all(char __user
+ 			len -= msg_print_text(msg, true, NULL, 0);
  			idx = log_next(idx);
  			seq++;
 +			num_msg++;
 +			if (num_msg > 5) {
 +				num_msg = 0;
-+				raw_spin_unlock_irq(&logbuf_lock);
-+				raw_spin_lock_irq(&logbuf_lock);
++				logbuf_unlock_irq();
++				logbuf_lock_irq();
 +				if (clear_seq < log_first_seq)
 +					goto try_again;
 +			}
  		}
  
  		/* last message fitting into this dump */
-@@ -1475,6 +1500,7 @@ static int syslog_print_all(char __user
+@@ -1481,6 +1507,7 @@ static int syslog_print_all(char __user
  		clear_seq = log_next_seq;
  		clear_idx = log_next_idx;
  	}
 +out:
- 	raw_spin_unlock_irq(&logbuf_lock);
+ 	logbuf_unlock_irq();
  
  	kfree(text);
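
The generic shape of this hack is a bounded batch under the lock: drop and
immediately retake it every few records so interrupts get a window, then
re-validate whatever the lock protected before carrying on. A sketch with
hypothetical helpers (have_record(), consume_record(), lost_position()):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(buf_lock);

    static void drain_buffer(void)
    {
        int batch = 0;

        raw_spin_lock_irq(&buf_lock);
        while (have_record()) {
            consume_record();
            if (++batch > 5) {
                batch = 0;
                raw_spin_unlock_irq(&buf_lock);
                /* interrupts briefly enabled here */
                raw_spin_lock_irq(&buf_lock);
                if (lost_position())
                    break;      /* caller restarts the walk */
            }
        }
        raw_spin_unlock_irq(&buf_lock);
    }
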
diff --git a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 6fc13de..4be9dfa 100644
--- a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:01 -0600
 Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
 the vgic and timer states to prevent the calling task from migrating to
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/arm/kvm/arm.c
 +++ b/arch/arm/kvm/arm.c
-@@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  		 * involves poking the GIC, which must be done in a
  		 * non-preemptible context.
  		 */
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		kvm_pmu_flush_hwstate(vcpu);
  		kvm_timer_flush_hwstate(vcpu);
  		kvm_vgic_flush_hwstate(vcpu);
-@@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -653,7 +653,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  			kvm_pmu_sync_hwstate(vcpu);
  			kvm_timer_sync_hwstate(vcpu);
  			kvm_vgic_sync_hwstate(vcpu);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			continue;
  		}
  
-@@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -709,7 +709,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  
  		kvm_vgic_sync_hwstate(vcpu);
  
diff --git a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index bda8d1c..69263e9 100644
--- a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Marcelo Tosatti <mtosatti at redhat.com>
 Date: Wed, 8 Apr 2015 20:33:25 -0300
 Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Since the lapic timer handler only wakes up a simple waitqueue,
 it can be executed from hardirq context.
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/x86/kvm/lapic.c
 +++ b/arch/x86/kvm/lapic.c
-@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2062,6 +2062,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
  	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
  		     HRTIMER_MODE_ABS_PINNED);
  	apic->lapic_timer.timer.function = apic_timer_fn;
diff --git a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index f65cf1d..59417c2 100644
--- a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -5,7 +5,7 @@ Cc:     Anna Schumaker <anna.schumaker at netapp.com>,
         linux-nfs at vger.kernel.org, linux-kernel at vger.kernel.org,
         tglx at linutronix.de
 Subject: NFSv4: replace seqcount_t with a seqlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
 because it maps to preempt_disable() in -RT which I can't have at this
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2607,7 +2607,7 @@ static int _nfs4_open_and_get_state(stru
  	unsigned int seq;
  	int ret;
  
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	ret = _nfs4_proc_open(opendata);
  	if (ret != 0)
-@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2645,7 +2645,7 @@ static int _nfs4_open_and_get_state(stru
  
  	if (d_inode(dentry) == state->inode) {
  		nfs_inode_attach_open_context(ctx);
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	mutex_init(&sp->so_delegreturn_mutex);
  	return sp;
  }
-@@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1510,8 +1510,12 @@ static int nfs4_reclaim_open_state(struc
  	 * recovering after a network partition or a reboot from a
  	 * server that doesn't support a grace period.
  	 */
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  restart:
  	list_for_each_entry(state, &sp->so_states, open_states) {
  		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1580,14 +1584,20 @@ static int nfs4_reclaim_open_state(struc
  		spin_lock(&sp->so_lock);
  		goto restart;
  	}
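
A seqlock_t embeds a spinlock, so the write side serializes writers itself
and no longer maps to preempt_disable() on -RT; readers keep the usual
retry loop. A standalone sketch of the two sides (state_value is
illustrative, not the NFS state):

    #include <linux/seqlock.h>

    static DEFINE_SEQLOCK(state_lock);
    static unsigned long state_value;

    static void writer_update(unsigned long v)
    {
        write_seqlock(&state_lock);     /* takes the embedded spinlock */
        state_value = v;
        write_sequnlock(&state_lock);
    }

    static unsigned long reader_read(void)
    {
        unsigned long v;
        unsigned int seq;

        do {
            seq = read_seqbegin(&state_lock);
            v = state_value;
        } while (read_seqretry(&state_lock, seq));

        return v;
    }
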
diff --git a/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch b/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
new file mode 100644
index 0000000..82115e6
--- /dev/null
+++ b/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
@@ -0,0 +1,162 @@
+From 8adeebf2a94f4625c39c25ec461d0d2ab623b3ad Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Wed, 14 Jun 2017 21:29:16 +0200
+Subject: [PATCH] Revert "random: invalidate batched entropy after crng init"
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.
+
+In -RT lockdep complains with
+| -> #1 (primary_crng.lock){+.+...}:
+|        lock_acquire+0xb5/0x2b0
+|        rt_spin_lock+0x46/0x50
+|        _extract_crng+0x39/0xa0
+|        extract_crng+0x3a/0x40
+|        get_random_u64+0x17a/0x200
+|        cache_random_seq_create+0x51/0x100
+|        init_cache_random_seq+0x35/0x90
+|        __kmem_cache_create+0xd3/0x560
+|        create_boot_cache+0x8c/0xb2
+|        create_kmalloc_cache+0x54/0x9f
+|        create_kmalloc_caches+0xe3/0xfd
+|        kmem_cache_init+0x14f/0x1f0
+|        start_kernel+0x1e7/0x3b3
+|        x86_64_start_reservations+0x2a/0x2c
+|        x86_64_start_kernel+0x13d/0x14c
+|        verify_cpu+0x0/0xfc
+|
+| -> #0 (batched_entropy_reset_lock){+.+...}:
+|        __lock_acquire+0x11b4/0x1320
+|        lock_acquire+0xb5/0x2b0
+|        rt_write_lock+0x26/0x40
+|        rt_write_lock_irqsave+0x9/0x10
+|        invalidate_batched_entropy+0x28/0xb0
+|        crng_fast_load+0xb5/0xe0
+|        add_interrupt_randomness+0x16c/0x1a0
+|        irq_thread+0x15c/0x1e0
+|        kthread+0x112/0x150
+|        ret_from_fork+0x31/0x40
+
+so revert this for now and check later with upstream.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/char/random.c |   37 -------------------------------------
+ 1 file changed, 37 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1,9 +1,6 @@
+ /*
+  * random.c -- A strong random number generator
+  *
+- * Copyright (C) 2017 Jason A. Donenfeld <Jason at zx2c4.com>. All
+- * Rights Reserved.
+- *
+  * Copyright Matt Mackall <mpm at selenic.com>, 2003, 2004, 2005
+  *
+  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
+@@ -765,8 +762,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
+ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+ 
+-static void invalidate_batched_entropy(void);
+-
+ static void crng_initialize(struct crng_state *crng)
+ {
+ 	int		i;
+@@ -804,7 +799,6 @@ static int crng_fast_load(const char *cp
+ 		cp++; crng_init_cnt++; len--;
+ 	}
+ 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+-		invalidate_batched_entropy();
+ 		crng_init = 1;
+ 		wake_up_interruptible(&crng_init_wait);
+ 		pr_notice("random: fast init done\n");
+@@ -842,7 +836,6 @@ static void crng_reseed(struct crng_stat
+ 	memzero_explicit(&buf, sizeof(buf));
+ 	crng->init_time = jiffies;
+ 	if (crng == &primary_crng && crng_init < 2) {
+-		invalidate_batched_entropy();
+ 		crng_init = 2;
+ 		process_random_ready_list();
+ 		wake_up_interruptible(&crng_init_wait);
+@@ -2023,7 +2016,6 @@ struct batched_entropy {
+ 	};
+ 	unsigned int position;
+ };
+-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+ 
+ /*
+  * Get a random word for internal kernel use only. The quality of the random
+@@ -2034,8 +2026,6 @@ static DEFINE_PER_CPU(struct batched_ent
+ u64 get_random_u64(void)
+ {
+ 	u64 ret;
+-	bool use_lock = crng_init < 2;
+-	unsigned long flags;
+ 	struct batched_entropy *batch;
+ 
+ #if BITS_PER_LONG == 64
+@@ -2048,15 +2038,11 @@ u64 get_random_u64(void)
+ #endif
+ 
+ 	batch = &get_cpu_var(batched_entropy_u64);
+-	if (use_lock)
+-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ 		extract_crng((u8 *)batch->entropy_u64);
+ 		batch->position = 0;
+ 	}
+ 	ret = batch->entropy_u64[batch->position++];
+-	if (use_lock)
+-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ 	put_cpu_var(batched_entropy_u64);
+ 	return ret;
+ }
+@@ -2066,45 +2052,22 @@ static DEFINE_PER_CPU(struct batched_ent
+ u32 get_random_u32(void)
+ {
+ 	u32 ret;
+-	bool use_lock = crng_init < 2;
+-	unsigned long flags;
+ 	struct batched_entropy *batch;
+ 
+ 	if (arch_get_random_int(&ret))
+ 		return ret;
+ 
+ 	batch = &get_cpu_var(batched_entropy_u32);
+-	if (use_lock)
+-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ 		extract_crng((u8 *)batch->entropy_u32);
+ 		batch->position = 0;
+ 	}
+ 	ret = batch->entropy_u32[batch->position++];
+-	if (use_lock)
+-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ 	put_cpu_var(batched_entropy_u32);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(get_random_u32);
+ 
+-/* It's important to invalidate all potential batched entropy that might
+- * be stored before the crng is initialized, which we can do lazily by
+- * simply resetting the counter to zero so that it's re-extracted on the
+- * next usage. */
+-static void invalidate_batched_entropy(void)
+-{
+-	int cpu;
+-	unsigned long flags;
+-
+-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+-	for_each_possible_cpu (cpu) {
+-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+-	}
+-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+-}
+-
+ /**
+  * randomize_page - Generate a random, page aligned address
+  * @start:	The smallest acceptable address the caller will take.
diff --git a/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
deleted file mode 100644
index ed7274e..0000000
--- a/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+++ /dev/null
@@ -1,218 +0,0 @@
-From 16145f9c01a2e671aceb731050de9fbf977d31d0 Mon Sep 17 00:00:00 2001
-From: Anna-Maria Gleixner <anna-maria at linutronix.de>
-Date: Fri, 26 May 2017 19:16:07 +0200
-Subject: [PATCH] Revert "timers: Don't wake ktimersoftd on every tick"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-This reverts commit 032f93cae150a ("timers: Don't wake ktimersoftd on
-every tick").
-
-The problem is that the look ahead optimization from the tick timer
-interrupt context can race with the softirq thread expiring timer. As
-a consequence the temporary hlist heads which hold the to expire
-timers are overwritten and the timers which are already removed from
-the wheel bucket for expiry are now dangling w/o a list head.
-
-That means those timers never get expired. If one of those timers is
-canceled the removal operation will result in a hlist corruption.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria at linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- kernel/time/timer.c |   96 +++++++++++++++-------------------------------------
- 1 file changed, 29 insertions(+), 67 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -206,8 +206,6 @@ struct timer_base {
- 	bool			is_idle;
- 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
- 	struct hlist_head	vectors[WHEEL_SIZE];
--	struct hlist_head	expired_lists[LVL_DEPTH];
--	int			expired_count;
- } ____cacheline_aligned;
- 
- static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-@@ -1355,8 +1353,7 @@ static void call_timer_fn(struct timer_l
- 	}
- }
- 
--static inline void __expire_timers(struct timer_base *base,
--				   struct hlist_head *head)
-+static void expire_timers(struct timer_base *base, struct hlist_head *head)
- {
- 	while (!hlist_empty(head)) {
- 		struct timer_list *timer;
-@@ -1387,38 +1384,21 @@ static inline void __expire_timers(struc
- 	}
- }
- 
--static void expire_timers(struct timer_base *base)
--{
--	struct hlist_head *head;
--
--	while (base->expired_count--) {
--		head = base->expired_lists + base->expired_count;
--		__expire_timers(base, head);
--	}
--	base->expired_count = 0;
--}
--
--static void __collect_expired_timers(struct timer_base *base)
-+static int __collect_expired_timers(struct timer_base *base,
-+				    struct hlist_head *heads)
- {
- 	unsigned long clk = base->clk;
- 	struct hlist_head *vec;
--	int i;
-+	int i, levels = 0;
- 	unsigned int idx;
- 
--	/*
--	 * expire_timers() must be called at least once before we can
--	 * collect more timers
--	 */
--	if (WARN_ON(base->expired_count))
--		return;
--
- 	for (i = 0; i < LVL_DEPTH; i++) {
- 		idx = (clk & LVL_MASK) + i * LVL_SIZE;
- 
- 		if (__test_and_clear_bit(idx, base->pending_map)) {
- 			vec = base->vectors + idx;
--			hlist_move_list(vec,
--				&base->expired_lists[base->expired_count++]);
-+			hlist_move_list(vec, heads++);
-+			levels++;
- 		}
- 		/* Is it time to look at the next level? */
- 		if (clk & LVL_CLK_MASK)
-@@ -1426,6 +1406,7 @@ static void __collect_expired_timers(str
- 		/* Shift clock for the next level granularity */
- 		clk >>= LVL_CLK_SHIFT;
- 	}
-+	return levels;
- }
- 
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -1618,7 +1599,8 @@ void timer_clear_idle(void)
- 	base->is_idle = false;
- }
- 
--static void collect_expired_timers(struct timer_base *base)
-+static int collect_expired_timers(struct timer_base *base,
-+				  struct hlist_head *heads)
- {
- 	/*
- 	 * NOHZ optimization. After a long idle sleep we need to forward the
-@@ -1635,49 +1617,20 @@ static void collect_expired_timers(struc
- 		if (time_after(next, jiffies)) {
- 			/* The call site will increment clock! */
- 			base->clk = jiffies - 1;
--			return;
-+			return 0;
- 		}
- 		base->clk = next;
- 	}
--	__collect_expired_timers(base);
-+	return __collect_expired_timers(base, heads);
- }
- #else
--static inline void collect_expired_timers(struct timer_base *base)
-+static inline int collect_expired_timers(struct timer_base *base,
-+					 struct hlist_head *heads)
- {
--	__collect_expired_timers(base);
-+	return __collect_expired_timers(base, heads);
- }
- #endif
- 
--static int find_expired_timers(struct timer_base *base)
--{
--	const unsigned long int end_clk = jiffies;
--
--	while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
--		collect_expired_timers(base);
--		base->clk++;
--	}
--
--	return base->expired_count;
--}
--
--/* Called from CPU tick routine to quickly collect expired timers */
--static int tick_find_expired(struct timer_base *base)
--{
--	int count;
--
--	raw_spin_lock(&base->lock);
--
--	if (unlikely(time_after(jiffies, base->clk + HZ))) {
--		/* defer to ktimersoftd; don't spend too long in irq context */
--		count = -1;
--	} else
--		count = find_expired_timers(base);
--
--	raw_spin_unlock(&base->lock);
--
--	return count;
--}
--
- /*
-  * Called from the timer interrupt handler to charge one tick to the current
-  * process.  user_tick is 1 if the tick is user time, 0 for system.
-@@ -1704,11 +1657,22 @@ void update_process_times(int user_tick)
-  */
- static inline void __run_timers(struct timer_base *base)
- {
-+	struct hlist_head heads[LVL_DEPTH];
-+	int levels;
-+
-+	if (!time_after_eq(jiffies, base->clk))
-+		return;
-+
- 	raw_spin_lock_irq(&base->lock);
- 
--	while (find_expired_timers(base))
--		expire_timers(base);
-+	while (time_after_eq(jiffies, base->clk)) {
-+
-+		levels = collect_expired_timers(base, heads);
-+		base->clk++;
- 
-+		while (levels--)
-+			expire_timers(base, heads + levels);
-+	}
- 	raw_spin_unlock_irq(&base->lock);
- 	wakeup_timer_waiters(base);
- }
-@@ -1734,12 +1698,12 @@ void run_local_timers(void)
- 
- 	hrtimer_run_queues();
- 	/* Raise the softirq only if required. */
--	if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
-+	if (time_before(jiffies, base->clk)) {
- 		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
- 			return;
- 		/* CPU is awake, so check the deferrable base. */
- 		base++;
--		if (time_before(jiffies, base->clk) || !tick_find_expired(base))
-+		if (time_before(jiffies, base->clk))
- 			return;
- 	}
- 	raise_softirq(TIMER_SOFTIRQ);
-@@ -1909,7 +1873,6 @@ int timers_dead_cpu(unsigned int cpu)
- 		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- 
- 		BUG_ON(old_base->running_timer);
--		BUG_ON(old_base->expired_count);
- 
- 		for (i = 0; i < WHEEL_SIZE; i++)
- 			migrate_timer_list(new_base, old_base->vectors + i);
-@@ -1936,7 +1899,6 @@ static void __init init_timer_cpu(int cp
- #ifdef CONFIG_PREEMPT_RT_FULL
- 		init_swait_queue_head(&base->wait_for_running_timer);
- #endif
--		base->expired_count = 0;
- 	}
- }
- 
diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 6197f22..ea33cbf 100644
--- a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Wed, 13 Feb 2013 09:26:05 -0500
 Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We hit the following bug with 3.6-rt:
 
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /* Mutex for _OSI support */
 --- a/drivers/acpi/acpica/hwregs.c
 +++ b/drivers/acpi/acpica/hwregs.c
-@@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
+@@ -428,14 +428,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
  			  ACPI_BITMASK_ALL_FIXED_STATUS,
  			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
  
diff --git a/debian/patches/features/all/rt/add_migrate_disable.patch b/debian/patches/features/all/rt/add_migrate_disable.patch
new file mode 100644
index 0000000..47a9936
--- /dev/null
+++ b/debian/patches/features/all/rt/add_migrate_disable.patch
@@ -0,0 +1,256 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: kernel/sched/core: add migrate_disable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+---
+ include/linux/preempt.h |   23 ++++++++
+ include/linux/sched.h   |    7 ++
+ include/linux/smp.h     |    3 +
+ kernel/sched/core.c     |  134 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/debug.c    |    4 +
+ 5 files changed, 169 insertions(+), 2 deletions(-)
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -184,6 +184,22 @@ do { \
+ 
+ #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+ 
++#ifdef CONFIG_SMP
++
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++
++int __migrate_disabled(struct task_struct *p);
++
++#else
++#define migrate_disable()		barrier()
++#define migrate_enable()		barrier()
++static inline int __migrate_disabled(struct task_struct *p)
++{
++	return 0;
++}
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ #define preempt_enable() \
+ do { \
+@@ -252,6 +268,13 @@ do { \
+ #define preempt_enable_notrace()		barrier()
+ #define preemptible()				0
+ 
++#define migrate_disable()			barrier()
++#define migrate_enable()			barrier()
++
++static inline int __migrate_disabled(struct task_struct *p)
++{
++	return 0;
++}
+ #endif /* CONFIG_PREEMPT_COUNT */
+ 
+ #ifdef MODULE
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -537,6 +537,13 @@ struct task_struct {
+ 	int				nr_cpus_allowed;
+ 	const cpumask_t			*cpus_ptr;
+ 	cpumask_t			cpus_mask;
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++	int				migrate_disable;
++	int				migrate_disable_update;
++# ifdef CONFIG_SCHED_DEBUG
++	int				migrate_disable_atomic;
++# endif
++#endif
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+ 	int				rcu_read_lock_nesting;
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
+ #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu()		preempt_enable()
+ 
++#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light()		migrate_enable()
++
+ /*
+  * Callback to arch code if there's nosmp or maxcpus=0 on the
+  * boot command line:
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1047,7 +1047,15 @@ void set_cpus_allowed_common(struct task
+ 	p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+ 
+-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++int __migrate_disabled(struct task_struct *p)
++{
++	return p->migrate_disable;
++}
++#endif
++
++static void __do_set_cpus_allowed_tail(struct task_struct *p,
++				       const struct cpumask *new_mask)
+ {
+ 	struct rq *rq = task_rq(p);
+ 	bool queued, running;
+@@ -1076,6 +1084,20 @@ void do_set_cpus_allowed(struct task_str
+ 		set_curr_task(rq, p);
+ }
+ 
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++	if (__migrate_disabled(p)) {
++		lockdep_assert_held(&p->pi_lock);
++
++		cpumask_copy(&p->cpus_mask, new_mask);
++		p->migrate_disable_update = 1;
++		return;
++	}
++#endif
++	__do_set_cpus_allowed_tail(p, new_mask);
++}
++
+ /*
+  * Change a given task's CPU affinity. Migrate the thread to a
+  * proper CPU and schedule it away if the CPU it's executing on
+@@ -1134,9 +1156,16 @@ static int __set_cpus_allowed_ptr(struct
+ 	}
+ 
+ 	/* Can the task run on the task's current CPU? If so, we're done */
+-	if (cpumask_test_cpu(task_cpu(p), new_mask))
++	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ 		goto out;
+ 
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++	if (__migrate_disabled(p)) {
++		p->migrate_disable_update = 1;
++		goto out;
++	}
++#endif
++
+ 	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ 	if (task_running(rq, p) || p->state == TASK_WAKING) {
+ 		struct migration_arg arg = { p, dest_cpu };
+@@ -7357,3 +7386,104 @@ const u32 sched_prio_to_wmult[40] = {
+  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
+  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+ };
++
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++
++	if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic++;
++#endif
++		return;
++	}
++#ifdef CONFIG_SCHED_DEBUG
++	WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++	if (p->migrate_disable) {
++		p->migrate_disable++;
++		return;
++	}
++
++	/* get_online_cpus(); */
++
++	preempt_disable();
++	p->migrate_disable = 1;
++
++	p->cpus_ptr = cpumask_of(smp_processor_id());
++	p->nr_cpus_allowed = 1;
++
++	preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic--;
++#endif
++		return;
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++	WARN_ON_ONCE(p->migrate_disable <= 0);
++	if (p->migrate_disable > 1) {
++		p->migrate_disable--;
++		return;
++	}
++
++	preempt_disable();
++
++	p->cpus_ptr = &p->cpus_mask;
++	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
++	p->migrate_disable = 0;
++
++	if (p->migrate_disable_update) {
++		struct rq *rq;
++		struct rq_flags rf;
++
++		rq = task_rq_lock(p, &rf);
++		update_rq_clock(rq);
++
++		__do_set_cpus_allowed_tail(p, &p->cpus_mask);
++		task_rq_unlock(rq, p, &rf);
++
++		p->migrate_disable_update = 0;
++
++		WARN_ON(smp_processor_id() != task_cpu(p));
++		if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++			const struct cpumask *cpu_valid_mask = cpu_active_mask;
++			struct migration_arg arg;
++			unsigned int dest_cpu;
++
++			if (p->flags & PF_KTHREAD) {
++				/*
++				 * Kernel threads are allowed on online && !active CPUs
++				 */
++				cpu_valid_mask = cpu_online_mask;
++			}
++			dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
++			arg.task = p;
++			arg.dest_cpu = dest_cpu;
++
++			preempt_enable();
++			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++			tlb_migrate_finish(p->mm);
++			/* put_online_cpus(); */
++			return;
++		}
++	}
++	/* put_online_cpus(); */
++	preempt_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#endif
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -958,6 +958,10 @@ void proc_sched_show_task(struct task_st
+ 		P(dl.runtime);
+ 		P(dl.deadline);
+ 	}
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++	P(migrate_disable);
++#endif
++	P(nr_cpus_allowed);
+ #undef PN_SCHEDSTAT
+ #undef PN
+ #undef __PN
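
The get_cpu_light()/put_cpu_light() pair introduced above is the RT replacement for get_cpu()/put_cpu() on paths that go on to take sleeping locks. A minimal usage sketch (the per-CPU structure, its lock and the function are hypothetical, not taken from the patch):

	/* Hypothetical per-CPU data; on RT, spinlock_t is a sleeping lock. */
	struct example_pcpu {
		spinlock_t	lock;
		unsigned long	count;
	};
	static DEFINE_PER_CPU(struct example_pcpu, example_data);

	static void example_inc(void)
	{
		struct example_pcpu *d;
		int cpu;

		cpu = get_cpu_light();	/* migrate_disable(); preemption stays enabled */
		d = per_cpu_ptr(&example_data, cpu);
		spin_lock(&d->lock);	/* may sleep on RT, which is legal here */
		d->count++;
		spin_unlock(&d->lock);
		put_cpu_light();	/* migrate_enable() */
	}
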
diff --git a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
index 65b6899..3efd85c 100644
--- a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
 From: Anders Roxell <anders.roxell at linaro.org>
 Date: Thu, 14 May 2015 17:52:17 +0200
 Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 arm64 is missing support for PREEMPT_RT. The main feature which is
 lacking is support for lazy preemption. The arch-specific entry code,
@@ -13,15 +13,15 @@ indicate that support for full RT preemption is now available.
 Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
 ---
  arch/arm64/Kconfig                   |    1 +
- arch/arm64/include/asm/thread_info.h |    7 ++++++-
+ arch/arm64/include/asm/thread_info.h |    7 +++++--
  arch/arm64/kernel/asm-offsets.c      |    1 +
  arch/arm64/kernel/entry.S            |   12 +++++++++---
  arch/arm64/kernel/signal.c           |    2 +-
- 5 files changed, 18 insertions(+), 5 deletions(-)
+ 5 files changed, 17 insertions(+), 6 deletions(-)
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -91,6 +91,7 @@ config ARM64
+@@ -96,6 +96,7 @@ config ARM64
  	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -31,23 +31,23 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  	select HAVE_SYSCALL_TRACEPOINTS
 --- a/arch/arm64/include/asm/thread_info.h
 +++ b/arch/arm64/include/asm/thread_info.h
-@@ -49,6 +49,7 @@ struct thread_info {
- 	mm_segment_t		addr_limit;	/* address limit */
- 	struct task_struct	*task;		/* main task structure */
+@@ -51,6 +51,7 @@ struct thread_info {
+ 	u64			ttbr0;		/* saved TTBR0_EL1 */
+ #endif
  	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 +	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- 	int			cpu;		/* cpu */
  };
  
-@@ -112,6 +113,7 @@ static inline struct thread_info *curren
- #define TIF_NEED_RESCHED	1
+ #define INIT_THREAD_INFO(tsk)						\
+@@ -86,6 +87,7 @@ struct thread_info {
  #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
  #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
-+#define TIF_NEED_RESCHED_LAZY	4
+ #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
++#define TIF_NEED_RESCHED_LAZY	5
  #define TIF_NOHZ		7
  #define TIF_SYSCALL_TRACE	8
  #define TIF_SYSCALL_AUDIT	9
-@@ -127,6 +129,7 @@ static inline struct thread_info *curren
+@@ -101,6 +103,7 @@ struct thread_info {
  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
  #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
@@ -55,41 +55,41 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  #define _TIF_NOHZ		(1 << TIF_NOHZ)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-@@ -135,7 +138,9 @@ static inline struct thread_info *curren
- #define _TIF_32BIT		(1 << TIF_32BIT)
+@@ -111,8 +114,8 @@ struct thread_info {
  
  #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
--				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
-+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-+				 _TIF_NEED_RESCHED_LAZY)
+ 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+-				 _TIF_UPROBE)
+-
++				 _TIF_UPROBE | _TIF_NEED_RESCHED_LAZY)
 +#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
- 
  #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
  				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ 				 _TIF_NOHZ)
 --- a/arch/arm64/kernel/asm-offsets.c
 +++ b/arch/arm64/kernel/asm-offsets.c
 @@ -38,6 +38,7 @@ int main(void)
    BLANK();
-   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
-   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
-+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
-   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
++  DEFINE(TSK_TI_PREEMPT_LAZY,	offsetof(struct task_struct, thread_info.preempt_lazy_count));
+   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
 --- a/arch/arm64/kernel/entry.S
 +++ b/arch/arm64/kernel/entry.S
-@@ -428,11 +428,16 @@ ENDPROC(el1_sync)
+@@ -488,11 +488,16 @@ ENDPROC(el1_sync)
  
  #ifdef CONFIG_PREEMPT
- 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+ 	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 -	cbnz	w24, 1f				// preempt count != 0
 +	cbnz	w24, 2f				// preempt count != 0
- 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+ 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
 -	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 -	bl	el1_preempt
 +	tbnz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 +
-+	ldr	w24, [tsk, #TI_PREEMPT_LAZY]	// get preempt lazy count
++	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
 +	cbnz	w24, 2f				// preempt lazy count != 0
 +	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f	// needs rescheduling?
  1:
@@ -98,9 +98,9 @@ Signed-off-by: Anders Roxell <anders.roxell at linaro.org>
  #endif
  #ifdef CONFIG_TRACE_IRQFLAGS
  	bl	trace_hardirqs_on
-@@ -446,6 +451,7 @@ ENDPROC(el1_irq)
+@@ -506,6 +511,7 @@ ENDPROC(el1_irq)
  1:	bl	preempt_schedule_irq		// irq en/disable is done inside
- 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+ 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
  	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 +	tbnz	x0, #TIF_NEED_RESCHED_LAZY, 1b	// needs rescheduling?
  	ret	x24
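
In C terms, the el1_irq return path patched above now decides roughly as follows; this is a sketch of the assembly's logic, not literal kernel code:

	if (ti->preempt_count == 0) {
		if (ti->flags & _TIF_NEED_RESCHED) {
			/* an immediate reschedule request is always honoured */
			preempt_schedule_irq();
		} else if (ti->preempt_lazy_count == 0 &&
			   (ti->flags & _TIF_NEED_RESCHED_LAZY)) {
			/* a lazy request counts only while lazy preemption
			 * is not held off */
			preempt_schedule_irq();
		}
	}
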
diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 9c0e338..90fe774 100644
--- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Sat, 6 Mar 2010 17:47:10 +0100
 Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Setup and remove the interrupt handler in clock event mode selection.
 This avoids calling the (shared) interrupt handler when the device is
diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
index 48369dc..65e2951 100644
--- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 May 2010 18:29:35 +0200
 Subject: ARM: at91: tclib: Default to tclib timer for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT is not too happy about the shared timer interrupt in AT91
 devices. Default to tclib timer for RT.
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
index bc89fdf..e7a556d 100644
--- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
+++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
@@ -1,7 +1,7 @@
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Mon, 19 Sep 2011 14:51:14 -0700
 Subject: arm: Convert arm boot_lock to raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The arm boot_lock is used by the secondary processor startup code.  The locking
 task is the idle thread, which has idle->sched_class == &idle_sched_class.
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
 --- a/arch/arm/mach-omap2/omap-smp.c
 +++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -64,7 +64,7 @@ static const struct omap_smp_config omap
+@@ -69,7 +69,7 @@ static const struct omap_smp_config omap
  	.startup_addr = omap5_secondary_startup,
  };
  
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  void __iomem *omap4_get_scu_base(void)
  {
-@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigne
+@@ -136,8 +136,8 @@ static void omap4_secondary_init(unsigne
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned
+@@ -150,7 +150,7 @@ static int omap4_boot_secondary(unsigned
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -197,7 +197,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned
+@@ -229,7 +229,7 @@ static int omap4_boot_secondary(unsigned
  	 * Now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
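
Every hunk in this patch applies the same mechanical conversion, sketched here on boot_lock itself; on RT a spinlock_t becomes a sleeping rtmutex, which the idle thread doing CPU bringup must never take:

	static DEFINE_RAW_SPINLOCK(boot_lock);	/* was: DEFINE_SPINLOCK(boot_lock) */

	raw_spin_lock(&boot_lock);		/* was: spin_lock(&boot_lock) */
	/* ... synchronise with the secondary CPU ... */
	raw_spin_unlock(&boot_lock);		/* raw locks spin and never sleep */
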
diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
index 2cfebeb..d687e51 100644
--- a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm: Enable highmem for rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 fixup highmem for ARM.
 
diff --git a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
index 33ee4a4..12b78c2 100644
--- a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 21:37:27 +0100
 Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The tlb should be flushed on unmap and thus make the mapping entry
 invalid. This is only done in the non-debug case which does not look
diff --git a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
index e451b25..bbfe4ad 100644
--- a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
+++ b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 22 Dec 2016 17:28:33 +0100
 Subject: [PATCH] arm: include definition for cpumask_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This definition gets pulled in by other files. With the (later) split of
 RCU and spinlock.h it won't compile anymore.
diff --git a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
index 409fed9..9fb6f16 100644
--- a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at linaro.org>
 Date: Thu, 10 Nov 2016 16:17:55 -0800
 Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When running kprobe on -rt kernel, the below bug is caught:
 
diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
index ed8ea0b..b12159b 100644
--- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: arm: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Implement the arm pieces for lazy preempt.
 
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -75,6 +75,7 @@ config ARM
+@@ -81,6 +81,7 @@ config ARM
  	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  __und_fault:
 --- a/arch/arm/kernel/entry-common.S
 +++ b/arch/arm/kernel/entry-common.S
-@@ -36,7 +36,9 @@
+@@ -41,7 +41,9 @@
   UNWIND(.cantunwind	)
  	disable_irq_notrace			@ disable interrupts
  	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	bne	fast_work_pending
  
  	/* perform architecture specific actions before user return */
-@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+@@ -67,8 +69,11 @@ ENDPROC(ret_fast_syscall)
  	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
  	disable_irq_notrace			@ disable interrupts
  	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
index 47b8b1d..ed75644 100644
--- a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
+++ b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 20 Sep 2013 14:31:54 +0200
 Subject: arm/unwind: use a raw_spin_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Mostly, unwind is done with irqs enabled; however, SLUB may call it with
 irqs disabled while creating a new SLUB cache.
diff --git a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 0ff187b..75496fb 100644
--- a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm64/xen: Make XEN depend on !RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 12 Oct 2015 11:18:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 It's not ready and probably never will be, unless xen folks have a
 look at it.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -704,7 +704,7 @@ config XEN_DOM0
+@@ -742,7 +742,7 @@ config XEN_DOM0
  
  config XEN
  	bool "Xen guest support on ARM64"
diff --git a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
index b079c50..e6f8fe8 100644
--- a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
+++ b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 09 Mar 2016 10:51:06 +0100
 Subject: arm: at91: do not disable/enable clocks in a row
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Currently the driver will disable the clock and enable it one line later
 if it is switching from periodic mode into one-shot mode.
diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
index 5c38ffe..d53b339 100644
--- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
+++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Fri, 3 Jul 2009 08:44:29 -0500
 Subject: ata: Do not disable interrupts in ide code for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the local_irq_*_nort variants.
 
@@ -15,19 +15,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/ata/libata-sff.c
 +++ b/drivers/ata/libata-sff.c
-@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
+@@ -679,9 +679,9 @@ unsigned int ata_sff_data_xfer_noirq(str
  	unsigned long flags;
  	unsigned int consumed;
  
 -	local_irq_save(flags);
 +	local_irq_save_nort(flags);
- 	consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+ 	consumed = ata_sff_data_xfer32(qc, buf, buflen, rw);
 -	local_irq_restore(flags);
 +	local_irq_restore_nort(flags);
  
  	return consumed;
  }
-@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
+@@ -720,7 +720,7 @@ static void ata_pio_sector(struct ata_qu
  		unsigned long flags;
  
  		/* FIXME: use a bounce buffer */
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		buf = kmap_atomic(page);
  
  		/* do the actual data transfer */
-@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
+@@ -728,7 +728,7 @@ static void ata_pio_sector(struct ata_qu
  				       do_write);
  
  		kunmap_atomic(buf);
@@ -44,8 +44,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		local_irq_restore_nort(flags);
  	} else {
  		buf = page_address(page);
- 		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
-@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_
+ 		ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
+@@ -865,7 +865,7 @@ static int __atapi_pio_bytes(struct ata_
  		unsigned long flags;
  
  		/* FIXME: use bounce buffer */
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		buf = kmap_atomic(page);
  
  		/* do the actual data transfer */
-@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_
+@@ -873,7 +873,7 @@ static int __atapi_pio_bytes(struct ata_
  								count, rw);
  
  		kunmap_atomic(buf);
@@ -62,4 +62,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		local_irq_restore_nort(flags);
  	} else {
  		buf = page_address(page);
- 		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
+ 		consumed = ap->ops->sff_data_xfer(qc, buf + offset,
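
The _nort variants come from a companion patch in the series; their definitions are roughly as follows (a sketch, the exact form is an assumption):

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif
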
diff --git a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
index 91e601a..11304a6 100644
--- a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
+++ b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 11:01:26 +0100
 Subject: block: blk-mq: Use swait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
@@ -40,13 +40,13 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  block/blk-core.c       |    6 +++---
- block/blk-mq.c         |    6 +++---
+ block/blk-mq.c         |    8 ++++----
  include/linux/blkdev.h |    2 +-
- 3 files changed, 7 insertions(+), 7 deletions(-)
+ 3 files changed, 8 insertions(+), 8 deletions(-)
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -662,7 +662,7 @@ int blk_queue_enter(struct request_queue
+@@ -678,7 +678,7 @@ int blk_queue_enter(struct request_queue
  		if (nowait)
  			return -EBUSY;
  
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				!atomic_read(&q->mq_freeze_depth) ||
  				blk_queue_dying(q));
  		if (blk_queue_dying(q))
-@@ -682,7 +682,7 @@ static void blk_queue_usage_counter_rele
+@@ -698,7 +698,7 @@ static void blk_queue_usage_counter_rele
  	struct request_queue *q =
  		container_of(ref, struct request_queue, q_usage_counter);
  
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void blk_rq_timed_out_timer(unsigned long data)
-@@ -751,7 +751,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -766,7 +766,7 @@ struct request_queue *blk_alloc_queue_no
  	q->bypass_depth = 1;
  	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
  
@@ -75,16 +75,24 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
+@@ -79,14 +79,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
  
- static void blk_mq_freeze_queue_wait(struct request_queue *q)
+ void blk_mq_freeze_queue_wait(struct request_queue *q)
  {
 -	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 +	swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  }
+ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
  
- /*
-@@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct reques
+ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
+ 				     unsigned long timeout)
+ {
+-	return wait_event_timeout(q->mq_freeze_wq,
++	return swait_event_timeout(q->mq_freeze_wq,
+ 					percpu_ref_is_zero(&q->q_usage_counter),
+ 					timeout);
+ }
+@@ -127,7 +127,7 @@ void blk_mq_unfreeze_queue(struct reques
  	WARN_ON_ONCE(freeze_depth < 0);
  	if (!freeze_depth) {
  		percpu_ref_reinit(&q->q_usage_counter);
@@ -93,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_
+@@ -173,7 +173,7 @@ void blk_mq_wake_waiters(struct request_
  	 * dying, we need to ensure that processes currently waiting on
  	 * the queue are notified as well.
  	 */
@@ -104,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -468,7 +468,7 @@ struct request_queue {
+@@ -566,7 +566,7 @@ struct request_queue {
  	struct throtl_data *td;
  #endif
  	struct rcu_head		rcu_head;
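
For reference, the mechanical shape of the wait-queue conversion; the declaration change in blkdev.h sits outside the quoted context, so this is assumed from the swait API:

	struct swait_queue_head	mq_freeze_wq;		/* was: wait_queue_head_t */

	init_swait_queue_head(&q->mq_freeze_wq);	/* was: init_waitqueue_head() */
	swait_event(q->mq_freeze_wq, condition);	/* was: wait_event() */
	swake_up_all(&q->mq_freeze_wq);			/* was: wake_up_all() */
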
diff --git a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
index 072355d..bac2123 100644
--- a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 15:10:08 +0100
 Subject: block/mq: don't complete requests via IPI
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The IPI runs in hard-irq context, where sleeping locks must not be taken.
 This patch moves the completion into a workqueue.
@@ -9,14 +9,14 @@ moves the completion into a workqueue.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  block/blk-core.c       |    3 +++
- block/blk-mq.c         |   20 ++++++++++++++++++++
+ block/blk-mq.c         |   24 ++++++++++++++++++++++++
  include/linux/blk-mq.h |    2 +-
- include/linux/blkdev.h |    1 +
- 4 files changed, 25 insertions(+), 1 deletion(-)
+ include/linux/blkdev.h |    3 +++
+ 4 files changed, 31 insertions(+), 1 deletion(-)
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q
+@@ -116,6 +116,9 @@ void blk_rq_init(struct request_queue *q
  
  	INIT_LIST_HEAD(&rq->queuelist);
  	INIT_LIST_HEAD(&rq->timeout_list);
@@ -28,9 +28,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	rq->__sector = (sector_t) -1;
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct re
- 	rq->resid_len = 0;
- 	rq->sense = NULL;
+@@ -213,6 +213,9 @@ void blk_mq_rq_ctx_init(struct request_q
+ 	rq->errors = 0;
+ 	rq->extra_len = 0;
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	INIT_LIST_HEAD(&rq->timeout_list);
  	rq->timeout = 0;
  
-@@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *
+@@ -395,6 +398,17 @@ void blk_mq_end_request(struct request *
  }
  EXPORT_SYMBOL(blk_mq_end_request);
  
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void __blk_mq_complete_request_remote(void *data)
  {
  	struct request *rq = data;
-@@ -352,6 +366,8 @@ static void __blk_mq_complete_request_re
+@@ -402,6 +416,8 @@ static void __blk_mq_complete_request_re
  	rq->q->softirq_done_fn(rq);
  }
  
@@ -65,11 +65,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void blk_mq_ipi_complete_request(struct request *rq)
  {
  	struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -368,10 +384,14 @@ static void blk_mq_ipi_complete_request(
+@@ -418,10 +434,18 @@ static void blk_mq_ipi_complete_request(
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
  	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
 +#ifdef CONFIG_PREEMPT_RT_FULL
++		/*
++		 * We could force QUEUE_FLAG_SAME_FORCE, then we would not get in
++		 * here. But we could try to invoke it on the CPU like this.
++		 */
 +		schedule_work_on(ctx->cpu, &rq->work);
 +#else
  		rq->csd.func = __blk_mq_complete_request_remote;
@@ -82,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
 --- a/include/linux/blk-mq.h
 +++ b/include/linux/blk-mq.h
-@@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -218,7 +218,7 @@ static inline u16 blk_mq_unique_tag_to_t
  	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
  }
  
@@ -93,11 +97,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void blk_mq_end_request(struct request *rq, int error);
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -89,6 +89,7 @@ struct request {
+@@ -128,6 +128,9 @@ typedef __u32 __bitwise req_flags_t;
+  */
+ struct request {
  	struct list_head queuelist;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct work_struct work;
++#endif
  	union {
  		struct call_single_data csd;
-+		struct work_struct work;
  		u64 fifo_time;
- 	};
- 
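
The worker that the INIT_WORK() above wires up is untouched by this refresh and therefore invisible in the hunks; it plausibly looks like this (a sketch, not quoted from the patch):

	static void __blk_mq_complete_request_remote_work(struct work_struct *work)
	{
		struct request *rq = container_of(work, struct request, work);

		/* runs in process context on ctx->cpu, sleeping locks are fine */
		rq->q->softirq_done_fn(rq);
	}
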
diff --git a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
index 7add8b9..0491335 100644
--- a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 preempt_disable() and get_cpu() don't play well together with the sleeping
 locks the code tries to acquire later.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -363,7 +363,7 @@ static void blk_mq_ipi_complete_request(
+@@ -413,7 +413,7 @@ static void blk_mq_ipi_complete_request(
  		return;
  	}
  
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
-@@ -375,7 +375,7 @@ static void blk_mq_ipi_complete_request(
+@@ -425,7 +425,7 @@ static void blk_mq_ipi_complete_request(
  	} else {
  		rq->q->softirq_done_fn(rq);
  	}
@@ -31,8 +31,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	put_cpu_light();
  }
  
- static void __blk_mq_complete_request(struct request *rq)
-@@ -906,14 +906,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+ static void blk_mq_stat_add(struct request *rq)
+@@ -1143,14 +1143,14 @@ static void __blk_mq_delay_run_hw_queue(
  		return;
  
  	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -49,4 +49,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		put_cpu_light();
  	}
  
- 	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+ 	if (msecs == 0)
diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
index eb4d61d..caf666b 100644
--- a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
+++ b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 10:37:23 +0200
 Subject: block: mq: use cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There is a might_sleep() splat because get_cpu() disables preemption and
 we later grab a sleeping lock. As a workaround for this we use get_cpu_light().
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/block/blk-mq.h
 +++ b/block/blk-mq.h
-@@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -130,12 +130,12 @@ static inline struct blk_mq_ctx *__blk_m
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
index 77e4894..7ed89e4 100644
--- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: block: Shorten interrupt disabled regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Jun 2011 19:47:02 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Moving the blk_sched_flush_plug() call out of the interrupt/preempt
 disabled region in the scheduler allows us to replace
@@ -48,7 +48,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -3200,7 +3200,7 @@ static void queue_unplugged(struct reque
+@@ -3186,7 +3186,7 @@ static void queue_unplugged(struct reque
  		blk_run_queue_async(q);
  	else
  		__blk_run_queue(q);
@@ -57,7 +57,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  }
  
  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3248,7 +3248,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3234,7 +3234,6 @@ EXPORT_SYMBOL(blk_check_plugged);
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
@@ -65,7 +65,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
-@@ -3268,11 +3267,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3254,11 +3253,6 @@ void blk_flush_plug_list(struct blk_plug
  	q = NULL;
  	depth = 0;
  
@@ -77,7 +77,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
-@@ -3285,7 +3279,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3271,7 +3265,7 @@ void blk_flush_plug_list(struct blk_plug
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
@@ -86,7 +86,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
  		}
  
  		/*
-@@ -3312,8 +3306,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3298,8 +3292,6 @@ void blk_flush_plug_list(struct blk_plug
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);
diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch
index 87b029a..3ff5597 100644
--- a/debian/patches/features/all/rt/block-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: block: Use cpu_chill() for retry loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 20 Dec 2012 18:28:26 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Steven also observed a live lock when there was a
@@ -18,15 +18,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/block/blk-ioc.c
 +++ b/block/blk-ioc.c
-@@ -7,6 +7,7 @@
- #include <linux/bio.h>
+@@ -8,6 +8,7 @@
  #include <linux/blkdev.h>
  #include <linux/slab.h>
+ #include <linux/sched/task.h>
 +#include <linux/delay.h>
  
  #include "blk.h"
  
-@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s
+@@ -117,7 +118,7 @@ static void ioc_release_fn(struct work_s
  			spin_unlock(q->queue_lock);
  		} else {
  			spin_unlock_irqrestore(&ioc->lock, flags);
@@ -35,12 +35,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
  		}
  	}
-@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con
- 			spin_unlock(icq->q->queue_lock);
- 		} else {
- 			spin_unlock_irqrestore(&ioc->lock, flags);
--			cpu_relax();
-+			cpu_chill();
- 			goto retry;
+@@ -201,7 +202,7 @@ void put_io_context_active(struct io_con
+ 				spin_unlock(icq->q->queue_lock);
+ 			} else {
+ 				spin_unlock_irqrestore(&ioc->lock, flags);
+-				cpu_relax();
++				cpu_chill();
+ 				goto retry;
+ 			}
  		}
- 	}
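
The resulting retry-loop shape, abstracted from the hunks above (the progress-check helper is hypothetical):

	retry:
		spin_lock_irqsave(&ioc->lock, flags);
		if (!example_can_make_progress(ioc)) {	/* hypothetical check */
			spin_unlock_irqrestore(&ioc->lock, flags);
			/*
			 * cpu_chill() sleeps briefly on RT so a preempted lock
			 * holder can run; cpu_relax() would spin forever here.
			 */
			cpu_chill();
			goto retry;
		}
		spin_unlock_irqrestore(&ioc->lock, flags);
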
diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
index 91824ed..cedc8b4 100644
--- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
+++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code.
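
The introduced variants follow the obvious pattern; a sketch, with the exact macro set and config symbol taken as assumptions:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define BUG_ON_RT(c)		BUG_ON(c)
	# define BUG_ON_NONRT(c)	do { } while (0)
	# define WARN_ON_RT(c)		WARN_ON(c)
	# define WARN_ON_NONRT(c)	do { } while (0)
	#else
	# define BUG_ON_RT(c)		do { } while (0)
	# define BUG_ON_NONRT(c)	BUG_ON(c)
	# define WARN_ON_RT(c)		do { } while (0)
	# define WARN_ON_NONRT(c)	WARN_ON(c)
	#endif
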
 
diff --git a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index 4415145..d3d9d79 100644
--- a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 21 Jun 2014 10:09:48 +0200
 Subject: memcontrol: Prevent scheduling while atomic in cgroup code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 mm, memcg: make refill_stock() use get_cpu_light()
 
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1695,6 +1695,7 @@ struct memcg_stock_pcp {
+@@ -1685,6 +1685,7 @@ struct memcg_stock_pcp {
  #define FLUSHING_CACHED_CHARGE	0
  };
  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static DEFINE_MUTEX(percpu_charge_mutex);
  
  /**
-@@ -1717,7 +1718,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1707,7 +1708,7 @@ static bool consume_stock(struct mem_cgr
  	if (nr_pages > CHARGE_BATCH)
  		return ret;
  
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -1725,7 +1726,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1715,7 +1716,7 @@ static bool consume_stock(struct mem_cgr
  		ret = true;
  	}
  
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return ret;
  }
-@@ -1752,13 +1753,13 @@ static void drain_local_stock(struct wor
+@@ -1742,13 +1743,13 @@ static void drain_local_stock(struct wor
  	struct memcg_stock_pcp *stock;
  	unsigned long flags;
  
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -1770,7 +1771,7 @@ static void refill_stock(struct mem_cgro
+@@ -1760,7 +1761,7 @@ static void refill_stock(struct mem_cgro
  	struct memcg_stock_pcp *stock;
  	unsigned long flags;
  
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (stock->cached != memcg) { /* reset if necessary */
-@@ -1779,7 +1780,7 @@ static void refill_stock(struct mem_cgro
+@@ -1769,7 +1770,7 @@ static void refill_stock(struct mem_cgro
  	}
  	stock->nr_pages += nr_pages;
  
diff --git a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
index 5d19911..e02f2c8 100644
--- a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
+++ b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 15:52:24 +0100
 Subject: cgroups: use simple wait in css_release()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 To avoid:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
@@ -30,20 +30,20 @@ To avoid:
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  include/linux/cgroup-defs.h |    2 ++
- kernel/cgroup.c             |    9 +++++----
+ kernel/cgroup/cgroup.c      |    9 +++++----
  2 files changed, 7 insertions(+), 4 deletions(-)
 
 --- a/include/linux/cgroup-defs.h
 +++ b/include/linux/cgroup-defs.h
-@@ -16,6 +16,7 @@
- #include <linux/percpu-refcount.h>
+@@ -17,6 +17,7 @@
  #include <linux/percpu-rwsem.h>
  #include <linux/workqueue.h>
+ #include <linux/bpf-cgroup.h>
 +#include <linux/swork.h>
  
  #ifdef CONFIG_CGROUPS
  
-@@ -137,6 +138,7 @@ struct cgroup_subsys_state {
+@@ -139,6 +140,7 @@ struct cgroup_subsys_state {
  	/* percpu_ref killing and RCU release */
  	struct rcu_head rcu_head;
  	struct work_struct destroy_work;
@@ -51,9 +51,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  };
  
  /*
---- a/kernel/cgroup.c
-+++ b/kernel/cgroup.c
-@@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_h
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3895,10 +3895,10 @@ static void css_free_rcu_fn(struct rcu_h
  	queue_work(cgroup_destroy_wq, &css->destroy_work);
  }
  
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct cgroup_subsys *ss = css->ss;
  	struct cgroup *cgrp = css->cgroup;
  
-@@ -5087,8 +5087,8 @@ static void css_release(struct percpu_re
+@@ -3943,8 +3943,8 @@ static void css_release(struct percpu_re
  	struct cgroup_subsys_state *css =
  		container_of(ref, struct cgroup_subsys_state, refcnt);
  
@@ -77,11 +77,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5740,6 +5740,7 @@ static int __init cgroup_wq_init(void)
+@@ -4601,6 +4601,7 @@ static int __init cgroup_wq_init(void)
  	 */
  	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
  	BUG_ON(!cgroup_destroy_wq);
 +	BUG_ON(swork_get());
- 
- 	/*
- 	 * Used to destroy pidlists and separate to serve as flush domain.
+ 	return 0;
+ }
+ core_initcall(cgroup_wq_init);
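
The queueing side of the conversion falls outside the refreshed context; with the swork API used elsewhere in this series it is presumably (field and helper names assumed):

	/* in css_release(), replacing INIT_WORK() + queue_work(): */
	INIT_SWORK(&css->destroy_swork, css_release_work_fn);
	swork_queue(&css->destroy_swork);
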
diff --git a/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch b/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
new file mode 100644
index 0000000..aecfec8
--- /dev/null
+++ b/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
@@ -0,0 +1,167 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Tue, 30 May 2017 16:39:01 +0200
+Subject: char/random: don't print that the init is done
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+On RT we run into circular locking with pendingb_lock (workqueue),
+port_lock_key (uart) and the primary_crng (random):
+
+ ======================================================
+ [ INFO: possible circular locking dependency detected ]
+ -------------------------------------------------------
+ irq/4-serial/512 is trying to acquire lock:
+  ((pendingb_lock).lock){+.+...}, at: [<ffffffff8108d48d>] queue_work_on+0x5d/0x190
+
+ but task is already holding lock:
+  (&port_lock_key){+.+...}, at: [<ffffffff815b4bb6>] serial8250_handle_irq.part.27+0x16/0xb0
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #3 (&port_lock_key){+.+...}:
+        lock_acquire+0xac/0x240
+        rt_spin_lock+0x46/0x50
+        serial8250_console_write+0x211/0x220
+        univ8250_console_write+0x1c/0x20
+        console_unlock+0x563/0x5c0
+        vprintk_emit+0x277/0x320
+        vprintk_default+0x1a/0x20
+        vprintk_func+0x20/0x80
+        printk+0x3e/0x46
+        crng_fast_load+0xde/0xe0
+        add_interrupt_randomness+0x16c/0x1a0
+        irq_thread+0x15c/0x1e0
+        kthread+0x112/0x150
+        ret_from_fork+0x31/0x40
+
+ -> #2 (primary_crng.lock){+.+...}:
+        lock_acquire+0xac/0x240
+        rt_spin_lock+0x46/0x50
+        _extract_crng+0x39/0xa0
+        extract_crng+0x3a/0x40
+        get_random_u32+0x120/0x190
+        new_slab+0x1d6/0x7c0
+        ___slab_alloc+0x30b/0x6f0
+        __slab_alloc.isra.78+0x6c/0xc0
+        __kmalloc+0x254/0x3a0
+        pcpu_mem_zalloc+0x3a/0x70
+        percpu_init_late+0x4f/0x8a
+        start_kernel+0x1ec/0x3b8
+        x86_64_start_reservations+0x2a/0x2c
+        x86_64_start_kernel+0x13d/0x14c
+        verify_cpu+0x0/0xfc
+
+ -> #1 ((batched_entropy_u32_lock).lock){+.+...}:
+        lock_acquire+0xac/0x240
+        rt_spin_lock__no_mg+0x41/0x50
+        get_random_u32+0x64/0x190
+        new_slab+0x1d6/0x7c0
+        ___slab_alloc+0x30b/0x6f0
+        __slab_alloc.isra.78+0x6c/0xc0
+        kmem_cache_alloc+0x26a/0x370
+        __debug_object_init+0x325/0x460
+        debug_object_activate+0x11c/0x1f0
+        __queue_work+0x2c/0x770
+        queue_work_on+0x12a/0x190
+        serio_queue_event+0xd3/0x140
+        __serio_register_port+0x17e/0x1a0
+        i8042_probe+0x623/0x687
+        platform_drv_probe+0x36/0x90
+        driver_probe_device+0x1f8/0x2e0
+        __driver_attach+0x96/0xa0
+        bus_for_each_dev+0x5d/0x90
+        driver_attach+0x19/0x20
+        bus_add_driver+0x125/0x220
+        driver_register+0x5b/0xd0
+        __platform_driver_probe+0x5b/0x120
+        __platform_create_bundle+0xaa/0xd0
+        i8042_init+0x3f1/0x430
+        do_one_initcall+0x3e/0x180
+        kernel_init_freeable+0x212/0x295
+        kernel_init+0x9/0x100
+        ret_from_fork+0x31/0x40
+
+ -> #0 ((pendingb_lock).lock){+.+...}:
+        __lock_acquire+0x11b4/0x1320
+        lock_acquire+0xac/0x240
+        rt_spin_lock+0x46/0x50
+        queue_work_on+0x5d/0x190
+        tty_flip_buffer_push+0x26/0x30
+        serial8250_rx_chars+0x120/0x1f0
+        serial8250_handle_irq.part.27+0x58/0xb0
+        serial8250_default_handle_irq+0x4b/0x60
+        serial8250_interrupt+0x5f/0xd0
+        irq_forced_thread_fn+0x1e/0x70
+        irq_thread+0x137/0x1e0
+        kthread+0x112/0x150
+        ret_from_fork+0x31/0x40
+
+ other info that might help us debug this:
+
+ Chain exists of:
+   (pendingb_lock).lock --> primary_crng.lock --> &port_lock_key
+
+  Possible unsafe locking scenario:
+
+        CPU0                    CPU1
+        ----                    ----
+   lock(&port_lock_key);
+                                lock(primary_crng.lock);
+                                lock(&port_lock_key);
+   lock((pendingb_lock).lock);
+
+  *** DEADLOCK ***
+
+ 2 locks held by irq/4-serial/512:
+  #0:  (&i->lock){+.+...}, at: [<ffffffff815b0400>] serial8250_interrupt+0x30/0xd0
+  #1:  (&port_lock_key){+.+...}, at: [<ffffffff815b4bb6>] serial8250_handle_irq.part.27+0x16/0xb0
+
+ stack backtrace:
+ CPU: 4 PID: 512 Comm: irq/4-serial Not tainted 4.11.3-rt0+ #101
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.10.2-1 04/01/2014
+ Call Trace:
+  dump_stack+0x86/0xc1
+  print_circular_bug+0x1be/0x210
+  __lock_acquire+0x11b4/0x1320
+  lock_acquire+0xac/0x240
+  rt_spin_lock+0x46/0x50
+  queue_work_on+0x5d/0x190
+  tty_flip_buffer_push+0x26/0x30
+  serial8250_rx_chars+0x120/0x1f0
+  serial8250_handle_irq.part.27+0x58/0xb0
+  serial8250_default_handle_irq+0x4b/0x60
+  serial8250_interrupt+0x5f/0xd0
+  irq_forced_thread_fn+0x1e/0x70
+  irq_thread+0x137/0x1e0
+  kthread+0x112/0x150
+  ret_from_fork+0x31/0x40
+
+It should work if we delayed that printk until after dropping the lock,
+but we could also simply skip it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/char/random.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -802,7 +802,7 @@ static int crng_fast_load(const char *cp
+ 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ 		crng_init = 1;
+ 		wake_up_interruptible(&crng_init_wait);
+-		pr_notice("random: fast init done\n");
++		/* pr_notice("random: fast init done\n"); */
+ 	}
+ 	spin_unlock_irqrestore(&primary_crng.lock, flags);
+ 	return 1;
+@@ -840,7 +840,7 @@ static void crng_reseed(struct crng_stat
+ 		crng_init = 2;
+ 		process_random_ready_list();
+ 		wake_up_interruptible(&crng_init_wait);
+-		pr_notice("random: crng init done\n");
++		/* pr_notice("random: crng init done\n"); */
+ 	}
+ 	spin_unlock_irqrestore(&primary_crng.lock, flags);
+ }
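
For comparison, the delayed-printk alternative mentioned above would look roughly like this sketch (not what the patch does, which simply comments the messages out):

	bool init_done = false;

	spin_lock_irqsave(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		init_done = true;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (init_done)
		pr_notice("random: fast init done\n");
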
diff --git a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
index 4ec8809..d47db31 100644
--- a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
+++ b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
@@ -1,7 +1,7 @@
 From: Alexandre Belloni <alexandre.belloni at free-electrons.com>
 Date: Thu, 17 Mar 2016 21:09:43 +0100
 Subject: [PATCH] clockevents/drivers/timer-atmel-pit: fix double free_irq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 clockevents_exchange_device() changes the state from detached to shutdown
 and so at that point the IRQ has not yet been requested.
diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
index 419cab1..11ccd7c 100644
--- a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
+++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Mon, 8 Mar 2010 18:57:04 +0100
 Subject: clocksource: TCLIB: Allow higher clock rates for clock events
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 By default the TCLIB uses the 32KiHz base clock rate for clock events.
 Add a compile-time selection to allow a higher clock resolution.
diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
index 1f7fe06..5d86cb7 100644
--- a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
+++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
@@ -1,7 +1,7 @@
 Subject: completion: Use simple wait queues
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 11 Jan 2013 11:23:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Completions have no long-lasting callbacks and therefore do not need
 the complex waitqueue variant. Use simple waitqueues, which reduces the
@@ -15,17 +15,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  include/linux/completion.h                          |    9 ++---
  include/linux/suspend.h                             |    6 +++
  include/linux/swait.h                               |    1 
- include/linux/uprobes.h                             |    1 
  kernel/power/hibernate.c                            |    7 ++++
  kernel/power/suspend.c                              |    5 +++
  kernel/sched/completion.c                           |   32 ++++++++++----------
  kernel/sched/core.c                                 |   10 +++++-
  kernel/sched/swait.c                                |   20 ++++++++++++
- 12 files changed, 72 insertions(+), 27 deletions(-)
+ 11 files changed, 71 insertions(+), 27 deletions(-)
 
 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
 +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
-@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez
+@@ -696,7 +696,7 @@ static void ezusb_req_ctx_wait(struct ez
  			while (!ctx->done.done && msecs--)
  				udelay(1000);
  		} else {
@@ -36,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		break;
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1594,7 +1594,7 @@ static void ffs_data_put(struct ffs_data
  		pr_info("%s(): freeing\n", __func__);
  		ffs_data_clear(ffs);
  		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -47,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
 --- a/drivers/usb/gadget/legacy/inode.c
 +++ b/drivers/usb/gadget/legacy/inode.c
-@@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf
  	spin_unlock_irq (&epdata->dev->lock);
  
  	if (likely (value == 0)) {
@@ -56,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		if (value != 0) {
  			spin_lock_irq (&epdata->dev->lock);
  			if (likely (epdata->ep != NULL)) {
-@@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf
  				usb_ep_dequeue (epdata->ep, epdata->req);
  				spin_unlock_irq (&epdata->dev->lock);
  
@@ -125,19 +124,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
  extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
---- a/include/linux/uprobes.h
-+++ b/include/linux/uprobes.h
-@@ -27,6 +27,7 @@
- #include <linux/errno.h>
- #include <linux/rbtree.h>
- #include <linux/types.h>
-+#include <linux/wait.h>
- 
- struct vm_area_struct;
- struct mm_struct;
 --- a/kernel/power/hibernate.c
 +++ b/kernel/power/hibernate.c
-@@ -683,6 +683,10 @@ static int load_image_and_restore(void)
+@@ -679,6 +679,10 @@ static int load_image_and_restore(void)
  	return error;
  }
  
@@ -148,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * hibernate - Carry out system hibernation, including saving the image.
   */
-@@ -696,6 +700,8 @@ int hibernate(void)
+@@ -692,6 +696,8 @@ int hibernate(void)
  		return -EPERM;
  	}
  
@@ -157,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	lock_system_sleep();
  	/* The snapshot device should not be opened while we're running */
  	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -773,6 +779,7 @@ int hibernate(void)
+@@ -769,6 +775,7 @@ int hibernate(void)
  	atomic_inc(&snapshot_device_available);
   Unlock:
  	unlock_system_sleep();
@@ -167,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -531,6 +531,8 @@ static int enter_state(suspend_state_t s
+@@ -546,6 +546,8 @@ static int enter_state(suspend_state_t s
  	return error;
  }
  
@@ -176,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * pm_suspend - Externally visible function for suspending the system.
   * @state: System sleep state to enter.
-@@ -545,6 +547,8 @@ int pm_suspend(suspend_state_t state)
+@@ -560,6 +562,8 @@ int pm_suspend(suspend_state_t state)
  	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
  		return -EINVAL;
  
@@ -185,7 +174,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	error = enter_state(state);
  	if (error) {
  		suspend_stats.fail++;
-@@ -552,6 +556,7 @@ int pm_suspend(suspend_state_t state)
+@@ -567,6 +571,7 @@ int pm_suspend(suspend_state_t state)
  	} else {
  		suspend_stats.success++;
  	}
@@ -195,13 +184,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  EXPORT_SYMBOL(pm_suspend);
 --- a/kernel/sched/completion.c
 +++ b/kernel/sched/completion.c
-@@ -30,10 +30,10 @@ void complete(struct completion *x)
+@@ -31,11 +31,11 @@ void complete(struct completion *x)
  {
  	unsigned long flags;
  
 -	spin_lock_irqsave(&x->wait.lock, flags);
 +	raw_spin_lock_irqsave(&x->wait.lock, flags);
- 	x->done++;
+ 	if (x->done != UINT_MAX)
+ 		x->done++;
 -	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
 -	spin_unlock_irqrestore(&x->wait.lock, flags);
 +	swake_up_locked(&x->wait);
@@ -209,13 +199,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(complete);
  
-@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
+@@ -52,10 +52,10 @@ void complete_all(struct completion *x)
  {
  	unsigned long flags;
  
 -	spin_lock_irqsave(&x->wait.lock, flags);
 +	raw_spin_lock_irqsave(&x->wait.lock, flags);
- 	x->done += UINT_MAX/2;
+ 	x->done = UINT_MAX;
 -	__wake_up_locked(&x->wait, TASK_NORMAL, 0);
 -	spin_unlock_irqrestore(&x->wait.lock, flags);
 +	swake_up_all_locked(&x->wait);
@@ -223,7 +213,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(complete_all);
  
-@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
+@@ -64,20 +64,20 @@ do_wait_for_common(struct completion *x,
  		   long (*action)(long), long timeout, int state)
  {
  	if (!x->done) {
@@ -249,7 +239,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		if (!x->done)
  			return timeout;
  	}
-@@ -89,9 +89,9 @@ static inline long __sched
+@@ -92,9 +92,9 @@ static inline long __sched
  {
  	might_sleep();
  
@@ -261,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return timeout;
  }
  
-@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct comp
+@@ -280,12 +280,12 @@ bool try_wait_for_completion(struct comp
  	if (!READ_ONCE(x->done))
  		return 0;
  
@@ -269,14 +259,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	raw_spin_lock_irqsave(&x->wait.lock, flags);
  	if (!x->done)
  		ret = 0;
- 	else
+ 	else if (x->done != UINT_MAX)
  		x->done--;
 -	spin_unlock_irqrestore(&x->wait.lock, flags);
 +	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL(try_wait_for_completion);
-@@ -311,7 +311,7 @@ bool completion_done(struct completion *
+@@ -314,7 +314,7 @@ bool completion_done(struct completion *
  	 * after it's acquired the lock.
  	 */
  	smp_rmb();
@@ -287,9 +277,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  EXPORT_SYMBOL(completion_done);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3323,7 +3323,10 @@ void migrate_disable(void)
+@@ -7476,7 +7476,10 @@ void migrate_disable(void)
+ 		return;
  	}
- 
  #ifdef CONFIG_SCHED_DEBUG
 -	WARN_ON_ONCE(p->migrate_disable_atomic);
 +	if (unlikely(p->migrate_disable_atomic)) {
@@ -299,7 +289,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  	if (p->migrate_disable) {
-@@ -3350,7 +3353,10 @@ void migrate_enable(void)
+@@ -7509,7 +7512,10 @@ void migrate_enable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
@@ -309,12 +299,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		WARN_ON_ONCE(1);
 +	}
  #endif
- 	WARN_ON_ONCE(p->migrate_disable <= 0);
  
+ 	WARN_ON_ONCE(p->migrate_disable <= 0);
 --- a/kernel/sched/swait.c
 +++ b/kernel/sched/swait.c
 @@ -1,5 +1,6 @@
- #include <linux/sched.h>
+ #include <linux/sched/signal.h>
  #include <linux/swait.h>
 +#include <linux/suspend.h>
  
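
The conversion above boils down to: take the completion's lock as a raw
spinlock and wake simple-waitqueue waiters while holding it. A minimal
sketch of the resulting complete()/complete_all() pair, assuming the
swake_up_all_locked() helper this patch adds to kernel/sched/swait.c:

    void complete(struct completion *x)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&x->wait.lock, flags);
        if (x->done != UINT_MAX)
            x->done++;
        swake_up_locked(&x->wait);          /* wake one waiter, O(1) */
        raw_spin_unlock_irqrestore(&x->wait.lock, flags);
    }

    void complete_all(struct completion *x)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&x->wait.lock, flags);
        x->done = UINT_MAX;
        swake_up_all_locked(&x->wait);      /* drain every waiter */
        raw_spin_unlock_irqrestore(&x->wait.lock, flags);
    }
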
diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
index b02e938..2ea1b8d 100644
--- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
 Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT does not increment preempt count when a 'sleeping' spinlock is
 locked. Update PREEMPT_LOCK_OFFSET for that case.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
-@@ -91,7 +91,11 @@
+@@ -117,7 +117,11 @@
  /*
   * The preempt_count offset after spin_lock()
   */
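
The four added lines in <linux/preempt.h> fall outside the visible
context; going by the description they presumably collapse the offset to
zero when sleeping spinlocks are in use, roughly:

    /* sketch only; the CONFIG_PREEMPT_RT_FULL guard is assumed */
    #if !defined(CONFIG_PREEMPT_RT_FULL)
    # define PREEMPT_LOCK_OFFSET    PREEMPT_DISABLE_OFFSET
    #else
    # define PREEMPT_LOCK_OFFSET    0    /* the lock leaves preempt_count alone */
    #endif
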
diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
index 2329dde..b07c730 100644
--- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
+++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Take RT softirq semantics into account in cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Jul 2011 09:56:44 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The softirq semantics work differently on -RT. There is no SOFTIRQ_MASK in
 the preemption counter which leads to the BUG_ON() statement in
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3373,12 +3373,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1514,12 +1514,16 @@ extern int __cond_resched_lock(spinlock_
  	__cond_resched_lock(lock);				\
  })
  
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -5092,6 +5092,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5096,6 +5096,7 @@ int __cond_resched_lock(spinlock_t *lock
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -5105,6 +5106,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5109,6 +5110,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
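
Since BUG_ON(!in_softirq()) can never hold on -RT, the patch compiles
__cond_resched_softirq() out entirely; the cond_resched_softirq()
wrapper in <linux/sched.h> then presumably degrades to a plain
cond_resched(), along these lines:

    #ifndef CONFIG_PREEMPT_RT_FULL
    extern int __cond_resched_softirq(void);
    # define cond_resched_softirq() ({                                  \
            ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
            __cond_resched_softirq();                                   \
    })
    #else
    # define cond_resched_softirq()  cond_resched()
    #endif
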
diff --git a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
index f11c316..10b57f4 100644
--- a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
+++ b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 16 Oct 2016 05:11:54 +0200
 Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock
  on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
 |in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep
diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index 73d6865..953bcba 100644
--- a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 5 Dec 2013 09:16:52 -0500
 Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The patch:
 
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -260,6 +260,14 @@ struct hotplug_pcp {
+@@ -255,6 +255,14 @@ struct hotplug_pcp {
  	int grab_lock;
  	struct completion synced;
  #ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 8253bae..67ed26d 100644
--- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 02 Mar 2012 10:36:57 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Tasks can block on hotplug.lock in pin_current_cpu(), but their state
 might be != RUNNING. So the mutex wakeup will set the state
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -210,10 +210,16 @@ static int cpu_hotplug_disabled;
+@@ -205,10 +205,16 @@ static int cpu_hotplug_disabled;
  
  static struct {
  	struct task_struct *active_writer;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -226,12 +232,24 @@ static struct {
+@@ -221,12 +227,24 @@ static struct {
  } cpu_hotplug = {
  	.active_writer = NULL,
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -268,8 +286,8 @@ void pin_current_cpu(void)
+@@ -263,8 +281,8 @@ void pin_current_cpu(void)
  		return;
  	}
  	preempt_enable();
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_disable();
  	goto retry;
  }
-@@ -342,9 +360,9 @@ void get_online_cpus(void)
+@@ -337,9 +355,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -397,11 +415,11 @@ void cpu_hotplug_begin(void)
+@@ -392,11 +410,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -410,7 +428,7 @@ void cpu_hotplug_begin(void)
+@@ -405,7 +423,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
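
The elided '+' lines introduce the lock itself plus the accessor macros
that the later hunks call. A sketch of the shape; the member split is
taken from the hunk above, while the hotplug_lock()/hotplug_unlock()
names and the exact rt_spin_lock variant are assumptions:

    static struct {
        struct task_struct  *active_writer;
    #ifdef CONFIG_PREEMPT_RT_FULL
        /* an RT "sleeping" spinlock preserves the waiter's task state */
        spinlock_t          lock;
    #else
        struct mutex        lock;
    #endif
        wait_queue_head_t   wq;
    } cpu_hotplug;

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define hotplug_lock()    rt_spin_lock(&cpu_hotplug.lock)
    # define hotplug_unlock()  rt_spin_unlock(&cpu_hotplug.lock)
    #else
    # define hotplug_lock()    mutex_lock(&cpu_hotplug.lock)
    # define hotplug_unlock()  mutex_unlock(&cpu_hotplug.lock)
    #endif
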
diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
index 0c6304e..073cc5d 100644
--- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
+++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Mon, 16 Jul 2012 08:07:43 +0000
 Subject: cpu/rt: Rework cpu down for PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Bringing a CPU down is a pain with the PREEMPT_RT kernel because
 tasks can be preempted in many more places than in non-RT. In
@@ -57,18 +57,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2480,6 +2480,10 @@ extern void do_set_cpus_allowed(struct t
- 
- extern int set_cpus_allowed_ptr(struct task_struct *p,
- 				const struct cpumask *new_mask);
+@@ -1346,6 +1346,10 @@ extern int task_can_attach(struct task_s
+ #ifdef CONFIG_SMP
+ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+ extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
 +int migrate_me(void);
 +void tell_sched_cpu_down_begin(int cpu);
 +void tell_sched_cpu_down_done(int cpu);
 +
  #else
- static inline void do_set_cpus_allowed(struct task_struct *p,
- 				      const struct cpumask *new_mask)
-@@ -2492,6 +2496,9 @@ static inline int set_cpus_allowed_ptr(s
+ static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+@@ -1356,6 +1360,9 @@ static inline int set_cpus_allowed_ptr(s
  		return -EINVAL;
  	return 0;
  }
@@ -77,10 +77,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +static inline void tell_sched_cpu_down_done(int cpu) { }
  #endif
  
- #ifdef CONFIG_NO_HZ_COMMON
+ #ifndef cpu_relax_yield
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -210,16 +210,10 @@ static int cpu_hotplug_disabled;
+@@ -205,16 +205,10 @@ static int cpu_hotplug_disabled;
  
  static struct {
  	struct task_struct *active_writer;
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -232,24 +226,12 @@ static struct {
+@@ -227,24 +221,12 @@ static struct {
  } cpu_hotplug = {
  	.active_writer = NULL,
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -257,12 +239,42 @@ static struct {
+@@ -252,12 +234,42 @@ static struct {
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
@@ -165,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
  
  /**
-@@ -276,18 +288,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -271,18 +283,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
  void pin_current_cpu(void)
  {
  	struct hotplug_pcp *hp;
@@ -209,7 +209,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_disable();
  	goto retry;
  }
-@@ -308,26 +341,84 @@ void unpin_current_cpu(void)
+@@ -303,26 +336,84 @@ void unpin_current_cpu(void)
  		wake_up_process(hp->unplug);
  }
  
@@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Start the sync_unplug_thread on the target cpu and wait for it to
   * complete.
-@@ -335,23 +426,83 @@ static int sync_unplug_thread(void *data
+@@ -330,23 +421,83 @@ static int sync_unplug_thread(void *data
  static int cpu_unplug_begin(unsigned int cpu)
  {
  	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -392,7 +392,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  void get_online_cpus(void)
-@@ -360,9 +511,9 @@ void get_online_cpus(void)
+@@ -355,9 +506,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -404,7 +404,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -415,11 +566,11 @@ void cpu_hotplug_begin(void)
+@@ -410,11 +561,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -418,7 +418,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -428,7 +579,7 @@ void cpu_hotplug_begin(void)
+@@ -423,7 +574,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
@@ -427,7 +427,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	cpuhp_lock_release();
  }
  
-@@ -907,6 +1058,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -806,6 +957,9 @@ static int takedown_cpu(unsigned int cpu
  	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  	smpboot_park_threads(cpu);
  
@@ -439,8 +439,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	 * interrupt affinities.
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_str
- 		set_curr_task(rq, p);
+@@ -1111,6 +1111,84 @@ void do_set_cpus_allowed(struct task_str
+ 	__do_set_cpus_allowed_tail(p, new_mask);
  }
  
 +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
@@ -480,7 +480,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	struct task_struct *p = current;
 +	struct migration_arg arg;
 +	struct cpumask *cpumask;
-+	struct cpumask *mask;
++	const struct cpumask *mask;
 +	unsigned int dest_cpu;
 +	struct rq_flags rf;
 +	struct rq *rq;
@@ -496,7 +496,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	rq = task_rq_lock(p, &rf);
 +
 +	cpumask = this_cpu_ptr(&sched_cpumasks);
-+	mask = &p->cpus_allowed;
++	mask = p->cpus_ptr;
 +
 +	cpumask_andnot(cpumask, mask, &sched_down_cpumask);
 +
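
Read together, the added code gives tasks a way to hop off a dying CPU
before they block. A sketch of the migrate_me() flow pieced together
from the fragments above; the destination selection and the
stop-machine hand-off at the end are assumptions:

    int migrate_me(void)
    {
        struct task_struct *p = current;
        struct migration_arg arg;
        struct cpumask *cpumask;
        const struct cpumask *mask;
        unsigned int dest_cpu;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        cpumask = this_cpu_ptr(&sched_cpumasks);
        mask = p->cpus_ptr;

        /* candidates = task affinity minus CPUs on their way down */
        cpumask_andnot(cpumask, mask, &sched_down_cpumask);

        if (cpumask_empty(cpumask)) {
            /* nowhere to go; stay put and let the hotplug sync handle us */
            task_rq_unlock(rq, p, &rf);
            return 0;
        }

        dest_cpu = cpumask_any_and(cpumask, cpu_online_mask);
        arg = (struct migration_arg){ .task = p, .dest_cpu = dest_cpu };
        task_rq_unlock(rq, p, &rf);

        stop_one_cpu(raw_smp_processor_id(), migration_cpu_stop, &arg);
        return 1;
    }
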
diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index a4c164f..2858571 100644
--- a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 4 Mar 2014 12:28:32 -0500
 Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We hit another bug that was caused by switching cpu_chill() from
 msleep() to hrtimer_nanosleep().
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1649,12 +1649,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1622,12 +1622,13 @@ void hrtimer_init_sleeper(struct hrtimer
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
  
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		hrtimer_start_expires(&t->timer, mode);
  
  		if (likely(t->task))
-@@ -1696,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1669,7 +1670,8 @@ long __sched hrtimer_nanosleep_restart(s
  				HRTIMER_MODE_ABS);
  	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
  
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	rmtp = restart->nanosleep.rmtp;
-@@ -1713,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1686,8 +1688,10 @@ long __sched hrtimer_nanosleep_restart(s
  	return ret;
  }
  
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct restart_block *restart;
  	struct hrtimer_sleeper t;
-@@ -1727,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1700,7 +1704,7 @@ long hrtimer_nanosleep(struct timespec *
  
  	hrtimer_init_on_stack(&t.timer, clockid, mode);
  	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	/* Absolute timers do not update the rmtp value and restart: */
-@@ -1754,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1727,6 +1731,12 @@ long hrtimer_nanosleep(struct timespec *
  	return ret;
  }
  
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
  		struct timespec __user *, rmtp)
  {
-@@ -1780,7 +1790,8 @@ void cpu_chill(void)
+@@ -1753,7 +1763,8 @@ void cpu_chill(void)
  	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
  
  	current->flags |= PF_NOFREEZE;
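
With the state argument threaded through, cpu_chill() can finally sleep
in TASK_UNINTERRUPTIBLE so signals and the freezer leave it alone.
Reassembled from the hunks above; the exact __hrtimer_nanosleep()
signature is inferred, not quoted:

    void cpu_chill(void)
    {
        struct timespec tu = {
            .tv_nsec = NSEC_PER_MSEC,
        };
        unsigned int freeze_flag = current->flags & PF_NOFREEZE;

        current->flags |= PF_NOFREEZE;
        __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
                            TASK_UNINTERRUPTIBLE);
        if (!freeze_flag)
            current->flags &= ~PF_NOFREEZE;
    }
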
diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
index 804d04c..0607eac 100644
--- a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
+++ b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
@@ -1,7 +1,7 @@
 From:	Tiejun Chen <tiejun.chen at windriver.com>
 Subject: cpu_down: move migrate_enable() back
 Date:	Thu, 7 Nov 2013 10:06:07 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
 use migrate_enable()/migrate_disable() to replace that combination
@@ -35,7 +35,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen at windriver.com>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1195,6 +1195,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1084,6 +1084,7 @@ static int __ref _cpu_down(unsigned int
  		goto restore_cpus;
  	}
  
@@ -43,11 +43,11 @@ Signed-off-by: Tiejun Chen <tiejun.chen at windriver.com>
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
  	if (ret) {
-@@ -1242,7 +1243,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1130,7 +1131,6 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
 -	migrate_enable();
- 	/* This post dead nonsense must die */
- 	if (!ret && hasdied)
- 		cpu_notify_nofail(CPU_POST_DEAD, cpu);
+ restore_cpus:
+ 	set_cpus_allowed_ptr(current, cpumask_org);
+ 	free_cpumask_var(cpumask_org);
diff --git a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index 8f7ced4..7d6c7cd 100644
--- a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 9 Apr 2015 15:23:01 +0200
 Subject: cpufreq: drop K8's driver from being selected
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Ralf posted a picture of a backtrace from
 
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/cpufreq/Kconfig.x86
 +++ b/drivers/cpufreq/Kconfig.x86
-@@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
+@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI
  
  config X86_POWERNOW_K8
  	tristate "AMD Opteron/Athlon64 PowerNow!"
diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
index bf3e8f3..883b3e3 100644
--- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
+++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 14 Dec 2011 01:03:49 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There are "valid" GFP_ATOMIC allocations such as
 
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -900,7 +900,7 @@ config IOMMU_HELPER
+@@ -908,7 +908,7 @@ config IOMMU_HELPER
  config MAXSMP
  	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
  	depends on X86_64 && SMP && DEBUG_KERNEL
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	  If unsure, say N.
 --- a/lib/Kconfig
 +++ b/lib/Kconfig
-@@ -400,6 +400,7 @@ config CHECK_SIGNATURE
+@@ -409,6 +409,7 @@ config CHECK_SIGNATURE
  
  config CPUMASK_OFFSTACK
  	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 54dcfcd..5a603ac 100644
--- a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Sun, 8 Jan 2017 09:32:25 +0100
 Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The two commits below add up to a cpuset might_sleep() splat for RT:
 
@@ -46,12 +46,12 @@ Cc: stable-rt at vger.kernel.org
 Signed-off-by: Mike Galbraith <efault at gmx.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- kernel/cpuset.c |   66 ++++++++++++++++++++++++++++----------------------------
+ kernel/cgroup/cpuset.c |   66 ++++++++++++++++++++++++-------------------------
  1 file changed, 33 insertions(+), 33 deletions(-)
 
---- a/kernel/cpuset.c
-+++ b/kernel/cpuset.c
-@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -286,7 +286,7 @@ static struct cpuset top_cpuset = {
   */
  
  static DEFINE_MUTEX(cpuset_mutex);
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static struct workqueue_struct *cpuset_migrate_mm_wq;
  
-@@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct
+@@ -909,9 +909,9 @@ static void update_cpumasks_hier(struct
  			continue;
  		rcu_read_unlock();
  
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
  			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset
+@@ -976,9 +976,9 @@ static int update_cpumask(struct cpuset
  	if (retval < 0)
  		return retval;
  
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* use trialcs->cpus_allowed as a temp variable */
  	update_cpumasks_hier(cs, trialcs->cpus_allowed);
-@@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct
+@@ -1178,9 +1178,9 @@ static void update_nodemasks_hier(struct
  			continue;
  		rcu_read_unlock();
  
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
  			!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset
+@@ -1248,9 +1248,9 @@ static int update_nodemask(struct cpuset
  	if (retval < 0)
  		goto done;
  
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* use trialcs->mems_allowed as a temp variable */
  	update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t
+@@ -1341,9 +1341,9 @@ static int update_flag(cpuset_flagbits_t
  	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
  			|| (is_spread_page(cs) != is_spread_page(trialcs)));
  
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
  		rebuild_sched_domains_locked();
-@@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct
+@@ -1758,7 +1758,7 @@ static int cpuset_common_seq_show(struct
  	cpuset_filetype_t type = seq_cft(sf)->private;
  	int ret = 0;
  
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	switch (type) {
  	case FILE_CPULIST:
-@@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct
+@@ -1777,7 +1777,7 @@ static int cpuset_common_seq_show(struct
  		ret = -EINVAL;
  	}
  
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return ret;
  }
  
-@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgro
+@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgro
  
  	cpuset_inc();
  
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
  		goto out_unlock;
-@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgro
+@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgro
  	}
  	rcu_read_unlock();
  
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  out_unlock:
  	mutex_unlock(&cpuset_mutex);
  	return 0;
-@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgrou
+@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgrou
  static void cpuset_bind(struct cgroup_subsys_state *root_css)
  {
  	mutex_lock(&cpuset_mutex);
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
  		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_su
+@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_su
  		top_cpuset.mems_allowed = top_cpuset.effective_mems;
  	}
  
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	mutex_unlock(&cpuset_mutex);
  }
  
-@@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuse
+@@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuse
  {
  	bool is_empty;
  
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
  	if (nodes_empty(*new_mems))
  		*new_mems = parent_cs(cs)->effective_mems;
  
@@ -214,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (cpus_updated)
  		update_tasks_cpumask(cs);
-@@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct
+@@ -2310,21 +2310,21 @@ static void cpuset_hotplug_workfn(struct
  
  	/* synchronize cpus_allowed to cpu_active_mask */
  	if (cpus_updated) {
@@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		update_tasks_nodemask(&top_cpuset);
  	}
  
-@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_str
+@@ -2422,11 +2422,11 @@ void cpuset_cpus_allowed(struct task_str
  {
  	unsigned long flags;
  
@@ -254,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct ta
+@@ -2474,11 +2474,11 @@ nodemask_t cpuset_mems_allowed(struct ta
  	nodemask_t mask;
  	unsigned long flags;
  
@@ -268,7 +268,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return mask;
  }
-@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp
+@@ -2570,14 +2570,14 @@ bool __cpuset_node_allowed(int node, gfp
  		return true;
  
  	/* Not hardwall and node outside mems_allowed: scan up cpusets */
diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 59a7d86..1778081 100644
--- a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Feb 2014 17:24:04 +0100
 Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Don Estabrook reported
 | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (walk.nbytes) {
  		glue_ctr_crypt_final_128bit(
  			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct c
+@@ -382,7 +381,7 @@ int glue_xts_crypt_128bit(const struct c
  			  void *tweak_ctx, void *crypt_ctx)
  {
  	const unsigned int bsize = 128 / 8;
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct c
+@@ -395,21 +394,21 @@ int glue_xts_crypt_128bit(const struct c
  
  	/* set minimum length to bsize, for tweak_fn */
  	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch
index f507ccc..f46b6df 100644
--- a/debian/patches/features/all/rt/debugobjects-rt.patch
+++ b/debian/patches/features/all/rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
 Subject: debugobjects: Make RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Avoid filling the pool / allocating memory with irqs off.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/lib/debugobjects.c
 +++ b/lib/debugobjects.c
-@@ -308,7 +308,10 @@ static void
+@@ -334,7 +334,10 @@ static void
  	struct debug_obj *obj;
  	unsigned long flags;
  
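
The three added lines sit outside the visible context; going by the
description, the refill of the object pool is presumably skipped when
the allocation could not sleep, something like:

    /* sketch: gate fill_pool() on a preemptible, irqs-on context */
    #ifdef CONFIG_PREEMPT_RT_FULL
        if (preempt_count() == 0 && !irqs_disabled())
    #endif
            fill_pool();
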
diff --git a/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch b/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch
new file mode 100644
index 0000000..81dc987
--- /dev/null
+++ b/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch
@@ -0,0 +1,82 @@
+From 2c887ccff27de53f76fbdedc0afea9fa3be3ea2f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Sat, 20 May 2017 12:32:23 +0200
+Subject: [PATCH] delayacct: use raw_spinlocks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+try_to_wake_up() might invoke delayacct_blkio_end() while holding the
+pi_lock. The lock is only held for a short amount of time so it should
+be safe to make it raw.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/delayacct.h |    2 +-
+ kernel/delayacct.c        |   14 +++++++-------
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/include/linux/delayacct.h
++++ b/include/linux/delayacct.h
+@@ -29,7 +29,7 @@
+ 
+ #ifdef CONFIG_TASK_DELAY_ACCT
+ struct task_delay_info {
+-	spinlock_t	lock;
++	raw_spinlock_t	lock;
+ 	unsigned int	flags;	/* Private per-task flags */
+ 
+ 	/* For each stat XXX, add following, aligned appropriately
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -44,7 +44,7 @@ void __delayacct_tsk_init(struct task_st
+ {
+ 	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
+ 	if (tsk->delays)
+-		spin_lock_init(&tsk->delays->lock);
++		raw_spin_lock_init(&tsk->delays->lock);
+ }
+ 
+ /*
+@@ -57,10 +57,10 @@ static void delayacct_end(u64 *start, u6
+ 	unsigned long flags;
+ 
+ 	if (ns > 0) {
+-		spin_lock_irqsave(&current->delays->lock, flags);
++		raw_spin_lock_irqsave(&current->delays->lock, flags);
+ 		*total += ns;
+ 		(*count)++;
+-		spin_unlock_irqrestore(&current->delays->lock, flags);
++		raw_spin_unlock_irqrestore(&current->delays->lock, flags);
+ 	}
+ }
+ 
+@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats
+ 
+ 	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
+ 
+-	spin_lock_irqsave(&tsk->delays->lock, flags);
++	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
+ 	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
+ 	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
+ 	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
+@@ -129,7 +129,7 @@ int __delayacct_add_tsk(struct taskstats
+ 	d->blkio_count += tsk->delays->blkio_count;
+ 	d->swapin_count += tsk->delays->swapin_count;
+ 	d->freepages_count += tsk->delays->freepages_count;
+-	spin_unlock_irqrestore(&tsk->delays->lock, flags);
++	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -139,10 +139,10 @@ int __delayacct_add_tsk(struct taskstats
+ 	__u64 ret;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&tsk->delays->lock, flags);
++	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
+ 	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
+ 				tsk->delays->swapin_delay);
+-	spin_unlock_irqrestore(&tsk->delays->lock, flags);
++	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
+ 	return ret;
+ }
+ 
diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch
index 88ba319..31c7fb8 100644
--- a/debian/patches/features/all/rt/dm-make-rt-aware.patch
+++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: dm: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 14 Nov 2011 23:06:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
 interrupts legitimately enabled here as we can't deadlock against the
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/md/dm-rq.c
 +++ b/drivers/md/dm-rq.c
-@@ -842,7 +842,7 @@ static void dm_old_request_fn(struct req
+@@ -667,7 +667,7 @@ static void dm_old_request_fn(struct req
  		/* Establish tio->ti before queuing work (map_tio_request) */
  		tio->ti = ti;
  		kthread_queue_work(&md->kworker, &tio->work);
diff --git a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index f373ca8..fbe2009 100644
--- a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Thu, 31 Mar 2016 04:08:28 +0200
 Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
  for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 They're nondeterministic, and lead to ___might_sleep() splats in -rt.
 OTOH, they're a lot less wasteful than an rtmutex per page.
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/block/zram/zram_drv.c
 +++ b/drivers/block/zram/zram_drv.c
-@@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc
+@@ -461,6 +461,8 @@ static struct zram_meta *zram_meta_alloc
  		goto out_error;
  	}
  
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return meta;
  
  out_error:
-@@ -576,12 +578,12 @@ static int zram_decompress_page(struct z
+@@ -511,12 +513,12 @@ static int zram_decompress_page(struct z
  	unsigned long handle;
  	unsigned int size;
  
@@ -34,13 +34,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	handle = meta->table[index].handle;
  	size = zram_get_obj_size(meta, index);
  
- 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ 	if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
 -		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 +		zram_unlock_table(&meta->table[index]);
- 		memset(mem, 0, PAGE_SIZE);
+ 		zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
  		return 0;
  	}
-@@ -596,7 +598,7 @@ static int zram_decompress_page(struct z
+@@ -531,7 +533,7 @@ static int zram_decompress_page(struct z
  		zcomp_stream_put(zram->comp);
  	}
  	zs_unmap_object(meta->mem_pool, handle);
@@ -49,17 +49,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* Should NEVER happen. Return bio error if it does. */
  	if (unlikely(ret)) {
-@@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *z
+@@ -551,14 +553,14 @@ static int zram_bvec_read(struct zram *z
  	struct zram_meta *meta = zram->meta;
  	page = bvec->bv_page;
  
 -	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 +	zram_lock_table(&meta->table[index]);
  	if (unlikely(!meta->table[index].handle) ||
- 			zram_test_flag(meta, index, ZRAM_ZERO)) {
+ 			zram_test_flag(meta, index, ZRAM_SAME)) {
 -		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 +		zram_unlock_table(&meta->table[index]);
- 		handle_zero_page(bvec);
+ 		handle_same_page(bvec, meta->table[index].element);
  		return 0;
  	}
 -	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -67,20 +67,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (is_partial_io(bvec))
  		/* Use  a temporary buffer to decompress the page */
-@@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *
+@@ -636,11 +638,11 @@ static int zram_bvec_write(struct zram *
  		if (user_mem)
  			kunmap_atomic(user_mem);
  		/* Free memory associated with this sector now. */
 -		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 +		zram_lock_table(&meta->table[index]);
  		zram_free_page(zram, index);
- 		zram_set_flag(meta, index, ZRAM_ZERO);
+ 		zram_set_flag(meta, index, ZRAM_SAME);
+ 		zram_set_element(meta, index, element);
 -		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 +		zram_unlock_table(&meta->table[index]);
  
- 		atomic64_inc(&zram->stats.zero_pages);
+ 		atomic64_inc(&zram->stats.same_pages);
  		ret = 0;
-@@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *
+@@ -731,12 +733,12 @@ static int zram_bvec_write(struct zram *
  	 * Free memory associated with this sector
  	 * before overwriting unused sectors.
  	 */
@@ -95,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* Update stats */
  	atomic64_add(clen, &zram->stats.compr_data_size);
-@@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram
+@@ -779,9 +781,9 @@ static void zram_bio_discard(struct zram
  	}
  
  	while (n >= PAGE_SIZE) {
@@ -107,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		atomic64_inc(&zram->stats.notify_free);
  		index++;
  		n -= PAGE_SIZE;
-@@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct
+@@ -905,9 +907,9 @@ static void zram_slot_free_notify(struct
  	zram = bdev->bd_disk->private_data;
  	meta = zram->meta;
  
@@ -121,9 +122,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/drivers/block/zram/zram_drv.h
 +++ b/drivers/block/zram/zram_drv.h
-@@ -73,6 +73,9 @@ enum zram_pageflags {
- struct zram_table_entry {
- 	unsigned long handle;
+@@ -76,6 +76,9 @@ struct zram_table_entry {
+ 		unsigned long element;
+ 	};
  	unsigned long value;
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +	spinlock_t lock;
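
The zram_lock_table()/zram_unlock_table() helpers that every hunk above
calls are added elsewhere in this patch; with the per-entry spinlock
from the struct change they plausibly look like:

    #ifdef CONFIG_PREEMPT_RT_BASE
    static inline void zram_lock_table(struct zram_table_entry *table)
    {
        spin_lock(&table->lock);    /* sleeping lock on RT, so preemptible */
    }

    static inline void zram_unlock_table(struct zram_table_entry *table)
    {
        spin_unlock(&table->lock);
    }
    #else
    static inline void zram_lock_table(struct zram_table_entry *table)
    {
        bit_spin_lock(ZRAM_ACCESS, &table->value);
    }

    static inline void zram_unlock_table(struct zram_table_entry *table)
    {
        bit_spin_unlock(ZRAM_ACCESS, &table->value);
    }
    #endif
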
diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
index 4e24741..56be59e 100644
--- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
+++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:24 -0500
 Subject: drivers/net: Use disable_irq_nosync() in 8139too
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use disable_irq_nosync() instead of disable_irq() as this might be
 called in atomic context with netpoll.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/net/ethernet/realtek/8139too.c
 +++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(stru
+@@ -2223,7 +2223,7 @@ static void rtl8139_poll_controller(stru
  	struct rtl8139_private *tp = netdev_priv(dev);
  	const int irq = tp->pci_dev->irq;
  
diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
index 0aabfbf..f824972 100644
--- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
+++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 3 Jul 2009 08:30:00 -0500
 Subject: drivers/net: vortex fix locking issues
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Argh, cut and paste wasn't enough...
 
@@ -32,7 +32,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  #endif
  
-@@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net
+@@ -1908,12 +1908,12 @@ static void vortex_tx_timeout(struct net
  			 * Block interrupts because vortex_interrupt does a bare spin_lock()
  			 */
  			unsigned long flags;
diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
index 15c6002..42b70c1 100644
--- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
+++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:30 -0500
 Subject: drivers: random: Reduce preempt disabled region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 No need to keep preemption disabled across the whole function.
 
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct
+@@ -1017,8 +1017,6 @@ static void add_timer_randomness(struct
  	} sample;
  	long delta, delta2, delta3;
  
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	sample.jiffies = jiffies;
  	sample.cycles = random_get_entropy();
  	sample.num = num;
-@@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct
+@@ -1059,7 +1057,6 @@ static void add_timer_randomness(struct
  		 */
  		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
  	}
diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
index 126dfcf..a8b6e57 100644
--- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/omap: Make the locking RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the
 optimisation we are looking for. Redo it to make it work on -RT and
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/tty/serial/omap-serial.c
 +++ b/drivers/tty/serial/omap-serial.c
-@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console
+@@ -1312,13 +1312,10 @@ serial_omap_console_write(struct console
  
  	pm_runtime_get_sync(up->dev);
  
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * First save the IER then disable the interrupts
-@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console
+@@ -1347,8 +1344,7 @@ serial_omap_console_write(struct console
  	pm_runtime_mark_last_busy(up->dev);
  	pm_runtime_put_autosuspend(up->dev);
  	if (locked)
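
The replacement locking follows the common console-write pattern: take
the port lock for real, but only trylock when this CPU may already hold
it (sysrq handling or an oops in progress). A sketch with the usual
field names:

    unsigned long flags;
    int locked = 1;

    if (up->port.sysrq || oops_in_progress)
        locked = spin_trylock_irqsave(&up->port.lock, flags);
    else
        spin_lock_irqsave(&up->port.lock, flags);

    /* ... emit the console characters ... */

    if (locked)
        spin_unlock_irqrestore(&up->port.lock, flags);
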
diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
index 4b2b43f..598e031 100644
--- a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/pl011: Make the locking work on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the optimisation
 we are looking for. Redo it to make it work on -RT and non-RT.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/tty/serial/amba-pl011.c
 +++ b/drivers/tty/serial/amba-pl011.c
-@@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co,
+@@ -2222,13 +2222,19 @@ pl011_console_write(struct console *co,
  
  	clk_enable(uap->clk);
  
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 *	First save the CR then disable the interrupts
-@@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co,
+@@ -2254,8 +2260,7 @@ pl011_console_write(struct console *co,
  		pl011_write(old_cr, uap, REG_CR);
  
  	if (locked)
diff --git a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index c204cc8..f6a4d11 100644
--- a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Thu, 20 Oct 2016 11:15:22 +0200
 Subject: [PATCH] drivers/zram: Don't disable preemption in
  zcomp_stream_get/put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 In v4.7, the driver switched to percpu compression streams, disabling
 preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
@@ -43,14 +43,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  int zcomp_compress(struct zcomp_strm *zstrm,
-@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct z
- 			pr_err("Can't allocate a compression stream\n");
- 			return NOTIFY_BAD;
- 		}
-+		spin_lock_init(&zstrm->zcomp_lock);
- 		*per_cpu_ptr(comp->stream, cpu) = zstrm;
- 		break;
- 	case CPU_DEAD:
+@@ -173,6 +180,7 @@ int zcomp_cpu_up_prepare(unsigned int cp
+ 		pr_err("Can't allocate a compression stream\n");
+ 		return -ENOMEM;
+ 	}
++	spin_lock_init(&zstrm->zcomp_lock);
+ 	*per_cpu_ptr(comp->stream, cpu) = zstrm;
+ 	return 0;
+ }
 --- a/drivers/block/zram/zcomp.h
 +++ b/drivers/block/zram/zcomp.h
 @@ -14,6 +14,7 @@ struct zcomp_strm {
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /* dynamic per-device compression frontend */
 --- a/drivers/block/zram/zram_drv.c
 +++ b/drivers/block/zram/zram_drv.c
-@@ -577,6 +577,7 @@ static int zram_decompress_page(struct z
+@@ -512,6 +512,7 @@ static int zram_decompress_page(struct z
  	struct zram_meta *meta = zram->meta;
  	unsigned long handle;
  	unsigned int size;
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	zram_lock_table(&meta->table[index]);
  	handle = meta->table[index].handle;
-@@ -588,16 +589,15 @@ static int zram_decompress_page(struct z
+@@ -523,16 +524,15 @@ static int zram_decompress_page(struct z
  		return 0;
  	}
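
The resulting get/put pair is sketched below (assuming the 4.11 zcomp
layout); the per-stream spinlock replaces the preemption disabling that
get_cpu_ptr()/put_cpu_ptr() used to provide:

    struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        zstrm = *this_cpu_ptr(comp->stream);
        spin_lock(&zstrm->zcomp_lock);    /* sleeping lock on -RT */
        return zstrm;
    }

    void zcomp_stream_put(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        zstrm = *this_cpu_ptr(comp->stream);
        spin_unlock(&zstrm->zcomp_lock);
    }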
  
diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index d44bcc2..042bbfb 100644
--- a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 25 Apr 2013 18:12:52 +0200
 Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This tracepoint is responsible for:
 
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_pa
+@@ -1445,7 +1445,9 @@ execbuf_submit(struct i915_execbuffer_pa
  	if (ret)
  		return ret;
  
diff --git a/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch b/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
new file mode 100644
index 0000000..cbd6da8
--- /dev/null
+++ b/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
+Date: Mon, 29 May 2017 15:33:52 +0200
+Subject: [PATCH] drm/i915: init spinlock properly on -RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+The lock init is open-coded, so it needs to be fixed up manually for -RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
+---
+ drivers/gpu/drm/i915/i915_gem_timeline.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
++++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
+@@ -50,7 +50,12 @@ static int __i915_gem_timeline_init(stru
+ 		tl->fence_context = fences++;
+ 		tl->common = timeline;
+ #ifdef CONFIG_DEBUG_SPINLOCK
++# ifdef CONFIG_PREEMPT_RT_FULL
++		rt_mutex_init(&tl->lock.lock);
++		__rt_spin_lock_init(&tl->lock, lockname, lockclass);
++# else
+ 		__raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
++# endif
+ #else
+ 		spin_lock_init(&tl->lock);
+ #endif
diff --git a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index 2969c88..72c7c79 100644
--- a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -1,7 +1,7 @@
 Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 09:01:42 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 
 [    8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
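
The fix itself is small: intel_pipe_update_start()/end() serialize the
update window with a local lock instead of a bare irq-off section,
roughly as sketched here (assuming the -RT locallock API):

    static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);

    /* in intel_pipe_update_start(): */
    local_lock_irq(pipe_update_lock);

    /* in intel_pipe_update_end(): */
    local_unlock_irq(pipe_update_lock);

On -RT a local lock is a per-CPU sleeping lock, so the vblank evade loop
can be preempted without triggering the splat quoted above.
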
diff --git a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 2c6c20d..7ad039d 100644
--- a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
 Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 DRM folks identified the spots, so use them.
 
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/i915_irq.c
 +++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -867,6 +867,7 @@ static int i915_get_crtc_scanoutpos(stru
  	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Get optional system timestamp before query. */
  	if (stime)
-@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -918,6 +919,7 @@ static int i915_get_crtc_scanoutpos(stru
  		*etime = ktime_get();
  
  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
index 38dc76e..0e4d2c6 100644
--- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: fs/epoll: Do not disable preemption on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 16:35:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 ep_call_nested() takes a sleeping lock so we can't disable preemption.
 The light version is enough since ep_call_nested() doesn't mind being
diff --git a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
index be5d78e..78aedb9 100644
--- a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
+++ b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 16 Feb 2015 18:49:10 +0100
 Subject: fs/aio: simple simple work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +#include <linux/swork.h>
  
  #include <asm/kmap_types.h>
- #include <asm/uaccess.h>
+ #include <linux/uaccess.h>
 @@ -115,7 +116,7 @@ struct kioctx {
  	struct page		**ring_pages;
  	long			nr_pages;
diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch
index 750bf27..7d32fa8 100644
--- a/debian/patches/features/all/rt/fs-block-rt-support.patch
+++ b/debian/patches/features/all/rt/fs-block-rt-support.patch
@@ -1,7 +1,7 @@
 Subject: block: Turn off warning which is bogus on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jun 2011 17:05:09 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On -RT the context always has IRQs enabled. Ignore this warning on -RT.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+@@ -214,7 +214,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
   **/
  void blk_start_queue(struct request_queue *q)
  {
diff --git a/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch b/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
deleted file mode 100644
index d04baaf..0000000
--- a/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Wed, 14 Sep 2016 11:55:23 +0200
-Subject: fs/dcache: include wait.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Since commit d9171b934526 ("parallel lookups machinery, part 4 (and
-last)") dcache.h is using but does not include wait.h. It works as long
-as it is included somehow earlier and fails otherwise.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- include/linux/dcache.h |    1 +
- 1 file changed, 1 insertion(+)
-
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -11,6 +11,7 @@
- #include <linux/rcupdate.h>
- #include <linux/lockref.h>
- #include <linux/stringhash.h>
-+#include <linux/wait.h>
- 
- struct path;
- struct vfsmount;
diff --git a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
index a13ee5d..849c501 100644
--- a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
+++ b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 17:57:03 +0200
 Subject: [PATCH] fs/dcache: init in_lookup_hashtable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 in_lookup_hashtable was introduced in commit 94bdd655caba ("parallel
 lookups machinery, part 3") and never initialized but since it is in
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
-@@ -3604,6 +3604,11 @@ EXPORT_SYMBOL(d_genocide);
+@@ -3610,6 +3610,11 @@ EXPORT_SYMBOL(d_genocide);
  
  void __init vfs_caches_init_early(void)
  {
diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 529d963..bac57a6 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
 Subject: fs: dcache: Use cpu_chill() in trylock loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/autofs4/autofs_i.h
 +++ b/fs/autofs4/autofs_i.h
-@@ -31,6 +31,7 @@
+@@ -32,6 +32,7 @@
  #include <linux/sched.h>
  #include <linux/mount.h>
  #include <linux/namei.h>
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		goto repeat;
  	}
  }
-@@ -2324,7 +2336,7 @@ void d_delete(struct dentry * dentry)
+@@ -2330,7 +2342,7 @@ void d_delete(struct dentry * dentry)
  	if (dentry->d_lockref.count == 1) {
  		if (!spin_trylock(&inode->i_lock)) {
  			spin_unlock(&dentry->d_lock);
@@ -95,9 +95,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/namei.h>
 +#include <linux/delay.h>
  #include <linux/security.h>
+ #include <linux/cred.h>
  #include <linux/idr.h>
- #include <linux/init.h>		/* init_rootfs */
-@@ -358,7 +359,7 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -357,7 +358,7 @@ int __mnt_want_write(struct vfsmount *m)
  	smp_mb();
  	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
  		preempt_enable();
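
Every converted loop has the same shape; the d_delete() hunk above is
typical (sketch, label name illustrative):

    again:
        spin_lock(&dentry->d_lock);
        if (!spin_trylock(&inode->i_lock)) {
            spin_unlock(&dentry->d_lock);
            cpu_chill();    /* sleep briefly so a preempted lock holder can run */
            goto again;
        }

cpu_chill() is a short sleep rather than a busy-wait, so a SCHED_FIFO
task spinning here cannot starve the preempted owner of the lock.
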
diff --git a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index a0458dc..fc9f23d 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 14:35:49 +0200
 Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
 which disables preemption. As a workaround, convert it to swait.
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
-@@ -2396,21 +2396,24 @@ static inline void end_dir_add(struct in
+@@ -2402,21 +2402,24 @@ static inline void end_dir_add(struct in
  
  static void d_wait_lookup(struct dentry *dentry)
  {
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	unsigned int hash = name->hash;
  	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2519,7 +2522,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2525,7 +2528,7 @@ void __d_lookup_done(struct dentry *dent
  	hlist_bl_lock(b);
  	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
  	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		/*
 --- a/fs/namei.c
 +++ b/fs/namei.c
-@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const
+@@ -1628,7 +1628,7 @@ static struct dentry *lookup_slow(const
  {
  	struct dentry *dentry = ERR_PTR(-ENOENT), *old;
  	struct inode *inode = dir->d_inode;
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	inode_lock_shared(inode);
  	/* Don't go there if it's already dead */
-@@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata
+@@ -3069,7 +3069,7 @@ static int lookup_open(struct nameidata
  	struct dentry *dentry;
  	int error, create_error = 0;
  	umode_t mode = op->mode;
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return -ENOENT;
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
-@@ -485,7 +485,7 @@ static
+@@ -491,7 +491,7 @@ static
  void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
  {
  	struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct dentry *dentry;
  	struct dentry *alias;
  	struct inode *dir = d_inode(parent);
-@@ -1487,7 +1487,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1493,7 +1493,7 @@ int nfs_atomic_open(struct inode *dir, s
  		    struct file *file, unsigned open_flags,
  		    umode_t mode, int *opened)
  {
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_lock(&dentry->d_lock);
 --- a/fs/proc/base.c
 +++ b/fs/proc/base.c
-@@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1836,7 +1836,7 @@ bool proc_fill_cache(struct file *file,
  
  	child = d_hash_and_lookup(dir, &qname);
  	if (!child) {
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			goto end_instantiate;
 --- a/fs/proc/proc_sysctl.c
 +++ b/fs/proc/proc_sysctl.c
-@@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -665,7 +665,7 @@ static bool proc_sys_fill_cache(struct f
  
  	child = d_lookup(dir, &qname);
  	if (!child) {
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
 --- a/include/linux/nfs_xdr.h
 +++ b/include/linux/nfs_xdr.h
-@@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
+@@ -1513,7 +1513,7 @@ struct nfs_unlinkdata {
  	struct nfs_removeargs args;
  	struct nfs_removeres res;
  	struct dentry *dentry;
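
The converted d_wait_lookup() then looks approximately like this sketch
(the @@ -2402 hunk above carries the change):

    static void d_wait_lookup(struct dentry *dentry)
    {
        struct swait_queue __wait;

        if (!d_in_lookup(dentry))
            return;

        INIT_LIST_HEAD(&__wait.task_list);
        do {
            prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
            spin_unlock(&dentry->d_lock);
            schedule();
            spin_lock(&dentry->d_lock);
        } while (d_in_lookup(dentry));
        finish_swait(dentry->d_wait, &__wait);
    }

swake_up_all() may then be called from the preempt-disabled
hlist_bl_lock() section, which wake_up_all() cannot be on -RT.
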
diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
index f607cdb..50085f4 100644
--- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
+++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 10:11:25 +0100
 Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 bit_spin_locks break under RT.
 
diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
index ab35872..d1ddfa2 100644
--- a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
+++ b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 17 Feb 2014 17:30:03 +0100
 Subject: fs: jbd2: pull your plug when waiting for space
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Two cps in parallel managed to stall the ext4 fs. It seems that the
 journal code is either waiting for locks or sleeping waiting for
diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
index b91177c..c452542 100644
--- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
+++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 19 Jul 2009 08:44:27 -0500
 Subject: fs: namespace preemption fix
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On RT we cannot loop with preemption disabled here as
 mnt_make_readonly() might have been preempted. We can safely enable
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
-@@ -356,8 +356,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -355,8 +355,11 @@ int __mnt_want_write(struct vfsmount *m)
  	 * incremented count after it has set MNT_WRITE_HOLD.
  	 */
  	smp_mb();
diff --git a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index ddc78f9..9571eab 100644
--- a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 10:51:27 +0200
 Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The RW semaphore had a reader side which used the _non_owner version
 because it most likely took the reader lock in one thread and released it
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
-@@ -1802,7 +1802,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1813,7 +1813,11 @@ int nfs_rmdir(struct inode *dir, struct
  
  	trace_nfs_rmdir_enter(dir, dentry);
  	if (d_really_is_positive(dentry)) {
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
  		/* Ensure the VFS deletes this inode */
  		switch (error) {
-@@ -1812,7 +1816,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1823,7 +1827,11 @@ int nfs_rmdir(struct inode *dir, struct
  		case -ENOENT:
  			nfs_dentry_handle_enoent(dentry);
  		}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	trace_nfs_rmdir_exit(dir, dentry, error);
 --- a/fs/nfs/inode.c
 +++ b/fs/nfs/inode.c
-@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
+@@ -1984,7 +1984,11 @@ static void init_once(void *foo)
  	nfsi->nrequests = 0;
  	nfsi->commit_info.ncommit = 0;
  	atomic_set(&nfsi->commit_info.rpcs_out, 0);
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		 * point dentry is definitely not a root, so we won't need
 --- a/include/linux/nfs_fs.h
 +++ b/include/linux/nfs_fs.h
-@@ -165,7 +165,11 @@ struct nfs_inode {
+@@ -161,7 +161,11 @@ struct nfs_inode {
  
  	/* Readers: in-flight sillydelete RPC calls */
  	/* Writers: rmdir */
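
The core of the conversion is the type switch in struct nfs_inode,
sketched below; the accessor changes in the hunks above follow the same
#ifdef pattern:

    struct nfs_inode {
        /* ... */
    #ifdef CONFIG_PREEMPT_RT_BASE
        /* a plain semaphore has no owner, so cross-task up()/down() is fine */
        struct semaphore    rmdir_sem;
    #else
        struct rw_semaphore rmdir_sem;
    #endif
        /* ... */
    };
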
diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
index 2e02e85..ed2d188 100644
--- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Fri, 3 Jul 2009 08:44:12 -0500
 Subject: fs: ntfs: disable interrupt only on !RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
 > * Nick Piggin <nickpiggin at yahoo.com.au> wrote:
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/ntfs/aops.c
 +++ b/fs/ntfs/aops.c
-@@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(s
+@@ -93,13 +93,13 @@ static void ntfs_end_buffer_async_read(s
  			ofs = 0;
  			if (file_ofs < init_size)
  				ofs = init_size - file_ofs;
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	} else {
  		clear_buffer_uptodate(bh);
-@@ -143,13 +143,13 @@ static void ntfs_end_buffer_async_read(s
+@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
  		recs = PAGE_SIZE / rec_size;
  		/* Should have been verified before we got here... */
  		BUG_ON(!recs);
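
The change itself swaps the interrupt disabling for the _nort variants,
roughly along these lines (sketch; surrounding code abridged):

    unsigned long flags;

    local_irq_save_nort(flags);    /* real irq-off section on !RT, plain on -RT */
    kaddr = kmap_atomic(page);
    memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
    flush_dcache_page(page);
    kunmap_atomic(kaddr);
    local_irq_restore_nort(flags);
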
diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
index e55a20e..dd3dc81 100644
--- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 09:18:52 +0100
 Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Wrap the bit_spin_lock calls into a separate inline and add the RT
 replacements with a real spinlock.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/buffer.c
 +++ b/fs/buffer.c
-@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct
+@@ -303,8 +303,7 @@ static void end_buffer_async_read(struct
  	 * decide that the page is now completely done.
  	 */
  	first = page_buffers(page);
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	clear_buffer_async_read(bh);
  	unlock_buffer(bh);
  	tmp = bh;
-@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct
+@@ -317,8 +316,7 @@ static void end_buffer_async_read(struct
  		}
  		tmp = tmp->b_this_page;
  	} while (tmp != bh);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * If none of the buffers had errors and they are all
-@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct
+@@ -330,9 +328,7 @@ static void end_buffer_async_read(struct
  	return;
  
  still_busy:
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffe
+@@ -360,8 +356,7 @@ void end_buffer_async_write(struct buffe
  	}
  
  	first = page_buffers(page);
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	clear_buffer_async_write(bh);
  	unlock_buffer(bh);
-@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffe
+@@ -373,15 +368,12 @@ void end_buffer_async_write(struct buffe
  		}
  		tmp = tmp->b_this_page;
  	}
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(end_buffer_async_write);
  
-@@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3426,6 +3418,7 @@ struct buffer_head *alloc_buffer_head(gf
  	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
  	if (ret) {
  		INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		recalc_bh_state();
 --- a/fs/ntfs/aops.c
 +++ b/fs/ntfs/aops.c
-@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
  				"0x%llx.", (unsigned long long)bh->b_blocknr);
  	}
  	first = page_buffers(page);
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	clear_buffer_async_read(bh);
  	unlock_buffer(bh);
  	tmp = bh;
-@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
  		}
  		tmp = tmp->b_this_page;
  	} while (tmp != bh);
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * If none of the buffers had errors then we can set the page uptodate,
  	 * but we first have to perform the post read mst fixups, if the
-@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
  	unlock_page(page);
  	return;
  still_busy:
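
The helpers added to include/linux/buffer_head.h are sketched below: on
!RT they keep the original bit_spin_lock(), on -RT they use the new
b_uptodate_lock spinlock added to struct buffer_head:

    static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
    {
        unsigned long flags;

    #ifndef CONFIG_PREEMPT_RT_BASE
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
    #else
        spin_lock_irqsave(&bh->b_uptodate_lock, flags);
    #endif
        return flags;
    }

    static inline void bh_uptodate_unlock_irqrestore(struct buffer_head *bh,
                                                     unsigned long flags)
    {
    #ifndef CONFIG_PREEMPT_RT_BASE
        bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
        local_irq_restore(flags);
    #else
        spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
    #endif
    }
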
diff --git a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
index 70f93dc..904a438 100644
--- a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
+++ b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 16 Oct 2016 05:08:30 +0200
 Subject: [PATCH] ftrace: Fix trace header alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Line up helper arrows to the right column.
 
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -2896,17 +2896,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3113,17 +3113,17 @@ get_total_entries(struct trace_buffer *b
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2935,11 +2935,11 @@ static void print_func_help_header_irq(s
+@@ -3152,11 +3152,11 @@ static void print_func_help_header_irq(s
  		    "#                            |/  _-----=> need-resched_lazy\n"
  		    "#                            || / _---=> hardirq/softirq\n"
  		    "#                            ||| / _--=> preempt-depth\n"
diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
index a87c1f7..9953f46 100644
--- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:56:42 +0200
 Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/trace_events.h
 +++ b/include/linux/trace_events.h
-@@ -56,6 +56,8 @@ struct trace_entry {
+@@ -61,6 +61,8 @@ struct trace_entry {
  	unsigned char		flags;
  	unsigned char		preempt_count;
  	int			pid;
@@ -24,8 +24,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TRACE_EVENT_TYPE_MAX						\
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1909,6 +1909,8 @@ tracing_generic_entry_update(struct trac
- 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+@@ -1946,6 +1946,8 @@ tracing_generic_entry_update(struct trac
+ 		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
  		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 +
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
  
-@@ -2897,9 +2899,10 @@ static void print_lat_help_header(struct
+@@ -3114,9 +3116,10 @@ static void print_lat_help_header(struct
  		    "#                | / _----=> need-resched    \n"
  		    "#                || / _---=> hardirq/softirq \n"
  		    "#                ||| / _--=> preempt-depth   \n"
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/trace/trace_output.c
 +++ b/kernel/trace/trace_output.c
-@@ -432,6 +432,11 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -484,6 +484,11 @@ int trace_print_lat_fmt(struct trace_seq
  	else
  		trace_seq_putc(s, '.');
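
The five lines added to trace_print_lat_fmt() presumably render the new
counter next to preempt-depth, along these lines (sketch):

    if (entry->migrate_disable)
        trace_seq_printf(s, "%x", entry->migrate_disable);
    else
        trace_seq_putc(s, '.');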
  
diff --git a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index e6fa03f..0695247 100644
--- a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 1 Mar 2013 11:17:42 +0100
 Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 In exit_pi_state_list() we have the following locking construct:
 
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -909,7 +909,9 @@ void exit_pi_state_list(struct task_stru
+@@ -911,7 +911,9 @@ void exit_pi_state_list(struct task_stru
  		 * task still owns the PI-state:
  		 */
  		if (head->next != next) {
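
The two added lines drop and reacquire pi_lock around the hb->lock
unlock, so lock and unlock nest symmetrically (sketch):

    if (head->next != next) {
        /* retry: release in reverse order of acquisition */
        raw_spin_unlock_irq(&curr->pi_lock);
        spin_unlock(&hb->lock);
        raw_spin_lock_irq(&curr->pi_lock);
        continue;
    }

On -RT hb->lock is a sleeping lock and must not be released while the
raw pi_lock is held with interrupts disabled.
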
diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
index ca48902..7a9dddd 100644
--- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
+++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Requeue with timeout causes a bug with PREEMPT_RT_FULL.
 
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -135,7 +135,8 @@ static void fixup_rt_mutex_waiters(struc
+@@ -137,7 +137,8 @@ static void fixup_rt_mutex_waiters(struc
  
  static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
  {
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1712,6 +1713,35 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1722,6 +1723,35 @@ int __rt_mutex_start_proxy_lock(struct r
  	if (try_to_take_rt_mutex(lock, task, NULL))
  		return 1;
  
diff --git a/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
index abf5bda..09117d0 100644
--- a/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
+++ b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 22 May 2017 13:04:50 -0700
 Subject: [PATCH] futex,rt_mutex: Fix rt_mutex_cleanup_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Markus reported that the glibc/nptl/tst-robustpi8 test was failing after
 commit:
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1775,12 +1775,14 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	int ret;
  
  	raw_spin_lock_irq(&lock->wait_lock);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	raw_spin_unlock_irq(&lock->wait_lock);
  
  	return ret;
-@@ -1812,15 +1814,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
  
  	raw_spin_lock_irq(&lock->wait_lock);
  	/*
diff --git a/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
index c87e83b..243ed05 100644
--- a/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
+++ b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
@@ -2,7 +2,7 @@ From 8a35f416ca9ff27e893cebcbe064a1f3c8e1de57 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 9 May 2017 17:11:10 +0200
 Subject: [PATCH] futex/rtmutex: Cure RT double blocking issue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT has a problem when the wait on a futex/rtmutex got interrupted by a
 timeout or a signal. task->pi_blocked_on is still set when returning from
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -2388,6 +2388,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2408,6 +2408,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  			       struct hrtimer_sleeper *to,
  			       struct rt_mutex_waiter *waiter)
  {
@@ -35,11 +35,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int ret;
  
  	raw_spin_lock_irq(&lock->wait_lock);
-@@ -2399,6 +2400,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2419,6 +2420,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	 * have to fix that up.
  	 */
  	fixup_rt_mutex_waiters(lock);
-+
 +	/*
 +	 * RT has a problem here when the wait got interrupted by a timeout
 +	 * or a signal. task->pi_blocked_on is still set. The task must
@@ -57,6 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		tsk->pi_blocked_on = NULL;
 +		raw_spin_unlock(&tsk->pi_lock);
 +	}
++
  	raw_spin_unlock_irq(&lock->wait_lock);
  
  	return ret;
diff --git a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
index 8d6d8ce..701d7df 100644
--- a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 8 Mar 2017 14:23:35 +0100
 Subject: [PATCH] futex: workaround migrate_disable/enable in different context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 migrate_disable()/migrate_enable() takes a different path in atomic() vs
 !atomic() context. These little hacks ensure that we don't underflow / overflow
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2669,9 +2669,18 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2671,9 +2671,18 @@ static int futex_lock_pi(u32 __user *uad
  	 * lock handoff sequence.
  	 */
  	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (ret) {
  		if (ret == 1)
-@@ -2815,10 +2824,21 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2817,10 +2826,21 @@ static int futex_unlock_pi(u32 __user *u
  		 * observed.
  		 */
  		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
index c15dd7d..2894bff 100644
--- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:57 -0500
 Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Creates long latencies for no value
 
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/irq/spurious.c
 +++ b/kernel/irq/spurious.c
-@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
  
  static int __init irqfixup_setup(char *str)
  {
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	irqfixup = 1;
  	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
  	printk(KERN_WARNING "This may impact system performance.\n");
-@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
+@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
  
  static int __init irqpoll_setup(char *str)
  {
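
The four lines added to each setup function are presumably the same
early-return guard, roughly:

    #ifdef CONFIG_PREEMPT_RT_BASE
        pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
        return 1;
    #endif
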
diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index e8b0570..138dd28 100644
--- a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 21 Aug 2013 17:48:46 +0200
 Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Joe Korty reported that __irq_set_affinity_locked() schedules a
 workqueue while holding a rawlock which results in a might_sleep()
@@ -10,25 +10,10 @@ This patch uses swork_queue() instead.
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- drivers/scsi/qla2xxx/qla_isr.c |    4 +++
- include/linux/interrupt.h      |    6 +++++
- kernel/irq/manage.c            |   43 ++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 50 insertions(+), 3 deletions(-)
+ include/linux/interrupt.h |    6 ++++++
+ kernel/irq/manage.c       |   43 ++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 46 insertions(+), 3 deletions(-)
 
---- a/drivers/scsi/qla2xxx/qla_isr.c
-+++ b/drivers/scsi/qla2xxx/qla_isr.c
-@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *
- 		* kref_put().
- 		*/
- 		kref_get(&qentry->irq_notify.kref);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+		swork_queue(&qentry->irq_notify.swork);
-+#else
- 		schedule_work(&qentry->irq_notify.work);
-+#endif
- 	}
- 
- 	/*
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
 @@ -14,6 +14,7 @@
@@ -61,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  };
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -235,7 +235,12 @@ int irq_set_affinity_locked(struct irq_d
+@@ -237,7 +237,12 @@ int irq_set_affinity_locked(struct irq_d
  
  	if (desc->affinity_notify) {
  		kref_get(&desc->affinity_notify->kref);
@@ -74,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  	irqd_set(data, IRQD_AFFINITY_SET);
  
-@@ -273,10 +278,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -275,10 +280,8 @@ int irq_set_affinity_hint(unsigned int i
  }
  EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
  
@@ -86,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct irq_desc *desc = irq_to_desc(notify->irq);
  	cpumask_var_t cpumask;
  	unsigned long flags;
-@@ -298,6 +301,35 @@ static void irq_affinity_notify(struct w
+@@ -300,6 +303,35 @@ static void irq_affinity_notify(struct w
  	kref_put(&notify->kref, notify->release);
  }
  
@@ -122,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /**
   *	irq_set_affinity_notifier - control notification of IRQ affinity changes
   *	@irq:		Interrupt for which to enable/disable notification
-@@ -326,7 +358,12 @@ irq_set_affinity_notifier(unsigned int i
+@@ -328,7 +360,12 @@ irq_set_affinity_notifier(unsigned int i
  	if (notify) {
  		notify->irq = irq;
  		kref_init(&notify->kref);
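
The lines added here select the right init for the notify callback,
presumably along these lines (sketch; the swork callback wrapper is set
up in the manage.c hunk above):

    #ifdef CONFIG_PREEMPT_RT_BASE
        INIT_SWORK(&notify->swork, irq_affinity_notify);
    #else
        INIT_WORK(&notify->work, irq_affinity_notify);
    #endif
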
diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch
index 8b807cb..ca0b877 100644
--- a/debian/patches/features/all/rt/genirq-force-threading.patch
+++ b/debian/patches/features/all/rt/genirq-force-threading.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Force interrupt thread on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Force threaded_irqs and optimize the code (force_irqthreads)
 accordingly.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -406,9 +406,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -418,9 +418,13 @@ extern int irq_set_irqchip_state(unsigne
  				 bool state);
  
  #ifdef CONFIG_IRQ_FORCED_THREADING
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifndef __ARCH_SET_SOFTIRQ_PENDING
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -22,6 +22,7 @@
+@@ -24,6 +24,7 @@
  #include "internals.h"
  
  #ifdef CONFIG_IRQ_FORCED_THREADING
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  __read_mostly bool force_irqthreads;
  
  static int __init setup_forced_irqthreads(char *arg)
-@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread
+@@ -32,6 +33,7 @@ static int __init setup_forced_irqthread
  	return 0;
  }
  early_param("threadirqs", setup_forced_irqthreads);
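
The net effect in interrupt.h is to hard-wire the flag on -RT so the
compiler can drop the non-threaded paths (sketch):

    #ifdef CONFIG_IRQ_FORCED_THREADING
    # ifndef CONFIG_PREEMPT_RT_BASE
    extern bool force_irqthreads;
    # else
    #  define force_irqthreads    (true)
    # endif
    #endif
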
diff --git a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
index 13ce7da..9862b3c 100644
--- a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:00 -0600
 Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On -rt kernels, the use of migrate_disable()/migrate_enable() is
 sufficient to guarantee a task isn't moved to another CPU.  Update the
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -2111,7 +2111,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
   *	This call sets the internal irqchip state of an interrupt,
   *	depending on the value of @which.
   *
diff --git a/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch b/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
deleted file mode 100644
index 08fd8b2..0000000
--- a/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: gpu: don't check for the lock owner.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- drivers/gpu/drm/i915/i915_gem_shrinker.c |    2 +-
- drivers/gpu/drm/msm/msm_gem_shrinker.c   |    2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
-+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
-@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mu
- 	if (!mutex_is_locked(mutex))
- 		return false;
- 
--#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
-+#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
- 	return mutex->owner == task;
- #else
- 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
---- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
-+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
-@@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mu
- 	if (!mutex_is_locked(mutex))
- 		return false;
- 
--#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
- 	return mutex->owner == task;
- #else
- 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index 9a3ccf0..b08acb6 100644
--- a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 24 Mar 2015 08:14:49 +0100
 Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 do_set_cpus_allowed() is not safe vs ->sched_class change.
 
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -418,7 +418,7 @@ static int sync_unplug_thread(void *data
+@@ -413,7 +413,7 @@ static int sync_unplug_thread(void *data
  	 * we don't want any more work on this CPU.
  	 */
  	current->flags &= ~PF_NO_SETAFFINITY;
diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
index 7bee10b..11b8068 100644
--- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
+++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Lightweight get online cpus
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 get_online_cpus() is a heavyweight function which involves a global
 mutex. migrate_disable() wants a simpler construct which prevents only
@@ -13,43 +13,35 @@ tasks on the cpu which should be brought down.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- include/linux/cpu.h |    7 +--
+ include/linux/cpu.h |    5 ++
  kernel/cpu.c        |  118 ++++++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 122 insertions(+), 3 deletions(-)
+ kernel/sched/core.c |    4 +
+ 3 files changed, 127 insertions(+)
 
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -173,9 +173,6 @@ static inline void cpu_notifier_register
- #endif /* CONFIG_SMP */
- extern struct bus_type cpu_subsys;
- 
--static inline void pin_current_cpu(void) { }
--static inline void unpin_current_cpu(void) { }
--
- #ifdef CONFIG_HOTPLUG_CPU
- /* Stop CPUs going up and down. */
- 
-@@ -185,6 +182,8 @@ extern void get_online_cpus(void);
- extern void put_online_cpus(void);
- extern void cpu_hotplug_disable(void);
+@@ -109,6 +109,8 @@ extern void cpu_hotplug_disable(void);
  extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+ int cpu_down(unsigned int cpu);
 +extern void pin_current_cpu(void);
 +extern void unpin_current_cpu(void);
- #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
- #define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
- #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
-@@ -202,6 +201,8 @@ static inline void cpu_hotplug_done(void
+ 
+ #else		/* CONFIG_HOTPLUG_CPU */
+ 
+@@ -118,6 +120,9 @@ static inline void cpu_hotplug_done(void
  #define put_online_cpus()	do { } while (0)
  #define cpu_hotplug_disable()	do { } while (0)
  #define cpu_hotplug_enable()	do { } while (0)
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
- #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
- #define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
- /* These aren't inline functions due to a GCC bug. */
++static inline void pin_current_cpu(void)	{ }
++static inline void unpin_current_cpu(void)	{ }
++
+ #endif		/* CONFIG_HOTPLUG_CPU */
+ 
+ #ifdef CONFIG_PM_SLEEP_SMP
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -239,6 +239,100 @@ static struct {
+@@ -234,6 +234,100 @@ static struct {
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
@@ -150,16 +142,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  void get_online_cpus(void)
  {
-@@ -877,6 +971,8 @@ static int __ref _cpu_down(unsigned int
+@@ -766,6 +860,8 @@ static int __ref _cpu_down(unsigned int
+ {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int prev_state, ret = 0;
- 	bool hasdied = false;
 +	int mycpu;
 +	cpumask_var_t cpumask;
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -884,7 +980,27 @@ static int __ref _cpu_down(unsigned int
+@@ -773,7 +869,27 @@ static int __ref _cpu_down(unsigned int
  	if (!cpu_present(cpu))
  		return -EINVAL;
  
@@ -187,12 +179,39 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	cpuhp_tasks_frozen = tasks_frozen;
  
-@@ -923,6 +1039,8 @@ static int __ref _cpu_down(unsigned int
+@@ -811,6 +927,8 @@ static int __ref _cpu_down(unsigned int
+ 	}
  
- 	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
  out:
 +	cpu_unplug_done(cpu);
 +out_cancel:
  	cpu_hotplug_done();
- 	/* This post dead nonsense must die */
- 	if (!ret && hasdied)
+ 	return ret;
+ }
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7471,6 +7471,7 @@ void migrate_disable(void)
+ 	/* get_online_cpus(); */
+ 
+ 	preempt_disable();
++	pin_current_cpu();
+ 	p->migrate_disable = 1;
+ 
+ 	p->cpus_ptr = cpumask_of(smp_processor_id());
+@@ -7535,13 +7536,16 @@ void migrate_enable(void)
+ 			arg.task = p;
+ 			arg.dest_cpu = dest_cpu;
+ 
++			unpin_current_cpu();
+ 			preempt_enable();
+ 			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ 			tlb_migrate_finish(p->mm);
+ 			/* put_online_cpus(); */
++
+ 			return;
+ 		}
+ 	}
++	unpin_current_cpu();
+ 	/* put_online_cpus(); */
+ 	preempt_enable();
+ }
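
Usage follows one pattern, shown here as it is wired into
migrate_disable()/migrate_enable() above (sketch):

    preempt_disable();
    pin_current_cpu();    /* per-CPU refcount; holds off unplug of this CPU */
    /* ... work that must stay on this CPU ... */
    unpin_current_cpu();
    preempt_enable();

Unlike get_online_cpus() this takes no global mutex; unplug of a
different CPU can proceed concurrently.
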
diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 5a956d7..b8b0d92 100644
--- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: sync_unplug: No "\n" in task name
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Sun, 16 Oct 2011 18:56:43 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Otherwise the output will look a little odd.
 
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -318,7 +318,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -313,7 +313,7 @@ static int cpu_unplug_begin(unsigned int
  	struct task_struct *tsk;
  
  	init_completion(&hp->synced);
diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
index 84c6a61..90db14e 100644
--- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
+++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Use migrate disable on unplug
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 19:35:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Migration needs to be disabled across the unplug handling to make
 sure that the unplug thread is off the unplugged cpu.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -988,14 +988,13 @@ static int __ref _cpu_down(unsigned int
+@@ -877,14 +877,13 @@ static int __ref _cpu_down(unsigned int
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
@@ -30,11 +30,11 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
-@@ -1044,6 +1043,7 @@ static int __ref _cpu_down(unsigned int
+@@ -932,6 +931,7 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
 +	migrate_enable();
- 	/* This post dead nonsense must die */
- 	if (!ret && hasdied)
- 		cpu_notify_nofail(CPU_POST_DEAD, cpu);
+ 	return ret;
+ }
+ 
diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index 55d94db..4694586 100644
--- a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Date: Mon, 16 Sep 2013 14:09:19 -0700
 Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When running the ltp leapsec_timer test, the following call trace is caught:
 
diff --git a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
index a3c9d7c..dac5550 100644
--- a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
+++ b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 23 Dec 2015 20:57:41 +0100
 Subject: hrtimer: enforce 64byte alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
 a list_head expired to struct hrtimer_clock_base and with it we run into
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -127,11 +127,7 @@ struct hrtimer_sleeper {
+@@ -116,11 +116,7 @@ struct hrtimer_sleeper {
  	struct task_struct *task;
  };
  
diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 31ffc0e..3f18bce 100644
--- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:31 -0500
 Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 In preempt-rt we cannot call the callbacks which take sleeping locks
 from the timer interrupt context.
@@ -16,10 +16,10 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  include/linux/hrtimer.h  |    7 ++
  kernel/sched/core.c      |    1 
  kernel/sched/rt.c        |    1 
- kernel/time/hrtimer.c    |  144 ++++++++++++++++++++++++++++++++++++++++++++---
+ kernel/time/hrtimer.c    |  143 ++++++++++++++++++++++++++++++++++++++++++++---
  kernel/time/tick-sched.c |    1 
  kernel/watchdog.c        |    1 
- 6 files changed, 146 insertions(+), 9 deletions(-)
+ 6 files changed, 145 insertions(+), 9 deletions(-)
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
@@ -31,8 +31,8 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
 + * @irqsafe:	timer can run in hardirq context
   * @praecox:	timer expiry time if expired at the time of programming
   * @is_rel:	Set if the timer was armed relative
-  * @start_pid:  timer statistics field to store the pid of the task which
-@@ -104,6 +106,8 @@ struct hrtimer {
+  *
+@@ -98,6 +100,8 @@ struct hrtimer {
  	enum hrtimer_restart		(*function)(struct hrtimer *);
  	struct hrtimer_clock_base	*base;
  	u8				state;
@@ -41,7 +41,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
  	ktime_t				praecox;
  #endif
-@@ -136,6 +140,7 @@ struct hrtimer_sleeper {
+@@ -125,6 +129,7 @@ struct hrtimer_sleeper {
   *			timer to a base on another cpu.
   * @clockid:		clock id for per_cpu support
   * @active:		red black tree root node for the active timers
@@ -49,7 +49,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
   * @get_time:		function to retrieve the current time of the clock
   * @offset:		offset of this clock to the monotonic base
   */
-@@ -144,6 +149,7 @@ struct hrtimer_clock_base {
+@@ -133,6 +138,7 @@ struct hrtimer_clock_base {
  	int			index;
  	clockid_t		clockid;
  	struct timerqueue_head	active;
@@ -57,7 +57,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	ktime_t			(*get_time)(void);
  	ktime_t			offset;
  } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
-@@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
+@@ -176,6 +182,7 @@ struct hrtimer_cpu_base {
  	raw_spinlock_t			lock;
  	seqcount_t			seq;
  	struct hrtimer			*running;
@@ -67,7 +67,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	unsigned int			clock_was_set_seq;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -345,6 +345,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -352,6 +352,7 @@ static void init_rq_hrtick(struct rq *rq
  
  	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	rq->hrtick_timer.function = hrtick;
@@ -77,7 +77,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  static inline void hrtick_clear(struct rq *rq)
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
-@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwid
+@@ -48,6 +48,7 @@ void init_rt_bandwidth(struct rt_bandwid
  
  	hrtimer_init(&rt_b->rt_period_timer,
  			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -101,7 +101,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
  static inline void retrigger_next_event(void *arg) { }
  
-@@ -873,7 +870,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -845,7 +842,7 @@ void hrtimer_wait_for_timer(const struct
  {
  	struct hrtimer_clock_base *base = timer->base;
  
@@ -110,7 +110,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		wait_event(base->cpu_base->wait,
  				!(hrtimer_callback_running(timer)));
  }
-@@ -923,6 +920,11 @@ static void __remove_hrtimer(struct hrti
+@@ -895,6 +892,11 @@ static void __remove_hrtimer(struct hrti
  	if (!(state & HRTIMER_STATE_ENQUEUED))
  		return;
  
@@ -122,15 +122,15 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	if (!timerqueue_del(&base->active, &timer->node))
  		cpu_base->active_bases &= ~(1 << base->index);
  
-@@ -1163,6 +1165,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1144,6 +1146,7 @@ static void __hrtimer_init(struct hrtime
  
  	base = hrtimer_clockid_to_base(clock_id);
  	timer->base = &cpu_base->clock_base[base];
 +	INIT_LIST_HEAD(&timer->cb_entry);
  	timerqueue_init(&timer->node);
+ }
  
- #ifdef CONFIG_TIMER_STATS
-@@ -1203,6 +1206,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1178,6 +1181,7 @@ bool hrtimer_active(const struct hrtimer
  		seq = raw_read_seqcount_begin(&cpu_base->seq);
  
  		if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -138,7 +138,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		    cpu_base->running == timer)
  			return true;
  
-@@ -1301,12 +1305,112 @@ static void __run_hrtimer(struct hrtimer
+@@ -1275,12 +1279,111 @@ static void __run_hrtimer(struct hrtimer
  	cpu_base->running = NULL;
  }
  
@@ -205,7 +205,6 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
 +			raw_write_seqcount_barrier(&cpu_base->seq);
 +
 +			__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-+			timer_stats_account_hrtimer(timer);
 +			fn = timer->function;
 +
 +			raw_spin_unlock_irq(&cpu_base->lock);
@@ -251,8 +250,8 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  
  	for (; active; base++, active >>= 1) {
  		struct timerqueue_node *node;
-@@ -1346,9 +1450,14 @@ static void __hrtimer_run_queues(struct
- 			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+@@ -1320,9 +1423,14 @@ static void __hrtimer_run_queues(struct
+ 			if (basenow < hrtimer_get_softexpires_tv64(timer))
  				break;
  
 -			__run_hrtimer(cpu_base, base, timer, &basenow);
@@ -267,7 +266,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1490,8 +1599,6 @@ void hrtimer_run_queues(void)
+@@ -1464,8 +1572,6 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -276,7 +275,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  /*
-@@ -1513,6 +1620,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1487,6 +1593,7 @@ static enum hrtimer_restart hrtimer_wake
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
  {
  	sl->timer.function = hrtimer_wakeup;
@@ -284,7 +283,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	sl->task = task;
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1647,6 +1755,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1621,6 +1728,7 @@ int hrtimers_prepare_cpu(unsigned int cp
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
  		cpu_base->clock_base[i].cpu_base = cpu_base;
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -292,7 +291,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	}
  
  	cpu_base->cpu = cpu;
-@@ -1723,9 +1832,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+@@ -1697,9 +1805,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
  
  #endif /* CONFIG_HOTPLUG_CPU */
  
@@ -321,7 +320,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  /**
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -1198,6 +1198,7 @@ void tick_setup_sched_timer(void)
+@@ -1197,6 +1197,7 @@ void tick_setup_sched_timer(void)
  	 * Emulate tick processing via per-CPU hrtimers:
  	 */
  	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -331,7 +330,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	/* Get the next period (per-CPU) */
 --- a/kernel/watchdog.c
 +++ b/kernel/watchdog.c
-@@ -522,6 +522,7 @@ static void watchdog_enable(unsigned int
+@@ -384,6 +384,7 @@ static void watchdog_enable(unsigned int
  	/* kick off the timer for the hardlockup detector */
  	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hrtimer->function = watchdog_timer_fn;
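
The hunks above mark hardirq-safe timers explicitly; on RT, every timer
left unmarked is expired from softirq context instead. A sketch of a
timer opting into hardirq expiry (the timer and callback names here are
made up):

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
        /* runs in hardirq context on RT, so no sleeping locks in here */
        return HRTIMER_NORESTART;
    }

    static void my_timer_setup(void)
    {
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_timer_fn;
        my_timer.irqsafe = 1;   /* without this, expiry moves to the softirq list */
    }
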
diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
index 2b18d82..4bc9529 100644
--- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
+++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: hrtimers: Prepare full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Make cancellation of a running callback in softirq context safe
 against preemption.
@@ -10,15 +10,23 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
- include/linux/hrtimer.h    |   12 +++++++++++-
+ include/linux/hrtimer.h    |   13 ++++++++++++-
  kernel/time/hrtimer.c      |   33 ++++++++++++++++++++++++++++++++-
  kernel/time/itimer.c       |    1 +
  kernel/time/posix-timers.c |   33 +++++++++++++++++++++++++++++++++
- 4 files changed, 77 insertions(+), 2 deletions(-)
+ 4 files changed, 78 insertions(+), 2 deletions(-)
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -207,6 +207,9 @@ struct hrtimer_cpu_base {
+@@ -22,6 +22,7 @@
+ #include <linux/percpu.h>
+ #include <linux/timer.h>
+ #include <linux/timerqueue.h>
++#include <linux/wait.h>
+ 
+ struct hrtimer_clock_base;
+ struct hrtimer_cpu_base;
+@@ -195,6 +196,9 @@ struct hrtimer_cpu_base {
  	unsigned int			nr_hangs;
  	unsigned int			max_hang_time;
  #endif
@@ -28,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
  } ____cacheline_aligned;
  
-@@ -416,6 +419,13 @@ static inline void hrtimer_restart(struc
+@@ -404,6 +408,13 @@ static inline void hrtimer_restart(struc
  	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
  }
  
@@ -42,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Query timers: */
  extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
  
-@@ -440,7 +450,7 @@ static inline int hrtimer_is_queued(stru
+@@ -428,7 +439,7 @@ static inline int hrtimer_is_queued(stru
   * Helper function to check, whether the timer is running the callback
   * function
   */
@@ -53,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -856,6 +856,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -828,6 +828,32 @@ u64 hrtimer_forward(struct hrtimer *time
  }
  EXPORT_SYMBOL_GPL(hrtimer_forward);
  
@@ -86,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * enqueue_hrtimer - internal function to (re)start a timer
   *
-@@ -1073,7 +1099,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1042,7 +1068,7 @@ int hrtimer_cancel(struct hrtimer *timer
  
  		if (ret >= 0)
  			return ret;
@@ -95,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1464,6 +1490,8 @@ void hrtimer_run_queues(void)
+@@ -1438,6 +1464,8 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -104,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1623,6 +1651,9 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1597,6 +1625,9 @@ int hrtimers_prepare_cpu(unsigned int cp
  
  	cpu_base->cpu = cpu;
  	hrtimer_init_hres(cpu_base);
@@ -116,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/time/itimer.c
 +++ b/kernel/time/itimer.c
-@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
+@@ -195,6 +195,7 @@ int do_setitimer(int which, struct itime
  		/* We are sharing ->siglock with it_real_fn() */
  		if (hrtimer_try_to_cancel(timer) < 0) {
  			spin_unlock_irq(&tsk->sighand->siglock);
@@ -126,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		expires = timeval_to_ktime(value->it_value);
 --- a/kernel/time/posix-timers.c
 +++ b/kernel/time/posix-timers.c
-@@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+@@ -829,6 +829,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
  	return overrun;
  }
  
@@ -147,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Set a POSIX.1b interval timer. */
  /* timr->it_lock is taken. */
  static int
-@@ -905,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -906,6 +920,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
  	if (!timr)
  		return -EINVAL;
  
@@ -155,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	kc = clockid_to_kclock(timr->it_clock);
  	if (WARN_ON_ONCE(!kc || !kc->timer_set))
  		error = -EINVAL;
-@@ -913,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -914,9 +929,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
  
  	unlock_timer(timr, flag);
  	if (error == TIMER_RETRY) {
@@ -168,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (old_setting && !error &&
  	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -953,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+@@ -954,10 +972,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
  	if (!timer)
  		return -EINVAL;
  
@@ -184,7 +192,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	spin_lock(&current->sighand->siglock);
  	list_del(&timer->list);
-@@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itime
+@@ -983,8 +1006,18 @@ static void itimer_delete(struct k_itime
  retry_delete:
  	spin_lock_irqsave(&timer->it_lock, flags);
  
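The net effect on cancellation: instead of busy-spinning with cpu_relax()
until a softirq-expired callback finishes, the canceller sleeps on the
per-base waitqueue. The hrtimer_cancel() retry loop after this patch
boils down to:

    for (;;) {
        int ret = hrtimer_try_to_cancel(timer);

        if (ret >= 0)
            return ret;                 /* inactive, or removed before it fired */
        hrtimer_wait_for_timer(timer);  /* sleep until the callback completes */
    }
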
diff --git a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 14ecd11..1eb542e 100644
--- a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Clark Williams <williams at redhat.com>
 Date: Tue, 26 May 2015 10:43:43 -0500
 Subject: i915: bogus warning from i915 when running on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
 handler, which whines constantly on the RT kernel (since the interrupt
@@ -19,8 +19,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i9
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+@@ -12113,7 +12113,7 @@ void intel_check_page_flip(struct drm_i9
+ 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  	struct intel_flip_work *work;
  
 -	WARN_ON(!in_interrupt());
diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
index 12230a5..0a05105 100644
--- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
+++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: ide: Do not disable interrupts for PREEMPT-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the local_irq_*_nort variants.
 
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			return;
 --- a/drivers/ide/ide-io.c
 +++ b/drivers/ide/ide-io.c
-@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
+@@ -660,7 +660,7 @@ void ide_timer_expiry (unsigned long dat
  		/* disable_irq_nosync ?? */
  		disable_irq(hwif->irq);
  		/* local CPU only, as if we were handling an interrupt */
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef DEBUG
 --- a/drivers/ide/ide-taskfile.c
 +++ b/drivers/ide/ide-taskfile.c
-@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
  
  		page_is_high = PageHighMem(page);
  		if (page_is_high)
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		buf = kmap_atomic(page) + offset;
  
-@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
  		kunmap_atomic(buf);
  
  		if (page_is_high)
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		len -= nr_bytes;
  	}
-@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr
+@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr
  	}
  
  	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
deleted file mode 100644
index 0686a35..0000000
--- a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-From: Thomas Gleixner <tglx at linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: idr: Use local lock instead of preempt enable/disable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-We need to protect the per cpu variable and prevent migration.
-
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
----
- include/linux/idr.h |    4 ++++
- lib/idr.c           |   43 +++++++++++++++++++++++++++++++++++++------
- 2 files changed, 41 insertions(+), 6 deletions(-)
-
---- a/include/linux/idr.h
-+++ b/include/linux/idr.h
-@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
-  * Each idr_preload() should be matched with an invocation of this
-  * function.  See idr_preload() for details.
-  */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void idr_preload_end(void);
-+#else
- static inline void idr_preload_end(void)
- {
- 	preempt_enable();
- }
-+#endif
- 
- /**
-  * idr_find - return pointer for given id
---- a/lib/idr.c
-+++ b/lib/idr.c
-@@ -30,6 +30,7 @@
- #include <linux/idr.h>
- #include <linux/spinlock.h>
- #include <linux/percpu.h>
-+#include <linux/locallock.h>
- 
- #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
- #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
-@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *
- static DEFINE_PER_CPU(int, idr_preload_cnt);
- static DEFINE_SPINLOCK(simple_ida_lock);
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
-+
-+static inline void idr_preload_lock(void)
-+{
-+	local_lock(idr_lock);
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+	local_unlock(idr_lock);
-+}
-+
-+void idr_preload_end(void)
-+{
-+	idr_preload_unlock();
-+}
-+EXPORT_SYMBOL(idr_preload_end);
-+#else
-+static inline void idr_preload_lock(void)
-+{
-+	preempt_disable();
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+	preempt_enable();
-+}
-+#endif
-+
-+
- /* the maximum ID which can be allocated given idr->layers */
- static int idr_max(int layers)
- {
-@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc
- 	 * context.  See idr_preload() for details.
- 	 */
- 	if (!in_interrupt()) {
--		preempt_disable();
-+		idr_preload_lock();
- 		new = __this_cpu_read(idr_preload_head);
- 		if (new) {
- 			__this_cpu_write(idr_preload_head, new->ary[0]);
- 			__this_cpu_dec(idr_preload_cnt);
- 			new->ary[0] = NULL;
- 		}
--		preempt_enable();
-+		idr_preload_unlock();
- 		if (new)
- 			return new;
- 	}
-@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *id
- 	idr_mark_full(pa, id);
- }
- 
--
- /**
-  * idr_preload - preload for idr_alloc()
-  * @gfp_mask: allocation mask to use for preloading
-@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
- 	WARN_ON_ONCE(in_interrupt());
- 	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
- 
--	preempt_disable();
-+	idr_preload_lock();
- 
- 	/*
- 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
-@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
- 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- 		struct idr_layer *new;
- 
--		preempt_enable();
-+		idr_preload_unlock();
- 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
--		preempt_disable();
-+		idr_preload_lock();
- 		if (!new)
- 			break;
- 
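
The local-lock pattern this dropped patch used is still the standard RT
replacement for preempt_disable() around per-CPU data; a minimal sketch
with the locallock API (the cache structure below is hypothetical):

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    struct my_cache { int slots; };

    static DEFINE_LOCAL_IRQ_LOCK(my_lock);
    static DEFINE_PER_CPU(struct my_cache, my_cache);

    static void refill_cache(void)
    {
        struct my_cache *c;

        local_lock(my_lock);    /* plain preempt_disable() on !RT kernels */
        c = this_cpu_ptr(&my_cache);
        /* refill c; on RT a sleeping per-CPU lock keeps other users out */
        local_unlock(my_lock);
    }
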
diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
index cca140d..0b985a3 100644
--- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
@@ -1,7 +1,7 @@
 From: Sven-Thorsten Dietrich <sdietrich at novell.com>
 Date: Fri, 3 Jul 2009 08:30:35 -0500
 Subject: infiniband: Mellanox IB driver patch use _nort() primitives
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Fixes an in_atomic stack dump when the Mellanox module is loaded into the
 RT kernel.
diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
index 5b07aa8..727c215 100644
--- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the _nort() primitives.
 
diff --git a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
deleted file mode 100644
index 8ff2fdf..0000000
--- a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
+++ /dev/null
@@ -1,281 +0,0 @@
-Subject: Introduce migrate_disable() + cpu_light()
-From: Thomas Gleixner <tglx at linutronix.de>
-Date: Fri, 17 Jun 2011 15:42:38 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Introduce migrate_disable(). The task can't be pushed to another CPU but can
-be preempted.
-
-From: Peter Zijlstra <a.p.zijlstra at chello.nl>:
-|Make migrate_disable() be a preempt_disable() for !rt kernels. This
-|allows generic code to use it but still enforces that these code
-|sections stay relatively small.
-|
-|A preemptible migrate_disable() accessible for general use would allow
-|people growing arbitrary per-cpu crap instead of cleaning these things
-|up.
-
-From: Steven Rostedt <rostedt at goodmis.org>
-| The migrate_disable() can cause a bit of a overhead to the RT kernel,
-| as changing the affinity is expensive to do at every lock encountered.
-| As a running task can not migrate, the actual disabling of migration
-| does not need to occur until the task is about to schedule out.
-|
-| In most cases, a task that disables migration will enable it before
-| it schedules, making this change improve performance tremendously.
-
-On top of this, build get/put_cpu_light(). It is similar to get_cpu():
-it uses migrate_disable() instead of preempt_disable(). That means the user
-remains on the same CPU but the function using it may be preempted and
-invoked again from another caller on the same CPU.
-
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
----
- include/linux/cpu.h     |    3 ++
- include/linux/preempt.h |    9 ++++++
- include/linux/sched.h   |   39 +++++++++++++++++++++-----
- include/linux/smp.h     |    3 ++
- kernel/sched/core.c     |   70 +++++++++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/debug.c    |    7 ++++
- lib/smp_processor_id.c  |    5 ++-
- 7 files changed, 125 insertions(+), 11 deletions(-)
-
---- a/include/linux/cpu.h
-+++ b/include/linux/cpu.h
-@@ -173,6 +173,9 @@ static inline void cpu_notifier_register
- #endif /* CONFIG_SMP */
- extern struct bus_type cpu_subsys;
- 
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
-+
- #ifdef CONFIG_HOTPLUG_CPU
- /* Stop CPUs going up and down. */
- 
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -257,11 +257,20 @@ do { \
- # define preempt_enable_rt()		preempt_enable()
- # define preempt_disable_nort()		barrier()
- # define preempt_enable_nort()		barrier()
-+# ifdef CONFIG_SMP
-+   extern void migrate_disable(void);
-+   extern void migrate_enable(void);
-+# else /* CONFIG_SMP */
-+#  define migrate_disable()		barrier()
-+#  define migrate_enable()		barrier()
-+# endif /* CONFIG_SMP */
- #else
- # define preempt_disable_rt()		barrier()
- # define preempt_enable_rt()		barrier()
- # define preempt_disable_nort()		preempt_disable()
- # define preempt_enable_nort()		preempt_enable()
-+# define migrate_disable()		preempt_disable()
-+# define migrate_enable()		preempt_enable()
- #endif
- 
- #ifdef CONFIG_PREEMPT_NOTIFIERS
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1520,6 +1520,12 @@ struct task_struct {
- #endif
- 
- 	unsigned int policy;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	int migrate_disable;
-+# ifdef CONFIG_SCHED_DEBUG
-+	int migrate_disable_atomic;
-+# endif
-+#endif
- 	int nr_cpus_allowed;
- 	cpumask_t cpus_allowed;
- 
-@@ -1997,14 +2003,6 @@ static inline struct vm_struct *task_sta
- }
- #endif
- 
--/* Future-safe accessor for struct task_struct's cpus_allowed. */
--#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
--
--static inline int tsk_nr_cpus_allowed(struct task_struct *p)
--{
--	return p->nr_cpus_allowed;
--}
--
- #define TNF_MIGRATED	0x01
- #define TNF_NO_GROUP	0x02
- #define TNF_SHARED	0x04
-@@ -3522,6 +3520,31 @@ static inline void set_task_cpu(struct t
- 
- #endif /* CONFIG_SMP */
- 
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	return p->migrate_disable;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+	if (__migrate_disabled(p))
-+		return cpumask_of(task_cpu(p));
-+
-+	return &p->cpus_allowed;
-+}
-+
-+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-+{
-+	if (__migrate_disabled(p))
-+		return 1;
-+	return p->nr_cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
- 
---- a/include/linux/smp.h
-+++ b/include/linux/smp.h
-@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
- #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
- #define put_cpu()		preempt_enable()
- 
-+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
-+#define put_cpu_light()		migrate_enable()
-+
- /*
-  * Callback to arch code if there's nosmp or maxcpus=0 on the
-  * boot command line:
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1100,6 +1100,11 @@ void do_set_cpus_allowed(struct task_str
- 
- 	lockdep_assert_held(&p->pi_lock);
- 
-+	if (__migrate_disabled(p)) {
-+		cpumask_copy(&p->cpus_allowed, new_mask);
-+		return;
-+	}
-+
- 	queued = task_on_rq_queued(p);
- 	running = task_current(rq, p);
- 
-@@ -1179,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct
- 	}
- 
- 	/* Can the task run on the task's current CPU? If so, we're done */
--	if (cpumask_test_cpu(task_cpu(p), new_mask))
-+	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- 		goto out;
- 
- 	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-@@ -3252,6 +3257,69 @@ static inline void schedule_debug(struct
- 	schedstat_inc(this_rq()->sched_count);
- }
- 
-+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
-+
-+void migrate_disable(void)
-+{
-+	struct task_struct *p = current;
-+
-+	if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+		p->migrate_disable_atomic++;
-+#endif
-+		return;
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	WARN_ON_ONCE(p->migrate_disable_atomic);
-+#endif
-+
-+	if (p->migrate_disable) {
-+		p->migrate_disable++;
-+		return;
-+	}
-+
-+	preempt_disable();
-+	pin_current_cpu();
-+	p->migrate_disable = 1;
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+	struct task_struct *p = current;
-+
-+	if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+		p->migrate_disable_atomic--;
-+#endif
-+		return;
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	WARN_ON_ONCE(p->migrate_disable_atomic);
-+#endif
-+	WARN_ON_ONCE(p->migrate_disable <= 0);
-+
-+	if (p->migrate_disable > 1) {
-+		p->migrate_disable--;
-+		return;
-+	}
-+
-+	preempt_disable();
-+	/*
-+	 * Clearing migrate_disable causes tsk_cpus_allowed to
-+	 * show the tasks original cpu affinity.
-+	 */
-+	p->migrate_disable = 0;
-+
-+	unpin_current_cpu();
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(migrate_enable);
-+#endif
-+
- /*
-  * Pick up the highest-prio task:
-  */
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int
- 	P(rt_throttled);
- 	PN(rt_time);
- 	PN(rt_runtime);
-+#ifdef CONFIG_SMP
-+	P(rt_nr_migratory);
-+#endif
- 
- #undef PN
- #undef P
-@@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_st
- #endif
- 	P(policy);
- 	P(prio);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	P(migrate_disable);
-+#endif
-+	P(nr_cpus_allowed);
- #undef PN_SCHEDSTAT
- #undef PN
- #undef __PN
---- a/lib/smp_processor_id.c
-+++ b/lib/smp_processor_id.c
-@@ -39,8 +39,9 @@ notrace static unsigned int check_preemp
- 	if (!printk_ratelimit())
- 		goto out_enable;
- 
--	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
--		what1, what2, preempt_count() - 1, current->comm, current->pid);
-+	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
-+		what1, what2, preempt_count() - 1, __migrate_disabled(current),
-+		current->comm, current->pid);
- 
- 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- 	dump_stack();
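
The get_cpu_light() pattern this dropped patch introduced is worth a
sketch: the caller stays on one CPU but can still be preempted, so the
per-CPU data needs a lock of its own (the ring buffer below and its
fields are hypothetical):

    struct item { struct list_head list; };
    struct ring { spinlock_t lock; struct list_head items; };

    static DEFINE_PER_CPU(struct ring, rings);

    static void queue_item(struct item *it)
    {
        struct ring *r = &per_cpu(rings, get_cpu_light());

        spin_lock(&r->lock);    /* still needed: we can be preempted here */
        list_add_tail(&it->list, &r->items);
        spin_unlock(&r->lock);
        put_cpu_light();        /* migrate_enable() */
    }
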
diff --git a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
index 00f0814..5c53a3f 100644
--- a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
@@ -1,7 +1,7 @@
 Subject: iommu/amd: Use WARN_ON_NORT in __attach_device()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:22:23 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT does not disable interrupts here, but the protection is still
 correct. Fixup the WARN_ON so it won't yell on RT.
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/iommu/amd_iommu.c
 +++ b/drivers/iommu/amd_iommu.c
-@@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_
+@@ -1929,10 +1929,10 @@ static int __attach_device(struct iommu_
  	int ret;
  
  	/*
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* lock domain */
  	spin_lock(&domain->lock);
-@@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu
+@@ -2100,10 +2100,10 @@ static void __detach_device(struct iommu
  	struct protection_domain *domain;
  
  	/*
diff --git a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
index c4040ec..a12eace 100644
--- a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+++ b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 16:58:19 +0200
 Subject: [PATCH] iommu/iova: don't disable preempt around this_cpu_ptr()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Commit 583248e6620a ("iommu/iova: Disable preemption around use of
 this_cpu_ptr()") disables preemption while accessing a per-CPU variable.
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static bool iova_rcache_insert(struct iova_domain *iovad,
  			       unsigned long pfn,
-@@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iova
+@@ -419,10 +420,8 @@ alloc_iova_fast(struct iova_domain *iova
  
  		/* Try replenishing IOVAs by flushing rcache. */
  		flushed_rcache = true;
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto retry;
  	}
  
-@@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct
+@@ -750,7 +749,7 @@ static bool __iova_rcache_insert(struct
  	bool can_insert = false;
  	unsigned long flags;
  
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_lock_irqsave(&cpu_rcache->lock, flags);
  
  	if (!iova_magazine_full(cpu_rcache->loaded)) {
-@@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct
+@@ -780,7 +779,6 @@ static bool __iova_rcache_insert(struct
  		iova_magazine_push(cpu_rcache->loaded, iova_pfn);
  
  	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (mag_to_free) {
  		iova_magazine_free_pfns(mag_to_free, iovad);
-@@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(s
+@@ -814,7 +812,7 @@ static unsigned long __iova_rcache_get(s
  	bool has_pfn = false;
  	unsigned long flags;
  
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_lock_irqsave(&cpu_rcache->lock, flags);
  
  	if (!iova_magazine_empty(cpu_rcache->loaded)) {
-@@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(s
+@@ -836,7 +834,6 @@ static unsigned long __iova_rcache_get(s
  		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
  
  	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
diff --git a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
index 9318fbe..47c62ab 100644
--- a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+++ b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 17:16:44 +0200
 Subject: [PATCH] iommu/vt-d: don't disable preemption while accessing
  deferred_flush()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 get_cpu() disables preemption and returns the current CPU number. The
 CPU number is later only used once while retrieving the address of the
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/iommu/intel-iommu.c
 +++ b/drivers/iommu/intel-iommu.c
-@@ -479,7 +479,7 @@ struct deferred_flush_data {
+@@ -480,7 +480,7 @@ struct deferred_flush_data {
  	struct deferred_flush_table *tables;
  };
  
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  /* bitmap for indexing intel_iommus */
  static int g_num_of_iommus;
-@@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain
+@@ -3720,10 +3720,8 @@ static void add_unmap(struct dmar_domain
  	struct intel_iommu *iommu;
  	struct deferred_flush_entry *entry;
  	struct deferred_flush_data *flush_data;
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/* Flush all CPUs' entries to avoid deferring too much.  If
  	 * this becomes a bottleneck, can just flush us, and rely on
-@@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain
+@@ -3756,8 +3754,6 @@ static void add_unmap(struct dmar_domain
  	}
  	flush_data->size++;
  	spin_unlock_irqrestore(&flush_data->lock, flags);
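
The shape of the conversion in this and the previous iommu patch: a
get_cpu()/put_cpu() pair that only served to compute a per-CPU address is
replaced by raw_cpu_ptr(), with the per-CPU spinlock providing the real
protection (sketch):

    /* before: preemption disabled across the whole section */
    cpu = get_cpu();
    flush_data = per_cpu_ptr(&deferred_flush, cpu);
    spin_lock_irqsave(&flush_data->lock, flags);
    /* ... queue the entry ... */
    spin_unlock_irqrestore(&flush_data->lock, flags);
    put_cpu();

    /* after: the lock alone suffices, preemption stays enabled */
    flush_data = raw_cpu_ptr(&deferred_flush);
    spin_lock_irqsave(&flush_data->lock, flags);
    /* ... queue the entry ... */
    spin_unlock_irqrestore(&flush_data->lock, flags);
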
diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
deleted file mode 100644
index 0a9f209..0000000
--- a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-Subject: ipc/sem: Rework semaphore wakeups
-From: Peter Zijlstra <peterz at infradead.org>
-Date: Wed, 14 Sep 2011 11:57:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Current sysv sems have a weird ass wakeup scheme that involves keeping
-preemption disabled over a potential O(n^2) loop and busy waiting on
-that on other CPUs.
-
-Kill this and simply wake the task directly from under the sem_lock.
-
-This was discovered by a migrate_disable() debug feature that
-disallows:
-
-  spin_lock();
-  preempt_disable();
-  spin_unlock()
-  preempt_enable();
-
-Cc: Manfred Spraul <manfred at colorfullife.com>
-Suggested-by: Thomas Gleixner <tglx at linutronix.de>
-Reported-by: Mike Galbraith <efault at gmx.de>
-Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
-Cc: Manfred Spraul <manfred at colorfullife.com>
-Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
----
- ipc/sem.c |   10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/ipc/sem.c
-+++ b/ipc/sem.c
-@@ -712,6 +712,13 @@ static int perform_atomic_semop(struct s
- static void wake_up_sem_queue_prepare(struct list_head *pt,
- 				struct sem_queue *q, int error)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+	struct task_struct *p = q->sleeper;
-+	get_task_struct(p);
-+	q->status = error;
-+	wake_up_process(p);
-+	put_task_struct(p);
-+#else
- 	if (list_empty(pt)) {
- 		/*
- 		 * Hold preempt off so that we don't get preempted and have the
-@@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(st
- 	q->pid = error;
- 
- 	list_add_tail(&q->list, pt);
-+#endif
- }
- 
- /**
-@@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(st
-  */
- static void wake_up_sem_queue_do(struct list_head *pt)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- 	struct sem_queue *q, *t;
- 	int did_something;
- 
-@@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct
- 	}
- 	if (did_something)
- 		preempt_enable();
-+#endif
- }
- 
- static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 690c3aa..a21e31a 100644
--- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Allow disabling of softirq processing in irq thread context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 Jan 2012 13:01:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The processing of softirqs in irq thread context is a performance gain
 for the non-rt workloads of a system, but it's counterproductive for
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -881,7 +881,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -883,7 +883,15 @@ irq_forced_thread_fn(struct irq_desc *de
  	local_bh_disable();
  	ret = action->thread_fn(action->irq, action->dev_id);
  	irq_finalize_oneshot(desc, action);
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  
-@@ -1338,6 +1346,9 @@ static int
+@@ -1340,6 +1348,9 @@ static int
  			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  		}
  
diff --git a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
index cae7511..322563d 100644
--- a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: Move irq safe work to irq context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 15 Nov 2015 18:40:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On architectures where arch_irq_work_has_interrupt() returns false, we
 end up running the irq safe work from the softirq context. That
@@ -56,7 +56,7 @@ Cc: stable-rt at vger.kernel.org
   * Synchronize against the irq_work @entry, ensures the entry is not
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
+@@ -1604,7 +1604,7 @@ void update_process_times(int user_tick)
  	scheduler_tick();
  	run_local_timers();
  	rcu_check_callbacks(user_tick);
@@ -65,7 +65,7 @@ Cc: stable-rt at vger.kernel.org
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1684,9 +1684,7 @@ static __latent_entropy void run_timer_s
+@@ -1645,9 +1645,7 @@ static __latent_entropy void run_timer_s
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
diff --git a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
index 1a233be..b2afe0d 100644
--- a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: push most work into softirq context
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Initially we deferred all irqwork into softirq because we didn't want the
 latency spikes if perf or another user was busy and delayed the RT task.
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
-@@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -103,6 +103,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
  	rt_rq->push_cpu = nr_cpu_ids;
  	raw_spin_lock_init(&rt_rq->push_lock);
  	init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* We start is dequeued state, because no RT tasks are queued */
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -220,6 +220,7 @@ static void nohz_full_kick_func(struct i
+@@ -224,6 +224,7 @@ static void nohz_full_kick_func(struct i
  
  static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  	.func = nohz_full_kick_func,
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
+@@ -1604,7 +1604,7 @@ void update_process_times(int user_tick)
  	scheduler_tick();
  	run_local_timers();
  	rcu_check_callbacks(user_tick);
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1684,6 +1684,10 @@ static __latent_entropy void run_timer_s
+@@ -1645,6 +1645,10 @@ static __latent_entropy void run_timer_s
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch
index 0169af8..c864dbd 100644
--- a/debian/patches/features/all/rt/jump-label-rt.patch
+++ b/debian/patches/features/all/rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
 Subject: jump-label: disable if stop_machine() is used
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Some architectures use stop_machine() while switching the opcode, which
 leads to latency spikes.
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -36,7 +36,7 @@ config ARM
+@@ -42,7 +42,7 @@ config ARM
  	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
  	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
  	select HAVE_ARCH_HARDENED_USERCOPY
diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
index 67de3bd..58af067 100644
--- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Disable config options which are not RT compatible
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Disable stuff which is known to have issues on RT
 
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/Kconfig
 +++ b/arch/Kconfig
-@@ -9,6 +9,7 @@ config OPROFILE
+@@ -12,6 +12,7 @@ config OPROFILE
  	tristate "OProfile system profiling"
  	depends on PROFILING
  	depends on HAVE_OPROFILE
diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
index d60cedd..1fcb73f 100644
--- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
+++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Add PREEMPT_RT_FULL
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 14:58:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Introduce the final symbol for PREEMPT_RT_FULL.
 
diff --git a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
index 8b2aa70..d9fb768 100644
--- a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
+++ b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 19 Mar 2013 14:44:30 +0100
 Subject: kernel/SRCU: provide a static initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There are static-initializer macros for three out of the four
 possible notifier types, which are:
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		RAW_NOTIFIER_INIT(name)
  
 +#define _SRCU_NOTIFIER_HEAD(name, mod)				\
-+	static DEFINE_PER_CPU(struct srcu_struct_array,		\
++	static DEFINE_PER_CPU(struct srcu_array,		\
 +			name##_head_srcu_array);		\
 +	mod struct srcu_notifier_head name =			\
 +			SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 @@ -119,7 +119,7 @@ void process_srcu(struct work_struct *wo
   */
  #define __DEFINE_SRCU(name, is_static)					\
- 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+ 	static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
 -	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
 +	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
  #define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index e9c999b..1607d40 100644
--- a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 7 Jun 2013 22:37:06 +0200
 Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If a kthread is pinned to CPUx and CPUx is going down then we get into
 trouble:
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -259,6 +259,7 @@ struct hotplug_pcp {
+@@ -254,6 +254,7 @@ struct hotplug_pcp {
  	int refcount;
  	int grab_lock;
  	struct completion synced;
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_PREEMPT_RT_FULL
  	/*
  	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -362,6 +363,7 @@ static int sync_unplug_thread(void *data
+@@ -357,6 +358,7 @@ static int sync_unplug_thread(void *data
  {
  	struct hotplug_pcp *hp = data;
  
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	preempt_disable();
  	hp->unplug = current;
  	wait_for_pinned_cpus(hp);
-@@ -427,6 +429,14 @@ static void __cpu_unplug_sync(struct hot
+@@ -422,6 +424,14 @@ static void __cpu_unplug_sync(struct hot
  	wait_for_completion(&hp->synced);
  }
  
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Start the sync_unplug_thread on the target cpu and wait for it to
   * complete.
-@@ -450,6 +460,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -445,6 +455,7 @@ static int cpu_unplug_begin(unsigned int
  	tell_sched_cpu_down_begin(cpu);
  
  	init_completion(&hp->synced);
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
  	if (IS_ERR(hp->sync_tsk)) {
-@@ -465,8 +476,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -460,8 +471,7 @@ static int cpu_unplug_begin(unsigned int
  	 * wait for tasks that are going to enter these sections and
  	 * we must not have them block.
  	 */
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return 0;
  }
  
-@@ -1062,6 +1072,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -961,6 +971,7 @@ static int takedown_cpu(unsigned int cpu
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int err;
  
diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 08d9309..24eef39 100644
--- a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 14 Jun 2013 17:16:35 +0200
 Subject: kernel/hotplug: restore original cpu mask oncpu/down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If a task which is allowed to run only on CPU X puts CPU Y down, then it
 will be allowed on all CPUs but not on CPU Y after it comes back from
@@ -16,15 +16,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1166,6 +1166,7 @@ static int __ref _cpu_down(unsigned int
- 	bool hasdied = false;
+@@ -1055,6 +1055,7 @@ static int __ref _cpu_down(unsigned int
+ 	int prev_state, ret = 0;
  	int mycpu;
  	cpumask_var_t cpumask;
 +	cpumask_var_t cpumask_org;
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -1176,6 +1177,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1065,6 +1066,12 @@ static int __ref _cpu_down(unsigned int
  	/* Move the downtaker off the unplug cpu */
  	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
  		return -ENOMEM;
@@ -33,11 +33,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		return -ENOMEM;
 +	}
 +
-+	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
++	cpumask_copy(cpumask_org, &current->cpus_mask);
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
-@@ -1184,7 +1191,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1073,7 +1080,8 @@ static int __ref _cpu_down(unsigned int
  	if (mycpu == cpu) {
  		printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
  		migrate_enable();
@@ -47,10 +47,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	cpu_hotplug_begin();
-@@ -1238,6 +1246,9 @@ static int __ref _cpu_down(unsigned int
- 	/* This post dead nonsense must die */
- 	if (!ret && hasdied)
- 		cpu_notify_nofail(CPU_POST_DEAD, cpu);
+@@ -1123,6 +1131,9 @@ static int __ref _cpu_down(unsigned int
+ out_cancel:
+ 	cpu_hotplug_done();
+ 	migrate_enable();
 +restore_cpus:
 +	set_cpus_allowed_ptr(current, cpumask_org);
 +	free_cpumask_var(cpumask_org);
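
The idiom this hunk applies is save/restrict/restore: capture the
caller's affinity before pinning it away from the dying CPU, then put
it back on every exit path. A minimal sketch of the same pattern
outside the hotplug code (run_off_cpu() and do_work() are hypothetical
names; the cpumask helpers are the stock kernel API):

	static int run_off_cpu(unsigned int cpu)
	{
		cpumask_var_t new_mask, org_mask;
		int ret;

		if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
			return -ENOMEM;
		if (!alloc_cpumask_var(&org_mask, GFP_KERNEL)) {
			free_cpumask_var(new_mask);
			return -ENOMEM;
		}

		/* Remember the original affinity before touching it. */
		cpumask_copy(org_mask, &current->cpus_mask);

		/* Allow every online CPU except @cpu, then move away. */
		cpumask_andnot(new_mask, cpu_online_mask, cpumask_of(cpu));
		ret = set_cpus_allowed_ptr(current, new_mask);
		free_cpumask_var(new_mask);
		if (ret)
			goto restore;

		ret = do_work();	/* whatever must not run on @cpu */

	restore:
		/* Every exit path restores the saved mask. */
		set_cpus_allowed_ptr(current, org_mask);
		free_cpumask_var(org_mask);
		return ret;
	}
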
diff --git a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
deleted file mode 100644
index 8e8e83b..0000000
--- a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Tue, 9 Feb 2016 18:18:01 +0100
-Subject: kernel: migrate_disable() do fastpath in atomic &
- irqs-off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-With interrupts off it makes no sense to do the long path since we can't
-leave the CPU anyway. Also we might end up in a recursion with lockdep.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- kernel/sched/core.c |    4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3299,7 +3299,7 @@ void migrate_disable(void)
- {
- 	struct task_struct *p = current;
- 
--	if (in_atomic()) {
-+	if (in_atomic() || irqs_disabled()) {
- #ifdef CONFIG_SCHED_DEBUG
- 		p->migrate_disable_atomic++;
- #endif
-@@ -3326,7 +3326,7 @@ void migrate_enable(void)
- {
- 	struct task_struct *p = current;
- 
--	if (in_atomic()) {
-+	if (in_atomic() || irqs_disabled()) {
- #ifdef CONFIG_SCHED_DEBUG
- 		p->migrate_disable_atomic--;
- #endif
diff --git a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 7882587..4714554 100644
--- a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 4 Feb 2016 16:38:10 +0100
 Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Otherwise we get a WARN_ON() backtrace and some events are reported as
 "not counted".
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1043,6 +1043,7 @@ static void __perf_mux_hrtimer_init(stru
  	raw_spin_lock_init(&cpuctx->hrtimer_lock);
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
  	timer->function = perf_mux_hrtimer_handler;
diff --git a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index c35bec7..b6c3d9f 100644
--- a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 19 May 2016 17:45:27 +0200
 Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On -RT we try to acquire sleeping locks which might lead to warnings
 from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1628,6 +1628,11 @@ static void call_console_drivers(int lev
+@@ -1630,6 +1630,11 @@ static void call_console_drivers(const c
  	if (!console_drivers)
  		return;
  
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	migrate_disable();
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
-@@ -2556,6 +2561,11 @@ void console_unblank(void)
+@@ -2357,6 +2362,11 @@ void console_unblank(void)
  {
  	struct console *c;
  
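
Both hunks above gain the same kind of early-return guard; the added
lines are unchanged context in this refresh and therefore not shown.
The shape is a plain context check at the top of the function. A
sketch, assuming the stock in_irq()/in_nmi() helpers (the exact
condition in the patch may differ):

	/*
	 * On -RT the console path takes sleeping locks; printing from
	 * hard-IRQ or NMI context would trip lockdep or the rtmutex
	 * try-lock warning, so bail out and let the buffered output be
	 * flushed later from task context.
	 */
	if (in_irq() || in_nmi())
		return;
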
diff --git a/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
new file mode 100644
index 0000000..74438bf
--- /dev/null
+++ b/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -0,0 +1,761 @@
+From 866f2c8a7f0eec01a72cceeb73bab62eb3624694 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Tue, 4 Apr 2017 12:50:16 +0200
+Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
+wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
+much difference in !RT but in RT we used this to implement
+migrate_disable(). Within a migrate_disable() section the CPU mask is
+restricted to a single CPU while the "normal" CPU mask remains untouched.
+
+As an alternative implementation Ingo suggested to use
+	struct task_struct {
+		const cpumask_t		*cpus_ptr;
+		cpumask_t		cpus_mask;
+        };
+with
+	t->cpus_allowed_ptr = &t->cpus_allowed;
+
+In -RT we then can switch the cpus_ptr to
+	t->cpus_allowed_ptr = &cpumask_of(task_cpu(p));
+
+in a migration disabled region. The rules are simple:
+- Code that 'uses' ->cpus_allowed would use the pointer.
+- Code that 'modifies' ->cpus_allowed would use the direct mask.
+
+While converting the existing users I tried to stick with the rules
+above however… well mostly CPUFREQ tries to temporarily switch the CPU
+mask to do something on a certain CPU and then switches the mask back to
+its original value. So in theory `cpus_ptr' could or should be used.
+However if this is invoked in a migration disabled region (which is not
+the case because it would require something like preempt_disable() and
+set_cpus_allowed_ptr() might sleep so it can't be) then the "restore"
+part would restore the wrong mask. So it only looks strange and I go for
+the pointer…
+
+Some drivers copy the cpumask without cpumask_copy() and others use
+cpumask_copy but without alloc_cpumask_var(). I did not fix those as
+part of this, could do this as a follow up…
+
+So is this the way we want it?
+Is the usage of `cpus_ptr' vs `cpus_mask' for the set + restore part
+(see cpufreq users) what we want? At some point it looks like they
+should use a different interface for this. I am not sure why
+switching to a certain CPU is important but maybe it could be done via a
+workqueue from the CPUFREQ core (so we have a comment describing why we are
+doing this and a get_online_cpus() to ensure that the CPU does not go
+offline too early).
+
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Thomas Gleixner <tglx at linutronix.de>
+Cc: Mike Galbraith <efault at gmx.de>
+Cc: Ingo Molnar <mingo at elte.hu>
+Cc: Rafael J. Wysocki <rjw at rjwysocki.net>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ arch/ia64/kernel/mca.c                     |    2 -
+ arch/mips/include/asm/switch_to.h          |    4 +-
+ arch/mips/kernel/mips-mt-fpaff.c           |    2 -
+ arch/mips/kernel/traps.c                   |    6 ++--
+ arch/powerpc/platforms/cell/spufs/sched.c  |    2 -
+ arch/tile/include/asm/setup.h              |    2 -
+ arch/tile/kernel/hardwall.c                |   10 +++---
+ drivers/infiniband/hw/hfi1/affinity.c      |    6 ++--
+ drivers/infiniband/hw/hfi1/sdma.c          |    3 --
+ drivers/infiniband/hw/qib/qib_file_ops.c   |    7 ++--
+ fs/proc/array.c                            |    4 +-
+ include/linux/init_task.h                  |    3 +-
+ include/linux/sched.h                      |    5 ++-
+ kernel/cgroup/cpuset.c                     |    2 -
+ kernel/fork.c                              |    2 +
+ kernel/sched/core.c                        |   42 ++++++++++++++---------------
+ kernel/sched/cpudeadline.c                 |    4 +-
+ kernel/sched/cpupri.c                      |    4 +-
+ kernel/sched/deadline.c                    |    6 ++--
+ kernel/sched/fair.c                        |   28 +++++++++----------
+ kernel/sched/rt.c                          |    4 +-
+ kernel/trace/trace_hwlat.c                 |    2 -
+ lib/smp_processor_id.c                     |    2 -
+ samples/trace_events/trace-events-sample.c |    2 -
+ 24 files changed, 78 insertions(+), 76 deletions(-)
+
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, un
+ 	ti->cpu = cpu;
+ 	p->stack = ti;
+ 	p->state = TASK_UNINTERRUPTIBLE;
+-	cpumask_set_cpu(cpu, &p->cpus_allowed);
++	cpumask_set_cpu(cpu, &p->cpus_mask);
+ 	INIT_LIST_HEAD(&p->tasks);
+ 	p->parent = p->real_parent = p->group_leader = p;
+ 	INIT_LIST_HEAD(&p->children);
+--- a/arch/mips/include/asm/switch_to.h
++++ b/arch/mips/include/asm/switch_to.h
+@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
+  * inline to try to keep the overhead down. If we have been forced to run on
+  * a "CPU" with an FPU because of a previous high level of FP computation,
+  * but did not actually use the FPU during the most recent time-slice (CU1
+- * isn't set), we undo the restriction on cpus_allowed.
++ * isn't set), we undo the restriction on cpus_mask.
+  *
+  * We're not calling set_cpus_allowed() here, because we have no need to
+  * force prompt migration - we're already switching the current CPU to a
+@@ -57,7 +57,7 @@ do {									\
+ 	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
+ 	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
+ 		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
+-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
++		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
+ 	}								\
+ 	next->thread.emulated_fp = 0;					\
+ } while(0)
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -176,7 +176,7 @@ asmlinkage long mipsmt_sys_sched_getaffi
+ 	if (retval)
+ 		goto out_unlock;
+ 
+-	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++	cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ 	cpumask_and(&mask, &allowed, cpu_active_mask);
+ 
+ out_unlock:
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -1191,12 +1191,12 @@ static void mt_ase_fp_affinity(void)
+ 		 * restricted the allowed set to exclude any CPUs with FPUs,
+ 		 * we'll skip the procedure.
+ 		 */
+-		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
++		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ 			cpumask_t tmask;
+ 
+ 			current->thread.user_cpus_allowed
+-				= current->cpus_allowed;
+-			cpumask_and(&tmask, &current->cpus_allowed,
++				= current->cpus_mask;
++			cpumask_and(&tmask, &current->cpus_mask,
+ 				    &mt_fpu_cpumask);
+ 			set_cpus_allowed_ptr(current, &tmask);
+ 			set_thread_flag(TIF_FPUBOUND);
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_
+ 	 * runqueue. The context will be rescheduled on the proper node
+ 	 * if it is timesliced or preempted.
+ 	 */
+-	cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
++	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
+ 
+ 	/* Save the current cpu id for spu interrupt routing. */
+ 	ctx->last_ran = raw_smp_processor_id();
+--- a/arch/tile/include/asm/setup.h
++++ b/arch/tile/include/asm/setup.h
+@@ -49,7 +49,7 @@ int hardwall_ipi_valid(int cpu);
+ 
+ /* Hook hardwall code into changes in affinity. */
+ #define arch_set_cpus_allowed(p, new_mask) do { \
+-	if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
++	if (!cpumask_equal(p->cpus_ptr, new_mask)) \
+ 		hardwall_deactivate_all(p); \
+ } while (0)
+ #endif
+--- a/arch/tile/kernel/hardwall.c
++++ b/arch/tile/kernel/hardwall.c
+@@ -590,12 +590,12 @@ static int hardwall_activate(struct hard
+ 	 * Get our affinity; if we're not bound to this tile uniquely,
+ 	 * we can't access the network registers.
+ 	 */
+-	if (cpumask_weight(&p->cpus_allowed) != 1)
++	if (p->nr_cpus_allowed != 1)
+ 		return -EPERM;
+ 
+ 	/* Make sure we are bound to a cpu assigned to this resource. */
+ 	cpu = smp_processor_id();
+-	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
++	BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
+ 	if (!cpumask_test_cpu(cpu, &info->cpumask))
+ 		return -EINVAL;
+ 
+@@ -621,17 +621,17 @@ static int hardwall_activate(struct hard
+  * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
+  * This method may be called from exit_thread(), so we don't want to
+  * rely on too many fields of struct task_struct still being valid.
+- * We assume the cpus_allowed, pid, and comm fields are still valid.
++ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
+  */
+ static void _hardwall_deactivate(struct hardwall_type *hwt,
+ 				 struct task_struct *task)
+ {
+ 	struct thread_struct *ts = &task->thread;
+ 
+-	if (cpumask_weight(&task->cpus_allowed) != 1) {
++	if (task->nr_cpus_allowed != 1) {
+ 		pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
+ 		       task->pid, task->comm, hwt->name,
+-		       cpumask_weight(&task->cpus_allowed));
++		       task->nr_cpus_allowed);
+ 		BUG();
+ 	}
+ 
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node)
+ 	struct hfi1_affinity_node *entry;
+ 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
+ 	const struct cpumask *node_mask,
+-		*proc_mask = &current->cpus_allowed;
++		*proc_mask = current->cpus_ptr;
+ 	struct hfi1_affinity_node_list *affinity = &node_affinity;
+ 	struct cpu_mask_set *set = &affinity->proc;
+ 
+@@ -584,7 +584,7 @@ int hfi1_get_proc_affinity(int node)
+ 	 * check whether process/context affinity has already
+ 	 * been set
+ 	 */
+-	if (cpumask_weight(proc_mask) == 1) {
++	if (current->nr_cpus_allowed == 1) {
+ 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ 			  current->pid, current->comm,
+ 			  cpumask_pr_args(proc_mask));
+@@ -595,7 +595,7 @@ int hfi1_get_proc_affinity(int node)
+ 		cpu = cpumask_first(proc_mask);
+ 		cpumask_set_cpu(cpu, &set->used);
+ 		goto done;
+-	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
++	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
+ 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ 			  current->pid, current->comm,
+ 			  cpumask_pr_args(proc_mask));
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -856,14 +856,13 @@ struct sdma_engine *sdma_select_user_eng
+ {
+ 	struct sdma_rht_node *rht_node;
+ 	struct sdma_engine *sde = NULL;
+-	const struct cpumask *current_mask = &current->cpus_allowed;
+ 	unsigned long cpu_id;
+ 
+ 	/*
+ 	 * To ensure that always the same sdma engine(s) will be
+ 	 * selected make sure the process is pinned to this CPU only.
+ 	 */
+-	if (cpumask_weight(current_mask) != 1)
++	if (current->nr_cpus_allowed != 1)
+ 		goto out;
+ 
+ 	cpu_id = smp_processor_id();
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -1163,7 +1163,7 @@ static unsigned int qib_poll(struct file
+ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
+ {
+ 	struct qib_filedata *fd = fp->private_data;
+-	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
++	const unsigned int weight = current->nr_cpus_allowed;
+ 	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+ 	int local_cpu;
+ 
+@@ -1644,9 +1644,8 @@ static int qib_assign_ctxt(struct file *
+ 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+ 	else {
+ 		int unit;
+-		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
+-		const unsigned int weight =
+-			cpumask_weight(&current->cpus_allowed);
++		const unsigned int cpu = cpumask_first(current->cpus_ptr);
++		const unsigned int weight = current->nr_cpus_allowed;
+ 
+ 		if (weight == 1 && !test_bit(cpu, qib_cpulist))
+ 			if (!find_hca(cpu, &unit) && unit >= 0)
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -364,9 +364,9 @@ static inline void task_context_switch_c
+ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+ {
+ 	seq_printf(m, "Cpus_allowed:\t%*pb\n",
+-		   cpumask_pr_args(&task->cpus_allowed));
++		   cpumask_pr_args(task->cpus_ptr));
+ 	seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
+-		   cpumask_pr_args(&task->cpus_allowed));
++		   cpumask_pr_args(task->cpus_ptr));
+ }
+ 
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -226,7 +226,8 @@ extern struct cred init_cred;
+ 	.static_prio	= MAX_PRIO-20,					\
+ 	.normal_prio	= MAX_PRIO-20,					\
+ 	.policy		= SCHED_NORMAL,					\
+-	.cpus_allowed	= CPU_MASK_ALL,					\
++	.cpus_ptr	= &tsk.cpus_mask,				\
++	.cpus_mask	= CPU_MASK_ALL,					\
+ 	.nr_cpus_allowed= NR_CPUS,					\
+ 	.mm		= NULL,						\
+ 	.active_mm	= &init_mm,					\
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -535,7 +535,8 @@ struct task_struct {
+ 
+ 	unsigned int			policy;
+ 	int				nr_cpus_allowed;
+-	cpumask_t			cpus_allowed;
++	const cpumask_t			*cpus_ptr;
++	cpumask_t			cpus_mask;
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+ 	int				rcu_read_lock_nesting;
+@@ -1224,7 +1225,7 @@ extern struct pid *cad_pid;
+ #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
+ #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
+ #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
+-#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
++#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
+ #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
+ #define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
+ #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2092,7 +2092,7 @@ static void cpuset_fork(struct task_stru
+ 	if (task_css_is_root(task, cpuset_cgrp_id))
+ 		return;
+ 
+-	set_cpus_allowed_ptr(task, &current->cpus_allowed);
++	set_cpus_allowed_ptr(task, current->cpus_ptr);
+ 	task->mems_allowed = current->mems_allowed;
+ }
+ 
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -539,6 +539,8 @@ static struct task_struct *dup_task_stru
+ 	tsk->stack_canary = get_random_long();
+ #endif
+ 
++	if (orig->cpus_ptr == &orig->cpus_mask)
++		tsk->cpus_ptr = &tsk->cpus_mask;
+ 	/*
+ 	 * One for us, one for whoever does the "release_task()" (usually
+ 	 * parent)
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -986,7 +986,7 @@ static struct rq *__migrate_task(struct
+ 		return rq;
+ 
+ 	/* Affinity changed (again). */
+-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++	if (!cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+ 		return rq;
+ 
+ 	rq = move_queued_task(rq, p, dest_cpu);
+@@ -1012,7 +1012,7 @@ static int migration_cpu_stop(void *data
+ 	local_irq_disable();
+ 	/*
+ 	 * We need to explicitly wake pending tasks before running
+-	 * __migrate_task() such that we will not miss enforcing cpus_allowed
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
+ 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+ 	 */
+ 	sched_ttwu_pending();
+@@ -1043,7 +1043,7 @@ static int migration_cpu_stop(void *data
+  */
+ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+ {
+-	cpumask_copy(&p->cpus_allowed, new_mask);
++	cpumask_copy(&p->cpus_mask, new_mask);
+ 	p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+ 
+@@ -1113,7 +1113,7 @@ static int __set_cpus_allowed_ptr(struct
+ 		goto out;
+ 	}
+ 
+-	if (cpumask_equal(&p->cpus_allowed, new_mask))
++	if (cpumask_equal(p->cpus_ptr, new_mask))
+ 		goto out;
+ 
+ 	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+@@ -1264,10 +1264,10 @@ static int migrate_swap_stop(void *data)
+ 	if (task_cpu(arg->src_task) != arg->src_cpu)
+ 		goto unlock;
+ 
+-	if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
++	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
+ 		goto unlock;
+ 
+-	if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
++	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
+ 		goto unlock;
+ 
+ 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
+@@ -1308,10 +1308,10 @@ int migrate_swap(struct task_struct *cur
+ 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
+ 		goto out;
+ 
+-	if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
++	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
+ 		goto out;
+ 
+-	if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
++	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
+ 		goto out;
+ 
+ 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
+@@ -1455,7 +1455,7 @@ void kick_process(struct task_struct *p)
+ EXPORT_SYMBOL_GPL(kick_process);
+ 
+ /*
+- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
+  *
+  * A few notes on cpu_active vs cpu_online:
+  *
+@@ -1495,14 +1495,14 @@ static int select_fallback_rq(int cpu, s
+ 		for_each_cpu(dest_cpu, nodemask) {
+ 			if (!cpu_active(dest_cpu))
+ 				continue;
+-			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+ 				return dest_cpu;
+ 		}
+ 	}
+ 
+ 	for (;;) {
+ 		/* Any allowed, online CPU? */
+-		for_each_cpu(dest_cpu, &p->cpus_allowed) {
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
+ 			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+ 				continue;
+ 			if (!cpu_online(dest_cpu))
+@@ -1547,7 +1547,7 @@ static int select_fallback_rq(int cpu, s
+ }
+ 
+ /*
+- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
++ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
+  */
+ static inline
+ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -1557,11 +1557,11 @@ int select_task_rq(struct task_struct *p
+ 	if (p->nr_cpus_allowed > 1)
+ 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+ 	else
+-		cpu = cpumask_any(&p->cpus_allowed);
++		cpu = cpumask_any(p->cpus_ptr);
+ 
+ 	/*
+ 	 * In order not to call set_task_cpu() on a blocking task we need
+-	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
++	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
+ 	 * CPU.
+ 	 *
+ 	 * Since this is common to all placement strategies, this lives here.
+@@ -1569,7 +1569,7 @@ int select_task_rq(struct task_struct *p
+ 	 * [ this allows ->select_task() to simply return task_cpu(p) and
+ 	 *   not worry about this generic constraint ]
+ 	 */
+-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
++	if (unlikely(!cpumask_test_cpu(cpu, p->cpus_ptr) ||
+ 		     !cpu_online(cpu)))
+ 		cpu = select_fallback_rq(task_cpu(p), p);
+ 
+@@ -2543,7 +2543,7 @@ void wake_up_new_task(struct task_struct
+ #ifdef CONFIG_SMP
+ 	/*
+ 	 * Fork balancing, do it here and not earlier because:
+-	 *  - cpus_allowed can change in the fork path
++	 *  - cpus_ptr can change in the fork path
+ 	 *  - any previously selected CPU might disappear through hotplug
+ 	 *
+ 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+@@ -4315,7 +4315,7 @@ static int __sched_setscheduler(struct t
+ 			 * the entire root_domain to become SCHED_DEADLINE. We
+ 			 * will also fail if there's no bandwidth available.
+ 			 */
+-			if (!cpumask_subset(span, &p->cpus_allowed) ||
++			if (!cpumask_subset(span, p->cpus_ptr) ||
+ 			    rq->rd->dl_bw.bw == 0) {
+ 				task_rq_unlock(rq, p, &rf);
+ 				return -EPERM;
+@@ -4909,7 +4909,7 @@ long sched_getaffinity(pid_t pid, struct
+ 		goto out_unlock;
+ 
+ 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+-	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
+ 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ 
+ out_unlock:
+@@ -5469,7 +5469,7 @@ int task_can_attach(struct task_struct *
+ 	 * allowed nodes is unnecessary.  Thus, cpusets are not
+ 	 * applicable for such threads.  This prevents checking for
+ 	 * success of set_cpus_allowed_ptr() on all attached tasks
+-	 * before cpus_allowed may be changed.
++	 * before cpus_mask may be changed.
+ 	 */
+ 	if (p->flags & PF_NO_SETAFFINITY) {
+ 		ret = -EINVAL;
+@@ -5525,7 +5525,7 @@ int migrate_task_to(struct task_struct *
+ 	if (curr_cpu == target_cpu)
+ 		return 0;
+ 
+-	if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
++	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
+ 		return -EINVAL;
+ 
+ 	/* TODO: This is not properly updating schedstats */
+@@ -5665,7 +5665,7 @@ static void migrate_tasks(struct rq *dea
+ 		next->sched_class->put_prev_task(rq, next);
+ 
+ 		/*
+-		 * Rules for changing task_struct::cpus_allowed are holding
++		 * Rules for changing task_struct::cpus_mask are holding
+ 		 * both pi_lock and rq->lock, such that holding either
+ 		 * stabilizes the mask.
+ 		 *
+--- a/kernel/sched/cpudeadline.c
++++ b/kernel/sched/cpudeadline.c
+@@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct
+ 	const struct sched_dl_entity *dl_se = &p->dl;
+ 
+ 	if (later_mask &&
+-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
++	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
+ 		best_cpu = cpumask_any(later_mask);
+ 		goto out;
+-	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
++	} else if (cpumask_test_cpu(cpudl_maximum(cp), p->cpus_ptr) &&
+ 			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+ 		best_cpu = cpudl_maximum(cp);
+ 		if (later_mask)
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struc
+ 		if (skip)
+ 			continue;
+ 
+-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
++		if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
+ 			continue;
+ 
+ 		if (lowest_mask) {
+-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
++			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+ 
+ 			/*
+ 			 * We have to ensure that we have at least one bit
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migrat
+ 		 * If we cannot preempt any rq, fall back to pick any
+ 		 * online cpu.
+ 		 */
+-		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
++		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
+ 		if (cpu >= nr_cpu_ids) {
+ 			/*
+ 			 * Fail to find any suitable cpu.
+@@ -1286,7 +1286,7 @@ static void set_curr_task_dl(struct rq *
+ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
+ {
+ 	if (!task_running(rq, p) &&
+-	    cpumask_test_cpu(cpu, &p->cpus_allowed))
++	    cpumask_test_cpu(cpu, p->cpus_ptr))
+ 		return 1;
+ 	return 0;
+ }
+@@ -1435,7 +1435,7 @@ static struct rq *find_lock_later_rq(str
+ 		/* Retry if something changed. */
+ 		if (double_lock_balance(rq, later_rq)) {
+ 			if (unlikely(task_rq(task) != rq ||
+-				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
++				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
+ 				     task_running(rq, task) ||
+ 				     !dl_task(task) ||
+ 				     !task_on_rq_queued(task))) {
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1553,7 +1553,7 @@ static void task_numa_compare(struct tas
+ 	 */
+ 	if (cur) {
+ 		/* Skip this swap candidate if cannot move to the source cpu */
+-		if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
++		if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
+ 			goto unlock;
+ 
+ 		/*
+@@ -1663,7 +1663,7 @@ static void task_numa_find_cpu(struct ta
+ 
+ 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
+ 		/* Skip this CPU if the source task cannot migrate */
+-		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
++		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
+ 			continue;
+ 
+ 		env->dst_cpu = cpu;
+@@ -5460,7 +5460,7 @@ find_idlest_group(struct sched_domain *s
+ 
+ 		/* Skip over this group if it has no CPUs allowed */
+ 		if (!cpumask_intersects(sched_group_cpus(group),
+-					&p->cpus_allowed))
++					p->cpus_ptr))
+ 			continue;
+ 
+ 		local_group = cpumask_test_cpu(this_cpu,
+@@ -5580,7 +5580,7 @@ find_idlest_cpu(struct sched_group *grou
+ 		return cpumask_first(sched_group_cpus(group));
+ 
+ 	/* Traverse only the allowed CPUs */
+-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
++	for_each_cpu_and(i, sched_group_cpus(group), p->cpus_ptr) {
+ 		if (idle_cpu(i)) {
+ 			struct rq *rq = cpu_rq(i);
+ 			struct cpuidle_state *idle = idle_get_state(rq);
+@@ -5719,7 +5719,7 @@ static int select_idle_core(struct task_
+ 	if (!test_idle_cores(target, false))
+ 		return -1;
+ 
+-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
++	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+ 
+ 	for_each_cpu_wrap(core, cpus, target, wrap) {
+ 		bool idle = true;
+@@ -5753,7 +5753,7 @@ static int select_idle_smt(struct task_s
+ 		return -1;
+ 
+ 	for_each_cpu(cpu, cpu_smt_mask(target)) {
+-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ 			continue;
+ 		if (idle_cpu(cpu))
+ 			return cpu;
+@@ -5805,7 +5805,7 @@ static int select_idle_cpu(struct task_s
+ 	time = local_clock();
+ 
+ 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ 			continue;
+ 		if (idle_cpu(cpu))
+ 			break;
+@@ -5960,7 +5960,7 @@ select_task_rq_fair(struct task_struct *
+ 	if (sd_flag & SD_BALANCE_WAKE) {
+ 		record_wakee(p);
+ 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
+-			      && cpumask_test_cpu(cpu, &p->cpus_allowed);
++			      && cpumask_test_cpu(cpu, p->cpus_ptr);
+ 	}
+ 
+ 	rcu_read_lock();
+@@ -6693,14 +6693,14 @@ int can_migrate_task(struct task_struct
+ 	/*
+ 	 * We do not migrate tasks that are:
+ 	 * 1) throttled_lb_pair, or
+-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
++	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
+ 	 * 3) running (obviously), or
+ 	 * 4) are cache-hot on their current CPU.
+ 	 */
+ 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+ 		return 0;
+ 
+-	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
++	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+ 		int cpu;
+ 
+ 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
+@@ -6720,7 +6720,7 @@ int can_migrate_task(struct task_struct
+ 
+ 		/* Prevent to re-select dst_cpu via env's cpus */
+ 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
+-			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
++			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
+ 				env->flags |= LBF_DST_PINNED;
+ 				env->new_dst_cpu = cpu;
+ 				break;
+@@ -7254,7 +7254,7 @@ check_cpu_capacity(struct rq *rq, struct
+ 
+ /*
+  * Group imbalance indicates (and tries to solve) the problem where balancing
+- * groups is inadequate due to ->cpus_allowed constraints.
++ * groups is inadequate due to ->cpus_ptr constraints.
+  *
+  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
+  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
+@@ -7828,7 +7828,7 @@ static struct sched_group *find_busiest_
+ 	/*
+ 	 * If the busiest group is imbalanced the below checks don't
+ 	 * work because they assume all things are equal, which typically
+-	 * isn't true due to cpus_allowed constraints and the like.
++	 * isn't true due to cpus_ptr constraints and the like.
+ 	 */
+ 	if (busiest->group_type == group_imbalanced)
+ 		goto force_balance;
+@@ -8213,7 +8213,7 @@ static int load_balance(int this_cpu, st
+ 			 * if the curr task on busiest cpu can't be
+ 			 * moved to this_cpu
+ 			 */
+-			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
++			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
+ 				raw_spin_unlock_irqrestore(&busiest->lock,
+ 							    flags);
+ 				env.flags |= LBF_ALL_PINNED;
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *
+ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
+ {
+ 	if (!task_running(rq, p) &&
+-	    cpumask_test_cpu(cpu, &p->cpus_allowed))
++	    cpumask_test_cpu(cpu, p->cpus_ptr))
+ 		return 1;
+ 	return 0;
+ }
+@@ -1726,7 +1726,7 @@ static struct rq *find_lock_lowest_rq(st
+ 			 * Also make sure that it wasn't scheduled on its rq.
+ 			 */
+ 			if (unlikely(task_rq(task) != rq ||
+-				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
++				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
+ 				     task_running(rq, task) ||
+ 				     !rt_task(task) ||
+ 				     !task_on_rq_queued(task))) {
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
+ 	 * of this thread, than stop migrating for the duration
+ 	 * of the current test.
+ 	 */
+-	if (!cpumask_equal(current_mask, &current->cpus_allowed))
++	if (!cpumask_equal(current_mask, current->cpus_ptr))
+ 		goto disable;
+ 
+ 	get_online_cpus();
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -22,7 +22,7 @@ notrace static unsigned int check_preemp
+ 	 * Kernel threads bound to a single CPU can safely use
+ 	 * smp_processor_id():
+ 	 */
+-	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
++	if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
+ 		goto out;
+ 
+ 	/*
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
+ 
+ 	/* Silly tracepoints */
+ 	trace_foo_bar("hello", cnt, array, random_strings[len],
+-		      &current->cpus_allowed);
++		      current->cpus_ptr);
+ 
+ 	trace_foo_with_template_simple("HELLO", cnt);
+ 
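
The 'use'/'modify' rules from the patch description condense into a
small standalone sketch (demo_task is a hypothetical stand-in for
task_struct; the cpumask calls are the real <linux/cpumask.h> API):
readers dereference cpus_ptr, writers update cpus_mask, and a
fork-style copy re-points the pointer at its own mask, exactly as the
dup_task_struct() hunk above does:

	struct demo_task {
		const cpumask_t	*cpus_ptr;	/* 'use' side: always read via this */
		cpumask_t	cpus_mask;	/* 'modify' side: writers touch this */
		int		nr_cpus_allowed;
	};

	/* Writer: update the mask, never the pointer
	 * (cf. set_cpus_allowed_common() above). */
	static void demo_set_allowed(struct demo_task *t,
				     const cpumask_t *new_mask)
	{
		cpumask_copy(&t->cpus_mask, new_mask);
		t->nr_cpus_allowed = cpumask_weight(new_mask);
	}

	/* Reader: go through the pointer, which -RT may temporarily
	 * redirect to cpumask_of(task_cpu(p)) in a migrate_disable()
	 * section. */
	static bool demo_can_run_on(const struct demo_task *t, int cpu)
	{
		return cpumask_test_cpu(cpu, t->cpus_ptr);
	}

	/* Copy-time fixup: a duplicated task must not keep pointing
	 * into the original's mask. */
	static void demo_dup(struct demo_task *dst,
			     const struct demo_task *orig)
	{
		*dst = *orig;
		if (orig->cpus_ptr == &orig->cpus_mask)
			dst->cpus_ptr = &dst->cpus_mask;
	}
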
diff --git a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index ee81c86..077bb65 100644
--- a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 21 Nov 2016 19:31:08 +0100
 Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
  __put_task_struct()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There is no need to free the stack before the task struct. This also
comes in handy on -RT because we can't free memory in preempt disabled
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -76,6 +76,7 @@
+@@ -87,6 +87,7 @@
  #include <linux/compiler.h>
  #include <linux/sysctl.h>
  #include <linux/kcov.h>
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
-@@ -385,6 +386,15 @@ void __put_task_struct(struct task_struc
+@@ -398,6 +399,15 @@ void __put_task_struct(struct task_struc
  	WARN_ON(atomic_read(&tsk->usage));
  	WARN_ON(tsk == current);
  
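
The nine added lines are unchanged context in this refresh and not
shown, but the description above amounts to deferring the cleanup from
the scheduler's preempt-disabled switch path into __put_task_struct().
A sketch, assuming the existing put_task_stack() and
kprobe_flush_task() helpers:

	void __put_task_struct(struct task_struct *tsk)
	{
		WARN_ON(!tsk->exit_state);
		WARN_ON(atomic_read(&tsk->usage));
		WARN_ON(tsk == current);

		/*
		 * We hold the last reference, so the task cannot run
		 * anymore; unlike finish_task_switch(), this context
		 * may free memory on -RT.
		 */
		kprobe_flush_task(tsk);
		put_task_stack(tsk);

		/* ... remainder of the existing teardown ... */
	}
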
diff --git a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
index d5b6eb7..300c0ff 100644
--- a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
+++ b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 9 Feb 2016 18:17:18 +0100
 Subject: kernel: softirq: unlock with irqs on
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We unlock the lock while the interrupts are off. This isn't a problem
now but will become one because the migrate_disable() + enable are not
diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
index c1db5f1..c41c38a 100644
--- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch
+++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
@@ -1,7 +1,7 @@
 From: Jason Wessel <jason.wessel at windriver.com>
 Date: Thu, 28 Jul 2011 12:42:23 -0500
 Subject: kgdb/serial: Short term workaround
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
 >  - KGDB (not yet disabled) is reportedly unusable on -rt right now due
@@ -21,8 +21,8 @@ Jason.
 ---
  drivers/tty/serial/8250/8250_port.c |    3 +++
  include/linux/kdb.h                 |    2 ++
- kernel/debug/kdb/kdb_io.c           |    6 ++----
- 3 files changed, 7 insertions(+), 4 deletions(-)
+ kernel/debug/kdb/kdb_io.c           |    2 ++
+ 3 files changed, 7 insertions(+)
 
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
@@ -34,7 +34,7 @@ Jason.
  #include <linux/uaccess.h>
  #include <linux/pm_runtime.h>
  #include <linux/timer.h>
-@@ -3146,6 +3147,8 @@ void serial8250_console_write(struct uar
+@@ -3181,6 +3182,8 @@ void serial8250_console_write(struct uar
  
  	if (port->sysrq || oops_in_progress)
  		locked = 0;
@@ -63,32 +63,7 @@ Jason.
  			       char *help, short minlen) { return 0; }
 --- a/kernel/debug/kdb/kdb_io.c
 +++ b/kernel/debug/kdb/kdb_io.c
-@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- 	int linecount;
- 	int colcount;
- 	int logging, saved_loglevel = 0;
--	int saved_trap_printk;
- 	int got_printf_lock = 0;
- 	int retlen = 0;
- 	int fnd, len;
-@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- 	unsigned long uninitialized_var(flags);
- 
- 	preempt_disable();
--	saved_trap_printk = kdb_trap_printk;
--	kdb_trap_printk = 0;
- 
- 	/* Serialize kdb_printf if multiple cpus try to write at once.
- 	 * But if any cpu goes recursive in kdb, just print the output,
-@@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- 	} else {
- 		__release(kdb_printf_lock);
- 	}
--	kdb_trap_printk = saved_trap_printk;
- 	preempt_enable();
- 	return retlen;
- }
-@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
+@@ -854,9 +854,11 @@ int kdb_printf(const char *fmt, ...)
  	va_list ap;
  	int r;
  
diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch
index 73c75c4..667a9f6 100644
--- a/debian/patches/features/all/rt/latency-hist.patch
+++ b/debian/patches/features/all/rt/latency-hist.patch
@@ -1,7 +1,7 @@
 Subject: tracing: Add latency histograms
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 14:03:41 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This patch provides a recording mechanism to store data of potential
 sources of system latencies. The recordings separately determine the
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  Documentation/trace/histograms.txt  |  186 +++++
  include/linux/hrtimer.h             |    4 
- include/linux/sched.h               |    6 
+ include/linux/sched.h               |    7 
  include/trace/events/hist.h         |   73 ++
  include/trace/events/latency_hist.h |   29 
  kernel/time/hrtimer.c               |   21 
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  kernel/trace/Makefile               |    4 
  kernel/trace/latency_hist.c         | 1178 ++++++++++++++++++++++++++++++++++++
  kernel/trace/trace_irqsoff.c        |   11 
- 10 files changed, 1616 insertions(+)
+ 10 files changed, 1616 insertions(+), 1 deletion(-)
 
 --- /dev/null
 +++ b/Documentation/trace/histograms.txt
@@ -217,15 +217,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +These data are also reset when the wakeup histogram is reset.
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -87,6 +87,7 @@ enum hrtimer_restart {
+@@ -86,6 +86,7 @@ enum hrtimer_restart {
   * @function:	timer expiry callback function
   * @base:	pointer to the timer base (per cpu and per clock)
   * @state:	state information (See bit values above)
 + * @praecox:	timer expiry time if expired at the time of programming
   * @is_rel:	Set if the timer was armed relative
-  * @start_pid:  timer statistics field to store the pid of the task which
-  *		started the timer
-@@ -103,6 +104,9 @@ struct hrtimer {
+  *
+  * The hrtimer structure must be initialized by hrtimer_init()
+@@ -96,6 +97,9 @@ struct hrtimer {
  	enum hrtimer_restart		(*function)(struct hrtimer *);
  	struct hrtimer_clock_base	*base;
  	u8				state;
@@ -233,14 +233,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	ktime_t				praecox;
 +#endif
  	u8				is_rel;
- #ifdef CONFIG_TIMER_STATS
- 	int				start_pid;
+ };
+ 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1924,6 +1924,12 @@ struct task_struct {
- 	/* bitmask and counter of trace recursion */
- 	unsigned long trace_recursion;
+@@ -1009,7 +1009,12 @@ struct task_struct {
+ 	/* Bitmask and counter of trace recursion: */
+ 	unsigned long			trace_recursion;
  #endif /* CONFIG_TRACING */
+-
 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
 +	u64 preempt_timestamp_hist;
 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
@@ -248,8 +249,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 +#endif
  #ifdef CONFIG_KCOV
- 	/* Coverage collection mode enabled for this task (0 if disabled). */
- 	enum kcov_mode kcov_mode;
+ 	/* Coverage collection mode enabled for this task (0 if disabled): */
+ 	enum kcov_mode			kcov_mode;
 --- /dev/null
 +++ b/include/trace/events/hist.h
 @@ -0,0 +1,73 @@
@@ -360,18 +361,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif /* _LATENCY_HIST_H */
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -53,6 +53,7 @@
- #include <asm/uaccess.h>
- 
- #include <trace/events/timer.h>
+@@ -50,6 +50,7 @@
+ #include <linux/sched/nohz.h>
+ #include <linux/sched/debug.h>
+ #include <linux/timer.h>
 +#include <trace/events/hist.h>
+ #include <linux/freezer.h>
  
- #include "tick-internal.h"
+ #include <linux/uaccess.h>
+@@ -960,7 +961,16 @@ void hrtimer_start_range_ns(struct hrtim
  
-@@ -991,7 +992,16 @@ void hrtimer_start_range_ns(struct hrtim
+ 	/* Switch the timer base, if necessary: */
  	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
- 
- 	timer_stats_hrtimer_set_start_info(timer);
 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
 +	{
 +		ktime_t now = new_base->get_time();
@@ -385,7 +386,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	leftmost = enqueue_hrtimer(timer, new_base);
  	if (!leftmost)
  		goto unlock;
-@@ -1265,6 +1275,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1239,6 +1249,8 @@ static void __run_hrtimer(struct hrtimer
  	cpu_base->running = NULL;
  }
  
@@ -394,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
  {
  	struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1284,6 +1296,15 @@ static void __hrtimer_run_queues(struct
+@@ -1258,6 +1270,15 @@ static void __hrtimer_run_queues(struct
  
  			timer = container_of(node, struct hrtimer, node);
  
@@ -412,7 +413,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			 * minimizing wakeups, not running timers at the
 --- a/kernel/trace/Kconfig
 +++ b/kernel/trace/Kconfig
-@@ -182,6 +182,24 @@ config IRQSOFF_TRACER
+@@ -184,6 +184,24 @@ config IRQSOFF_TRACER
  	  enabled. This option and the preempt-off timing option can be
  	  used together or separately.)
  
@@ -437,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  config PREEMPT_TRACER
  	bool "Preemption-off Latency Tracer"
  	default n
-@@ -206,6 +224,24 @@ config PREEMPT_TRACER
+@@ -208,6 +226,24 @@ config PREEMPT_TRACER
  	  enabled. This option and the irqs-off timing option can be
  	  used together or separately.)
  
@@ -462,7 +463,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  config SCHED_TRACER
  	bool "Scheduling Latency Tracer"
  	select GENERIC_TRACER
-@@ -251,6 +287,74 @@ config HWLAT_TRACER
+@@ -253,6 +289,74 @@ config HWLAT_TRACER
  	 file. Every time a latency is greater than tracing_thresh, it will
  	 be recorded into the ring buffer.
  
@@ -660,7 +661,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	int current_prio;
 +	long latency;
 +	long timeroffset;
-+	cycle_t timestamp;
++	u64 timestamp;
 +};
 +#endif
 +
@@ -711,7 +712,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 +
 +void notrace latency_hist(int latency_type, int cpu, long latency,
-+			  long timeroffset, cycle_t stop,
++			  long timeroffset, u64 stop,
 +			  struct task_struct *p)
 +{
 +	struct hist_data *my_hist;
@@ -1302,7 +1303,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	int time_set = 0;
 +
 +	if (starthist) {
-+		cycle_t uninitialized_var(start);
++		u64 uninitialized_var(start);
 +
 +		if (!preempt_count() && !irqs_disabled())
 +			return;
@@ -1338,12 +1339,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		}
 +#endif
 +	} else {
-+		cycle_t uninitialized_var(stop);
++		u64 uninitialized_var(stop);
 +
 +#ifdef CONFIG_INTERRUPT_OFF_HIST
 +		if ((reason == IRQS_ON || reason == TRACE_STOP) &&
 +		    per_cpu(hist_irqsoff_counting, cpu)) {
-+			cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++			u64 start = per_cpu(hist_irqsoff_start, cpu);
 +
 +			stop = ftrace_now(cpu);
 +			time_set++;
@@ -1361,7 +1362,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#ifdef CONFIG_PREEMPT_OFF_HIST
 +		if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
 +		    per_cpu(hist_preemptoff_counting, cpu)) {
-+			cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++			u64 start = per_cpu(hist_preemptoff_start, cpu);
 +
 +			if (!(time_set++))
 +				stop = ftrace_now(cpu);
@@ -1380,7 +1381,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		if ((!per_cpu(hist_irqsoff_counting, cpu) ||
 +		     !per_cpu(hist_preemptoff_counting, cpu)) &&
 +		   per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+			cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++			u64 start = per_cpu(hist_preemptirqsoff_start, cpu);
 +
 +			if (!time_set)
 +				stop = ftrace_now(cpu);
@@ -1467,7 +1468,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	unsigned long flags;
 +	int cpu = task_cpu(next);
 +	long latency;
-+	cycle_t stop;
++	u64 stop;
 +	struct task_struct *cpu_wakeup_task;
 +
 +	raw_spin_lock_irqsave(&wakeup_lock, flags);
@@ -1537,9 +1538,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	if (latency_ns <= 0 && task != NULL && rt_task(task) &&
 +	    (task->prio < curr->prio ||
 +	    (task->prio == curr->prio &&
-+	    !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++	    !cpumask_test_cpu(cpu, task->cpus_ptr)))) {
 +		long latency;
-+		cycle_t now;
++		u64 now;
 +
 +		if (missed_timer_offsets_pid) {
 +			if (likely(missed_timer_offsets_pid !=
@@ -1741,7 +1742,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #include "trace.h"
  
-@@ -424,11 +425,13 @@ void start_critical_timings(void)
+@@ -436,11 +437,13 @@ void start_critical_timings(void)
  {
  	if (preempt_trace() || irq_trace())
  		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1755,7 +1756,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (preempt_trace() || irq_trace())
  		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
  }
-@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -450,6 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
  #ifdef CONFIG_PROVE_LOCKING
  void time_hardirqs_on(unsigned long a0, unsigned long a1)
  {
@@ -1763,7 +1764,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (!preempt_trace() && irq_trace())
  		stop_critical_timing(a0, a1);
  }
-@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -458,6 +462,7 @@ void time_hardirqs_off(unsigned long a0,
  {
  	if (!preempt_trace() && irq_trace())
  		start_critical_timing(a0, a1);
@@ -1771,7 +1772,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  #else /* !CONFIG_PROVE_LOCKING */
-@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
+@@ -483,6 +488,7 @@ inline void print_irqtrace_events(struct
   */
  void trace_hardirqs_on(void)
  {
@@ -1779,7 +1780,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (!preempt_trace() && irq_trace())
  		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
  }
-@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+@@ -492,11 +498,13 @@ void trace_hardirqs_off(void)
  {
  	if (!preempt_trace() && irq_trace())
  		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1793,7 +1794,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (!preempt_trace() && irq_trace())
  		stop_critical_timing(CALLER_ADDR0, caller_addr);
  }
-@@ -494,6 +502,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+@@ -506,6 +514,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
  {
  	if (!preempt_trace() && irq_trace())
  		start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -1801,7 +1802,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(trace_hardirqs_off_caller);
  
-@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+@@ -515,12 +524,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
  #ifdef CONFIG_PREEMPT_TRACER
  void trace_preempt_on(unsigned long a0, unsigned long a1)
  {
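
Given the latency_hist() prototype quoted above, each probe boils down
to computing one delta and feeding it into the per-CPU histogram. A
hedged sketch of such a call site (the WAKEUP_LATENCY constant is
inferred from CONFIG_WAKEUP_LATENCY_HIST; the per-CPU start storage
and the microsecond conversion here are assumptions):

	u64 start = per_cpu(wakeup_start, cpu);	/* hypothetical storage */
	u64 stop = ftrace_now(cpu);
	long latency = (long)div_u64(stop - start, 1000);	/* ns -> us */

	/* One bucket update per event; @next attributes the maximum. */
	latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
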
diff --git a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
index d0c863b..c875ec8 100644
--- a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
+++ b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
@@ -1,7 +1,7 @@
 Subject: latency_hist: Update sched_wakeup probe
 From: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
 Date: Sun, 25 Oct 2015 18:06:05 -0400
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 "sched: Introduce the 'trace_sched_waking' tracepoint" introduces a
 prototype change for the sched_wakeup probe: the "success" argument is
diff --git a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
index 3242593..aa9e446 100644
--- a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
+++ b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 4 Feb 2016 14:08:06 +0100
 Subject: latencyhist: disable jump-labels
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
At least on X86 we die a recursive death
 
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/Kconfig
 +++ b/arch/Kconfig
-@@ -52,6 +52,7 @@ config KPROBES
+@@ -55,6 +55,7 @@ config KPROBES
  config JUMP_LABEL
         bool "Optimize very unlikely/likely branches"
         depends on HAVE_ARCH_JUMP_LABEL
diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 6e39e6c..8186779 100644
--- a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 23 Jan 2014 14:45:59 +0100
 Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 as it triggers:
 |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
diff --git a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
index a99a0d8..9f504ac 100644
--- a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 31 Mar 2016 00:04:25 -0500
 Subject: [PATCH] list_bl: fixup bogus lockdep warning
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 At first glance, the use of 'static inline' seems appropriate for
 INIT_HLIST_BL_HEAD().
diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
index 4615b26..a34d8ef 100644
--- a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Fri, 21 Jun 2013 15:07:25 -0400
 Subject: list_bl: Make list head locking RT safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 As per changes in include/linux/jbd_common.h for avoiding the
 bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
index 08c280c..bf3c2fb 100644
--- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
+++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 22:34:14 +0200
 Subject: rt: local_irq_* variants depending on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
Add local_irq_*_(no)rt variants which are mainly used to break
interrupt disabled sections on PREEMPT_RT or to explicitly disable
diff --git a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
index e0b0f0e..9c74d69 100644
--- a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
+++ b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 27 May 2016 15:11:51 +0200
 Subject: [PATCH] locallock: add local_lock_on()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
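
local_lock_on() and its irq variants take the named per-CPU local lock of
an explicitly given CPU rather than the current one; several mm patches
below rely on exactly that. A hedged usage sketch (the lock name here is
illustrative only):

static DEFINE_LOCAL_IRQ_LOCK(example_lock);

static void touch_remote_cpu_state(int cpu)
{
	/* Takes CPU `cpu`'s instance of the lock; on RT this is a
	 * sleeping lock, on !RT it disables interrupts as before. */
	local_lock_irq_on(example_lock, cpu);
	/* ... safely update that CPU's per-CPU data ... */
	local_unlock_irq_on(example_lock, cpu);
}
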
diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch
index b096ae9..0d8f833 100644
--- a/debian/patches/features/all/rt/localversion.patch
+++ b/debian/patches/features/all/rt/localversion.patch
@@ -1,7 +1,7 @@
 Subject: Add localversion for -RT release
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt20
++-rt1
diff --git a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
index 3ee2b08..ef43082 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
@@ -2,7 +2,7 @@ From: Dan Murphy <dmurphy at ti.com>
 Date: Fri, 24 Feb 2017 08:41:49 -0600
 Subject: [PATCH] lockdep: Fix compilation error for !CONFIG_MODULES and
  !CONFIG_SMP
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When CONFIG_MODULES is not set then it fails to compile in lockdep:
 
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/module.h
 +++ b/include/linux/module.h
-@@ -664,6 +664,11 @@ static inline bool is_module_percpu_addr
+@@ -661,6 +661,11 @@ static inline bool is_module_percpu_addr
  	return false;
  }
  
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return false;
 --- a/kernel/module.c
 +++ b/kernel/module.c
-@@ -734,6 +734,11 @@ bool is_module_percpu_address(unsigned l
+@@ -739,6 +739,11 @@ bool is_module_percpu_address(unsigned l
  	return false;
  }
  
diff --git a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
index 369f8d9..8060866 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
@@ -2,7 +2,7 @@ From 8ce371f9846ef1e8b3cc8f6865766cb5c1f17e40 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 20 Mar 2017 12:26:55 +0100
 Subject: [PATCH] lockdep: Fix per-cpu static objects
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Since commit 383776fa7527 ("locking/lockdep: Handle statically initialized
 PER_CPU locks properly") we try to collapse per-cpu locks into a single
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1240,6 +1240,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
+@@ -1125,6 +1125,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
  
  #endif /* CONFIG_PM_SLEEP_SMP */
  
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #endif /* CONFIG_SMP */
  
  /* Boot processor state steps */
-@@ -1924,6 +1926,10 @@ void __init boot_cpu_init(void)
+@@ -1815,6 +1817,10 @@ void __init boot_cpu_init(void)
  	set_cpu_active(cpu, true);
  	set_cpu_present(cpu, true);
  	set_cpu_possible(cpu, true);
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
 --- a/kernel/module.c
 +++ b/kernel/module.c
-@@ -677,8 +677,12 @@ bool __is_module_percpu_address(unsigned
+@@ -682,8 +682,12 @@ bool __is_module_percpu_address(unsigned
  			void *va = (void *)addr;
  
  			if (va >= start && va < start + mod->percpu_size) {
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			}
 --- a/mm/percpu.c
 +++ b/mm/percpu.c
-@@ -1295,8 +1295,11 @@ bool __is_kernel_percpu_address(unsigned
+@@ -1296,8 +1296,11 @@ bool __is_kernel_percpu_address(unsigned
  		void *va = (void *)addr;
  
  		if (va >= start && va < start + static_size) {
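
The core of the fix: when lockdep asks whether an address is a static
per-CPU object, the helper now also reports the canonical address, i.e. the
boot CPU's copy, so every CPU's instance of a statically initialized
per-CPU lock maps to a single lock class. A sketch of that translation,
reconstructed from the hunks above (treat the body, and the omitted
CONFIG_SMP guard, as assumptions):

bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr)
				/* Rebase onto the boot CPU's copy. */
				*can_addr = (unsigned long)(va - start) +
					(unsigned long)per_cpu_ptr(base,
							get_boot_cpu_id());
			return true;
		}
	}
	return false;
}
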
diff --git a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
index 6991a28..8f54dcd 100644
--- a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
+++ b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Feb 2017 19:44:39 +0100
 Subject: [PATCH] lockdep: Handle statically initialized PER_CPU locks proper
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If a PER_CPU struct which contains a spin_lock is statically initialized
 via:
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/module.h
 +++ b/include/linux/module.h
-@@ -496,6 +496,7 @@ static inline int module_is_live(struct
+@@ -493,6 +493,7 @@ static inline int module_is_live(struct
  struct module *__module_text_address(unsigned long addr);
  struct module *__module_address(unsigned long addr);
  bool is_module_address(unsigned long addr);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *l
+@@ -660,6 +660,7 @@ look_up_lock_class(struct lockdep_map *l
  	struct lockdep_subclass_key *key;
  	struct hlist_head *hash_head;
  	struct lock_class *class;
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
  		debug_locks_off();
-@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *l
+@@ -673,10 +674,23 @@ look_up_lock_class(struct lockdep_map *l
  
  	/*
  	 * Static locks do not have their class-keys yet - for them the key
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	/*
  	 * NOTE: the class-key must be unique. For dynamic locks, a static
-@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *l
+@@ -708,7 +722,7 @@ look_up_lock_class(struct lockdep_map *l
  		}
  	}
  
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *
+@@ -726,19 +740,18 @@ register_lock_class(struct lockdep_map *
  	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
  
  	class = look_up_lock_class(lock, subclass);
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return NULL;
  	}
  
-@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_l
+@@ -3419,7 +3432,7 @@ static int match_held_lock(struct held_l
  		 * Clearly if the lock hasn't been acquired _ever_, we're not
  		 * holding it either, so report failure.
  		 */
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			return 0;
  
  		/*
-@@ -4159,7 +4172,7 @@ void lockdep_reset_lock(struct lockdep_m
+@@ -4172,7 +4185,7 @@ void lockdep_reset_lock(struct lockdep_m
  		 * If the class exists we look it up and zap it:
  		 */
  		class = look_up_lock_class(lock, j);
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/*
 --- a/kernel/module.c
 +++ b/kernel/module.c
-@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module
+@@ -665,16 +665,7 @@ static void percpu_modcopy(struct module
  		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
  }
  
@@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct module *mod;
  	unsigned int cpu;
-@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned l
+@@ -688,9 +679,11 @@ bool is_module_percpu_address(unsigned l
  			continue;
  		for_each_possible_cpu(cpu) {
  			void *start = per_cpu_ptr(mod->percpu, cpu);
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				preempt_enable();
  				return true;
  			}
-@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned l
+@@ -701,6 +694,20 @@ bool is_module_percpu_address(unsigned l
  	return false;
  }
  
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static inline void __percpu *mod_percpu(struct module *mod)
 --- a/mm/percpu.c
 +++ b/mm/percpu.c
-@@ -1283,18 +1283,7 @@ void free_percpu(void __percpu *ptr)
+@@ -1284,18 +1284,7 @@ void free_percpu(void __percpu *ptr)
  }
  EXPORT_SYMBOL_GPL(free_percpu);
  
@@ -228,7 +228,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  #ifdef CONFIG_SMP
  	const size_t static_size = __per_cpu_end - __per_cpu_start;
-@@ -1303,16 +1292,36 @@ bool is_kernel_percpu_address(unsigned l
+@@ -1304,16 +1293,36 @@ bool is_kernel_percpu_address(unsigned l
  
  	for_each_possible_cpu(cpu) {
  		void *start = per_cpu_ptr(base, cpu);
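
The pattern the description refers to ("statically initialized via:") is a
per-CPU struct whose embedded lock is initialized at build time, so every
CPU's copy shares the same static key; roughly:

struct foo {
	spinlock_t	lock;
	int		counter;
};

static DEFINE_PER_CPU(struct foo, foo_state) = {
	.lock = __SPIN_LOCK_UNLOCKED(foo_state.lock),
};

Without the canonical-address handling above, lockdep would register a
separate class for each CPU's copy of what is logically one lock.
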
diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
index 6668702..f6bea8f 100644
--- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: Make it RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 teach lockdep that we don't really do softirqs on -RT.
 
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #if defined(CONFIG_IRQSOFF_TRACER) || \
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -3702,6 +3702,7 @@ static void check_flags(unsigned long fl
+@@ -3715,6 +3715,7 @@ static void check_flags(unsigned long fl
  		}
  	}
  
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * We dont accurately track softirq state in e.g.
  	 * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3716,6 +3717,7 @@ static void check_flags(unsigned long fl
+@@ -3729,6 +3730,7 @@ static void check_flags(unsigned long fl
  			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
  		}
  	}
diff --git a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 2bdd4d6..e50b5b5 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <josh.cartwright at ni.com>
 Date: Wed, 28 Jan 2015 13:08:45 -0600
 Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 "lockdep: Selftest: Only do hardirq context test for raw spinlock"
 disabled the execution of certain tests with PREEMPT_RT_FULL, but did
diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index f1cad36..a834fb5 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 From: Yong Zhang <yong.zhang at windriver.com>
 
diff --git a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index 994e906..6621eca 100644
--- a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -1,7 +1,7 @@
 From: "Wolfgang M. Reimer" <linuxball at gmail.com>
 Date: Tue, 21 Jul 2015 16:20:07 +0200
 Subject: locking: locktorture: Do NOT include rwlock.h directly
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Including rwlock.h directly will cause kernel builds to fail
 if CONFIG_PREEMPT_RT_FULL is defined. The correct header file
diff --git a/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch b/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
deleted file mode 100644
index 2865e74..0000000
--- a/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Mon, 21 Nov 2016 19:26:15 +0100
-Subject: [PATCH] locking/percpu-rwsem: use swait for the wating writer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Use struct swait_queue_head instead of wait_queue_head_t for the waiting
-writer. The swait implementation is smaller and lightweight compared to
-wait_queue_head_t.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- include/linux/percpu-rwsem.h  |    6 +++---
- kernel/locking/percpu-rwsem.c |    6 +++---
- 2 files changed, 6 insertions(+), 6 deletions(-)
-
---- a/include/linux/percpu-rwsem.h
-+++ b/include/linux/percpu-rwsem.h
-@@ -4,7 +4,7 @@
- #include <linux/atomic.h>
- #include <linux/rwsem.h>
- #include <linux/percpu.h>
--#include <linux/wait.h>
-+#include <linux/swait.h>
- #include <linux/rcu_sync.h>
- #include <linux/lockdep.h>
- 
-@@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
- 	struct rcu_sync		rss;
- 	unsigned int __percpu	*read_count;
- 	struct rw_semaphore	rw_sem;
--	wait_queue_head_t	writer;
-+	struct swait_queue_head	writer;
- 	int			readers_block;
- };
- 
-@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name =
- 	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
- 	.read_count = &__percpu_rwsem_rc_##name,			\
- 	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
--	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
-+	.writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
- }
- 
- extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
---- a/kernel/locking/percpu-rwsem.c
-+++ b/kernel/locking/percpu-rwsem.c
-@@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw
- 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
- 	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
- 	__init_rwsem(&sem->rw_sem, name, rwsem_key);
--	init_waitqueue_head(&sem->writer);
-+	init_swait_queue_head(&sem->writer);
- 	sem->readers_block = 0;
- 	return 0;
- }
-@@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_s
- 	__this_cpu_dec(*sem->read_count);
- 
- 	/* Prod writer to recheck readers_active */
--	wake_up(&sem->writer);
-+	swake_up(&sem->writer);
- }
- EXPORT_SYMBOL_GPL(__percpu_up_read);
- 
-@@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_
- 	 */
- 
- 	/* Wait for all now active readers to complete. */
--	wait_event(sem->writer, readers_active_check(sem));
-+	swait_event(sem->writer, readers_active_check(sem));
- }
- EXPORT_SYMBOL_GPL(percpu_down_write);
- 
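
This patch is dropped rather than refreshed; 4.11's percpu-rwsem no longer
parks the writer on a wait_queue_head_t, so there is nothing left to
convert (that reading is an inference from the deletion, not from the
changelog). For reference, the swait API the patch used is deliberately
minimal; a self-contained sketch:

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void waiter(void)
{
	/* Sleeps until example_done is true; lighter than wait_event()
	 * since swait wakes a single task under a raw lock. */
	swait_event(example_wq, example_done);
}

static void waker(void)
{
	example_done = true;
	swake_up(&example_wq);
}
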
diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch
index 0beba70..e5655bb 100644
--- a/debian/patches/features/all/rt/md-disable-bcache.patch
+++ b/debian/patches/features/all/rt/md-disable-bcache.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 11:48:57 +0200
 Subject: md: disable bcache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 It uses anon semaphores
 |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’:
diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
index 17a9071..7a7ffe1 100644
--- a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 6 Apr 2010 16:51:31 +0200
 Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 __raid_run_ops() disables preemption with get_cpu() around the access
 to the raid5_percpu variables. That causes scheduling while atomic
@@ -21,7 +21,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
 
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
-@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1986,8 +1986,9 @@ static void raid_run_ops(struct stripe_h
  	struct raid5_percpu *percpu;
  	unsigned long cpu;
  
@@ -32,7 +32,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
  	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
  		ops_run_biofill(sh);
  		overlap_clear++;
-@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_h
+@@ -2043,7 +2044,8 @@ static void raid_run_ops(struct stripe_h
  			if (test_and_clear_bit(R5_Overlap, &dev->flags))
  				wake_up(&sh->raid_conf->wait_for_overlap);
  		}
@@ -42,15 +42,15 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
  }
  
  static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
-@@ -6393,6 +6395,7 @@ static int raid456_cpu_up_prepare(unsign
- 		       __func__, cpu);
+@@ -6664,6 +6666,7 @@ static int raid456_cpu_up_prepare(unsign
+ 			__func__, cpu);
  		return -ENOMEM;
  	}
 +	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
  	return 0;
  }
  
-@@ -6403,7 +6406,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6674,7 +6677,6 @@ static int raid5_alloc_percpu(struct r5c
  	conf->percpu = alloc_percpu(struct raid5_percpu);
  	if (!conf->percpu)
  		return -ENOMEM;
@@ -60,7 +60,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
  		conf->scribble_disks = max(conf->raid_disks,
 --- a/drivers/md/raid5.h
 +++ b/drivers/md/raid5.h
-@@ -504,6 +504,7 @@ struct r5conf {
+@@ -643,6 +643,7 @@ struct r5conf {
  	int			recovery_disabled;
  	/* per cpu variables */
  	struct raid5_percpu {
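
The shape of the conversion: instead of get_cpu(), which keeps preemption
disabled for the whole ops run, the RT variant pins the task with
get_cpu_light() and serializes access to the per-CPU scratch data with the
spinlock this patch adds to struct raid5_percpu. A simplified sketch of the
resulting pattern in raid_run_ops() (details are assumptions):

	struct raid5_percpu *percpu;
	int cpu;

	cpu = get_cpu_light();		/* preemptible on RT, pinned to the CPU */
	percpu = per_cpu_ptr(conf->percpu, cpu);
	spin_lock(&percpu->lock);	/* the lock initialized above */
	/* ... run the stripe operations on percpu scratch space ... */
	spin_unlock(&percpu->lock);
	put_cpu_light();
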
diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
index dd3d779..b627bc8 100644
--- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: mips: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/mips/Kconfig
 +++ b/arch/mips/Kconfig
-@@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE
+@@ -2520,7 +2520,7 @@ config MIPS_ASID_BITS_VARIABLE
  #
  config HIGHMEM
  	bool "High Memory Support"
diff --git a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
index afba622..bf1c079 100644
--- a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
+++ b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
@@ -1,7 +1,7 @@
 Subject: mm: rt: Fix generic kmap_atomic for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 19 Sep 2015 10:15:00 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The update to 4.1 brought in the mainline variant of the pagefault
 disable disentangling from preempt count. That introduced a
diff --git a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
index ac0e7b8..04c34bb 100644
--- a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+++ b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 5 Feb 2016 12:17:14 +0100
 Subject: mm: backing-dev: don't disable IRQs in wb_congested_put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 it triggers:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/backing-dev.c
 +++ b/mm/backing-dev.c
-@@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeba
+@@ -459,9 +459,9 @@ void wb_congested_put(struct bdi_writeba
  {
  	unsigned long flags;
  
diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
index 41f43e6..22bfbab 100644
--- a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
+++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
@@ -1,7 +1,7 @@
 Subject: mm: bounce: Use local_irq_save_nort
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 10:33:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 kmap_atomic() is preemptible on RT.
 
diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
index 59cd12b..ec199c3 100644
--- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
+++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:51 -0500
 Subject: mm/swap: Convert to percpu locked
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Replace global locks (get_cpu + local_irq_save) with "local_locks()".
 Currently there is one for "rotate" and one for "swap".
@@ -12,13 +12,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/swap.h |    1 +
  mm/compaction.c      |    6 ++++--
- mm/page_alloc.c      |    2 ++
+ mm/page_alloc.c      |    3 ++-
  mm/swap.c            |   38 ++++++++++++++++++++++----------------
- 4 files changed, 29 insertions(+), 18 deletions(-)
+ 4 files changed, 29 insertions(+), 19 deletions(-)
 
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
-@@ -294,6 +294,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -269,6 +269,7 @@ extern unsigned long nr_free_pagecache_p
  
  
  /* linux/mm/swap.c */
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern void lru_cache_add_file(struct page *page);
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
-@@ -1593,10 +1593,12 @@ static enum compact_result compact_zone(
+@@ -1601,10 +1601,12 @@ static enum compact_result compact_zone(
  				block_start_pfn(cc->migrate_pfn, cc->order);
  
  			if (cc->last_migrated_pfn < current_block_start) {
@@ -45,16 +45,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			}
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -6594,7 +6594,9 @@ static int page_alloc_cpu_notify(struct
- 	int cpu = (unsigned long)hcpu;
+@@ -6787,8 +6787,9 @@ void __init free_area_init(unsigned long
  
- 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-+		local_lock_irq_on(swapvec_lock, cpu);
- 		lru_add_drain_cpu(cpu);
-+		local_unlock_irq_on(swapvec_lock, cpu);
- 		drain_pages(cpu);
+ static int page_alloc_cpu_dead(unsigned int cpu)
+ {
+-
++	local_lock_irq_on(swapvec_lock, cpu);
+ 	lru_add_drain_cpu(cpu);
++	local_unlock_irq_on(swapvec_lock, cpu);
+ 	drain_pages(cpu);
  
- 		/*
+ 	/*
 --- a/mm/swap.c
 +++ b/mm/swap.c
 @@ -32,6 +32,7 @@
@@ -74,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * This path almost never happens for VM activity - pages are normally
-@@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page
+@@ -242,11 +245,11 @@ void rotate_reclaimable_page(struct page
  		unsigned long flags;
  
  		get_page(page);
@@ -88,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  
-@@ -294,12 +297,13 @@ void activate_page(struct page *page)
+@@ -296,12 +299,13 @@ void activate_page(struct page *page)
  {
  	page = compound_head(page);
  	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -104,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  
-@@ -326,7 +330,7 @@ void activate_page(struct page *page)
+@@ -328,7 +332,7 @@ void activate_page(struct page *page)
  
  static void __lru_cache_activate_page(struct page *page)
  {
@@ -113,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int i;
  
  	/*
-@@ -348,7 +352,7 @@ static void __lru_cache_activate_page(st
+@@ -350,7 +354,7 @@ static void __lru_cache_activate_page(st
  		}
  	}
  
@@ -122,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -392,12 +396,12 @@ EXPORT_SYMBOL(mark_page_accessed);
  
  static void __lru_cache_add(struct page *page)
  {
@@ -137,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -593,9 +597,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -595,9 +599,9 @@ void lru_add_drain_cpu(int cpu)
  		unsigned long flags;
  
  		/* No harm done if a racing interrupt already did this */
@@ -149,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -627,11 +631,12 @@ void deactivate_file_page(struct page *p
+@@ -629,11 +633,12 @@ void deactivate_file_page(struct page *p
  		return;
  
  	if (likely(get_page_unless_zero(page))) {
@@ -164,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  
-@@ -646,19 +651,20 @@ void deactivate_file_page(struct page *p
+@@ -648,19 +653,20 @@ void deactivate_file_page(struct page *p
  void deactivate_page(struct page *page)
  {
  	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
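
The conversion pattern of this patch, condensed: each formerly
irq-protected per-CPU pagevec gets a named local lock, so !RT behavior is
unchanged while RT gets a per-CPU sleeping lock. A hedged sketch:

static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

static void rotate_example(struct page *page)
{
	unsigned long flags;

	get_page(page);
	/* Was: local_irq_save(flags); */
	local_lock_irqsave(rotate_lock, flags);
	/* ... add the page to this CPU's rotation pagevec ... */
	local_unlock_irqrestore(rotate_lock, flags);
}
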
diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
index 918d6fa..78841fa 100644
--- a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
+++ b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:44:03 -0500
 Subject: mm: Allow only slub on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Disable SLAB and SLOB on -RT. Only SLUB is adapted to -RT needs.
 
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1759,6 +1759,7 @@ choice
+@@ -1825,6 +1825,7 @@ choice
  
  config SLAB
  	bool "SLAB"
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	select HAVE_HARDENED_USERCOPY_ALLOCATOR
  	help
  	  The regular slab allocator that is established and known to work
-@@ -1779,6 +1780,7 @@ config SLUB
+@@ -1845,6 +1846,7 @@ config SLUB
  config SLOB
  	depends on EXPERT
  	bool "SLOB (Simple Allocator)"
diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch
index f5e942b..fff07e2 100644
--- a/debian/patches/features/all/rt/mm-enable-slub.patch
+++ b/debian/patches/features/all/rt/mm-enable-slub.patch
@@ -1,7 +1,7 @@
 Subject: mm: Enable SLUB for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 25 Oct 2012 10:32:35 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Make SLUB RT aware by converting locks to raw and using free lists to
 move the freeing out of the lock held region.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/slab.h
 +++ b/mm/slab.h
-@@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(
+@@ -465,7 +465,11 @@ static inline void slab_post_alloc_hook(
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1141,7 +1141,7 @@ static noinline int free_debug_processin
+@@ -1146,7 +1146,7 @@ static noinline int free_debug_processin
  	unsigned long uninitialized_var(flags);
  	int ret = 0;
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	slab_lock(page);
  
  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1176,7 +1176,7 @@ static noinline int free_debug_processin
+@@ -1181,7 +1181,7 @@ static noinline int free_debug_processin
  			 bulk_cnt, cnt);
  
  	slab_unlock(page);
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (!ret)
  		slab_fix(s, "Object at 0x%p not freed", object);
  	return ret;
-@@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct
+@@ -1309,6 +1309,12 @@ static inline void dec_slabs_node(struct
  
  #endif /* CONFIG_SLUB_DEBUG */
  
@@ -59,31 +59,31 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
-@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
+@@ -1535,7 +1541,11 @@ static struct page *allocate_slab(struct
  
  	flags &= gfp_allowed_mask;
  
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	if (system_state == SYSTEM_RUNNING)
++	if (system_state > SYSTEM_BOOTING)
 +#else
  	if (gfpflags_allow_blocking(flags))
 +#endif
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
+@@ -1610,7 +1620,11 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	if (system_state == SYSTEM_RUNNING)
++	if (system_state > SYSTEM_BOOTING)
 +#else
  	if (gfpflags_allow_blocking(flags))
 +#endif
  		local_irq_disable();
  	if (!page)
  		return NULL;
-@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
+@@ -1670,6 +1684,16 @@ static void __free_slab(struct kmem_cach
  	__free_pages(page, order);
  }
  
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
  
-@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
+@@ -1701,6 +1725,12 @@ static void free_slab(struct kmem_cache
  		}
  
  		call_rcu(head, rcu_free_slab);
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else
  		__free_slab(s, page);
  }
-@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
+@@ -1808,7 +1838,7 @@ static void *get_partial_node(struct kme
  	if (!n || !n->nr_partial)
  		return NULL;
  
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
  		void *t;
  
-@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
+@@ -1833,7 +1863,7 @@ static void *get_partial_node(struct kme
  			break;
  
  	}
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return object;
  }
  
-@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
+@@ -2079,7 +2109,7 @@ static void deactivate_slab(struct kmem_
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	} else {
  		m = M_FULL;
-@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
+@@ -2090,7 +2120,7 @@ static void deactivate_slab(struct kmem_
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	}
  
-@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
+@@ -2125,7 +2155,7 @@ static void deactivate_slab(struct kmem_
  		goto redo;
  
  	if (lock)
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
-@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
+@@ -2157,10 +2187,10 @@ static void unfreeze_partials(struct kme
  		n2 = get_node(s, page_to_nid(page));
  		if (n != n2) {
  			if (n)
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  
  		do {
-@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
+@@ -2189,7 +2219,7 @@ static void unfreeze_partials(struct kme
  	}
  
  	if (n)
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	while (discard_page) {
  		page = discard_page;
-@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2228,14 +2258,21 @@ static void put_cpu_partial(struct kmem_
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
@@ -202,7 +202,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				oldpage = NULL;
  				pobjects = 0;
  				pages = 0;
-@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2307,7 +2344,22 @@ static bool has_cpu_slab(int cpu, void *
  
  static void flush_all(struct kmem_cache *s)
  {
@@ -225,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2356,10 +2408,10 @@ static unsigned long count_partial(struc
+@@ -2362,10 +2414,10 @@ static unsigned long count_partial(struc
  	unsigned long x = 0;
  	struct page *page;
  
@@ -238,7 +238,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return x;
  }
  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2497,8 +2549,10 @@ static inline void *get_freelist(struct
+@@ -2503,8 +2555,10 @@ static inline void *get_freelist(struct
   * already disabled (which is the case for bulk allocation).
   */
  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -250,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *freelist;
  	struct page *page;
  
-@@ -2558,6 +2612,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2564,6 +2618,13 @@ static void *___slab_alloc(struct kmem_c
  	VM_BUG_ON(!c->page->frozen);
  	c->freelist = get_freepointer(s, freelist);
  	c->tid = next_tid(c->tid);
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return freelist;
  
  new_slab:
-@@ -2589,7 +2650,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2595,7 +2656,7 @@ static void *___slab_alloc(struct kmem_c
  	deactivate_slab(s, page, get_freepointer(s, freelist));
  	c->page = NULL;
  	c->freelist = NULL;
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2601,6 +2662,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2607,6 +2668,7 @@ static void *__slab_alloc(struct kmem_ca
  {
  	void *p;
  	unsigned long flags;
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
-@@ -2612,8 +2674,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2618,8 +2680,9 @@ static void *__slab_alloc(struct kmem_ca
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
  
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return p;
  }
  
-@@ -2799,7 +2862,7 @@ static void __slab_free(struct kmem_cach
+@@ -2805,7 +2868,7 @@ static void __slab_free(struct kmem_cach
  
  	do {
  		if (unlikely(n)) {
@@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			n = NULL;
  		}
  		prior = page->freelist;
-@@ -2831,7 +2894,7 @@ static void __slab_free(struct kmem_cach
+@@ -2837,7 +2900,7 @@ static void __slab_free(struct kmem_cach
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  			}
  		}
-@@ -2873,7 +2936,7 @@ static void __slab_free(struct kmem_cach
+@@ -2879,7 +2942,7 @@ static void __slab_free(struct kmem_cach
  		add_partial(n, page, DEACTIVATE_TO_TAIL);
  		stat(s, FREE_ADD_PARTIAL);
  	}
@@ -319,7 +319,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return;
  
  slab_empty:
-@@ -2888,7 +2951,7 @@ static void __slab_free(struct kmem_cach
+@@ -2894,7 +2957,7 @@ static void __slab_free(struct kmem_cach
  		remove_full(s, n, page);
  	}
  
@@ -328,7 +328,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  }
-@@ -3093,6 +3156,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3099,6 +3162,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			  void **p)
  {
  	struct kmem_cache_cpu *c;
@@ -336,7 +336,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	int i;
  
  	/* memcg and kmem_cache debug support */
-@@ -3116,7 +3180,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3122,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			 * of re-populating per CPU c->freelist
  			 */
  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			if (unlikely(!p[i]))
  				goto error;
  
-@@ -3128,6 +3192,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3134,6 +3198,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  	}
  	c->tid = next_tid(c->tid);
  	local_irq_enable();
@@ -353,7 +353,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Clear memory outside IRQ disabled fastpath loop */
  	if (unlikely(flags & __GFP_ZERO)) {
-@@ -3275,7 +3340,7 @@ static void
+@@ -3281,7 +3346,7 @@ static void
  init_kmem_cache_node(struct kmem_cache_node *n)
  {
  	n->nr_partial = 0;
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	INIT_LIST_HEAD(&n->partial);
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_set(&n->nr_slabs, 0);
-@@ -3619,6 +3684,10 @@ static void list_slab_objects(struct kme
+@@ -3625,6 +3690,10 @@ static void list_slab_objects(struct kme
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
@@ -373,7 +373,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *addr = page_address(page);
  	void *p;
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3639,6 +3708,7 @@ static void list_slab_objects(struct kme
+@@ -3645,6 +3714,7 @@ static void list_slab_objects(struct kme
  	slab_unlock(page);
  	kfree(map);
  #endif
@@ -381,7 +381,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -3652,7 +3722,7 @@ static void free_partial(struct kmem_cac
+@@ -3658,7 +3728,7 @@ static void free_partial(struct kmem_cac
  	struct page *page, *h;
  
  	BUG_ON(irqs_disabled());
@@ -390,7 +390,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
  		if (!page->inuse) {
  			remove_partial(n, page);
-@@ -3662,7 +3732,7 @@ static void free_partial(struct kmem_cac
+@@ -3668,7 +3738,7 @@ static void free_partial(struct kmem_cac
  			"Objects remaining in %s on __kmem_cache_shutdown()");
  		}
  	}
@@ -399,7 +399,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	list_for_each_entry_safe(page, h, &discard, lru)
  		discard_slab(s, page);
-@@ -3905,7 +3975,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3912,7 +3982,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -408,7 +408,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -3936,7 +4006,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3943,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -417,7 +417,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4112,6 +4182,12 @@ void __init kmem_cache_init(void)
+@@ -4156,6 +4226,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -430,7 +430,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4320,7 +4396,7 @@ static int validate_slab_node(struct kme
+@@ -4364,7 +4440,7 @@ static int validate_slab_node(struct kme
  	struct page *page;
  	unsigned long flags;
  
@@ -439,7 +439,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	list_for_each_entry(page, &n->partial, lru) {
  		validate_slab_slab(s, page, map);
-@@ -4342,7 +4418,7 @@ static int validate_slab_node(struct kme
+@@ -4386,7 +4462,7 @@ static int validate_slab_node(struct kme
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -448,7 +448,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return count;
  }
  
-@@ -4530,12 +4606,12 @@ static int list_locations(struct kmem_ca
+@@ -4574,12 +4650,12 @@ static int list_locations(struct kmem_ca
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
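
Two things are worth noting in this refresh. First, the RT guard around
enabling interrupts in allocate_slab() now keys off
system_state > SYSTEM_BOOTING instead of == SYSTEM_RUNNING, matching the
4.11 system_state rework. Second, the central trick of the patch is a
per-CPU deferred-free list: under the now-raw list_lock, slab pages are
queued instead of freed, then flushed once the lock is dropped. A sketch of
that structure (names follow the RT patch; details are assumptions):

struct slub_free_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};
static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);

static void free_delayed(struct list_head *h)
{
	while (!list_empty(h)) {
		struct page *page = list_first_entry(h, struct page, lru);

		list_del(&page->lru);
		__free_slab(page->slab_cache, page);
	}
}
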
diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
index 69e4786..7f0afec 100644
--- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:13 -0500
 Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Disable preemption on -RT for the vmstat code. On vanilla the code runs in
 IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
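
The mechanics are a thin wrapper pair: preempt_disable_rt() and
preempt_enable_rt() expand to real preemption control only on RT and to
nothing otherwise. A hedged sketch of how a counter update ends up looking:

static inline void vm_counter_add(unsigned long __percpu *ctr, long delta)
{
	preempt_disable_rt();	/* no-op on !RT (IRQs are off there already) */
	__this_cpu_add(*ctr, delta);
	preempt_enable_rt();
}
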
diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index a3cc82f..63eeea1 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
 Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The following trace is triggered when running ltp oom test cases:
 
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1792,7 +1792,7 @@ static void drain_all_stock(struct mem_c
+@@ -1782,7 +1782,7 @@ static void drain_all_stock(struct mem_c
  		return;
  	/* Notify other cpus that system-wide "drain" is running */
  	get_online_cpus();
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	for_each_online_cpu(cpu) {
  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  		struct mem_cgroup *memcg;
-@@ -1809,7 +1809,7 @@ static void drain_all_stock(struct mem_c
+@@ -1799,7 +1799,7 @@ static void drain_all_stock(struct mem_c
  				schedule_work_on(cpu, &stock->work);
  		}
  	}
diff --git a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
index 544c331..0421dd5 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: mm/memcontrol: Replace local_irq_disable with local locks
 Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There are a few local_irq_disable() which then take sleeping locks. This
 patch converts them to local locks.
@@ -13,15 +13,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -67,6 +67,7 @@
+@@ -69,6 +69,7 @@
  #include <net/sock.h>
  #include <net/ip.h>
  #include "slab.h"
 +#include <linux/locallock.h>
  
- #include <asm/uaccess.h>
+ #include <linux/uaccess.h>
  
-@@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
+@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
  #define do_swap_account		0
  #endif
  
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /* Whether legacy memory+swap accounting is active */
  static bool do_memsw_account(void)
  {
-@@ -4555,12 +4558,12 @@ static int mem_cgroup_move_account(struc
+@@ -4535,12 +4538,12 @@ static int mem_cgroup_move_account(struc
  
  	ret = 0;
  
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  out_unlock:
  	unlock_page(page);
  out:
-@@ -5435,10 +5438,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5422,10 +5425,10 @@ void mem_cgroup_commit_charge(struct pag
  
  	commit_charge(page, memcg, lrucare);
  
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (do_memsw_account() && PageSwapCache(page)) {
  		swp_entry_t entry = { .val = page_private(page) };
-@@ -5494,14 +5497,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5481,14 +5484,14 @@ static void uncharge_batch(struct mem_cg
  		memcg_oom_recover(memcg);
  	}
  
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	if (!mem_cgroup_is_root(memcg))
  		css_put_many(&memcg->css, nr_pages);
-@@ -5850,6 +5853,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5838,6 +5841,7 @@ void mem_cgroup_swapout(struct page *pag
  {
  	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned short oldid;
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5890,12 +5894,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5878,12 +5882,16 @@ void mem_cgroup_swapout(struct page *pag
  	 * important here to have the interrupts disabled because it is the
  	 * only synchronisation we have for udpating the per-CPU variables.
  	 */
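
The replacement pattern, condensed: the statistics/event update pairs that
used local_irq_disable()/local_irq_enable() now nest inside one named local
lock, event_lock, so they stay irq-protected on !RT and become a per-CPU
sleeping lock on RT. A sketch (function names as in mm/memcontrol.c of this
era; treat the exact signatures as assumptions):

static DEFINE_LOCAL_IRQ_LOCK(event_lock);

static void commit_example(struct mem_cgroup *memcg, struct page *page,
			   int nr_pages)
{
	unsigned long flags;

	/* Was: local_irq_disable(); */
	local_lock_irqsave(event_lock, flags);
	mem_cgroup_charge_statistics(memcg, page, false, nr_pages);
	memcg_check_events(memcg, page);
	/* Was: local_irq_enable(); */
	local_unlock_irqrestore(event_lock, flags);
}
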
diff --git a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 986f56a..6bc89ea 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 5 Jun 2016 08:11:13 +0200
 Subject: [PATCH] mm/memcontrol: mem_cgroup_migrate() - replace another
  local_irq_disable() w. local_lock_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
 Convert it to use the existing local lock (event_lock) like the others.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -5659,10 +5659,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5646,10 +5646,10 @@ void mem_cgroup_migrate(struct page *old
  
  	commit_charge(newpage, memcg, false);
  
diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
index e3e776f..17b13ce 100644
--- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -1,7 +1,7 @@
 Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 27 Sep 2012 11:11:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The plain spinlock, while sufficient, does not update the local_lock
 internals. Use a proper local_lock function instead to ease debugging.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -286,9 +286,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -291,9 +291,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
  
  #ifdef CONFIG_PREEMPT_RT_BASE
  # define cpu_lock_irqsave(cpu, flags)		\
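
With this patch the RT branch of these helpers routes through the locallock
API so the lock state stays visible to its debugging machinery; roughly (the
!RT branch shown here is an assumption, not part of this hunk):

#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags)		\
	local_lock_irqsave_on(pa_lock, flags, cpu)
# define cpu_unlock_irqrestore(cpu, flags)	\
	local_unlock_irqrestore_on(pa_lock, flags, cpu)
#else
# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
#endif
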
diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
index 3878f43..004aa97 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri Jul 3 08:44:37 2009 -0500
 Subject: mm: page_alloc: Reduce lock sections further
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Split out the pages which are to be freed into a separate list and
 call free_pages_bulk() outside of the percpu page allocator locks.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -1085,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1099,7 +1099,7 @@ static bool bulkfree_pcp_prepare(struct
  #endif /* CONFIG_DEBUG_VM */
  
  /*
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Assumes all pages on list are in same zone, and of same order.
   * count is the number of pages to free.
   *
-@@ -1096,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1110,19 +1110,58 @@ static bool bulkfree_pcp_prepare(struct
   * pinned" detection logic.
   */
  static void free_pcppages_bulk(struct zone *zone, int count,
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	while (count) {
  		struct page *page;
  		struct list_head *list;
-@@ -1124,7 +1163,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1138,7 +1177,7 @@ static void free_pcppages_bulk(struct zo
  			batch_free++;
  			if (++migratetype == MIGRATE_PCPTYPES)
  				migratetype = 0;
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		} while (list_empty(list));
  
  		/* This is the only non-empty list. Free them all. */
-@@ -1132,27 +1171,12 @@ static void free_pcppages_bulk(struct zo
+@@ -1146,27 +1185,12 @@ static void free_pcppages_bulk(struct zo
  			batch_free = count;
  
  		do {
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void free_one_page(struct zone *zone,
-@@ -1161,7 +1185,9 @@ static void free_one_page(struct zone *z
+@@ -1175,7 +1199,9 @@ static void free_one_page(struct zone *z
  				int migratetype)
  {
  	unsigned long nr_scanned;
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
  	if (nr_scanned)
  		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-@@ -1171,7 +1197,7 @@ static void free_one_page(struct zone *z
+@@ -1185,7 +1211,7 @@ static void free_one_page(struct zone *z
  		migratetype = get_pfnblock_migratetype(page, pfn);
  	}
  	__free_one_page(page, pfn, zone, order, migratetype);
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -2259,16 +2285,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2299,16 +2325,18 @@ static int rmqueue_bulk(struct zone *zon
  void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  {
  	unsigned long flags;
@@ -164,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  #endif
  
-@@ -2284,16 +2312,21 @@ static void drain_pages_zone(unsigned in
+@@ -2324,16 +2352,21 @@ static void drain_pages_zone(unsigned in
  	unsigned long flags;
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2475,8 +2508,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2556,8 +2589,13 @@ void free_hot_cold_page(struct page *pag
  	pcp->count++;
  	if (pcp->count >= pcp->high) {
  		unsigned long batch = READ_ONCE(pcp->batch);
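
The overall shape after this patch: callers detach pages from the per-CPU
lists while holding pa_lock, then hand the detached list to
free_pcppages_bulk() with the lock already dropped. A condensed sketch
(isolate_pcp_pages() is the helper the patch introduces; its exact
signature is an assumption):

static void drain_zone_pcp(struct zone *zone, struct per_cpu_pages *pcp,
			   int count)
{
	unsigned long flags;
	LIST_HEAD(dst);

	local_lock_irqsave(pa_lock, flags);
	isolate_pcp_pages(count, pcp, &dst);	/* unlink under the lock */
	local_unlock_irqrestore(pa_lock, flags);

	free_pcppages_bulk(zone, count, &dst);	/* real freeing, lock dropped */
}
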
diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index d30d26c..3fa3dfa 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:37 -0500
 Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
 method into a preemptible, explicit-per-cpu-locks method.
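 
 For readers new to the -rt tree: the core idiom here is the "local lock"
 from <linux/locallock.h>, introduced elsewhere in this patch queue. A
 minimal sketch of the conversion (example_free() is a placeholder name;
 the lock name follows the patch):
 
 	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
 
 	static void example_free(void)
 	{
 		unsigned long flags;
 
 		/* was: local_irq_save(flags); */
 		local_lock_irqsave(pa_lock, flags);
 		/* ... touch this CPU's pageset ... */
 		/* was: local_irq_restore(flags); */
 		local_unlock_irqrestore(pa_lock, flags);
 	}
 
 On !RT kernels local_lock_irqsave() falls back to local_irq_save(); on
 PREEMPT_RT it takes a per-CPU sleeping spinlock, so the section stays
 preemptible while still being serialised per CPU.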
@@ -13,20 +13,20 @@ Contains fixes from:
 Signed-off-by: Ingo Molnar <mingo at elte.hu>
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- mm/page_alloc.c |   57 ++++++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 41 insertions(+), 16 deletions(-)
+ mm/page_alloc.c |   55 +++++++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 39 insertions(+), 16 deletions(-)
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -61,6 +61,7 @@
- #include <linux/page_ext.h>
+@@ -62,6 +62,7 @@
  #include <linux/hugetlb.h>
  #include <linux/sched/rt.h>
+ #include <linux/sched/mm.h>
 +#include <linux/locallock.h>
  #include <linux/page_owner.h>
  #include <linux/kthread.h>
  #include <linux/memcontrol.h>
-@@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -286,6 +287,18 @@ EXPORT_SYMBOL(nr_node_ids);
  EXPORT_SYMBOL(nr_online_nodes);
  #endif
  
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  int page_group_by_mobility_disabled __read_mostly;
  
  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1244,10 +1257,10 @@ static void __free_pages_ok(struct page
+@@ -1258,10 +1271,10 @@ static void __free_pages_ok(struct page
  		return;
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2248,14 +2261,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2288,14 +2301,14 @@ void drain_zone_pages(struct zone *zone,
  	unsigned long flags;
  	int to_drain, batch;
  
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  #endif
  
-@@ -2272,7 +2285,7 @@ static void drain_pages_zone(unsigned in
+@@ -2312,7 +2325,7 @@ static void drain_pages_zone(unsigned in
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
  
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	pset = per_cpu_ptr(zone->pageset, cpu);
  
  	pcp = &pset->pcp;
-@@ -2280,7 +2293,7 @@ static void drain_pages_zone(unsigned in
+@@ -2320,7 +2333,7 @@ static void drain_pages_zone(unsigned in
  		free_pcppages_bulk(zone, pcp->count, pcp);
  		pcp->count = 0;
  	}
@@ -93,25 +93,47 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2366,8 +2379,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2355,6 +2368,7 @@ void drain_local_pages(struct zone *zone
+ 		drain_pages(cpu);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_BASE
+ static void drain_local_pages_wq(struct work_struct *work)
+ {
+ 	/*
+@@ -2368,6 +2382,7 @@ static void drain_local_pages_wq(struct
+ 	drain_local_pages(NULL);
+ 	preempt_enable();
+ }
++#endif
+ 
+ /*
+  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+@@ -2438,7 +2453,14 @@ void drain_all_pages(struct zone *zone)
  		else
  			cpumask_clear_cpu(cpu, &cpus_with_pcps);
  	}
-+#ifndef CONFIG_PREEMPT_RT_BASE
- 	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
- 								zone, 1);
-+#else
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
 +	for_each_cpu(cpu, &cpus_with_pcps) {
 +		if (zone)
 +			drain_pages_zone(cpu, zone);
 +		else
 +			drain_pages(cpu);
 +	}
++#else
+ 	for_each_cpu(cpu, &cpus_with_pcps) {
+ 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+ 		INIT_WORK(work, drain_local_pages_wq);
+@@ -2446,6 +2468,7 @@ void drain_all_pages(struct zone *zone)
+ 	}
+ 	for_each_cpu(cpu, &cpus_with_pcps)
+ 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
 +#endif
- }
  
- #ifdef CONFIG_HIBERNATION
-@@ -2427,7 +2449,7 @@ void free_hot_cold_page(struct page *pag
+ 	mutex_unlock(&pcpu_drain_mutex);
+ }
+@@ -2507,7 +2530,7 @@ void free_hot_cold_page(struct page *pag
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
  	set_pcppage_migratetype(page, migratetype);
@@ -120,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	__count_vm_event(PGFREE);
  
  	/*
-@@ -2458,7 +2480,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2538,7 +2561,7 @@ void free_hot_cold_page(struct page *pag
  	}
  
  out:
@@ -129,45 +151,42 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -2600,7 +2622,7 @@ struct page *buffered_rmqueue(struct zon
- 		struct per_cpu_pages *pcp;
- 		struct list_head *list;
- 
--		local_irq_save(flags);
-+		local_lock_irqsave(pa_lock, flags);
- 		do {
- 			pcp = &this_cpu_ptr(zone->pageset)->pcp;
- 			list = &pcp->lists[migratetype];
-@@ -2627,7 +2649,7 @@ struct page *buffered_rmqueue(struct zon
- 		 * allocate greater than order-1 page units with __GFP_NOFAIL.
- 		 */
- 		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
--		spin_lock_irqsave(&zone->lock, flags);
-+		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- 
- 		do {
- 			page = NULL;
-@@ -2639,22 +2661,24 @@ struct page *buffered_rmqueue(struct zon
- 			if (!page)
- 				page = __rmqueue(zone, order, migratetype);
- 		} while (page && check_new_pages(page, order));
--		spin_unlock(&zone->lock);
--		if (!page)
-+		if (!page) {
-+			spin_unlock(&zone->lock);
- 			goto failed;
-+		}
- 		__mod_zone_freepage_state(zone, -(1 << order),
- 					  get_pcppage_migratetype(page));
-+		spin_unlock(&zone->lock);
+@@ -2695,7 +2718,7 @@ static struct page *rmqueue_pcplist(stru
+ 	struct page *page;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ 	list = &pcp->lists[migratetype];
+ 	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
+@@ -2703,7 +2726,7 @@ static struct page *rmqueue_pcplist(stru
+ 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ 		zone_statistics(preferred_zone, zone);
  	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 	return page;
+ }
+ 
+@@ -2730,7 +2753,7 @@ struct page *rmqueue(struct zone *prefer
+ 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
+ 	 */
+ 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+-	spin_lock_irqsave(&zone->lock, flags);
++	local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ 
+ 	do {
+ 		page = NULL;
+@@ -2750,14 +2773,14 @@ struct page *rmqueue(struct zone *prefer
  
  	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- 	zone_statistics(preferred_zone, zone, gfp_flags);
+ 	zone_statistics(preferred_zone, zone);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);
  
- 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ out:
+ 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
  	return page;
  
  failed:
@@ -176,15 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return NULL;
  }
  
-@@ -6558,6 +6582,7 @@ static int page_alloc_cpu_notify(struct
- void __init page_alloc_init(void)
- {
- 	hotcpu_notifier(page_alloc_cpu_notify, 0);
-+	local_irq_lock_init(pa_lock);
- }
- 
- /*
-@@ -7386,7 +7411,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7591,7 +7614,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;
  
  	/* avoid races with drain_pages()  */
@@ -193,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7395,7 +7420,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7600,7 +7623,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
diff --git a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
index a8da1e2..d315e91 100644
--- a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
+++ b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
@@ -1,7 +1,7 @@
 From: Luiz Capitulino <lcapitulino at redhat.com>
 Date: Fri, 27 May 2016 15:03:28 +0200
 Subject: [PATCH] mm: perform lru_add_drain_all() remotely
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
 on all CPUs that have non-empty LRU pagevecs and then waiting for
@@ -20,12 +20,12 @@ Signed-off-by: Rik van Riel <riel at redhat.com>
 Signed-off-by: Luiz Capitulino <lcapitulino at redhat.com>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- mm/swap.c |   42 ++++++++++++++++++++++++++++++++----------
- 1 file changed, 32 insertions(+), 10 deletions(-)
+ mm/swap.c |   37 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 30 insertions(+), 7 deletions(-)
 
 --- a/mm/swap.c
 +++ b/mm/swap.c
-@@ -597,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -599,9 +599,15 @@ void lru_add_drain_cpu(int cpu)
  		unsigned long flags;
  
  		/* No harm done if a racing interrupt already did this */
@@ -41,41 +41,33 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -667,12 +673,15 @@ void lru_add_drain(void)
+@@ -669,6 +675,16 @@ void lru_add_drain(void)
  	local_unlock_cpu(swapvec_lock);
  }
  
--static void lru_add_drain_per_cpu(struct work_struct *dummy)
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
- {
--	lru_add_drain();
++{
 +	local_lock_on(swapvec_lock, cpu);
 +	lru_add_drain_cpu(cpu);
 +	local_unlock_on(swapvec_lock, cpu);
- }
- 
--static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++}
++
 +#else
++
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+ {
+ 	lru_add_drain();
+@@ -676,6 +692,16 @@ static void lru_add_drain_per_cpu(struct
  
- /*
-  * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
-@@ -692,6 +701,22 @@ static int __init lru_init(void)
- }
- early_initcall(lru_init);
+ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
  
-+static void lru_add_drain_per_cpu(struct work_struct *dummy)
-+{
-+	lru_add_drain();
-+}
-+
-+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
 +{
 +	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 +
 +	INIT_WORK(work, lru_add_drain_per_cpu);
-+	queue_work_on(cpu, lru_add_drain_wq, work);
++	queue_work_on(cpu, mm_percpu_wq, work);
 +	cpumask_set_cpu(cpu, has_work);
 +}
 +#endif
@@ -83,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void lru_add_drain_all(void)
  {
  	static DEFINE_MUTEX(lock);
-@@ -703,21 +728,18 @@ void lru_add_drain_all(void)
+@@ -694,21 +720,18 @@ void lru_add_drain_all(void)
  	cpumask_clear(&has_work);
  
  	for_each_online_cpu(cpu) {
@@ -95,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 -		    need_activate_page_drain(cpu)) {
 -			INIT_WORK(work, lru_add_drain_per_cpu);
--			queue_work_on(cpu, lru_add_drain_wq, work);
+-			queue_work_on(cpu, mm_percpu_wq, work);
 -			cpumask_set_cpu(cpu, &has_work);
 -		}
 +		    need_activate_page_drain(cpu))
diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
index daa6d92..e784a0e 100644
--- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
+++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Tue, 15 May 2012 13:53:56 +0800
 Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use preempt_*_rt instead of local_irq_*_rt, otherwise there will be a
 warning on ARM like the one below:
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/exec.c
 +++ b/fs/exec.c
-@@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1022,12 +1022,14 @@ static int exec_mmap(struct mm_struct *m
  		}
  	}
  	task_lock(tsk);
@@ -54,15 +54,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		up_read(&old_mm->mmap_sem);
 --- a/mm/mmu_context.c
 +++ b/mm/mmu_context.c
-@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
+@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm)
  	struct task_struct *tsk = current;
  
  	task_lock(tsk);
 +	preempt_disable_rt();
  	active_mm = tsk->active_mm;
  	if (active_mm != mm) {
- 		atomic_inc(&mm->mm_count);
-@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
+ 		mmgrab(mm);
+@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm)
  	}
  	tsk->mm = mm;
  	switch_mm(active_mm, mm, tsk);
diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
index 09bdb60..36cb66f 100644
--- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
 Subject: mm, rt: kmap_atomic scheduling
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 In fact, with migrate_disable() existing one could play games with
 kmap_atomic. You could save/restore the kmap_atomic slots on context
@@ -31,7 +31,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
 
 --- a/arch/x86/kernel/process_32.c
 +++ b/arch/x86/kernel/process_32.c
-@@ -35,6 +35,7 @@
+@@ -37,6 +37,7 @@
  #include <linux/uaccess.h>
  #include <linux/io.h>
  #include <linux/kdebug.h>
@@ -39,7 +39,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
  
  #include <asm/pgtable.h>
  #include <asm/ldt.h>
-@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsig
+@@ -196,6 +197,35 @@ start_thread(struct pt_regs *regs, unsig
  }
  EXPORT_SYMBOL_GPL(start_thread);
  
@@ -222,26 +222,26 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -26,6 +26,7 @@ struct sched_param {
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
- #include <linux/preempt.h>
+@@ -26,6 +26,7 @@
+ #include <linux/signal_types.h>
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
 +#include <asm/kmap_types.h>
  
- #include <asm/page.h>
- #include <asm/ptrace.h>
-@@ -1986,6 +1987,12 @@ struct task_struct {
- 	int softirq_nestcnt;
- 	unsigned int softirqs_raised;
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -1062,6 +1063,12 @@ struct task_struct {
+ 	int				softirq_nestcnt;
+ 	unsigned int			softirqs_raised;
  #endif
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
-+	int kmap_idx;
-+	pte_t kmap_pte[KM_TYPE_NR];
++	int				kmap_idx;
++	pte_t				kmap_pte[KM_TYPE_NR];
 +# endif
 +#endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- 	unsigned long	task_state_change;
+ 	unsigned long			task_state_change;
  #endif
 --- a/include/linux/uaccess.h
 +++ b/include/linux/uaccess.h
diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 40ee4b6..bfcc77f 100644
--- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,17 +1,15 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:34 -0500
 Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
-The local_irq_save() is not only used to get things done "fast" but
-also to ensure that in case of SG_MITER_ATOMIC we are in "atomic"
-context for kmap_atomic(). For -RT it is enough to keep pagefault
-disabled (which is currently handled by kmap_atomic()).
+For -RT it is enough to keep pagefault disabled (which is currently handled by
+kmap_atomic()).
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- lib/scatterlist.c |    6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
+ lib/scatterlist.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
 
 --- a/lib/scatterlist.c
 +++ b/lib/scatterlist.c
@@ -24,21 +22,3 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			kunmap_atomic(miter->addr);
  		} else
  			kunmap(miter->page);
-@@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist
- 	if (!sg_miter_skip(&miter, skip))
- 		return false;
- 
--	local_irq_save(flags);
-+	local_irq_save_nort(flags);
- 
- 	while (sg_miter_next(&miter) && offset < buflen) {
- 		unsigned int len;
-@@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist
- 
- 	sg_miter_stop(&miter);
- 
--	local_irq_restore(flags);
-+	local_irq_restore_nort(flags);
- 	return offset;
- }
- EXPORT_SYMBOL(sg_copy_buffer);
diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
index a41bd7b..e97e344 100644
--- a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: mm/vmalloc: Another preempt disable region which sucks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Avoid the preempt disable version of get_cpu_var(). The inner-lock should
 provide enough serialisation.
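 
 A sketch of the resulting idiom, assuming the -rt get_cpu_light() and
 put_cpu_light() helpers, which only pin the task to its CPU instead of
 disabling preemption (surrounding code abridged):
 
 	/* was: vbq = &get_cpu_var(vmap_block_queue); -- preemption off */
 	cpu = get_cpu_light();
 	vbq = this_cpu_ptr(&vmap_block_queue);
 	spin_lock(&vbq->lock);		/* the inner lock does the real work */
 	list_add_tail_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
 	/* was: put_cpu_var(vmap_block_queue); */
 	put_cpu_light();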
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
-@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int
+@@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int
  	struct vmap_block *vb;
  	struct vmap_area *va;
  	unsigned long vb_idx;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void *vaddr;
  
  	node = numa_node_id();
-@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int
+@@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int
  	BUG_ON(err);
  	radix_tree_preload_end();
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return vaddr;
  }
-@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size
+@@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size
  	struct vmap_block *vb;
  	void *vaddr = NULL;
  	unsigned int order;
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	BUG_ON(offset_in_page(size));
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size
+@@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size
  	order = get_order(size);
  
  	rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  		unsigned long pages_off;
  
-@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size
+@@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size
  		break;
  	}
  
diff --git a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index b2a9de1..a9d6e7c 100644
--- a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 17:19:44 +0100
 Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 workingset_shadow_nodes is protected by local_irq_disable(). Some users
 use spin_lock_irq().
@@ -11,10 +11,10 @@ so I catch users of it which will be introduced later.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  include/linux/swap.h |    4 +++-
- mm/filemap.c         |   13 +++++++++----
- mm/truncate.c        |    7 +++++--
- mm/workingset.c      |   23 ++++++++++++-----------
- 4 files changed, 29 insertions(+), 18 deletions(-)
+ mm/filemap.c         |    9 +++++++--
+ mm/truncate.c        |    4 +++-
+ mm/workingset.c      |   31 ++++++++++++++++---------------
+ 4 files changed, 29 insertions(+), 19 deletions(-)
 
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
@@ -26,129 +26,159 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #include <asm/page.h>
  
  struct notifier_block;
-@@ -247,7 +248,8 @@ struct swap_info_struct {
+@@ -254,7 +255,8 @@ struct swap_info_struct {
  void *workingset_eviction(struct address_space *mapping, struct page *page);
  bool workingset_refault(void *shadow);
  void workingset_activation(struct page *page);
--extern struct list_lru workingset_shadow_nodes;
-+extern struct list_lru __workingset_shadow_nodes;
-+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+-void workingset_update_node(struct radix_tree_node *node, void *private);
++void __workingset_update_node(struct radix_tree_node *node, void *private);
++DECLARE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
  
- static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
- {
+ /* linux/mm/page_alloc.c */
+ extern unsigned long totalram_pages;
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
-@@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct
- 		 * node->private_list is protected by
- 		 * mapping->tree_lock.
- 		 */
--		if (!list_empty(&node->private_list))
--			list_lru_del(&workingset_shadow_nodes,
-+		if (!list_empty(&node->private_list)) {
-+			local_lock(workingset_shadow_lock);
-+			list_lru_del(&__workingset_shadow_nodes,
- 				     &node->private_list);
-+			local_unlock(workingset_shadow_lock);
-+		}
+@@ -110,6 +110,7 @@
+  * ->i_mmap_rwsem
+  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
+  */
++DECLARE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
+ 
+ static int page_cache_tree_insert(struct address_space *mapping,
+ 				  struct page *page, void **shadowp)
+@@ -142,8 +143,10 @@ static int page_cache_tree_insert(struct
+ 						      true);
+ 		}
  	}
++	local_lock(shadow_nodes_lock);
+ 	__radix_tree_replace(&mapping->page_tree, node, slot, page,
+-			     workingset_update_node, mapping);
++			     __workingset_update_node, mapping);
++	local_unlock(shadow_nodes_lock);
+ 	mapping->nrpages++;
  	return 0;
  }
-@@ -217,8 +220,10 @@ static void page_cache_tree_delete(struc
- 		if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
- 				list_empty(&node->private_list)) {
- 			node->private_data = mapping;
--			list_lru_add(&workingset_shadow_nodes,
--					&node->private_list);
-+			local_lock(workingset_shadow_lock);
-+			list_lru_add(&__workingset_shadow_nodes,
-+				     &node->private_list);
-+			local_unlock(workingset_shadow_lock);
- 		}
+@@ -160,6 +163,7 @@ static void page_cache_tree_delete(struc
+ 	VM_BUG_ON_PAGE(PageTail(page), page);
+ 	VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+ 
++	local_lock(shadow_nodes_lock);
+ 	for (i = 0; i < nr; i++) {
+ 		struct radix_tree_node *node;
+ 		void **slot;
+@@ -171,8 +175,9 @@ static void page_cache_tree_delete(struc
+ 
+ 		radix_tree_clear_tags(&mapping->page_tree, node, slot);
+ 		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+-				     workingset_update_node, mapping);
++				     __workingset_update_node, mapping);
  	}
++	local_unlock(shadow_nodes_lock);
  
+ 	if (shadow) {
+ 		mapping->nrexceptional += nr;
 --- a/mm/truncate.c
 +++ b/mm/truncate.c
-@@ -62,9 +62,12 @@ static void clear_exceptional_entry(stru
- 	 * protected by mapping->tree_lock.
- 	 */
- 	if (!workingset_node_shadows(node) &&
--	    !list_empty(&node->private_list))
--		list_lru_del(&workingset_shadow_nodes,
-+	    !list_empty(&node->private_list)) {
-+		local_lock(workingset_shadow_lock);
-+		list_lru_del(&__workingset_shadow_nodes,
- 				&node->private_list);
-+		local_unlock(workingset_shadow_lock);
-+	}
- 	__radix_tree_delete_node(&mapping->page_tree, node);
+@@ -41,8 +41,10 @@ static void clear_shadow_entry(struct ad
+ 		goto unlock;
+ 	if (*slot != entry)
+ 		goto unlock;
++	local_lock(shadow_nodes_lock);
+ 	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
+-			     workingset_update_node, mapping);
++			     __workingset_update_node, mapping);
++	local_unlock(shadow_nodes_lock);
+ 	mapping->nrexceptional--;
  unlock:
  	spin_unlock_irq(&mapping->tree_lock);
 --- a/mm/workingset.c
 +++ b/mm/workingset.c
-@@ -334,7 +334,8 @@ void workingset_activation(struct page *
+@@ -339,9 +339,10 @@ void workingset_activation(struct page *
   * point where they would still be useful.
   */
  
--struct list_lru workingset_shadow_nodes;
-+struct list_lru __workingset_shadow_nodes;
-+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+-static struct list_lru shadow_nodes;
++static struct list_lru __shadow_nodes;
++DEFINE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
  
- static unsigned long count_shadow_nodes(struct shrinker *shrinker,
- 					struct shrink_control *sc)
-@@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(
- 	unsigned long pages;
+-void workingset_update_node(struct radix_tree_node *node, void *private)
++void __workingset_update_node(struct radix_tree_node *node, void *private)
+ {
+ 	struct address_space *mapping = private;
+ 
+@@ -359,10 +360,10 @@ void workingset_update_node(struct radix
+ 	 */
+ 	if (node->count && node->count == node->exceptional) {
+ 		if (list_empty(&node->private_list))
+-			list_lru_add(&shadow_nodes, &node->private_list);
++			list_lru_add(&__shadow_nodes, &node->private_list);
+ 	} else {
+ 		if (!list_empty(&node->private_list))
+-			list_lru_del(&shadow_nodes, &node->private_list);
++			list_lru_del(&__shadow_nodes, &node->private_list);
+ 	}
+ }
+ 
+@@ -374,9 +375,9 @@ static unsigned long count_shadow_nodes(
+ 	unsigned long cache;
  
  	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
 -	local_irq_disable();
--	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
+-	nodes = list_lru_shrink_count(&shadow_nodes, sc);
 -	local_irq_enable();
-+	local_lock_irq(workingset_shadow_lock);
-+	shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
-+	local_unlock_irq(workingset_shadow_lock);
++	local_lock_irq(shadow_nodes_lock);
++	nodes = list_lru_shrink_count(&__shadow_nodes, sc);
++	local_unlock_irq(shadow_nodes_lock);
+ 
+ 	/*
+ 	 * Approximate a reasonable limit for the radix tree nodes
+@@ -478,15 +479,15 @@ static enum lru_status shadow_lru_isolat
+ 	mem_cgroup_inc_page_stat(virt_to_page(node),
+ 				 MEMCG_WORKINGSET_NODERECLAIM);
+ 	__radix_tree_delete_node(&mapping->page_tree, node,
+-				 workingset_update_node, mapping);
++				 __workingset_update_node, mapping);
  
- 	if (sc->memcg) {
- 		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-@@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolat
+ out_invalid:
  	spin_unlock(&mapping->tree_lock);
  	ret = LRU_REMOVED_RETRY;
  out:
 -	local_irq_enable();
-+	local_unlock_irq(workingset_shadow_lock);
++	local_unlock_irq(shadow_nodes_lock);
  	cond_resched();
 -	local_irq_disable();
-+	local_lock_irq(workingset_shadow_lock);
++	local_lock_irq(shadow_nodes_lock);
  	spin_lock(lru_lock);
  	return ret;
  }
-@@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(s
+@@ -497,9 +498,9 @@ static unsigned long scan_shadow_nodes(s
  	unsigned long ret;
  
  	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
 -	local_irq_disable();
--	ret =  list_lru_shrink_walk(&workingset_shadow_nodes, sc,
-+	local_lock_irq(workingset_shadow_lock);
-+	ret =  list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
- 				    shadow_lru_isolate, NULL);
+-	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
 -	local_irq_enable();
-+	local_unlock_irq(workingset_shadow_lock);
++	local_lock_irq(shadow_nodes_lock);
++	ret = list_lru_shrink_walk(&__shadow_nodes, sc, shadow_lru_isolate, NULL);
++	local_unlock_irq(shadow_nodes_lock);
  	return ret;
  }
  
-@@ -492,7 +493,7 @@ static int __init workingset_init(void)
+@@ -537,7 +538,7 @@ static int __init workingset_init(void)
  	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
  	       timestamp_bits, max_order, bucket_order);
  
--	ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
-+	ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key);
+-	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
++	ret = __list_lru_init(&__shadow_nodes, true, &shadow_nodes_key);
  	if (ret)
  		goto err;
  	ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -500,7 +501,7 @@ static int __init workingset_init(void)
+@@ -545,7 +546,7 @@ static int __init workingset_init(void)
  		goto err_list_lru;
  	return 0;
  err_list_lru:
--	list_lru_destroy(&workingset_shadow_nodes);
-+	list_lru_destroy(&__workingset_shadow_nodes);
+-	list_lru_destroy(&shadow_nodes);
++	list_lru_destroy(&__shadow_nodes);
  err:
  	return ret;
  }
diff --git a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 7857361..1b52a30 100644
--- a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 22 Mar 2016 11:16:09 +0100
 Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 get_cpu_var() disables preemption and triggers a might_sleep() splat later.
 This is replaced with get_locked_var().
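 
 A sketch of the swap in zs_map_object(); the lock name is taken from the
 full patch and assumed here, and the function body is abridged:
 
 	static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
 
 	/* was: area = &get_cpu_var(zs_map_area); -- preemption off */
 	area = &get_locked_var(zs_map_area_lock, zs_map_area);
 	/* ... set up the mapping; may now sleep on -rt ... */
 	put_locked_var(zs_map_area_lock, zs_map_area);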
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Object location (<PFN>, <obj_idx>) is encoded as
   * as single (unsigned long) handle value.
-@@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_p
+@@ -323,7 +337,7 @@ static void SetZsPageMovable(struct zs_p
  
  static int create_cache(struct zs_pool *pool)
  {
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  					0, 0, NULL);
  	if (!pool->handle_cachep)
  		return 1;
-@@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool
+@@ -347,10 +361,27 @@ static void destroy_cache(struct zs_pool
  
  static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
  {
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
  {
  	kmem_cache_free(pool->handle_cachep, (void *)handle);
-@@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_
+@@ -369,12 +400,18 @@ static void cache_free_zspage(struct zs_
  
  static void record_obj(unsigned long handle, unsigned long obj)
  {
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /* zpool driver */
-@@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
+@@ -463,6 +500,7 @@ MODULE_ALIAS("zpool-zsmalloc");
  
  /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
  static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static bool is_zspage_isolated(struct zspage *zspage)
  {
-@@ -902,7 +940,13 @@ static unsigned long location_to_obj(str
+@@ -898,7 +936,13 @@ static unsigned long location_to_obj(str
  
  static unsigned long handle_to_obj(unsigned long handle)
  {
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct
+@@ -912,22 +956,46 @@ static unsigned long obj_to_head(struct
  
  static inline int testpin_tag(unsigned long handle)
  {
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void reset_page(struct page *page)
-@@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -1376,7 +1444,7 @@ void *zs_map_object(struct zs_pool *pool
  	class = pool->size_class[class_idx];
  	off = (class->size * obj_idx) & ~PAGE_MASK;
  
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	area->vm_mm = mm;
  	if (off + class->size <= PAGE_SIZE) {
  		/* this object is contained entirely within a page */
-@@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1430,7 +1498,7 @@ void zs_unmap_object(struct zs_pool *poo
  
  		__zs_unmap_object(area, pages, off, class->size);
  	}
diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
index d71239d..aa64b10 100644
--- a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
+++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
@@ -1,7 +1,7 @@
 Subject: mmci: Remove bogus local_irq_save()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:11:12 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On !RT the interrupt handler runs with interrupts disabled. On RT it runs
 in a thread, so there is no need to disable interrupts at all.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/mmc/host/mmci.c
 +++ b/drivers/mmc/host/mmci.c
-@@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1204,15 +1204,12 @@ static irqreturn_t mmci_pio_irq(int irq,
  	struct sg_mapping_iter *sg_miter = &host->sg_miter;
  	struct variant_data *variant = host->variant;
  	void __iomem *base = host->base;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	do {
  		unsigned int remain, len;
  		char *buffer;
-@@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1252,8 +1249,6 @@ static irqreturn_t mmci_pio_irq(int irq,
  
  	sg_miter_stop(sg_miter);
  
diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
index 632b8ef..e82f8d8 100644
--- a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
+++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
@@ -1,7 +1,7 @@
 Date: Wed, 26 Jun 2013 15:28:11 -0400
 From: Steven Rostedt <rostedt at goodmis.org>
 Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The ntp code for notify_cmos_timer() is called from a hard interrupt
 context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks
diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
index e9db74b..ec5472d 100644
--- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
+++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:51:45 +0200
 Subject: locking: Disable spin on owner for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Drop spin on owner for mutex / rwsem. We are most likely not using it
 but…
@@ -17,8 +17,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  config MUTEX_SPIN_ON_OWNER
  	def_bool y
--	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
-+	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+-	depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
++	depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
  
  config RWSEM_SPIN_ON_OWNER
         def_bool y
diff --git a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index ee63189..1832649 100644
--- a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -2,7 +2,7 @@ From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 6 Dec 2016 17:50:30 -0500
 Subject: [PATCH] net: Have __napi_schedule_irqoff() disable interrupts on
  RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 A customer hit a crash where the napi sd->poll_list became corrupted.
 The customer had the bnx2x driver, which does a
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handle
+@@ -409,7 +409,19 @@ typedef enum rx_handler_result rx_handle
  typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
  
  void __napi_schedule(struct napi_struct *n);
@@ -51,19 +51,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4938,6 +4938,7 @@ void __napi_schedule(struct napi_struct
+@@ -4961,6 +4961,7 @@ bool napi_schedule_prep(struct napi_stru
  }
- EXPORT_SYMBOL(__napi_schedule);
+ EXPORT_SYMBOL(napi_schedule_prep);
  
 +#ifndef CONFIG_PREEMPT_RT_FULL
  /**
   * __napi_schedule_irqoff - schedule for receive
   * @n: entry to schedule
-@@ -4949,6 +4950,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4972,6 +4973,7 @@ void __napi_schedule_irqoff(struct napi_
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
 +#endif
  
- void __napi_complete(struct napi_struct *n)
+ bool napi_complete_done(struct napi_struct *n, int work_done)
  {
diff --git a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index 7d54f13..ba17295 100644
--- a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 17:36:35 +0200
 Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The seqcount disables preemption on -RT while it is held, which we can't
 remove. Also we don't want the reader to spin for ages if the writer is
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  struct gnet_stats_basic_cpu {
  	struct gnet_stats_basic_packed bstats;
-@@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct
+@@ -35,11 +36,11 @@ int gnet_stats_start_copy_compat(struct
  				 spinlock_t *lock, struct gnet_dump *d,
  				 int padattr);
  
@@ -63,23 +63,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			     struct gnet_stats_basic_packed *bstats,
  			     struct gnet_stats_basic_cpu __percpu *cpu,
  			     struct gnet_stats_basic_packed *b);
-@@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_
+@@ -56,13 +57,13 @@ int gen_new_estimator(struct gnet_stats_
  		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- 		      struct gnet_stats_rate_est64 *rate_est,
+ 		      struct net_rate_estimator __rcu **rate_est,
  		      spinlock_t *stats_lock,
 -		      seqcount_t *running, struct nlattr *opt);
 +		      net_seqlock_t *running, struct nlattr *opt);
- void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- 			struct gnet_stats_rate_est64 *rate_est);
+ void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
  int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
  			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- 			  struct gnet_stats_rate_est64 *rate_est,
+ 			  struct net_rate_estimator __rcu **ptr,
  			  spinlock_t *stats_lock,
 -			  seqcount_t *running, struct nlattr *opt);
 +			  net_seqlock_t *running, struct nlattr *opt);
- bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- 			  const struct gnet_stats_rate_est64 *rate_est);
- #endif
+ bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+ bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ 			struct gnet_stats_rate_est64 *sample);
 --- /dev/null
 +++ b/include/net/net_seq_lock.h
 @@ -0,0 +1,15 @@
@@ -169,33 +168,33 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/net/core/gen_estimator.c
 +++ b/net/core/gen_estimator.c
-@@ -84,7 +84,7 @@ struct gen_estimator
+@@ -46,7 +46,7 @@
+ struct net_rate_estimator {
  	struct gnet_stats_basic_packed	*bstats;
- 	struct gnet_stats_rate_est64	*rate_est;
  	spinlock_t		*stats_lock;
 -	seqcount_t		*running;
 +	net_seqlock_t		*running;
- 	int			ewma_log;
- 	u32			last_packets;
- 	unsigned long		avpps;
-@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_
+ 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ 	u8			ewma_log;
+ 	u8			intvl_log; /* period : (250ms << intvl_log) */
+@@ -128,7 +128,7 @@ int gen_new_estimator(struct gnet_stats_
  		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- 		      struct gnet_stats_rate_est64 *rate_est,
+ 		      struct net_rate_estimator __rcu **rate_est,
  		      spinlock_t *stats_lock,
 -		      seqcount_t *running,
 +		      net_seqlock_t *running,
  		      struct nlattr *opt)
  {
- 	struct gen_estimator *est;
-@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_st
+ 	struct gnet_estimator *parm = nla_data(opt);
+@@ -217,7 +217,7 @@ int gen_replace_estimator(struct gnet_st
  			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- 			  struct gnet_stats_rate_est64 *rate_est,
+ 			  struct net_rate_estimator __rcu **rate_est,
  			  spinlock_t *stats_lock,
 -			  seqcount_t *running, struct nlattr *opt)
 +			  net_seqlock_t *running, struct nlattr *opt)
  {
- 	gen_kill_estimator(bstats, rate_est);
- 	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+ 	return gen_new_estimator(bstats, cpu_bstats, rate_est,
+ 				 stats_lock, running, opt);
 --- a/net/core/gen_stats.c
 +++ b/net/core/gen_stats.c
 @@ -130,7 +130,7 @@ static void
@@ -231,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		      struct gnet_stats_basic_packed *b)
 --- a/net/sched/sch_api.c
 +++ b/net/sched/sch_api.c
-@@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct
+@@ -980,7 +980,7 @@ static struct Qdisc *qdisc_create(struct
  			rcu_assign_pointer(sch->stab, stab);
  		}
  		if (tca[TCA_RATE]) {
diff --git a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
index e36982e..11fd911 100644
--- a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
+++ b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 31 Aug 2016 17:54:09 +0200
 Subject: [PATCH] net: add a lock around icmp_sk()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 It looks like the this_cpu_ptr() access in icmp_sk() is protected with
 local_bh_disable(). To avoid missing serialization in -RT I am adding
 a local lock here. No crash has been observed; this is just a precaution.
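 
 The pattern, sketched (icmp_sk_lock is the lock added by the hunks below;
 error handling elided):
 
 	static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
 
 	local_bh_disable();
 	local_lock(icmp_sk_lock);	/* serialises icmp_sk() users on -rt */
 	sk = icmp_sk(net);		/* this_cpu_ptr() underneath */
 	/* ... build and send the ICMP message ... */
 	local_unlock(icmp_sk_lock);
 	local_bh_enable();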
 Cc: stable-rt at vger.kernel.org
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- net/ipv4/icmp.c |    8 ++++++++
- 1 file changed, 8 insertions(+)
+ net/ipv4/icmp.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
 
 --- a/net/ipv4/icmp.c
 +++ b/net/ipv4/icmp.c
@@ -32,42 +32,35 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static struct sock *icmp_sk(struct net *net)
  {
  	return *this_cpu_ptr(net->ipv4.icmp_sk);
-@@ -215,12 +218,14 @@ static inline struct sock *icmp_xmit_loc
+@@ -417,6 +420,7 @@ static void icmp_reply(struct icmp_bxm *
  
+ 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
  	local_bh_disable();
- 
 +	local_lock(icmp_sk_lock);
- 	sk = icmp_sk(net);
  
- 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
- 		/* This can happen if the output path signals a
- 		 * dst_link_failure() for an outgoing ICMP packet.
- 		 */
-+		local_unlock(icmp_sk_lock);
- 		local_bh_enable();
- 		return NULL;
- 	}
-@@ -230,6 +235,7 @@ static inline struct sock *icmp_xmit_loc
- static inline void icmp_xmit_unlock(struct sock *sk)
- {
- 	spin_unlock_bh(&sk->sk_lock.slock);
+ 	/* global icmp_msgs_per_sec */
+ 	if (!icmpv4_global_allow(net, type, code))
+@@ -461,6 +465,7 @@ static void icmp_reply(struct icmp_bxm *
+ out_unlock:
+ 	icmp_xmit_unlock(sk);
+ out_bh_enable:
 +	local_unlock(icmp_sk_lock);
+ 	local_bh_enable();
  }
  
- int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-@@ -358,6 +364,7 @@ static void icmp_push_reply(struct icmp_
- 	struct sock *sk;
- 	struct sk_buff *skb;
+@@ -673,6 +678,7 @@ void icmp_send(struct sk_buff *skb_in, i
  
+ 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
+ 	local_bh_disable();
 +	local_lock(icmp_sk_lock);
- 	sk = icmp_sk(dev_net((*rt)->dst.dev));
- 	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
- 			   icmp_param->data_len+icmp_param->head_len,
-@@ -380,6 +387,7 @@ static void icmp_push_reply(struct icmp_
- 		skb->ip_summed = CHECKSUM_NONE;
- 		ip_push_pending_frames(sk, fl4);
- 	}
+ 
+ 	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
+ 	if (!icmpv4_global_allow(net, type, code))
+@@ -757,6 +763,7 @@ void icmp_send(struct sk_buff *skb_in, i
+ out_unlock:
+ 	icmp_xmit_unlock(sk);
+ out_bh_enable:
 +	local_unlock(icmp_sk_lock);
+ 	local_bh_enable();
+ out:;
  }
- 
- /*
diff --git a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 0770712..d305c9c 100644
--- a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] net: add back the missing serialization in
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Some time ago Sami Pietikäinen reported a crash on -RT in
 ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire
@@ -38,8 +38,8 @@ This brings back the old locks.
 Cc: stable-rt at vger.kernel.org
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- net/ipv4/tcp_ipv4.c |    7 +++++++
- 1 file changed, 7 insertions(+)
+ net/ipv4/tcp_ipv4.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
 
 --- a/net/ipv4/tcp_ipv4.c
 +++ b/net/ipv4/tcp_ipv4.c
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include <net/net_namespace.h>
  #include <net/icmp.h>
-@@ -568,6 +569,7 @@ void tcp_v4_send_check(struct sock *sk,
+@@ -583,6 +584,7 @@ void tcp_v4_send_check(struct sock *sk,
  }
  EXPORT_SYMBOL(tcp_v4_send_check);
  
@@ -59,16 +59,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   *	This routine will send an RST to the other tcp.
   *
-@@ -695,6 +697,8 @@ static void tcp_v4_send_reset(const stru
- 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
+@@ -711,6 +713,7 @@ static void tcp_v4_send_reset(const stru
  
  	arg.tos = ip_hdr(skb)->tos;
-+
+ 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 +	local_lock(tcp_sk_lock);
  	local_bh_disable();
  	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
  			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -704,6 +708,7 @@ static void tcp_v4_send_reset(const stru
+@@ -720,6 +723,7 @@ static void tcp_v4_send_reset(const stru
  	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
  	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
  	local_bh_enable();
@@ -76,15 +75,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #ifdef CONFIG_TCP_MD5SIG
  out:
-@@ -779,6 +784,7 @@ static void tcp_v4_send_ack(struct net *
- 	if (oif)
+@@ -797,6 +801,7 @@ static void tcp_v4_send_ack(const struct
  		arg.bound_dev_if = oif;
  	arg.tos = tos;
+ 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 +	local_lock(tcp_sk_lock);
  	local_bh_disable();
  	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
  			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -787,6 +793,7 @@ static void tcp_v4_send_ack(struct net *
+@@ -805,6 +810,7 @@ static void tcp_v4_send_ack(const struct
  
  	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
  	local_bh_enable();
diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
index 792b1b6..3042f9d 100644
--- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 26 Sep 2012 16:21:08 +0200
 Subject: net: Another local_irq_disable/kmalloc headache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Replace it with a local lock, though that's pretty inefficient :(
 
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #include <net/protocol.h>
  #include <net/dst.h>
-@@ -360,6 +361,7 @@ struct napi_alloc_cache {
+@@ -359,6 +360,7 @@ struct napi_alloc_cache {
  
  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
  static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -28,20 +28,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
-@@ -367,10 +369,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
  	unsigned long flags;
  	void *data;
  
 -	local_irq_save(flags);
 +	local_lock_irqsave(netdev_alloc_lock, flags);
  	nc = this_cpu_ptr(&netdev_alloc_cache);
- 	data = __alloc_page_frag(nc, fragsz, gfp_mask);
+ 	data = page_frag_alloc(nc, fragsz, gfp_mask);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(netdev_alloc_lock, flags);
  	return data;
  }
  
-@@ -438,13 +440,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
  	if (sk_memalloc_socks())
  		gfp_mask |= __GFP_MEMALLOC;
  
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	local_lock_irqsave(netdev_alloc_lock, flags);
  
  	nc = this_cpu_ptr(&netdev_alloc_cache);
- 	data = __alloc_page_frag(nc, len, gfp_mask);
+ 	data = page_frag_alloc(nc, len, gfp_mask);
  	pfmemalloc = nc->pfmemalloc;
  
 -	local_irq_restore(flags);
diff --git a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 963fea9..53ee705 100644
--- a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -1,7 +1,7 @@
 Subject: net/core/cpuhotplug: Drain input_pkt_queue lockless
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 9 Oct 2015 09:25:49 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 I constantly see the error report below with the 4.1 RT kernel on TI ARM
 dra7-evm when trying to unplug cpu1:
@@ -36,7 +36,7 @@ Cc: stable-rt at vger.kernel.org
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -8046,7 +8046,7 @@ static int dev_cpu_callback(struct notif
+@@ -8093,7 +8093,7 @@ static int dev_cpu_dead(unsigned int old
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
diff --git a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 107aa07..3dfdc79 100644
--- a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 15 Jan 2016 16:33:34 +0100
 Subject: net/core: protect users of napi_alloc_cache against
  reentrance
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On -RT the code running in BH cannot be moved to another CPU, so CPU-local
 variables remain local. However the code can be preempted
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
-@@ -362,6 +362,7 @@ struct napi_alloc_cache {
+@@ -361,6 +361,7 @@ struct napi_alloc_cache {
  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
  static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
  static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
-@@ -391,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
  
  static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  {
@@ -34,15 +34,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	struct napi_alloc_cache *nc;
 +	void *data;
  
--	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 +	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-+	data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
++	data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
 +	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 +	return data;
  }
  
  void *napi_alloc_frag(unsigned int fragsz)
-@@ -487,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
  struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
  				 gfp_t gfp_mask)
  {
@@ -54,18 +54,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	len += NET_SKB_PAD + NET_IP_ALIGN;
  
-@@ -507,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
  	if (sk_memalloc_socks())
  		gfp_mask |= __GFP_MEMALLOC;
  
 +	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- 	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+ 	data = page_frag_alloc(&nc->page, len, gfp_mask);
 +	pfmemalloc = nc->page.pfmemalloc;
 +	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
  	if (unlikely(!data))
  		return NULL;
  
-@@ -518,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
  	}
  
  	/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		skb->pfmemalloc = 1;
  	skb->head_frag = 1;
  
-@@ -762,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
+@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
  
  void __kfree_skb_flush(void)
  {
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	/* record skb to CPU local list */
  	nc->skb_cache[nc->skb_count++] = skb;
  
-@@ -793,6 +805,7 @@ static inline void _kfree_skb_defer(stru
+@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
  				     nc->skb_cache);
  		nc->skb_count = 0;
  	}
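
The get_locked_var()/put_locked_var() pattern used throughout this patch
replaces a bare this_cpu_ptr() access. A minimal sketch of the idea,
assuming the -RT locallock primitives; the cache and lock names below are
illustrative, not taken from the patch:

    static DEFINE_PER_CPU(struct page_frag_cache, frag_cache);
    static DEFINE_LOCAL_IRQ_LOCK(frag_cache_lock);

    static void *frag_alloc(unsigned int fragsz, gfp_t gfp_mask)
    {
            struct page_frag_cache *nc;
            void *data;

            /* On -RT this takes a per-CPU sleeping lock, which also
             * pins the task to the CPU; on !RT it reduces to the usual
             * get_cpu_var()-style preemption disabling.
             */
            nc = &get_locked_var(frag_cache_lock, frag_cache);
            data = page_frag_alloc(nc, fragsz, gfp_mask);
            put_locked_var(frag_cache_lock, frag_cache);
            return data;
    }

This keeps the per-CPU cache consistent even when the allocation path is
preempted midway by another task on the same CPU.
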
diff --git a/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch b/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
new file mode 100644
index 0000000..cb83163
--- /dev/null
+++ b/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Mon, 22 May 2017 21:08:08 +0200
+Subject: net/core: remove explicit do_softirq() from busy_poll_stop()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+Since commit 217f69743681 ("net: busy-poll: allow preemption in
+sk_busy_loop()") there is an explicit do_softirq() invocation after
+local_bh_enable() has been invoked.
+I don't understand why we need this because local_bh_enable() will
+invoke do_softirq() once the softirq counter reaches zero and we have
+softirq-related work pending.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ net/core/dev.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5059,8 +5059,6 @@ static void busy_poll_stop(struct napi_s
+ 	if (rc == BUSY_POLL_BUDGET)
+ 		__napi_schedule(napi);
+ 	local_bh_enable();
+-	if (local_softirq_pending())
+-		do_softirq();
+ }
+ 
+ bool sk_busy_loop(struct sock *sk, int nonblock)
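
The claim about local_bh_enable() can be checked against the shape of
mainline __local_bh_enable_ip(); roughly (a simplified sketch, not the
exact code):

    static void local_bh_enable_sketch(void)
    {
            /* drop the SOFTIRQ_DISABLE count, keeping preemption off
             * for the final check
             */
            preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

            /* BH count just reached zero: run anything pending */
            if (!in_interrupt() && local_softirq_pending())
                    do_softirq();

            preempt_count_dec();
            preempt_check_resched();
    }

So the explicit do_softirq() removed by the patch duplicates work that the
preceding local_bh_enable() already does.
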
diff --git a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 23a4a6e..931a9f2 100644
--- a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 30 Mar 2016 13:36:29 +0200
 Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The root-lock is dropped before dev_hard_start_xmit() is invoked and after
 setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3102,7 +3102,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3077,7 +3077,11 @@ static inline int __dev_xmit_skb(struct
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
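
The + lines elided by the context above make the contended path
unconditional on RT; schematically (a sketch of the resulting shape, not
the literal hunk):

    #ifdef CONFIG_PREEMPT_RT_FULL
            /* The qdisc->running owner may be preempted on RT; always
             * queue on the busylock so waiters can boost the owner.
             */
            bool contended = true;
    #else
            bool contended = qdisc_is_running(q);
    #endif
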
diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 302eb0a..a9bfc17 100644
--- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -1,7 +1,7 @@
 Subject: net: netfilter: Serialize xt_write_recseq sections on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 11:18:08 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The netfilter code relies only on the implicit semantics of
 local_bh_disable() for serializing xt_write_recseq sections. RT breaks
@@ -17,15 +17,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/netfilter/x_tables.h
 +++ b/include/linux/netfilter/x_tables.h
-@@ -4,6 +4,7 @@
- 
+@@ -5,6 +5,7 @@
  #include <linux/netdevice.h>
  #include <linux/static_key.h>
+ #include <linux/netfilter.h>
 +#include <linux/locallock.h>
  #include <uapi/linux/netfilter/x_tables.h>
  
  /* Test a struct->invflags and a boolean for inequality */
-@@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_
+@@ -337,6 +338,8 @@ void xt_free_table_info(struct xt_table_
   */
  DECLARE_PER_CPU(seqcount_t, xt_recseq);
  
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* xt_tee_enabled - true if x_tables needs to handle reentrancy
   *
   * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -320,6 +323,9 @@ static inline unsigned int xt_write_recs
+@@ -357,6 +360,9 @@ static inline unsigned int xt_write_recs
  {
  	unsigned int addend;
  
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Low order bit of sequence is set if we already
  	 * called xt_write_recseq_begin().
-@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(u
+@@ -387,6 +393,7 @@ static inline void xt_write_recseq_end(u
  	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
  	smp_wmb();
  	__this_cpu_add(xt_recseq.sequence, addend);
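
Pieced together, the write side this patch ends up with looks roughly like
the following (a sketch; the added lines are partly elided by the context
diff above):

    DECLARE_PER_CPU(seqcount_t, xt_recseq);
    DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);

    static inline unsigned int xt_write_recseq_begin(void)
    {
            unsigned int addend;

            /* serialize preemptible writers on the same CPU */
            local_lock(xt_write_lock);

            /* low order bit is set if xt_write_recseq_begin() already ran */
            addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
            __this_cpu_add(xt_recseq.sequence, addend);
            smp_wmb();
            return addend;
    }

    static inline void xt_write_recseq_end(unsigned int addend)
    {
            /* kind of a write_seqcount_end(), but addend is 0 or 1 */
            smp_wmb();
            __this_cpu_add(xt_recseq.sequence, addend);
            local_unlock(xt_write_lock);
    }
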
diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
index 0fb3aa6..3adf2a0 100644
--- a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
+++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Mar 2013 18:06:20 +0100
 Subject: net: Add a mutex around devnet_rename_seq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On RT write_seqcount_begin() disables preemption and device_rename()
 allocates memory with GFP_KERNEL and later grabs the sysfs_mutex
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -189,6 +189,7 @@ static unsigned int napi_gen_id = NR_CPU
  static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
  
  static seqcount_t devnet_rename_seq;
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static inline void dev_base_seq_inc(struct net *net)
  {
-@@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, cha
+@@ -889,7 +890,8 @@ int netdev_get_name(struct net *net, cha
  	strcpy(name, dev->name);
  	rcu_read_unlock();
  	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		goto retry;
  	}
  
-@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *d
+@@ -1158,20 +1160,17 @@ int dev_change_name(struct net_device *d
  	if (dev->flags & IFF_UP)
  		return -EBUSY;
  
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (oldname[0] && !strchr(oldname, '%'))
  		netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *d
+@@ -1184,11 +1183,12 @@ int dev_change_name(struct net_device *d
  	if (ret) {
  		memcpy(dev->name, oldname, IFNAMSIZ);
  		dev->name_assign_type = old_assign_type;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	netdev_adjacent_rename_links(dev, oldname);
  
-@@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *d
+@@ -1209,7 +1209,8 @@ int dev_change_name(struct net_device *d
  		/* err >= 0 after dev_alloc_name() or stores the first errno */
  		if (err >= 0) {
  			err = ret;
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			memcpy(dev->name, oldname, IFNAMSIZ);
  			memcpy(oldname, newname, IFNAMSIZ);
  			dev->name_assign_type = old_assign_type;
-@@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *d
+@@ -1222,6 +1223,11 @@ int dev_change_name(struct net_device *d
  	}
  
  	return err;
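
The reader-side + lines hidden above replace the cond_resched()-and-retry
spin with a sleep on the new mutex; the shape is roughly:

    if (read_seqcount_retry(&devnet_rename_seq, seq)) {
            /* a rename is in flight: sleep until the writer is done,
             * then retry the lockless read
             */
            mutex_lock(&devnet_rename_mutex);
            mutex_unlock(&devnet_rename_mutex);
            goto retry;
    }
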
diff --git a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 678b0fc..31916b1 100644
--- a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 13 Jan 2016 15:55:02 +0100
 Subject: net: move xmit_recursion to per-task variable on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 A softirq on -RT can be preempted. That means one task is in
 __dev_queue_xmit(), gets preempted and another task may also enter
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2463,14 +2463,53 @@ void netdev_freemem(struct net_device *d
+@@ -2427,14 +2427,53 @@ void netdev_freemem(struct net_device *d
  void synchronize_net(void);
  int init_dummy_netdev(struct net_device *dev);
  
@@ -81,19 +81,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1989,6 +1989,9 @@ struct task_struct {
+@@ -1065,6 +1065,9 @@ struct task_struct {
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- 	unsigned long	task_state_change;
+ 	unsigned long			task_state_change;
  #endif
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+	int xmit_recursion;
++	int				xmit_recursion;
 +#endif
- 	int pagefault_disabled;
+ 	int				pagefault_disabled;
  #ifdef CONFIG_MMU
- 	struct task_struct *oom_reaper_list;
+ 	struct task_struct		*oom_reaper_list;
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3165,8 +3165,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3140,8 +3140,10 @@ static void skb_update_prio(struct sk_bu
  #define skb_update_prio(skb)
  #endif
  
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  /**
   *	dev_loopback_xmit - loop back @skb
-@@ -3400,8 +3402,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3381,8 +3383,7 @@ static int __dev_queue_xmit(struct sk_bu
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev);
-@@ -3411,9 +3412,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3392,9 +3393,9 @@ static int __dev_queue_xmit(struct sk_bu
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  					goto out;
 --- a/net/core/filter.c
 +++ b/net/core/filter.c
-@@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1652,7 +1652,7 @@ static inline int __bpf_tx_skb(struct ne
  {
  	int ret;
  
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  		kfree_skb(skb);
  		return -ENETDOWN;
-@@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1660,9 +1660,9 @@ static inline int __bpf_tx_skb(struct ne
  
  	skb->dev = dev;
  
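
The netdevice.h hunk wraps the recursion counter behind small helpers so
it lives in task_struct on RT and stays per-CPU otherwise; an illustrative
sketch (the helper names here are assumptions, not copied from the patch):

    #ifdef CONFIG_PREEMPT_RT_FULL
    /* a preempted softirq keeps its own recursion depth */
    static inline void xmit_rec_inc(void)  { current->xmit_recursion++; }
    static inline void xmit_rec_dec(void)  { current->xmit_recursion--; }
    static inline int  xmit_rec_read(void) { return current->xmit_recursion; }
    #else
    DECLARE_PER_CPU(int, xmit_recursion);
    static inline void xmit_rec_inc(void)  { __this_cpu_inc(xmit_recursion); }
    static inline void xmit_rec_dec(void)  { __this_cpu_dec(xmit_recursion); }
    static inline int  xmit_rec_read(void) { return __this_cpu_read(xmit_recursion); }
    #endif
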
diff --git a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
index 5a3442e..302fb74 100644
--- a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
+++ b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
@@ -1,7 +1,7 @@
 Subject: net-flip-lock-dep-thingy.patch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 28 Jun 2011 10:59:58 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 =======================================================
 [ INFO: possible circular locking dependency detected ]
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/sock.c
 +++ b/net/core/sock.c
-@@ -2499,12 +2499,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2541,12 +2541,11 @@ void lock_sock_nested(struct sock *sk, i
  	if (sk->sk_lock.owned)
  		__lock_sock(sk);
  	sk->sk_lock.owned = 1;
diff --git a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index dac816e..4fea66c 100644
--- a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 15:39:05 +0100
 Subject: net: provide a way to delegate processing a softirq to
  ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If NET_RX uses up all of its budget it moves the following NAPI
 invocations into the `ksoftirqd`. On -RT it does not do so. Instead it
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -496,6 +496,14 @@ extern void thread_do_softirq(void);
+@@ -508,6 +508,14 @@ extern void thread_do_softirq(void);
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
  extern void __raise_softirq_irqoff(unsigned int nr);
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  extern void raise_softirq(unsigned int nr);
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -685,6 +685,27 @@ void __raise_softirq_irqoff(unsigned int
+@@ -686,6 +686,27 @@ void __raise_softirq_irqoff(unsigned int
  }
  
  /*
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  void raise_softirq_irqoff(unsigned int nr)
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -5279,7 +5279,7 @@ static __latent_entropy void net_rx_acti
+@@ -5367,7 +5367,7 @@ static __latent_entropy void net_rx_acti
  	list_splice_tail(&repoll, &list);
  	list_splice(&list, &sd->poll_list);
  	if (!list_empty(&sd->poll_list))
@@ -76,4 +76,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		__raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
  
  	net_rps_action_and_irq_enable(sd);
- }
+ out:
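
The new helper's job is to mark the softirq pending without processing it
inline; schematically (a sketch, not the exact hunk body):

    void __raise_softirq_irqoff_ksoft(unsigned int nr)
    {
            /* set the pending bit ... */
            or_softirq_pending(1UL << nr);
            /* ... but let the per-CPU ksoftirqd thread handle it */
            wakeup_softirqd();
    }
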
diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 3931a53..de82228 100644
--- a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -1,7 +1,7 @@
 From: Marc Kleine-Budde <mkl at pengutronix.de>
 Date: Wed, 5 Mar 2014 00:49:47 +0100
 Subject: net: sched: Use msleep() instead of yield()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On PREEMPT_RT enabled systems the interrupt handlers run as threads at prio 50
 (by default). If a high priority userspace process tries to shut down a busy
diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch
index 4608db7..364d74e 100644
--- a/debian/patches/features/all/rt/net-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: net: Use cpu_chill() instead of cpu_relax()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:10:04 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #include <linux/kmod.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
-@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -702,7 +703,7 @@ static void prb_retire_rx_blk_timer_expi
  	if (BLOCK_NUM_PKTS(pbd)) {
  		while (atomic_read(&pkc->blk_fill_in_prog)) {
  			/* Waiting for skb_copy_bits to finish... */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	}
  
-@@ -956,7 +957,7 @@ static void prb_retire_current_block(str
+@@ -964,7 +965,7 @@ static void prb_retire_current_block(str
  		if (!(status & TP_STATUS_BLK_TMO)) {
  			while (atomic_read(&pkc->blk_fill_in_prog)) {
  				/* Waiting for skb_copy_bits to finish... */
diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
index 4925333..f8fe162 100644
--- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch
+++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
@@ -1,7 +1,7 @@
 Subject: net/wireless: Use WARN_ON_NORT()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 21 Jul 2011 21:05:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The softirq counter is meaningless on RT, so the check triggers a
 false positive.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/mac80211/rx.c
 +++ b/net/mac80211/rx.c
-@@ -4230,7 +4230,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4224,7 +4224,7 @@ void ieee80211_rx_napi(struct ieee80211_
  	struct ieee80211_supported_band *sband;
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  
diff --git a/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch b/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
new file mode 100644
index 0000000..72cc350
--- /dev/null
+++ b/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
@@ -0,0 +1,29 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: net/core: disable NET_RX_BUSY_POLL
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+sk_busy_loop() does preempt_disable() followed by a few operations which can
+take sleeping locks and may take a long time.
+I _think_ that we could use preempt_disable_nort() (in sk_busy_loop()) instead
+but after a successful cmpxchg(&napi->state, …) we would gain the resource
+and could be scheduled out. At this point nobody knows who (which context) owns
+it and so it could take a while until the state is released and napi_poll()
+could be invoked again.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ net/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -277,7 +277,7 @@ config CGROUP_NET_CLASSID
+ 
+ config NET_RX_BUSY_POLL
+ 	bool
+-	default y
++	default y if !PREEMPT_RT_FULL
+ 
+ config BQL
+ 	bool
diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
index 6b2e55d..2686c8c 100644
--- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
+++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
 From: Oleg Nesterov <oleg at redhat.com>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On x86_64 we must disable preemption before we enable interrupts
 for stack faults, int3 and debugging, because the current task is using
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -148,6 +148,13 @@ static void exit_to_usermode_loop(struct
+@@ -149,6 +149,13 @@ static void exit_to_usermode_loop(struct
  		if (cached_flags & _TIF_NEED_RESCHED)
  			schedule();
  
@@ -77,20 +77,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1700,6 +1700,10 @@ struct task_struct {
- 	sigset_t blocked, real_blocked;
- 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
- 	struct sigpending pending;
+@@ -760,6 +760,10 @@ struct task_struct {
+ 	/* Restored if set_restore_sigmask() was used: */
+ 	sigset_t			saved_sigmask;
+ 	struct sigpending		pending;
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +	/* TODO: move me into ->restart_block ? */
-+	struct siginfo forced_info;
++	struct				siginfo forced_info;
 +#endif
- 
- 	unsigned long sas_ss_sp;
- 	size_t sas_ss_size;
+ 	unsigned long			sas_ss_sp;
+ 	size_t				sas_ss_size;
+ 	unsigned int			sas_ss_flags;
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1216,8 +1216,8 @@ int do_send_sig_info(int sig, struct sig
+@@ -1227,8 +1227,8 @@ int do_send_sig_info(int sig, struct sig
   * We don't want to have recursive SIGSEGV's etc, for example,
   * that is why we also clear SIGNAL_UNKILLABLE.
   */
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	unsigned long int flags;
  	int ret, blocked, ignored;
-@@ -1242,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *
+@@ -1253,6 +1253,39 @@ force_sig_info(int sig, struct siginfo *
  	return ret;
  }
  
diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
index 5180da5..945f34f 100644
--- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
+++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Disable on -RT. If this is invoked from irq context we will have problems
 acquiring the sleeping lock.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/panic.c
 +++ b/kernel/panic.c
-@@ -482,9 +482,11 @@ static u64 oops_id;
+@@ -481,9 +481,11 @@ static u64 oops_id;
  
  static int init_oops_id(void)
  {
diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 991f13c..abe7d33 100644
--- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Make ksoftirqd do RCU quiescent states
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Wed, 5 Oct 2011 11:45:18 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
 to network-based denial-of-service attacks.  This patch therefore
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -343,11 +343,7 @@ static inline int rcu_preempt_depth(void
+@@ -303,11 +303,7 @@ static inline int rcu_preempt_depth(void
  /* Internal to kernel */
  void rcu_init(void);
  void rcu_sched_qs(void);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_cpu_starting(unsigned int cpu);
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -260,7 +260,14 @@ void rcu_sched_qs(void)
+@@ -262,7 +262,14 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
@@ -63,15 +63,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
 --- a/kernel/rcu/tree_plugin.h
 +++ b/kernel/rcu/tree_plugin.h
-@@ -28,6 +28,7 @@
- #include <linux/gfp.h>
+@@ -29,6 +29,7 @@
  #include <linux/oom.h>
+ #include <linux/sched/debug.h>
  #include <linux/smpboot.h>
 +#include <linux/jiffies.h>
+ #include <uapi/linux/sched/types.h>
  #include "../time/tick-internal.h"
  
- #ifdef CONFIG_RCU_BOOST
-@@ -1244,7 +1245,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1246,7 +1247,7 @@ static void rcu_prepare_kthreads(int cpu
  
  #endif /* #else #ifdef CONFIG_RCU_BOOST */
  
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Check to see if any future RCU-related work will need to be done
-@@ -1261,7 +1262,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1263,7 +1264,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
  	return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
  	       ? 0 : rcu_cpu_has_callbacks(NULL);
  }
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
   * after it.
-@@ -1357,6 +1360,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1359,6 +1362,8 @@ static bool __maybe_unused rcu_try_advan
  	return cbs_ready;
  }
  
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
   * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
-@@ -1402,6 +1407,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1404,6 +1409,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
  	*nextevt = basemono + dj * TICK_NSEC;
  	return 0;
  }
diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
deleted file mode 100644
index c0c6e89..0000000
--- a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access()
-From: Thomas Gleixner <tglx at linutronix.de>
-Date: Thu, 01 Dec 2011 00:07:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-The waitqueue is protected by the pci_lock, so we can just avoid to
-lock the waitqueue lock itself. That prevents the
-might_sleep()/scheduling while atomic problem on RT
-
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-
----
- drivers/pci/access.c |    2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/pci/access.c
-+++ b/drivers/pci/access.c
-@@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_de
- 	WARN_ON(!dev->block_cfg_access);
- 
- 	dev->block_cfg_access = 0;
--	wake_up_all(&pci_cfg_wait);
-+	wake_up_all_locked(&pci_cfg_wait);
- 	raw_spin_unlock_irqrestore(&pci_lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
index 1a97494..5f18de4 100644
--- a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
+++ b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 11:58:17 +0200
 Subject: percpu_ida: Use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The local_irq_save() + spin_lock() combination does not work well on -RT
 
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/lib/percpu_ida.c
 +++ b/lib/percpu_ida.c
-@@ -26,6 +26,9 @@
+@@ -27,6 +27,9 @@
  #include <linux/string.h>
  #include <linux/spinlock.h>
  #include <linux/percpu_ida.h>
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  struct percpu_ida_cpu {
  	/*
-@@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -149,13 +152,13 @@ int percpu_ida_alloc(struct percpu_ida *
  	unsigned long flags;
  	int tag;
  
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return tag;
  	}
  
-@@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -174,6 +177,7 @@ int percpu_ida_alloc(struct percpu_ida *
  
  		if (!tags->nr_free)
  			alloc_global_tags(pool, tags);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		if (!tags->nr_free)
  			steal_tags(pool, tags);
  
-@@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -185,7 +189,7 @@ int percpu_ida_alloc(struct percpu_ida *
  		}
  
  		spin_unlock(&pool->lock);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		if (tag >= 0 || state == TASK_RUNNING)
  			break;
-@@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -197,7 +201,7 @@ int percpu_ida_alloc(struct percpu_ida *
  
  		schedule();
  
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		tags = this_cpu_ptr(pool->tag_cpu);
  	}
  	if (state != TASK_RUNNING)
-@@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *
+@@ -222,7 +226,7 @@ void percpu_ida_free(struct percpu_ida *
  
  	BUG_ON(tag >= pool->nr_tags);
  
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	tags = this_cpu_ptr(pool->tag_cpu);
  
  	spin_lock(&tags->lock);
-@@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *
+@@ -254,7 +258,7 @@ void percpu_ida_free(struct percpu_ida *
  		spin_unlock(&pool->lock);
  	}
  
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(percpu_ida_free);
  
-@@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct perc
+@@ -346,7 +350,7 @@ int percpu_ida_for_each_free(struct perc
  	struct percpu_ida_cpu *remote;
  	unsigned cpu, i, err = 0;
  
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	for_each_possible_cpu(cpu) {
  		remote = per_cpu_ptr(pool->tag_cpu, cpu);
  		spin_lock(&remote->lock);
-@@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct perc
+@@ -368,7 +372,7 @@ int percpu_ida_for_each_free(struct perc
  	}
  	spin_unlock(&pool->lock);
  out:
diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
index a5e8ce2..cdc417b 100644
--- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang at windriver.com>
 Date: Wed, 11 Jul 2012 22:05:21 +0000
 Subject: perf: Make swevent hrtimer run in irq instead of softirq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Otherwise we get a deadlock like below:
 
@@ -59,7 +59,7 @@ Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -8363,6 +8363,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8495,6 +8495,7 @@ static void perf_swevent_init_hrtimer(st
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
index af2e5e1..22a47bb 100644
--- a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
+++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Frob softirq test
 From: Peter Zijlstra <a.p.zijlstra at chello.nl>
 Date: Sat Aug 13 00:23:17 CEST 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 With RT_FULL we get the below wreckage:
 
@@ -156,7 +156,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
 
 --- a/kernel/rcu/tree_plugin.h
 +++ b/kernel/rcu/tree_plugin.h
-@@ -426,7 +426,7 @@ void rcu_read_unlock_special(struct task
+@@ -428,7 +428,7 @@ void rcu_read_unlock_special(struct task
  	}
  
  	/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
index d872b13..e60899b 100644
--- a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
+++ b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
@@ -1,7 +1,7 @@
 Subject: locking/percpu-rwsem: Remove preempt_disable variants
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed Nov 23 16:29:32 CET 2016
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Effective revert commit:
 
diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
index 87c1f69..6cfd795 100644
--- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
+++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
@@ -1,7 +1,7 @@
 Subject: crypto: Convert crypto notifier chain to SRCU
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 05 Oct 2012 09:03:24 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The crypto notifier deadlocks on RT. Though this can be a real deadlock
 on mainline as well due to fifo fair rwsems.
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #ifdef CONFIG_PROC_FS
  void __init crypto_init_proc(void);
-@@ -146,7 +146,7 @@ static inline int crypto_is_moribund(str
+@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(str
  
  static inline void crypto_notify(unsigned long val, void *v)
  {
diff --git a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
index ae4fd7c..b1b03d2 100644
--- a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Grygorii Strashko <Grygorii.Strashko at linaro.org>
 Date: Tue, 21 Jul 2015 19:43:56 +0300
 Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This patch fixes build error:
   CC      kernel/pid_namespace.o
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 @@ -2,6 +2,7 @@
  #define _LINUX_PID_H
  
- #include <linux/rcupdate.h>
+ #include <linux/rculist.h>
 +#include <linux/atomic.h>
  
  enum pid_type
diff --git a/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch b/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
deleted file mode 100644
index 8119658..0000000
--- a/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
+++ /dev/null
@@ -1,253 +0,0 @@
-From: Julia Cartwright <julia at ni.com>
-Date: Fri, 20 Jan 2017 10:13:47 -0600
-Subject: [PATCH] pinctrl: qcom: Use raw spinlock variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-The MSM pinctrl driver currently implements an irq_chip for handling
-GPIO interrupts; due to how irq_chip handling is done, it's necessary
-for the irq_chip methods to be invoked from hardirq context, even on a
-a real-time kernel.  Because the spinlock_t type becomes a "sleeping"
-spinlock w/ RT kernels, it is not suitable to be used with irq_chips.
-
-A quick audit of the operations under the lock reveal that they do only
-minimal, bounded work, and are therefore safe to do under a raw
-spinlock.
-
-On real-time kernels, this fixes an OOPs which looks like the following,
-as reported by Brian Wrenn:
-
-    kernel BUG at kernel/locking/rtmutex.c:1014!
-    Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
-    Modules linked in: spidev_irq(O) smsc75xx wcn36xx [last unloaded: spidev]
-    CPU: 0 PID: 1163 Comm: irq/144-mmc0 Tainted: G        W  O    4.4.9-linaro-lt-qcom #1
-    PC is at rt_spin_lock_slowlock+0x80/0x2d8
-    LR is at rt_spin_lock_slowlock+0x68/0x2d8
-    [..]
-  Call trace:
-    rt_spin_lock_slowlock
-    rt_spin_lock
-    msm_gpio_irq_ack
-    handle_edge_irq
-    generic_handle_irq
-    msm_gpio_irq_handler
-    generic_handle_irq
-    __handle_domain_irq
-    gic_handle_irq
-
-Cc: stable-rt at vger.kernel.org
-Cc: Bjorn Andersson <bjorn.andersson at linaro.org>
-Reported-by: Brian Wrenn <dcbrianw at gmail.com>
-Tested-by: Brian Wrenn <dcbrianw at gmail.com>
-Signed-off-by: Julia Cartwright <julia at ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- drivers/pinctrl/qcom/pinctrl-msm.c |   48 ++++++++++++++++++-------------------
- 1 file changed, 24 insertions(+), 24 deletions(-)
-
---- a/drivers/pinctrl/qcom/pinctrl-msm.c
-+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
-@@ -61,7 +61,7 @@ struct msm_pinctrl {
- 	struct notifier_block restart_nb;
- 	int irq;
- 
--	spinlock_t lock;
-+	raw_spinlock_t lock;
- 
- 	DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
- 	DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
-@@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pin
- 	if (WARN_ON(i == g->nfuncs))
- 		return -EINVAL;
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->ctl_reg);
- 	val &= ~mask;
- 	val |= i << g->mux_bit;
- 	writel(val, pctrl->regs + g->ctl_reg);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 	return 0;
- }
-@@ -323,14 +323,14 @@ static int msm_config_group_set(struct p
- 			break;
- 		case PIN_CONFIG_OUTPUT:
- 			/* set output value */
--			spin_lock_irqsave(&pctrl->lock, flags);
-+			raw_spin_lock_irqsave(&pctrl->lock, flags);
- 			val = readl(pctrl->regs + g->io_reg);
- 			if (arg)
- 				val |= BIT(g->out_bit);
- 			else
- 				val &= ~BIT(g->out_bit);
- 			writel(val, pctrl->regs + g->io_reg);
--			spin_unlock_irqrestore(&pctrl->lock, flags);
-+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 			/* enable output */
- 			arg = 1;
-@@ -351,12 +351,12 @@ static int msm_config_group_set(struct p
- 			return -EINVAL;
- 		}
- 
--		spin_lock_irqsave(&pctrl->lock, flags);
-+		raw_spin_lock_irqsave(&pctrl->lock, flags);
- 		val = readl(pctrl->regs + g->ctl_reg);
- 		val &= ~(mask << bit);
- 		val |= arg << bit;
- 		writel(val, pctrl->regs + g->ctl_reg);
--		spin_unlock_irqrestore(&pctrl->lock, flags);
-+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 	}
- 
- 	return 0;
-@@ -384,13 +384,13 @@ static int msm_gpio_direction_input(stru
- 
- 	g = &pctrl->soc->groups[offset];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->ctl_reg);
- 	val &= ~BIT(g->oe_bit);
- 	writel(val, pctrl->regs + g->ctl_reg);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 	return 0;
- }
-@@ -404,7 +404,7 @@ static int msm_gpio_direction_output(str
- 
- 	g = &pctrl->soc->groups[offset];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->io_reg);
- 	if (value)
-@@ -417,7 +417,7 @@ static int msm_gpio_direction_output(str
- 	val |= BIT(g->oe_bit);
- 	writel(val, pctrl->regs + g->ctl_reg);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 	return 0;
- }
-@@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chi
- 
- 	g = &pctrl->soc->groups[offset];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->io_reg);
- 	if (value)
-@@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chi
- 		val &= ~BIT(g->out_bit);
- 	writel(val, pctrl->regs + g->io_reg);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
- 
- #ifdef CONFIG_DEBUG_FS
-@@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq
- 
- 	g = &pctrl->soc->groups[d->hwirq];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->intr_cfg_reg);
- 	val &= ~BIT(g->intr_enable_bit);
-@@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq
- 
- 	clear_bit(d->hwirq, pctrl->enabled_irqs);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
- 
- static void msm_gpio_irq_unmask(struct irq_data *d)
-@@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct i
- 
- 	g = &pctrl->soc->groups[d->hwirq];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->intr_cfg_reg);
- 	val |= BIT(g->intr_enable_bit);
-@@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct i
- 
- 	set_bit(d->hwirq, pctrl->enabled_irqs);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
- 
- static void msm_gpio_irq_ack(struct irq_data *d)
-@@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_
- 
- 	g = &pctrl->soc->groups[d->hwirq];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	val = readl(pctrl->regs + g->intr_status_reg);
- 	if (g->intr_ack_high)
-@@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_
- 	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- 		msm_gpio_update_dual_edge_pos(pctrl, g, d);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
- 
- static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-@@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct
- 
- 	g = &pctrl->soc->groups[d->hwirq];
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	/*
- 	 * For hw without possibility of detecting both edges
-@@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct
- 	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- 		msm_gpio_update_dual_edge_pos(pctrl, g, d);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- 		irq_set_handler_locked(d, handle_level_irq);
-@@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct
- 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- 	unsigned long flags;
- 
--	spin_lock_irqsave(&pctrl->lock, flags);
-+	raw_spin_lock_irqsave(&pctrl->lock, flags);
- 
- 	irq_set_irq_wake(pctrl->irq, on);
- 
--	spin_unlock_irqrestore(&pctrl->lock, flags);
-+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- 
- 	return 0;
- }
-@@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_de
- 	pctrl->soc = soc_data;
- 	pctrl->chip = msm_gpio_template;
- 
--	spin_lock_init(&pctrl->lock);
-+	raw_spin_lock_init(&pctrl->lock);
- 
- 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
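
The deletion is expected once a fix like this lands upstream; the rule it
encoded still applies generally: irq_chip callbacks run in hardirq context
even on RT, so a lock taken there must stay a true spinning lock. In
sketch form:

    struct chip_data {
            raw_spinlock_t lock;    /* never becomes a sleeping lock on RT */
    };

    static void chip_irq_ack(struct irq_data *d)
    {
            struct chip_data *c = irq_data_get_irq_chip_data(d);
            unsigned long flags;

            /* short, bounded register update: safe under a raw lock */
            raw_spin_lock_irqsave(&c->lock, flags);
            /* ack the interrupt in hardware here */
            raw_spin_unlock_irqrestore(&c->lock, flags);
    }

This is safe only because the critical sections do minimal, bounded work,
as the audit in the original patch description notes.
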
diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch
index 60f6f7a..779897c 100644
--- a/debian/patches/features/all/rt/ping-sysrq.patch
+++ b/debian/patches/features/all/rt/ping-sysrq.patch
@@ -1,7 +1,7 @@
 Subject: net: sysrq via icmp
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 13:51:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There are (probably rare) situations when a system has crashed and the system
 console becomes unresponsive but the network ICMP layer is still alive.
@@ -13,37 +13,36 @@ Documentation/sysrq.txt for details.
 Signed-off-by: Carsten Emde <C.Emde at osadl.org>
 
 ---
- Documentation/sysrq.txt    |   11 +++++++++--
- include/net/netns/ipv4.h   |    1 +
- net/ipv4/icmp.c            |   30 ++++++++++++++++++++++++++++++
- net/ipv4/sysctl_net_ipv4.c |    7 +++++++
- 4 files changed, 47 insertions(+), 2 deletions(-)
+ Documentation/admin-guide/sysrq.rst |   12 ++++++++++++
+ include/net/netns/ipv4.h            |    1 +
+ net/ipv4/icmp.c                     |   30 ++++++++++++++++++++++++++++++
+ net/ipv4/sysctl_net_ipv4.c          |    7 +++++++
+ 4 files changed, 50 insertions(+)
 
---- a/Documentation/sysrq.txt
-+++ b/Documentation/sysrq.txt
-@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (
- On other - If you know of the key combos for other architectures, please
-            let me know so I can add them to this section.
+--- a/Documentation/admin-guide/sysrq.rst
++++ b/Documentation/admin-guide/sysrq.rst
+@@ -77,6 +77,18 @@ On all
  
--On all -  write a character to /proc/sysrq-trigger.  e.g.:
--
-+On all -  write a character to /proc/sysrq-trigger, e.g.:
  		echo t > /proc/sysrq-trigger
  
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+	 Send an ICMP echo request with this pattern plus the particular
-+	 SysRq command key. Example:
-+		# ping -c1 -s57 -p0102030468
-+	 will trigger the SysRq-H (help) command.
++On all
++        Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.::
 +
++                echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
 +
- *  What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b'     - Will immediately reboot the system without syncing or unmounting
++        Send an ICMP echo request with this pattern plus the particular
++        SysRq command key. Example::
++
++                ping -c1 -s57 -p0102030468
++
++        will trigger the SysRq-H (help) command.
++
+ What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
 --- a/include/net/netns/ipv4.h
 +++ b/include/net/netns/ipv4.h
-@@ -69,6 +69,7 @@ struct netns_ipv4 {
+@@ -79,6 +79,7 @@ struct netns_ipv4 {
  
  	int sysctl_icmp_echo_ignore_all;
  	int sysctl_icmp_echo_ignore_broadcasts;
@@ -61,7 +60,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
  #include <linux/socket.h>
  #include <linux/in.h>
  #include <linux/inet.h>
-@@ -899,6 +900,30 @@ static bool icmp_redirect(struct sk_buff
+@@ -927,6 +928,30 @@ static bool icmp_redirect(struct sk_buff
  }
  
  /*
@@ -92,7 +91,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
   *	Handle ICMP_ECHO ("ping") requests.
   *
   *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -925,6 +950,11 @@ static bool icmp_echo(struct sk_buff *sk
+@@ -953,6 +978,11 @@ static bool icmp_echo(struct sk_buff *sk
  		icmp_param.data_len	   = skb->len;
  		icmp_param.head_len	   = sizeof(struct icmphdr);
  		icmp_reply(&icmp_param, skb);
@@ -106,7 +105,7 @@ Signed-off-by: Carsten Emde <C.Emde at osadl.org>
  	return true;
 --- a/net/ipv4/sysctl_net_ipv4.c
 +++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[]
+@@ -687,6 +687,13 @@ static struct ctl_table ipv4_net_table[]
  		.proc_handler	= proc_dointvec
  	},
  	{
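
For clarity, the -p pattern in the example above concatenates the four
cookie bytes (01 02 03 04) with the ASCII code of the SysRq command key;
0x68 is 'h', so the ping triggers SysRq-H. Substituting another key byte
selects a different SysRq command.
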
diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
index 550871f..e3a50ec 100644
--- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
+++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:29:20 -0500
 Subject: posix-timers: Prevent broadcast signals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Posix timers should not send broadcast signals or kernel-only
 signals. Prevent it.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/time/posix-timers.c
 +++ b/kernel/time/posix-timers.c
-@@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_
+@@ -507,6 +507,7 @@ static enum hrtimer_restart posix_timer_
  static struct pid *good_sigevent(sigevent_t * event)
  {
  	struct task_struct *rtn = current->group_leader;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
  		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigeven
+@@ -515,7 +516,8 @@ static struct pid *good_sigevent(sigeven
  		return NULL;
  
  	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 91a0f27..a26e8ee 100644
--- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -1,7 +1,7 @@
 From: John Stultz <johnstul at us.ibm.com>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: posix-timers: Thread posix-cpu-timers on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 posix-cpu-timer code takes non-rt-safe locks in hard irq
 context. Move it to a thread.
@@ -15,12 +15,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  include/linux/init_task.h      |    7 +
  include/linux/sched.h          |    3 
  kernel/fork.c                  |    3 
- kernel/time/posix-cpu-timers.c |  193 ++++++++++++++++++++++++++++++++++++++++-
- 4 files changed, 202 insertions(+), 4 deletions(-)
+ kernel/time/posix-cpu-timers.c |  157 +++++++++++++++++++++++++++++++++++++++--
+ 4 files changed, 166 insertions(+), 4 deletions(-)
 
 --- a/include/linux/init_task.h
 +++ b/include/linux/init_task.h
-@@ -150,6 +150,12 @@ extern struct task_group root_task_group
+@@ -167,6 +167,12 @@ extern struct cred init_cred;
  # define INIT_PERF_EVENTS(tsk)
  #endif
  
@@ -33,8 +33,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
  # define INIT_VTIME(tsk)						\
  	.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),	\
-@@ -251,6 +257,7 @@ extern struct task_group root_task_group
- 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
+@@ -269,6 +275,7 @@ extern struct cred init_cred;
+ 	INIT_CPU_TIMERS(tsk)						\
  	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
  	.timer_slack_ns = 50000, /* 50 usec default slack */		\
 +	INIT_TIMER_LIST							\
@@ -43,19 +43,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1664,6 +1664,9 @@ struct task_struct {
- 
- 	struct task_cputime cputime_expires;
- 	struct list_head cpu_timers[3];
+@@ -710,6 +710,9 @@ struct task_struct {
+ #ifdef CONFIG_POSIX_TIMERS
+ 	struct task_cputime		cputime_expires;
+ 	struct list_head		cpu_timers[3];
 +#ifdef CONFIG_PREEMPT_RT_BASE
-+	struct task_struct *posix_timer_list;
++	struct task_struct		*posix_timer_list;
 +#endif
+ #endif
  
- /* process credentials */
- 	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
+ 	/* Process credentials: */
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1427,6 +1427,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1451,6 +1451,9 @@ static void rt_mutex_init_task(struct ta
   */
  static void posix_cpu_timers_init(struct task_struct *tsk)
  {
@@ -67,15 +67,26 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	tsk->cputime_expires.sched_exp = 0;
 --- a/kernel/time/posix-cpu-timers.c
 +++ b/kernel/time/posix-cpu-timers.c
-@@ -3,6 +3,7 @@
+@@ -2,8 +2,10 @@
+  * Implement CPU time clocks for the POSIX clock interface.
   */
  
- #include <linux/sched.h>
++#include <uapi/linux/sched/types.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/cputime.h>
 +#include <linux/sched/rt.h>
  #include <linux/posix-timers.h>
  #include <linux/errno.h>
  #include <linux/math64.h>
-@@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_
+@@ -12,6 +14,7 @@
+ #include <trace/events/timer.h>
+ #include <linux/tick.h>
+ #include <linux/workqueue.h>
++#include <linux/smpboot.h>
+ 
+ /*
+  * Called after updating RLIMIT_CPU to run cpu timer and update
+@@ -590,7 +593,7 @@ static int posix_cpu_timer_set(struct k_
  	/*
  	 * Disarm any old timer after extracting its expiry time.
  	 */
@@ -84,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	ret = 0;
  	old_incr = timer->it.cpu.incr;
-@@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1014,7 +1017,7 @@ void posix_cpu_timer_schedule(struct k_i
  	/*
  	 * Now re-arm for the new expiry time.
  	 */
@@ -93,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	arm_timer(timer);
  	unlock_task_sighand(p, &flags);
  
-@@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(s
+@@ -1103,13 +1106,13 @@ static inline int fastpath_timer_check(s
   * already updated our counts.  We need to check if any timers fire now.
   * Interrupts are disabled.
   */
@@ -109,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * The fast path checks that there are no expired thread or thread
-@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1163,6 +1166,152 @@ void run_posix_cpu_timers(struct task_st
  	}
  }
  
@@ -118,63 +129,42 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#include <linux/cpu.h>
 +DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
 +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++DEFINE_PER_CPU(bool, posix_timer_th_active);
 +
-+static int posix_cpu_timers_thread(void *data)
++static void posix_cpu_kthread_fn(unsigned int cpu)
 +{
-+	int cpu = (long)data;
++	struct task_struct *tsk = NULL;
++	struct task_struct *next = NULL;
 +
-+	BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++	BUG_ON(per_cpu(posix_timer_task, cpu) != current);
 +
-+	while (!kthread_should_stop()) {
-+		struct task_struct *tsk = NULL;
-+		struct task_struct *next = NULL;
++	/* grab task list */
++	raw_local_irq_disable();
++	tsk = per_cpu(posix_timer_tasklist, cpu);
++	per_cpu(posix_timer_tasklist, cpu) = NULL;
++	raw_local_irq_enable();
 +
-+		if (cpu_is_offline(cpu))
-+			goto wait_to_die;
-+
-+		/* grab task list */
-+		raw_local_irq_disable();
-+		tsk = per_cpu(posix_timer_tasklist, cpu);
-+		per_cpu(posix_timer_tasklist, cpu) = NULL;
-+		raw_local_irq_enable();
-+
-+		/* its possible the list is empty, just return */
-+		if (!tsk) {
-+			set_current_state(TASK_INTERRUPTIBLE);
-+			schedule();
-+			__set_current_state(TASK_RUNNING);
-+			continue;
-+		}
++	/* it's possible the list is empty, just return */
++	if (!tsk)
++		return;
 +
-+		/* Process task list */
-+		while (1) {
-+			/* save next */
-+			next = tsk->posix_timer_list;
++	/* Process task list */
++	while (1) {
++		/* save next */
++		next = tsk->posix_timer_list;
 +
-+			/* run the task timers, clear its ptr and
-+			 * unreference it
-+			 */
-+			__run_posix_cpu_timers(tsk);
-+			tsk->posix_timer_list = NULL;
-+			put_task_struct(tsk);
++		/* run the task timers, clear its ptr and
++		 * unreference it
++		 */
++		__run_posix_cpu_timers(tsk);
++		tsk->posix_timer_list = NULL;
++		put_task_struct(tsk);
 +
-+			/* check if this is the last on the list */
-+			if (next == tsk)
-+				break;
-+			tsk = next;
-+		}
++		/* check if this is the last on the list */
++		if (next == tsk)
++			break;
++		tsk = next;
 +	}
-+	return 0;
-+
-+wait_to_die:
-+	/* Wait for kthread_stop */
-+	set_current_state(TASK_INTERRUPTIBLE);
-+	while (!kthread_should_stop()) {
-+		schedule();
-+		set_current_state(TASK_INTERRUPTIBLE);
-+	}
-+	__set_current_state(TASK_RUNNING);
-+	return 0;
 +}
 +
 +static inline int __fastpath_timer_check(struct task_struct *tsk)
@@ -194,12 +184,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
 +void run_posix_cpu_timers(struct task_struct *tsk)
 +{
-+	unsigned long cpu = smp_processor_id();
++	unsigned int cpu = smp_processor_id();
 +	struct task_struct *tasklist;
 +
 +	BUG_ON(!irqs_disabled());
-+	if(!per_cpu(posix_timer_task, cpu))
++
++	if (per_cpu(posix_timer_th_active, cpu) != true)
 +		return;
++
 +	/* get per-cpu references */
 +	tasklist = per_cpu(posix_timer_tasklist, cpu);
 +
@@ -221,72 +213,53 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	}
 +}
 +
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+				 unsigned long action, void *hcpu)
++static int posix_cpu_kthread_should_run(unsigned int cpu)
 +{
-+	int cpu = (long)hcpu;
-+	struct task_struct *p;
-+	struct sched_param param;
++	return __this_cpu_read(posix_timer_tasklist) != NULL;
++}
 +
-+	switch (action) {
-+	case CPU_UP_PREPARE:
-+		p = kthread_create(posix_cpu_timers_thread, hcpu,
-+					"posixcputmr/%d",cpu);
-+		if (IS_ERR(p))
-+			return NOTIFY_BAD;
-+		p->flags |= PF_NOFREEZE;
-+		kthread_bind(p, cpu);
-+		/* Must be high prio to avoid getting starved */
-+		param.sched_priority = MAX_RT_PRIO-1;
-+		sched_setscheduler(p, SCHED_FIFO, &param);
-+		per_cpu(posix_timer_task,cpu) = p;
-+		break;
-+	case CPU_ONLINE:
-+		/* Strictly unneccessary, as first user will wake it. */
-+		wake_up_process(per_cpu(posix_timer_task,cpu));
-+		break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+	case CPU_UP_CANCELED:
-+		/* Unbind it from offline cpu so it can run.  Fall thru. */
-+		kthread_bind(per_cpu(posix_timer_task, cpu),
-+			     cpumask_any(cpu_online_mask));
-+		kthread_stop(per_cpu(posix_timer_task,cpu));
-+		per_cpu(posix_timer_task,cpu) = NULL;
-+		break;
-+	case CPU_DEAD:
-+		kthread_stop(per_cpu(posix_timer_task,cpu));
-+		per_cpu(posix_timer_task,cpu) = NULL;
-+		break;
-+#endif
-+	}
-+	return NOTIFY_OK;
++static void posix_cpu_kthread_park(unsigned int cpu)
++{
++	this_cpu_write(posix_timer_th_active, false);
++}
++
++static void posix_cpu_kthread_unpark(unsigned int cpu)
++{
++	this_cpu_write(posix_timer_th_active, true);
 +}
 +
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block posix_cpu_thread_notifier = {
-+	.notifier_call = posix_cpu_thread_call,
-+	.priority = 10
++static void posix_cpu_kthread_setup(unsigned int cpu)
++{
++	struct sched_param sp;
++
++	sp.sched_priority = MAX_RT_PRIO - 1;
++	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++	posix_cpu_kthread_unpark(cpu);
++}
++
++static struct smp_hotplug_thread posix_cpu_thread = {
++	.store			= &posix_timer_task,
++	.thread_should_run	= posix_cpu_kthread_should_run,
++	.thread_fn		= posix_cpu_kthread_fn,
++	.thread_comm		= "posixcputmr/%u",
++	.setup			= posix_cpu_kthread_setup,
++	.park			= posix_cpu_kthread_park,
++	.unpark			= posix_cpu_kthread_unpark,
 +};
 +
 +static int __init posix_cpu_thread_init(void)
 +{
-+	void *hcpu = (void *)(long)smp_processor_id();
 +	/* Start one for boot CPU. */
 +	unsigned long cpu;
++	int ret;
 +
 +	/* init the per-cpu posix_timer_tasklets */
 +	for_each_possible_cpu(cpu)
 +		per_cpu(posix_timer_tasklist, cpu) = NULL;
 +
-+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+	register_cpu_notifier(&posix_cpu_thread_notifier);
++	ret = smpboot_register_percpu_thread(&posix_cpu_thread);
++	WARN_ON(ret);
++
 +	return 0;
 +}
 +early_initcall(posix_cpu_thread_init);
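
The consumer loop above relies on an unusual list terminator: tasks are chained
through ->posix_timer_list, and the final entry points back to itself, which is
why the walk stops on "next == tsk" rather than on NULL (the producer side,
only partly visible in this hunk, evidently seeds the first queued task that
way). A minimal user-space sketch of the convention, with illustrative names:

/* Standalone model of the self-terminated list; not kernel code. */
#include <stdio.h>

struct task {
	const char *name;
	struct task *posix_timer_list;
};

/* enqueue at the head; an empty list makes the node point to itself */
static void enqueue(struct task **head, struct task *t)
{
	t->posix_timer_list = *head ? *head : t;
	*head = t;
}

int main(void)
{
	struct task a = { "a", NULL }, b = { "b", NULL }, c = { "c", NULL };
	struct task *head = NULL, *tsk, *next;

	enqueue(&head, &a);
	enqueue(&head, &b);
	enqueue(&head, &c);

	for (tsk = head; ; tsk = next) {
		next = tsk->posix_timer_list;
		printf("run timers for %s\n", tsk->name);
		if (next == tsk)	/* last entry points to itself */
			break;
	}
	return 0;
}
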
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
index 45b1b47..830f9c8 100644
--- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: powerpc: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -322,7 +322,7 @@ menu "Kernel options"
+@@ -333,7 +333,7 @@ menu "Kernel options"
  
  config HIGHMEM
  	bool "High memory support"
diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
index b159179..7a0c28e 100644
--- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: powerpc: Use generic rwsem on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use generic code which uses rtmutex
 
diff --git a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index 6650349..d418fb5 100644
--- a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
 From: Bogdan Purcareata <bogdan.purcareata at freescale.com>
 Date: Fri, 24 Apr 2015 15:53:13 +0000
 Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 While converting the openpic emulation code to use a raw_spinlock_t enables
 guests to run on RT, there's still a performance issue. For interrupts sent in
diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
index df5966b..cde875e 100644
--- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 1 Nov 2012 10:14:11 +0100
 Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Implement the powerpc pieces for lazy preempt.
 
@@ -16,14 +16,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -135,6 +135,7 @@ config PPC
- 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- 	select GENERIC_STRNCPY_FROM_USER
- 	select GENERIC_STRNLEN_USER
+@@ -155,6 +155,7 @@ config PPC
+ 	select HAVE_PERF_EVENTS_NMI		if PPC64
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
 +	select HAVE_PREEMPT_LAZY
- 	select HAVE_MOD_ARCH_SPECIFIC
- 	select MODULES_USE_ELF_RELA
- 	select CLONE_BACKWARDS
+ 	select HAVE_RCU_TABLE_FREE		if SMP
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_SYSCALL_TRACEPOINTS
 --- a/arch/powerpc/include/asm/thread_info.h
 +++ b/arch/powerpc/include/asm/thread_info.h
 @@ -43,6 +43,8 @@ struct thread_info {
@@ -75,16 +75,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 --- a/arch/powerpc/kernel/asm-offsets.c
 +++ b/arch/powerpc/kernel/asm-offsets.c
 @@ -156,6 +156,7 @@ int main(void)
- 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
- 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+	DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
- 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ 	OFFSET(TI_FLAGS, thread_info, flags);
+ 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
++	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ 	OFFSET(TI_TASK, thread_info, task);
+ 	OFFSET(TI_CPU, thread_info, cpu);
  
 --- a/arch/powerpc/kernel/entry_32.S
 +++ b/arch/powerpc/kernel/entry_32.S
-@@ -835,7 +835,14 @@ user_exc_return:		/* r10 contains MSR_KE
+@@ -845,7 +845,14 @@ user_exc_return:		/* r10 contains MSR_KE
  	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
  	bne	restore
  	andi.	r8,r8,_TIF_NEED_RESCHED
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	lwz	r3,_MSR(r1)
  	andi.	r0,r3,MSR_EE	/* interrupts off? */
  	beq	restore		/* don't schedule if so */
-@@ -846,11 +853,11 @@ user_exc_return:		/* r10 contains MSR_KE
+@@ -856,11 +863,11 @@ user_exc_return:		/* r10 contains MSR_KE
  	 */
  	bl	trace_hardirqs_off
  #endif
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_TRACE_IRQFLAGS
  	/* And now, to properly rebalance the above, we tell lockdep they
  	 * are being turned back on, which will happen when we return
-@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1183,7 +1190,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
  #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
  
  do_work:			/* r10 contains MSR_KERNEL here */
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	beq	do_user_signal
  
  do_resched:			/* r10 contains MSR_KERNEL here */
-@@ -1192,7 +1199,7 @@ do_resched:			/* r10 contains MSR_KERNEL
+@@ -1204,7 +1211,7 @@ do_resched:			/* r10 contains MSR_KERNEL
  	MTMSRD(r10)		/* disable interrupts */
  	CURRENT_THREAD_INFO(r9, r1)
  	lwz	r9,TI_FLAGS(r9)
diff --git a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
index 61930db..4d244eb 100644
--- a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
+++ b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sun, 31 May 2015 14:44:42 -0400
 Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 To fix:
 
diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch
index ef8ca2c..726de61 100644
--- a/debian/patches/features/all/rt/preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 It has become an obsession to mitigate the determinism vs. throughput
 loss of RT. Looking at the mainline semantics of preemption points
@@ -53,23 +53,23 @@ performance.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- include/linux/preempt.h      |   29 ++++++++++++++++-
- include/linux/sched.h        |   37 ++++++++++++++++++++++
- include/linux/thread_info.h  |   12 ++++++-
+ include/linux/preempt.h      |   35 +++++++++++++++++-
+ include/linux/sched.h        |   38 +++++++++++++++++++
+ include/linux/thread_info.h  |   12 +++++-
  include/linux/trace_events.h |    1 
  kernel/Kconfig.preempt       |    6 +++
- kernel/sched/core.c          |   72 +++++++++++++++++++++++++++++++++++++++++--
- kernel/sched/fair.c          |   16 ++++-----
+ kernel/sched/core.c          |   83 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/fair.c          |   16 ++++----
  kernel/sched/features.h      |    3 +
- kernel/sched/sched.h         |    9 +++++
- kernel/trace/trace.c         |   37 +++++++++++++---------
+ kernel/sched/sched.h         |    9 ++++
+ kernel/trace/trace.c         |   37 +++++++++++--------
  kernel/trace/trace.h         |    2 +
- kernel/trace/trace_output.c  |   14 +++++++-
- 12 files changed, 209 insertions(+), 29 deletions(-)
+ kernel/trace/trace_output.c  |   14 ++++++-
+ 12 files changed, 227 insertions(+), 29 deletions(-)
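
The idea, restated: a wakeup for a SCHED_OTHER task sets only a lazy
reschedule flag, which is honoured once both the preempt count and the new
lazy count have dropped to zero; RT tasks keep setting TIF_NEED_RESCHED and
preempt immediately. A rough user-space model of that decision (simplified
flags, illustrative names):

/* Conceptual model of the two-level reschedule check; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;
static int preempt_lazy_count;
static bool need_resched;	/* TIF_NEED_RESCHED: an RT task wants the CPU */
static bool need_resched_lazy;	/* TIF_NEED_RESCHED_LAZY: a fair task does */

static bool should_preempt_now(void)
{
	if (need_resched)
		return preempt_count == 0;
	if (need_resched_lazy)
		return preempt_count == 0 && preempt_lazy_count == 0;
	return false;
}

int main(void)
{
	need_resched_lazy = true;
	preempt_lazy_count = 1;	/* e.g. inside migrate_disable() */
	printf("lazy request inside lazy section: %d\n", should_preempt_now());

	preempt_lazy_count = 0;
	printf("lazy request outside:             %d\n", should_preempt_now());

	need_resched = true;	/* an RT task became runnable */
	printf("hard request:                     %d\n", should_preempt_now());
	return 0;
}
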
 
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
-@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
+@@ -179,6 +179,20 @@ extern void preempt_count_sub(int val);
  #define preempt_count_inc() preempt_count_add(1)
  #define preempt_count_dec() preempt_count_sub(1)
  
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PREEMPT_COUNT
  
  #define preempt_disable() \
-@@ -161,6 +175,12 @@ do { \
+@@ -187,6 +201,12 @@ do { \
  	barrier(); \
  } while (0)
  
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define sched_preempt_enable_no_resched() \
  do { \
  	barrier(); \
-@@ -198,6 +218,13 @@ do { \
+@@ -240,6 +260,13 @@ do { \
  		__preempt_schedule(); \
  } while (0)
  
@@ -117,7 +117,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #else /* !CONFIG_PREEMPT */
  #define preempt_enable() \
  do { \
-@@ -264,7 +291,7 @@ do { \
+@@ -247,6 +274,12 @@ do { \
+ 	preempt_count_dec(); \
+ } while (0)
+ 
++#define preempt_lazy_enable() \
++do { \
++	dec_preempt_lazy_count(); \
++	barrier(); \
++} while (0)
++
+ #define preempt_enable_notrace() \
+ do { \
+ 	barrier(); \
+@@ -313,7 +346,7 @@ do { \
  } while (0)
  #define preempt_fold_need_resched() \
  do { \
@@ -128,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3349,6 +3349,43 @@ static inline int test_tsk_need_resched(
+@@ -1513,6 +1513,44 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -169,12 +182,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
 +#endif
 +
- static inline int restart_syscall(void)
++
+ static inline bool __task_is_stopped_or_traced(struct task_struct *task)
  {
- 	set_tsk_thread_flag(current, TIF_SIGPENDING);
+ 	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
 --- a/include/linux/thread_info.h
 +++ b/include/linux/thread_info.h
-@@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(st
+@@ -74,7 +74,17 @@ static inline int test_ti_thread_flag(st
  #define test_thread_flag(flag) \
  	test_ti_thread_flag(current_thread_info(), flag)
  
@@ -195,7 +209,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline int arch_within_stack_frames(const void * const stack,
 --- a/include/linux/trace_events.h
 +++ b/include/linux/trace_events.h
-@@ -58,6 +58,7 @@ struct trace_entry {
+@@ -63,6 +63,7 @@ struct trace_entry {
  	int			pid;
  	unsigned short		migrate_disable;
  	unsigned short		padding;
@@ -220,11 +234,21 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	default PREEMPT_NONE
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -510,6 +510,38 @@ void resched_curr(struct rq *rq)
+@@ -517,6 +517,48 @@ void resched_curr(struct rq *rq)
  		trace_sched_wake_idle_without_ipi(cpu);
  }
  
 +#ifdef CONFIG_PREEMPT_LAZY
++
++static int tsk_is_polling(struct task_struct *p)
++{
++#ifdef TIF_POLLING_NRFLAG
++	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
++#else
++	return 0;
++#endif
++}
++
 +void resched_curr_lazy(struct rq *rq)
 +{
 +	struct task_struct *curr = rq->curr;
@@ -259,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -2531,6 +2563,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2525,6 +2567,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -269,31 +293,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3362,6 +3397,7 @@ void migrate_disable(void)
- 	}
- 
- 	preempt_disable();
-+	preempt_lazy_disable();
- 	pin_current_cpu();
- 	p->migrate_disable = 1;
- 	preempt_enable();
-@@ -3401,6 +3437,7 @@ void migrate_enable(void)
+@@ -3516,6 +3561,7 @@ static void __sched notrace __schedule(b
  
- 	unpin_current_cpu();
- 	preempt_enable();
-+	preempt_lazy_enable();
- }
- EXPORT_SYMBOL(migrate_enable);
- #endif
-@@ -3530,6 +3567,7 @@ static void __sched notrace __schedule(b
- 
- 	next = pick_next_task(rq, prev, cookie);
+ 	next = pick_next_task(rq, prev, &rf);
  	clear_tsk_need_resched(prev);
 +	clear_tsk_need_resched_lazy(prev);
  	clear_preempt_need_resched();
- 	rq->clock_skip_update = 0;
  
-@@ -3675,6 +3713,30 @@ static void __sched notrace preempt_sche
+ 	if (likely(prev != next)) {
+@@ -3667,6 +3713,30 @@ static void __sched notrace preempt_sche
  	} while (need_resched());
  }
  
@@ -324,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PREEMPT
  /*
   * this is the entry point to schedule() from in-kernel preemption
-@@ -3689,7 +3751,8 @@ asmlinkage __visible void __sched notrac
+@@ -3681,7 +3751,8 @@ asmlinkage __visible void __sched notrac
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -334,7 +342,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3716,6 +3779,9 @@ asmlinkage __visible void __sched notrac
+@@ -3708,6 +3779,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
  
@@ -344,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -5523,7 +5589,9 @@ void init_idle(struct task_struct *idle,
+@@ -5537,7 +5611,9 @@ void init_idle(struct task_struct *idle,
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -355,9 +363,33 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * The idle tasks have their own, simple scheduling class:
  	 */
+@@ -7512,6 +7588,7 @@ void migrate_disable(void)
+ 	/* get_online_cpus(); */
+ 
+ 	preempt_disable();
++	preempt_lazy_disable();
+ 	pin_current_cpu();
+ 	p->migrate_disable = 1;
+ 
+@@ -7581,6 +7658,7 @@ void migrate_enable(void)
+ 			arg.dest_cpu = dest_cpu;
+ 
+ 			unpin_current_cpu();
++			preempt_lazy_enable();
+ 			preempt_enable();
+ 			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ 			tlb_migrate_finish(p->mm);
+@@ -7591,6 +7669,7 @@ void migrate_enable(void)
+ 	}
+ 	unpin_current_cpu();
+ 	/* put_online_cpus(); */
++	preempt_lazy_enable();
+ 	preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3742,7 +3742,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  	ideal_runtime = sched_slice(cfs_rq, curr);
  	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  	if (delta_exec > ideal_runtime) {
@@ -366,7 +398,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		/*
  		 * The current task ran long enough, ensure it doesn't get
  		 * re-elected due to buddy favours.
-@@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3766,7 +3766,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  		return;
  
  	if (delta > ideal_runtime)
@@ -375,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void
-@@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3908,7 +3908,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
@@ -384,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		return;
  	}
  	/*
-@@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4090,7 +4090,7 @@ static void __account_cfs_rq_runtime(str
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -393,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static __always_inline
-@@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq
+@@ -4718,7 +4718,7 @@ static void hrtick_start_fair(struct rq
  
  		if (delta < 0) {
  			if (rq->curr == p)
@@ -402,7 +434,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			return;
  		}
  		hrtick_start(rq, delta);
-@@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct
+@@ -6231,7 +6231,7 @@ static void check_preempt_wakeup(struct
  	return;
  
  preempt:
@@ -411,7 +443,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_s
+@@ -9006,7 +9006,7 @@ static void task_fork_fair(struct task_s
  		 * 'current' within the tree based on its new key value.
  		 */
  		swap(curr->vruntime, se->vruntime);
@@ -420,7 +452,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9030,7 +9030,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
@@ -443,7 +475,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1347,6 +1347,15 @@ extern void init_sched_fair_class(void);
+@@ -1477,6 +1477,15 @@ extern void init_sched_fair_class(void);
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
  
@@ -461,7 +493,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trac
+@@ -1934,6 +1934,7 @@ tracing_generic_entry_update(struct trac
  	struct task_struct *tsk = current;
  
  	entry->preempt_count		= pc & 0xff;
@@ -469,17 +501,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	entry->pid			= (tsk) ? tsk->pid : 0;
  	entry->flags =
  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1907,7 +1908,8 @@ tracing_generic_entry_update(struct trac
+@@ -1944,7 +1945,8 @@ tracing_generic_entry_update(struct trac
  		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
  		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ 		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
 -		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 +		(tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
 +		(need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
  
  	entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2894,15 +2896,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3111,15 +3113,17 @@ get_total_entries(struct trace_buffer *b
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -506,7 +538,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2928,11 +2932,14 @@ static void print_func_help_header_irq(s
+@@ -3145,11 +3149,14 @@ static void print_func_help_header_irq(s
  	print_event_info(buf, m);
  	seq_puts(m, "#                              _-----=> irqs-off\n"
  		    "#                             / _----=> need-resched\n"
@@ -528,7 +560,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void
 --- a/kernel/trace/trace.h
 +++ b/kernel/trace/trace.h
-@@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
+@@ -126,6 +126,7 @@ struct kretprobe_trace_entry_head {
   *  NEED_RESCHED	- reschedule is requested
   *  HARDIRQ		- inside an interrupt handler
   *  SOFTIRQ		- inside a softirq handler
@@ -536,7 +568,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   */
  enum trace_flag_type {
  	TRACE_FLAG_IRQS_OFF		= 0x01,
-@@ -133,6 +134,7 @@ enum trace_flag_type {
+@@ -135,6 +136,7 @@ enum trace_flag_type {
  	TRACE_FLAG_SOFTIRQ		= 0x10,
  	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
  	TRACE_FLAG_NMI			= 0x40,
@@ -546,7 +578,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TRACE_BUF_SIZE		1024
 --- a/kernel/trace/trace_output.c
 +++ b/kernel/trace/trace_output.c
-@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -438,6 +438,7 @@ int trace_print_lat_fmt(struct trace_seq
  {
  	char hardsoft_irq;
  	char need_resched;
@@ -554,7 +586,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	char irqs_off;
  	int hardirq;
  	int softirq;
-@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -468,6 +469,9 @@ int trace_print_lat_fmt(struct trace_seq
  		break;
  	}
  
@@ -564,7 +596,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	hardsoft_irq =
  		(nmi && hardirq)     ? 'Z' :
  		nmi                  ? 'z' :
-@@ -424,14 +428,20 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -476,14 +480,20 @@ int trace_print_lat_fmt(struct trace_seq
  		softirq              ? 's' :
  		                       '.' ;
  
diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
index aa00aa6..530f6d8 100644
--- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
+++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 24 Jul 2009 12:38:56 +0200
 Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT needs a few preempt_disable/enable points which are not necessary
 otherwise. Implement variants to avoid #ifdeffery.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
-@@ -154,7 +154,11 @@ do { \
+@@ -180,7 +180,11 @@ do { \
  	preempt_count_dec(); \
  } while (0)
  
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
  
-@@ -248,6 +252,18 @@ do { \
+@@ -297,6 +301,18 @@ do { \
  		set_preempt_need_resched(); \
  } while (0)
  
diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index 56c0689..ac8af46 100644
--- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add "force_early_printk" boot param to help with debugging
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 02 Sep 2011 14:41:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Gives me an option to screw printk and actually see what the machine
 says.
@@ -16,7 +16,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -381,6 +381,13 @@ asmlinkage void early_printk(const char
+@@ -431,6 +431,13 @@ asmlinkage void early_printk(const char
   */
  static bool __read_mostly printk_killswitch;
  
diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch
index d52b2af..b26e564 100644
--- a/debian/patches/features/all/rt/printk-kill.patch
+++ b/debian/patches/features/all/rt/printk-kill.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add a printk kill switch
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 22 Jul 2011 17:58:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Add a printk kill switch. This is used from the (NMI) watchdog to ensure that
 it does not deadlock with the early printk code.
@@ -10,12 +10,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/printk.h |    2 +
  kernel/printk/printk.c |   79 ++++++++++++++++++++++++++++++++++++-------------
- kernel/watchdog.c      |   10 ++++++
- 3 files changed, 71 insertions(+), 20 deletions(-)
+ kernel/watchdog_hld.c  |    9 +++++
+ 3 files changed, 70 insertions(+), 20 deletions(-)
 
 --- a/include/linux/printk.h
 +++ b/include/linux/printk.h
-@@ -126,9 +126,11 @@ struct va_format {
+@@ -141,9 +141,11 @@ struct va_format {
  #ifdef CONFIG_EARLY_PRINTK
  extern asmlinkage __printf(1, 2)
  void early_printk(const char *fmt, ...);
@@ -29,9 +29,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PRINTK_NMI
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -351,6 +351,58 @@ struct printk_log {
-  */
- DEFINE_RAW_SPINLOCK(logbuf_lock);
+@@ -401,6 +401,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
+ 		printk_safe_exit_irqrestore(flags);	\
+ 	} while (0)
  
 +#ifdef CONFIG_EARLY_PRINTK
 +struct console *early_console;
@@ -88,9 +88,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PRINTK
  DECLARE_WAIT_QUEUE_HEAD(log_wait);
  /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1781,6 +1833,13 @@ asmlinkage int vprintk_emit(int facility
- 	/* cpu currently holding logbuf_lock in this function */
- 	static unsigned int logbuf_cpu = UINT_MAX;
+@@ -1705,6 +1757,13 @@ asmlinkage int vprintk_emit(int facility
+ 	int printed_len = 0;
+ 	bool in_sched = false;
  
 +	/*
 +	 * Fall back to early_printk if a debugging subsystem has
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (level == LOGLEVEL_SCHED) {
  		level = LOGLEVEL_DEFAULT;
  		in_sched = true;
-@@ -2014,26 +2073,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -1876,26 +1935,6 @@ static bool suppress_message_printing(in
  
  #endif /* CONFIG_PRINTK */
  
@@ -129,18 +129,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static int __add_preferred_console(char *name, int idx, char *options,
  				   char *brl_options)
  {
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -315,6 +315,8 @@ static int is_softlockup(unsigned long t
- 
- #ifdef CONFIG_HARDLOCKUP_DETECTOR
- 
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -21,6 +21,7 @@
+ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 +static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-+
- static struct perf_event_attr wd_hw_attr = {
- 	.type		= PERF_TYPE_HARDWARE,
- 	.config		= PERF_COUNT_HW_CPU_CYCLES,
-@@ -348,6 +350,13 @@ static void watchdog_overflow_callback(s
+ 
+ /* boot commands */
+ /*
+@@ -106,6 +107,13 @@ static void watchdog_overflow_callback(s
  		/* only print hardlockups once */
  		if (__this_cpu_read(hard_watchdog_warn) == true)
  			return;
@@ -154,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
  		print_modules();
-@@ -365,6 +374,7 @@ static void watchdog_overflow_callback(s
+@@ -123,6 +131,7 @@ static void watchdog_overflow_callback(s
  				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
  			trigger_allbutself_cpu_backtrace();
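
A minimal stand-alone model of the kill-switch flow (simplified console path,
illustrative names): once the watchdog flips the switch, printk traffic
bypasses the lock-taking ring-buffer path and goes straight to the synchronous
early console.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static bool printk_killswitch;

/* stands in for writing through early_console->write() */
static void early_vprintk(const char *fmt, va_list ap)
{
	vfprintf(stderr, fmt, ap);
}

static void my_printk(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (printk_killswitch)
		early_vprintk(fmt, ap);	/* lockless fallback */
	else
		vprintf(fmt, ap);	/* normal buffered path */
	va_end(ap);
}

int main(void)
{
	my_printk("normal path\n");
	printk_killswitch = true;	/* hard-lockup detector fired */
	my_printk("fallback path\n");
	return 0;
}
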
  
diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch
index 2649909..c2282bd 100644
--- a/debian/patches/features/all/rt/printk-rt-aware.patch
+++ b/debian/patches/features/all/rt/printk-rt-aware.patch
@@ -1,19 +1,19 @@
 Subject: printk: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 19 Sep 2012 14:50:37 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Drop the lock before calling the console driver and do not disable
 interrupts while printing to a serial console.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- kernel/printk/printk.c |   25 +++++++++++++++++++++++--
- 1 file changed, 23 insertions(+), 2 deletions(-)
+ kernel/printk/printk.c |   19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1628,6 +1628,7 @@ static void call_console_drivers(int lev
+@@ -1630,6 +1630,7 @@ static void call_console_drivers(const c
  	if (!console_drivers)
  		return;
  
@@ -21,21 +21,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
  			continue;
-@@ -1643,6 +1644,7 @@ static void call_console_drivers(int lev
+@@ -1645,6 +1646,7 @@ static void call_console_drivers(const c
  		else
  			con->write(con, text, len);
  	}
 +	migrate_enable();
  }
  
- /*
-@@ -1951,13 +1953,23 @@ asmlinkage int vprintk_emit(int facility
+ int printk_delay_msec __read_mostly;
+@@ -1827,12 +1829,22 @@ asmlinkage int vprintk_emit(int facility
  
  	/* If called from the scheduler, we can not call up(). */
  	if (!in_sched) {
 +		int may_trylock = 1;
 +
- 		lockdep_off();
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +		/*
 +		 * we can't take a sleeping lock with IRQs or preemption disabled
@@ -52,41 +51,21 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 -		if (console_trylock())
 +		if (may_trylock && console_trylock())
  			console_unlock();
- 		lockdep_on();
  	}
-@@ -2349,11 +2361,16 @@ static void console_cont_flush(char *tex
- 		goto out;
  
- 	len = cont_print_text(text, size);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+	call_console_drivers(cont.level, NULL, 0, text, len);
-+#else
- 	raw_spin_unlock(&logbuf_lock);
- 	stop_critical_timings();
- 	call_console_drivers(cont.level, NULL, 0, text, len);
- 	start_critical_timings();
- 	local_irq_restore(flags);
-+#endif
- 	return;
- out:
- 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2477,13 +2494,17 @@ void console_unlock(void)
- 		console_idx = log_next(console_idx);
+@@ -2283,10 +2295,15 @@ void console_unlock(void)
  		console_seq++;
- 		console_prev = msg->flags;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+		call_console_drivers(level, ext_text, ext_len, text, len);
-+#else
  		raw_spin_unlock(&logbuf_lock);
  
++#ifdef CONFIG_PREEMPT_RT_FULL
++		printk_safe_exit_irqrestore(flags);
++		call_console_drivers(ext_text, ext_len, text, len);
++#else
  		stop_critical_timings();	/* don't trace print latency */
- 		call_console_drivers(level, ext_text, ext_len, text, len);
+ 		call_console_drivers(ext_text, ext_len, text, len);
  		start_critical_timings();
- 		local_irq_restore(flags);
--
+ 		printk_safe_exit_irqrestore(flags);
 +#endif
+ 
  		if (do_cond_resched)
  			cond_resched();
- 	}
diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 04fb098..2308e2c 100644
--- a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 18:21:04 +0200
 Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 As explained by Alexander Fyodorov <halcy at yandex.ru>:
 
@@ -24,26 +24,28 @@ taken in case the caller is interrupted between looking into ->state and
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- include/linux/sched.h |   48 +++++++++++++++++++++++++++++++++++++++++++++---
+ include/linux/sched.h |   49 +++++++++++++++++++++++++++++++++++++++++++++----
  kernel/ptrace.c       |    9 ++++++++-
  kernel/sched/core.c   |   17 +++++++++++++++--
- 3 files changed, 68 insertions(+), 6 deletions(-)
+ 3 files changed, 68 insertions(+), 7 deletions(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -243,10 +243,7 @@ extern char ___assert_task_state[1 - 2*!
- 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+@@ -100,12 +100,8 @@ struct task_group;
+ 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ 					 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
  
--#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
- #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
--#define task_is_stopped_or_traced(task)	\
--			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
- #define task_contributes_to_load(task)	\
- 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- 				 (task->flags & PF_FROZEN) == 0 && \
-@@ -3366,6 +3363,51 @@ static inline int signal_pending_state(l
- 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+-#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
+-
+ #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
+ 
+-#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+-
+ #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ 					 (task->flags & PF_FROZEN) == 0 && \
+ 					 (task->state & TASK_NOLOAD) == 0)
+@@ -1500,6 +1496,51 @@ static inline int test_tsk_need_resched(
+ 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
 +static inline bool __task_is_stopped_or_traced(struct task_struct *task)
@@ -96,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * explicit rescheduling in places that are safe. The return
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
-@@ -166,7 +166,14 @@ static bool ptrace_freeze_traced(struct
+@@ -175,7 +175,14 @@ static bool ptrace_freeze_traced(struct
  
  	spin_lock_irq(&task->sighand->siglock);
  	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
@@ -114,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_unlock_irq(&task->sighand->siglock);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1384,6 +1384,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1363,6 +1363,18 @@ int migrate_swap(struct task_struct *cur
  	return ret;
  }
  
@@ -133,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -1428,7 +1440,7 @@ unsigned long wait_task_inactive(struct
+@@ -1407,7 +1419,7 @@ unsigned long wait_task_inactive(struct
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -142,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				return 0;
  			cpu_relax();
  		}
-@@ -1443,7 +1455,8 @@ unsigned long wait_task_inactive(struct
+@@ -1422,7 +1434,8 @@ unsigned long wait_task_inactive(struct
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
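
A user-space model of the fix (made-up constants, illustrative names): the
task's real state is parked in saved_state while it blocks on a sleeping
spinlock, so the stopped/traced test has to consult both fields under the
task's pi_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define __TASK_STOPPED	0x04	/* values made up for the model */
#define __TASK_TRACED	0x08

struct task {
	pthread_mutex_t pi_lock;
	unsigned int state;		/* may transiently be a lock-wait state */
	unsigned int saved_state;	/* real state while on a sleeping lock */
};

static bool task_is_stopped_or_traced(struct task *t)
{
	bool ret;

	pthread_mutex_lock(&t->pi_lock);
	ret = (t->state | t->saved_state) & (__TASK_STOPPED | __TASK_TRACED);
	pthread_mutex_unlock(&t->pi_lock);
	return ret;
}

int main(void)
{
	/* blocked on a sleeping lock: state looks runnable, but the
	 * saved state still records that the task is traced */
	struct task t = { PTHREAD_MUTEX_INITIALIZER, 0, __TASK_TRACED };

	printf("traced: %d\n", task_is_stopped_or_traced(&t));
	return 0;
}
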
diff --git a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
index ccb8b44..b9cd1ed 100644
--- a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
+++ b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 25 Jan 2017 16:34:27 +0100
 Subject: [PATCH] radix-tree: use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The preload functionality uses per-CPU variables and preempt-disable to
 ensure that it does not switch CPUs during its usage. This patch adds
@@ -12,53 +12,68 @@ Cc: stable-rt at vger.kernel.org
 Reported-and-debugged-by: Mike Galbraith <efault at gmx.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
+ include/linux/idr.h        |    5 +----
  include/linux/radix-tree.h |    7 ++-----
- lib/radix-tree.c           |   22 +++++++++++++++-------
- 2 files changed, 17 insertions(+), 12 deletions(-)
+ lib/radix-tree.c           |   30 ++++++++++++++++++++++--------
+ 3 files changed, 25 insertions(+), 17 deletions(-)
 
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -111,10 +111,7 @@ static inline bool idr_is_empty(const st
+  * Each idr_preload() should be matched with an invocation of this
+  * function.  See idr_preload() for details.
+  */
+-static inline void idr_preload_end(void)
+-{
+-	preempt_enable();
+-}
++void idr_preload_end(void);
+ 
+ /**
+  * idr_find - return pointer for given id
 --- a/include/linux/radix-tree.h
 +++ b/include/linux/radix-tree.h
-@@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot
+@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot
  int radix_tree_preload(gfp_t gfp_mask);
  int radix_tree_maybe_preload(gfp_t gfp_mask);
  int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
 +void radix_tree_preload_end(void);
 +
  void radix_tree_init(void);
- void *radix_tree_tag_set(struct radix_tree_root *root,
+ void *radix_tree_tag_set(struct radix_tree_root *,
  			unsigned long index, unsigned int tag);
-@@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_ta
- int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
- unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
+@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_
+ 		unsigned int max_items, unsigned int tag);
+ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
  
 -static inline void radix_tree_preload_end(void)
 -{
 -	preempt_enable();
 -}
 -
- /**
-  * struct radix_tree_iter - radix tree iterator state
-  *
+ int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
+ int radix_tree_split(struct radix_tree_root *, unsigned long index,
+ 			unsigned new_order);
 --- a/lib/radix-tree.c
 +++ b/lib/radix-tree.c
-@@ -36,7 +36,7 @@
- #include <linux/bitops.h>
+@@ -37,7 +37,7 @@
  #include <linux/rcupdate.h>
- #include <linux/preempt.h>		/* in_interrupt() */
+ #include <linux/slab.h>
+ #include <linux/string.h>
 -
 +#include <linux/locallock.h>
  
  /* Number of nodes in fully populated tree of given height */
  static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-@@ -68,6 +68,7 @@ struct radix_tree_preload {
+@@ -86,6 +86,7 @@ struct radix_tree_preload {
  	struct radix_tree_node *nodes;
  };
  static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
  
- static inline void *node_to_entry(void *ptr)
+ static inline struct radix_tree_node *entry_to_node(void *ptr)
  {
-@@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
  		 * succeed in getting a node here (and never reach
  		 * kmem_cache_alloc)
  		 */
@@ -66,15 +81,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
  		if (rtp->nr) {
  			ret = rtp->nodes;
- 			rtp->nodes = ret->private_data;
- 			ret->private_data = NULL;
+ 			rtp->nodes = ret->parent;
  			rtp->nr--;
  		}
 +		put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
  		/*
  		 * Update the allocation stack trace as this is more useful
  		 * for debugging.
-@@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gf
+@@ -475,14 +477,14 @@ static int __radix_tree_preload(gfp_t gf
  	 */
  	gfp_mask &= ~__GFP_ACCOUNT;
  
@@ -91,8 +105,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +		local_lock(radix_tree_preloads_lock);
  		rtp = this_cpu_ptr(&radix_tree_preloads);
  		if (rtp->nr < nr) {
- 			node->private_data = rtp->nodes;
-@@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+ 			node->parent = rtp->nodes;
+@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
  	if (gfpflags_allow_blocking(gfp_mask))
  		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
  	/* Preloading doesn't help anything with this gfp mask, skip it */
@@ -101,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return 0;
  }
  EXPORT_SYMBOL(radix_tree_maybe_preload);
-@@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t
  
  	/* Preloading doesn't help anything with this gfp mask, skip it */
  	if (!gfpflags_allow_blocking(gfp_mask)) {
@@ -110,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return 0;
  	}
  
-@@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t
  	return __radix_tree_preload(gfp_mask, nr_nodes);
  }
  
@@ -120,6 +134,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +}
 +EXPORT_SYMBOL(radix_tree_preload_end);
 +
- /*
-  * The maximum index which can be stored in a radix tree
-  */
+ static unsigned radix_tree_load_root(const struct radix_tree_root *root,
+ 		struct radix_tree_node **nodep, unsigned long *maxindex)
+ {
+@@ -2107,6 +2115,12 @@ void idr_preload(gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL(idr_preload);
+ 
++void idr_preload_end(void)
++{
++	local_unlock(radix_tree_preloads_lock);
++}
++EXPORT_SYMBOL(idr_preload_end);
++
+ /**
+  * ida_pre_get - reserve resources for ida allocation
+  * @ida: ida handle
+@@ -2123,7 +2137,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
+ 	 * ida_get_new() can return -EAGAIN, prompting the caller
+ 	 * to return to the ida_pre_get() step.
+ 	 */
+-	preempt_enable();
++	local_unlock(radix_tree_preloads_lock);
+ 
+ 	if (!this_cpu_read(ida_bitmap)) {
+ 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
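
The shape of the conversion, condensed from the hunks above (schematic, not a
compilable unit): a preempt_disable()-protected per-CPU section becomes a
named local lock, which on RT is a per-CPU sleeping lock and on !RT collapses
back to the old preempt/irq disable.

	/* before: preemption off pins the CPU and the per-CPU data */
	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	/* ... use rtp ... */
	preempt_enable();

	/* after: a named lock documents and protects the same section */
	static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);

	local_lock(radix_tree_preloads_lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	/* ... use rtp ... */
	local_unlock(radix_tree_preloads_lock);
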
diff --git a/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
index 8b551a2..4c3e005 100644
--- a/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
+++ b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
@@ -2,7 +2,7 @@ From 81e7296af883a58c3e5609842e129de01442198d Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 12 May 2017 15:46:17 +0200
 Subject: [PATCH] random: avoid preempt_disable()ed section
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 extract_crng() will use sleeping locks while in a preempt_disable()
 section due to get_cpu_var().
@@ -11,8 +11,8 @@ Work around it with local_locks.
 Cc: stable-rt at vger.kernel.org # where it applies to
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- drivers/char/random.c |   12 ++++++++----
- 1 file changed, 8 insertions(+), 4 deletions(-)
+ drivers/char/random.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
@@ -24,52 +24,49 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #include <crypto/chacha20.h>
  
  #include <asm/processor.h>
-@@ -2052,6 +2053,7 @@ struct batched_entropy {
+@@ -2022,6 +2023,7 @@ struct batched_entropy {
   * goal of being quite fast and not depleting entropy.
   */
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock);
- unsigned long get_random_long(void)
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
+ u64 get_random_u64(void)
  {
- 	unsigned long ret;
-@@ -2060,13 +2062,13 @@ unsigned long get_random_long(void)
- 	if (arch_get_random_long(&ret))
- 		return ret;
+ 	u64 ret;
+@@ -2036,18 +2038,19 @@ u64 get_random_u64(void)
+ 	    return ret;
+ #endif
  
--	batch = &get_cpu_var(batched_entropy_long);
-+	batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long);
- 	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
- 		extract_crng((u8 *)batch->entropy_long);
+-	batch = &get_cpu_var(batched_entropy_u64);
++	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
+ 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ 		extract_crng((u8 *)batch->entropy_u64);
  		batch->position = 0;
  	}
- 	ret = batch->entropy_long[batch->position++];
--	put_cpu_var(batched_entropy_long);
-+	put_locked_var(batched_entropy_long_lock, batched_entropy_long);
+ 	ret = batch->entropy_u64[batch->position++];
+-	put_cpu_var(batched_entropy_u64);
++	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
  	return ret;
  }
- EXPORT_SYMBOL(get_random_long);
-@@ -2078,6 +2080,8 @@ unsigned int get_random_int(void)
- }
- #else
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock);
-+
- unsigned int get_random_int(void)
+ EXPORT_SYMBOL(get_random_u64);
+ 
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
+ u32 get_random_u32(void)
  {
- 	unsigned int ret;
-@@ -2086,13 +2090,13 @@ unsigned int get_random_int(void)
+ 	u32 ret;
+@@ -2056,13 +2059,13 @@ u32 get_random_u32(void)
  	if (arch_get_random_int(&ret))
  		return ret;
  
--	batch = &get_cpu_var(batched_entropy_int);
-+	batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int);
- 	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
- 		extract_crng((u8 *)batch->entropy_int);
+-	batch = &get_cpu_var(batched_entropy_u32);
++	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
+ 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ 		extract_crng((u8 *)batch->entropy_u32);
  		batch->position = 0;
  	}
- 	ret = batch->entropy_int[batch->position++];
--	put_cpu_var(batched_entropy_int);
-+	put_locked_var(batched_entropy_int_lock, batched_entropy_int);
+ 	ret = batch->entropy_u32[batch->position++];
+-	put_cpu_var(batched_entropy_u32);
++	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
  	return ret;
  }
- #endif
+ EXPORT_SYMBOL(get_random_u32);
diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
index 2650c7d..0cd711f 100644
--- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
+++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: random: Make it work on rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Delegate the random insertion to the forced threaded interrupt
 handler. Store the return IP of the hard interrupt handler in the irq
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -1120,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1102,28 +1102,27 @@ static __u32 get_reg(struct fast_pool *f
  	return *(ptr + f->reg_idx++);
  }
  
@@ -57,17 +57,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	add_interrupt_bench(cycles);
 --- a/drivers/hv/vmbus_drv.c
 +++ b/drivers/hv/vmbus_drv.c
-@@ -761,6 +761,8 @@ static void vmbus_isr(void)
- 	void *page_addr;
+@@ -970,6 +970,8 @@ static void vmbus_isr(void)
+ 	void *page_addr = hv_cpu->synic_event_page;
  	struct hv_message *msg;
  	union hv_synic_event_flags *event;
 +	struct pt_regs *regs = get_irq_regs();
 +	u64 ip = regs ? instruction_pointer(regs) : 0;
  	bool handled = false;
  
- 	page_addr = hv_context.synic_event_page[cpu];
-@@ -808,7 +810,7 @@ static void vmbus_isr(void)
- 			tasklet_schedule(hv_context.msg_dpc[cpu]);
+ 	if (unlikely(page_addr == NULL))
+@@ -1013,7 +1015,7 @@ static void vmbus_isr(void)
+ 			tasklet_schedule(&hv_cpu->msg_dpc);
  	}
  
 -	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		note_interrupt(desc, retval);
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -1023,6 +1023,12 @@ static int irq_thread(void *data)
+@@ -1025,6 +1025,12 @@ static int irq_thread(void *data)
  		if (action_ret == IRQ_WAKE_THREAD)
  			irq_wake_secondary(desc, action);
  
diff --git a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
index 40523a0..2b6dde9 100644
--- a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
+++ b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 11:52:17 +0200
 Subject: rbtree: include rcu.h because we use it
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Since commit c1adf20052d8 ("Introduce rb_replace_node_rcu()")
 rbtree_augmented.h uses RCU related data structures but does not include
@@ -11,7 +11,8 @@ otherwise.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  include/linux/rbtree_augmented.h |    1 +
- 1 file changed, 1 insertion(+)
+ include/linux/rbtree_latch.h     |    1 +
+ 2 files changed, 2 insertions(+)
 
 --- a/include/linux/rbtree_augmented.h
 +++ b/include/linux/rbtree_augmented.h
@@ -23,3 +24,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  /*
   * Please note - only struct rb_augment_callbacks and the prototypes for
+--- a/include/linux/rbtree_latch.h
++++ b/include/linux/rbtree_latch.h
+@@ -34,6 +34,7 @@
+ 
+ #include <linux/rbtree.h>
+ #include <linux/seqlock.h>
++#include <linux/rcupdate.h>
+ 
+ struct latch_tree_node {
+ 	struct rb_node node[2];
diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
index dd0a389..f26eac8 100644
--- a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -1,7 +1,7 @@
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Mon, 4 Nov 2013 13:21:10 -0800
 Subject: rcu: Eliminate softirq processing from rcutree
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Running RCU out of softirq is a problem for some workloads that would
 like to manage RCU core processing independently of other softirq work,
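
A condensed sketch of the per-CPU worker that replaces RCU_SOFTIRQ,
built from the per-CPU variables visible in the hunks below (the real
kthread loops and throttles itself, which is omitted here):

	static void rcu_cpu_kthread(unsigned int cpu)
	{
		unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
		char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);

		*statusp = RCU_KTHREAD_RUNNING;
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_process_callbacks();	/* former softirq body */
		*statusp = RCU_KTHREAD_WAITING;
	}
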
@@ -19,12 +19,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  kernel/rcu/tree.c        |  110 ++++++++++++++++++++++++++++++---
  kernel/rcu/tree.h        |    5 -
- kernel/rcu/tree_plugin.h |  153 ++++++-----------------------------------------
- 3 files changed, 122 insertions(+), 146 deletions(-)
+ kernel/rcu/tree_plugin.h |  155 ++++++-----------------------------------------
+ 3 files changed, 122 insertions(+), 148 deletions(-)
 
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -55,6 +55,11 @@
+@@ -57,6 +57,11 @@
  #include <linux/random.h>
  #include <linux/trace_events.h>
  #include <linux/suspend.h>
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include "tree.h"
  #include "rcu.h"
-@@ -3044,18 +3049,17 @@ static void
+@@ -3143,18 +3148,17 @@ static void
  /*
   * Do RCU core processing for the current CPU.
   */
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Schedule RCU callback invocation.  If the specified type of RCU
   * does not support RCU priority boosting, just do a direct call,
-@@ -3067,18 +3071,105 @@ static void invoke_rcu_callbacks(struct
+@@ -3166,18 +3170,105 @@ static void invoke_rcu_callbacks(struct
  {
  	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
  		return;
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  /*
   * Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4268,7 +4359,6 @@ void __init rcu_init(void)
+@@ -4357,7 +4448,6 @@ void __init rcu_init(void)
  	if (dump_tree)
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
  	__rcu_init_preempt();
@@ -179,9 +179,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	 * We don't need protection against CPU-hotplug here because
 --- a/kernel/rcu/tree.h
 +++ b/kernel/rcu/tree.h
-@@ -596,12 +596,10 @@ extern struct rcu_state rcu_bh_state;
- extern struct rcu_state rcu_preempt_state;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
+@@ -599,12 +599,10 @@ extern struct rcu_state rcu_preempt_stat
+ 
+ int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
  
 -#ifdef CONFIG_RCU_BOOST
  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #ifndef RCU_TREE_NONCORE
  
-@@ -621,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -624,10 +622,9 @@ void call_rcu(struct rcu_head *head, rcu
  static void __init __rcu_init_preempt(void);
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -206,15 +206,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #endif /* #ifdef CONFIG_RCU_BOOST */
 --- a/kernel/rcu/tree_plugin.h
 +++ b/kernel/rcu/tree_plugin.h
-@@ -24,26 +24,10 @@
+@@ -24,28 +24,10 @@
   *	   Paul E. McKenney <paulmck at linux.vnet.ibm.com>
   */
  
 -#include <linux/delay.h>
 -#include <linux/gfp.h>
 -#include <linux/oom.h>
+-#include <linux/sched/debug.h>
 -#include <linux/smpboot.h>
 -#include <linux/jiffies.h>
+-#include <uapi/linux/sched/types.h>
 -#include "../time/tick-internal.h"
 -
  #ifdef CONFIG_RCU_BOOST
@@ -233,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #else /* #ifdef CONFIG_RCU_BOOST */
  
  /*
-@@ -56,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
+@@ -58,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
  
  #endif /* #else #ifdef CONFIG_RCU_BOOST */
  
@@ -248,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_RCU_NOCB_CPU
  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-@@ -633,15 +625,6 @@ static void rcu_preempt_check_callbacks(
+@@ -635,15 +625,6 @@ static void rcu_preempt_check_callbacks(
  		t->rcu_read_unlock_special.b.need_qs = true;
  }
  
@@ -264,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Queue a preemptible-RCU callback for invocation after a grace period.
   */
-@@ -830,6 +813,19 @@ void exit_rcu(void)
+@@ -832,6 +813,19 @@ void exit_rcu(void)
  
  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
  
@@ -284,7 +286,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #ifdef CONFIG_RCU_BOOST
  
  #include "../locking/rtmutex_common.h"
-@@ -861,16 +857,6 @@ static void rcu_initiate_boost_trace(str
+@@ -863,16 +857,6 @@ static void rcu_initiate_boost_trace(str
  
  #endif /* #else #ifdef CONFIG_RCU_TRACE */
  
@@ -301,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Carry out RCU priority boosting on the task indicated by ->exp_tasks
   * or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1014,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1016,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
  }
  
  /*
@@ -325,7 +327,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * Is the current CPU running the RCU-callbacks kthread?
   * Caller must have preemption disabled.
   */
-@@ -1084,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1086,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
  	return 0;
  }
  
@@ -393,7 +395,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * Set the per-rcu_node kthread's affinity to cover all CPUs that are
   * served by the rcu_node in question.  The CPU hotplug lock is still
-@@ -1175,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1177,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
  	free_cpumask_var(cm);
  }
  
@@ -420,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	rcu_for_each_leaf_node(rcu_state_p, rnp)
  		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
  }
-@@ -1217,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1219,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  }
  
diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
index b5a6b4c..321f0a8 100644
--- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Disable RCU_FAST_NO_HZ on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 13:26:09 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This uses a timer_list timer from the irq disabled guts of the idle
 code. Disable it for now to prevent wreckage.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
+@@ -622,7 +622,7 @@ config RCU_FANOUT_LEAF
  
  config RCU_FAST_NO_HZ
  	bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 627bbb1..782d60f 100644
--- a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -1,7 +1,7 @@
 From: Julia Cartwright <julia at ni.com>
 Date: Wed, 12 Oct 2016 11:21:14 -0500
 Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The forcing of an expedited grace period is an expensive and very
 RT-application unfriendly operation, as it forcibly preempts all running
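
The change itself is small: the boot-time default of the existing
rcu_normal_after_boot module parameter is keyed off RT, presumably along
these lines (a sketch of the resulting declaration in
kernel/rcu/update.c):

	static int rcu_normal_after_boot =
		IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
	module_param(rcu_normal_after_boot, int, 0);
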
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
-@@ -62,7 +62,7 @@
+@@ -64,7 +64,7 @@
  #ifndef CONFIG_TINY_RCU
  module_param(rcu_expedited, int, 0);
  module_param(rcu_normal, int, 0);
diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
index cfb094b..8affdad 100644
--- a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Mar 2014 20:19:05 +0100
 Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Since it is no longer invoked from the softirq, people run into OOM more
 often if the priority of the RCU thread is too low. Making boosting
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	help
  	  This option needs to be enabled if you wish to make
  	  expert-level adjustments to RCU configuration.  By default,
-@@ -650,7 +650,7 @@ config TREE_RCU_TRACE
+@@ -649,7 +649,7 @@ config TREE_RCU_TRACE
  config RCU_BOOST
  	bool "Enable RCU priority boosting"
  	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 8f5da62..ac2a0af 100644
--- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Merge RCU-bh into RCU-preempt
 Date: Wed, 5 Oct 2011 11:59:38 -0700
 From: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The Linux kernel has long RCU-bh read-side critical sections that
 intolerably increase scheduling latency under mainline's RCU-bh rules,
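
On RT the BH flavour simply collapses into the preemptible flavour; the
read-side primitive shows the pattern (condensed from the rcupdate.h
hunks below, lockdep annotations trimmed):

	static inline void rcu_read_lock_bh(void)
	{
		local_bh_disable();
	#ifdef CONFIG_PREEMPT_RT_FULL
		rcu_read_lock();   /* BH readers become preempt readers */
	#else
		__acquire(RCU_BH);
		rcu_lock_acquire(&rcu_bh_lock_map);
	#endif
	}
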
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -179,6 +179,9 @@ void call_rcu(struct rcu_head *head,
+@@ -178,6 +178,9 @@ void call_rcu(struct rcu_head *head,
  
  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
  
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
   * @head: structure to be used for queueing the RCU updates.
-@@ -202,6 +205,7 @@ void call_rcu(struct rcu_head *head,
+@@ -201,6 +204,7 @@ void call_rcu(struct rcu_head *head,
   */
  void call_rcu_bh(struct rcu_head *head,
  		 rcu_callback_t func);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -339,7 +343,11 @@ static inline int rcu_preempt_depth(void
+@@ -299,7 +303,11 @@ static inline int rcu_preempt_depth(void
  /* Internal to kernel */
  void rcu_init(void);
  void rcu_sched_qs(void);
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_check_callbacks(int user);
  void rcu_report_dead(unsigned int cpu);
  void rcu_cpu_starting(unsigned int cpu);
-@@ -513,7 +521,14 @@ extern struct lockdep_map rcu_callback_m
+@@ -473,7 +481,14 @@ extern struct lockdep_map rcu_callback_m
  int debug_lockdep_rcu_enabled(void);
  
  int rcu_read_lock_held(void);
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -911,10 +926,14 @@ static inline void rcu_read_unlock(void)
+@@ -871,10 +886,14 @@ static inline void rcu_read_unlock(void)
  static inline void rcu_read_lock_bh(void)
  {
  	local_bh_disable();
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -924,10 +943,14 @@ static inline void rcu_read_lock_bh(void
+@@ -884,10 +903,14 @@ static inline void rcu_read_lock_bh(void
   */
  static inline void rcu_read_unlock_bh(void)
  {
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* RCUtree hotplug events */
 --- a/kernel/rcu/rcutorture.c
 +++ b/kernel/rcu/rcutorture.c
-@@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops =
+@@ -414,6 +414,7 @@ static struct rcu_torture_ops rcu_ops =
  	.name		= "rcu"
  };
  
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Definitions for rcu_bh torture testing.
   */
-@@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops
+@@ -453,6 +454,12 @@ static struct rcu_torture_ops rcu_bh_ops
  	.name		= "rcu_bh"
  };
  
@@ -196,7 +196,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * The names includes "busted", and they really means it!
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -260,6 +260,7 @@ void rcu_sched_qs(void)
+@@ -262,6 +262,7 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void rcu_bh_qs(void)
  {
  	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -269,6 +270,7 @@ void rcu_bh_qs(void)
+@@ -271,6 +272,7 @@ void rcu_bh_qs(void)
  		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
  	}
  }
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
  
-@@ -449,11 +451,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+@@ -557,11 +559,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
  /*
   * Return the number of RCU BH batches started thus far for debug & stats.
   */
@@ -226,7 +226,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Return the number of RCU batches completed thus far for debug & stats.
-@@ -473,6 +477,7 @@ unsigned long rcu_batches_completed_sche
+@@ -581,6 +585,7 @@ unsigned long rcu_batches_completed_sche
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  
@@ -234,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Return the number of RCU BH batches completed thus far for debug & stats.
   */
-@@ -481,6 +486,7 @@ unsigned long rcu_batches_completed_bh(v
+@@ -589,6 +594,7 @@ unsigned long rcu_batches_completed_bh(v
  	return rcu_bh_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
@@ -242,7 +242,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Return the number of RCU expedited batches completed thus far for
-@@ -504,6 +510,7 @@ unsigned long rcu_exp_batches_completed_
+@@ -612,6 +618,7 @@ unsigned long rcu_exp_batches_completed_
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  
@@ -250,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Force a quiescent state.
   */
-@@ -522,6 +529,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -630,6 +637,13 @@ void rcu_bh_force_quiescent_state(void)
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Force a quiescent state for RCU-sched.
   */
-@@ -572,9 +586,11 @@ void rcutorture_get_gp_data(enum rcutort
+@@ -680,9 +694,11 @@ void rcutorture_get_gp_data(enum rcutort
  	case RCU_FLAVOR:
  		rsp = rcu_state_p;
  		break;
@@ -276,7 +276,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	case RCU_SCHED_FLAVOR:
  		rsp = &rcu_sched_state;
  		break;
-@@ -3195,6 +3211,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3289,6 +3305,7 @@ void call_rcu_sched(struct rcu_head *hea
  }
  EXPORT_SYMBOL_GPL(call_rcu_sched);
  
@@ -284,7 +284,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Queue an RCU callback for invocation after a quicker grace period.
   */
-@@ -3203,6 +3220,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3297,6 +3314,7 @@ void call_rcu_bh(struct rcu_head *head,
  	__call_rcu(head, func, &rcu_bh_state, -1, 0);
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Queue an RCU callback for lazy invocation after a grace period.
-@@ -3294,6 +3312,7 @@ void synchronize_sched(void)
+@@ -3388,6 +3406,7 @@ void synchronize_sched(void)
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
  
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
   *
-@@ -3320,6 +3339,7 @@ void synchronize_rcu_bh(void)
+@@ -3414,6 +3433,7 @@ void synchronize_rcu_bh(void)
  		wait_rcu_gp(call_rcu_bh);
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -308,7 +308,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3698,6 +3718,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3790,6 +3810,7 @@ static void _rcu_barrier(struct rcu_stat
  	mutex_unlock(&rsp->barrier_mutex);
  }
  
@@ -316,7 +316,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
-@@ -3706,6 +3727,7 @@ void rcu_barrier_bh(void)
+@@ -3798,6 +3819,7 @@ void rcu_barrier_bh(void)
  	_rcu_barrier(&rcu_bh_state);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -324,7 +324,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4227,7 +4249,9 @@ void __init rcu_init(void)
+@@ -4316,7 +4338,9 @@ void __init rcu_init(void)
  
  	rcu_bootup_announce();
  	rcu_init_geometry();
@@ -336,7 +336,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
 --- a/kernel/rcu/tree.h
 +++ b/kernel/rcu/tree.h
-@@ -588,7 +588,9 @@ extern struct list_head rcu_struct_flavo
+@@ -589,7 +589,9 @@ extern struct list_head rcu_struct_flavo
   */
  extern struct rcu_state rcu_sched_state;
  
@@ -348,7 +348,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern struct rcu_state rcu_preempt_state;
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
-@@ -296,6 +296,7 @@ int rcu_read_lock_held(void)
+@@ -298,6 +298,7 @@ int rcu_read_lock_held(void)
  }
  EXPORT_SYMBOL_GPL(rcu_read_lock_held);
  
@@ -356,7 +356,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
   *
-@@ -322,6 +323,7 @@ int rcu_read_lock_bh_held(void)
+@@ -324,6 +325,7 @@ int rcu_read_lock_bh_held(void)
  	return in_softirq() || irqs_disabled();
  }
  EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch b/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
deleted file mode 100644
index a0234c9..0000000
--- a/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Wed, 2 Nov 2016 16:45:58 +0100
-Subject: [PATCH] rcu: update: make RCU_EXPEDITE_BOOT default
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-RCU_EXPEDITE_BOOT should speed up the boot process by enforcing
-synchronize_rcu_expedited() instead of synchronize_rcu() during the boot
-process. There should be no reason why one does not want this and there
-is no need worry about real time latency at this point.
-Therefore make it default.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- init/Kconfig        |   13 -------------
- kernel/rcu/update.c |    6 ++----
- 2 files changed, 2 insertions(+), 17 deletions(-)
-
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
- 
- endchoice
- 
--config RCU_EXPEDITE_BOOT
--	bool
--	default n
--	help
--	  This option enables expedited grace periods at boot time,
--	  as if rcu_expedite_gp() had been invoked early in boot.
--	  The corresponding rcu_unexpedite_gp() is invoked from
--	  rcu_end_inkernel_boot(), which is intended to be invoked
--	  at the end of the kernel-only boot sequence, just before
--	  init is exec'ed.
--
--	  Accept the default if unsure.
--
- endmenu # "RCU Subsystem"
- 
- config BUILD_BIN2C
---- a/kernel/rcu/update.c
-+++ b/kernel/rcu/update.c
-@@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
- }
- EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
- 
--static atomic_t rcu_expedited_nesting =
--	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
-+static atomic_t rcu_expedited_nesting =	ATOMIC_INIT(1);
- 
- /*
-  * Should normal grace-period primitives be expedited?  Intended for
-@@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
-  */
- void rcu_end_inkernel_boot(void)
- {
--	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
--		rcu_unexpedite_gp();
-+	rcu_unexpedite_gp();
- 	if (rcu_normal_after_boot)
- 		WRITE_ONCE(rcu_normal, 1);
- }
diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 9e7d2fc..7deda81 100644
--- a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -1,7 +1,7 @@
 From: Tiejun Chen <tiejun.chen at windriver.com>
 Date: Wed, 18 Dec 2013 17:51:49 +0800
 Subject: rcutree/rcu_bh_qs: Disable irq while calling rcu_preempt_qs()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Any callers of the function rcu_preempt_qs() must disable irqs in
 order to protect the assignment to ->rcu_read_unlock_special. In
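
The fix folds the irqs-off requirement into rcu_bh_qs() itself; on RT
the function ends up looking roughly like this (a sketch, with the
unchanged !RT body omitted):

	void rcu_bh_qs(void)
	{
	#ifdef CONFIG_PREEMPT_RT_FULL
		unsigned long flags;

		/* rcu_preempt_qs() writes ->rcu_read_unlock_special,
		 * which is only safe with interrupts disabled */
		local_irq_save(flags);
		rcu_preempt_qs();
		local_irq_restore(flags);
	#endif
	}
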
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -265,7 +265,12 @@ static void rcu_preempt_qs(void);
+@@ -267,7 +267,12 @@ static void rcu_preempt_qs(void);
  
  void rcu_bh_qs(void)
  {
diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 1553d20..577c2ae 100644
--- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Thu, 28 Jul 2011 11:16:00 +0800
 Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When retry happens, it's likely that the task has been migrated to
 another cpu (unless the unplug failed), but it still dereferences the
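
The cure is to resolve the per-CPU pointer inside the retry loop rather
than once on entry. Schematically (the struct fields and the back-off
shown here are illustrative, not the exact pin_current_cpu() body):

	void pin_current_cpu(void)
	{
		struct hotplug_pcp *hp;

	retry:
		/* after sleeping we may be on another CPU, so the
		 * per-CPU state must be re-read on every iteration */
		hp = this_cpu_ptr(&hotplug_pcp);

		if (!hp->unplug || hp->refcount || hp->unplug == current) {
			hp->refcount++;
			return;
		}

		/* an unplug is in flight: back off and try again */
		preempt_enable();
		mutex_lock(&cpu_hotplug.lock);
		mutex_unlock(&cpu_hotplug.lock);
		preempt_disable();
		goto retry;
	}
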
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -257,9 +257,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -252,9 +252,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
   */
  void pin_current_cpu(void)
  {
diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index ed40a06..56f2845 100644
--- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -1,7 +1,7 @@
 Subject: ARM: Initialize split page table locks for vector page
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Sat, 1 Oct 2011 18:58:13 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Without this patch, ARM cannot use SPLIT_PTLOCK_CPUS if
 PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct
+@@ -325,6 +325,30 @@ unsigned long arch_randomize_brk(struct
  }
  
  #ifdef CONFIG_MMU
diff --git a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index 9da4799..3f076ff 100644
--- a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die()
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 11 Sep 2015 21:21:23 +0300
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When running with the RT-kernel (4.1.5-rt5) on TI OMAP dra7-evm and trying
 to do Suspend to RAM, the following backtrace occurs:
@@ -70,12 +70,12 @@ general.
 This issue was first reported in:
  http://www.spinics.net/lists/linux-rt-users/msg13752.html
 
- arch/arm/kernel/smp.c |    5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
+ arch/arm/kernel/smp.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
 
 --- a/arch/arm/kernel/smp.c
 +++ b/arch/arm/kernel/smp.c
-@@ -234,8 +234,6 @@ int __cpu_disable(void)
+@@ -236,8 +236,6 @@ int __cpu_disable(void)
  	flush_cache_louis();
  	local_flush_tlb_all();
  
@@ -84,13 +84,11 @@ This issue was first reported in:
  	return 0;
  }
  
-@@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
- 		pr_err("CPU%u: cpu didn't die\n", cpu);
- 		return;
+@@ -255,6 +253,7 @@ void __cpu_die(unsigned int cpu)
  	}
-+
-+	clear_tasks_mm_cpumask(cpu);
-+
- 	pr_notice("CPU%u: shutdown\n", cpu);
+ 	pr_debug("CPU%u: shutdown\n", cpu);
  
++	clear_tasks_mm_cpumask(cpu);
  	/*
+ 	 * platform_cpu_kill() is generally expected to do the powering off
+ 	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch
index d0e1eb2..3f41faa 100644
--- a/debian/patches/features/all/rt/rt-add-rt-locks.patch
+++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 19:39:56 +0200
 Subject: rt: Add the preempt-rt lock replacement APIs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
 based locking functions for preempt-rt.
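
Before the full diff, the core idea in two strokes: on PREEMPT_RT_FULL
every sleeping-lock type wraps an rt_mutex and the familiar API is
re-pointed at it. Simplified from the spinlock_rt.h/rt.c additions below
(lockdep fields omitted; the cmpxchg fast path of the real code is
reduced to a trylock):

	/* an RT "spinlock" is a PI-aware sleeping lock underneath */
	typedef struct spinlock {
		struct rt_mutex		lock;
	} spinlock_t;

	void __lockfunc rt_spin_lock(spinlock_t *lock)
	{
		/* slow path sleeps with priority inheritance instead
		 * of spinning with preemption disabled */
		if (!rt_mutex_trylock(&lock->lock))
			rt_spin_lock_slowlock(&lock->lock);
	}
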
@@ -13,31 +13,32 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  include/linux/kernel.h            |    4 
  include/linux/locallock.h         |    6 
  include/linux/mutex.h             |   20 -
- include/linux/mutex_rt.h          |   84 ++++++
+ include/linux/mutex_rt.h          |  130 +++++++++
  include/linux/rtmutex.h           |   29 +-
  include/linux/rwlock_rt.h         |   99 +++++++
  include/linux/rwlock_types_rt.h   |   33 ++
  include/linux/rwsem.h             |    6 
  include/linux/rwsem_rt.h          |  167 ++++++++++++
- include/linux/sched.h             |   19 +
+ include/linux/sched.h             |    8 
+ include/linux/sched/wake_q.h      |   11 
  include/linux/spinlock.h          |   12 
  include/linux/spinlock_api_smp.h  |    4 
- include/linux/spinlock_rt.h       |  162 ++++++++++++
+ include/linux/spinlock_rt.h       |  162 +++++++++++
  include/linux/spinlock_types.h    |   11 
  include/linux/spinlock_types_rt.h |   48 +++
  kernel/futex.c                    |   11 
  kernel/locking/Makefile           |    9 
- kernel/locking/rt.c               |  498 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c          |  479 +++++++++++++++++++++++++++++++++---
- kernel/locking/rtmutex_common.h   |    9 
+ kernel/locking/rt.c               |  521 ++++++++++++++++++++++++++++++++++++++
+ kernel/locking/rtmutex.c          |  480 ++++++++++++++++++++++++++++++++---
+ kernel/locking/rtmutex_common.h   |   10 
  kernel/locking/spinlock.c         |    7 
  kernel/locking/spinlock_debug.c   |    5 
  kernel/sched/core.c               |    7 
- 23 files changed, 1663 insertions(+), 66 deletions(-)
+ 24 files changed, 1734 insertions(+), 66 deletions(-)
 
 --- a/include/linux/kernel.h
 +++ b/include/linux/kernel.h
-@@ -194,6 +194,9 @@ extern int _cond_resched(void);
+@@ -201,6 +201,9 @@ extern int _cond_resched(void);
   */
  # define might_sleep() \
  	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -47,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define sched_annotate_sleep()	(current->task_state_change = 0)
  #else
    static inline void ___might_sleep(const char *file, int line,
-@@ -201,6 +204,7 @@ extern int _cond_resched(void);
+@@ -208,6 +211,7 @@ extern int _cond_resched(void);
    static inline void __might_sleep(const char *file, int line,
  				   int preempt_offset) { }
  # define might_sleep() do { might_resched(); } while (0)
@@ -75,13 +76,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/include/linux/mutex.h
 +++ b/include/linux/mutex.h
-@@ -19,6 +19,17 @@
- #include <asm/processor.h>
- #include <linux/osq_lock.h>
+@@ -22,6 +22,17 @@
+ 
+ struct ww_acquire_ctx;
  
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-+	, .dep_map = { .name = #lockname }
++		, .dep_map = { .name = #lockname }
 +#else
 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
 +#endif
@@ -93,9 +94,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Simple, straightforward mutexes with strict semantics:
   *
-@@ -99,13 +110,6 @@ do {							\
- static inline void mutex_destroy(struct mutex *lock) {}
- #endif
+@@ -113,13 +124,6 @@ do {									\
+ 	__mutex_init((mutex), #mutex, &__key);				\
+ } while (0)
  
 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
 -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -105,20 +106,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 -#endif
 -
  #define __MUTEX_INITIALIZER(lockname) \
- 		{ .count = ATOMIC_INIT(1) \
+ 		{ .owner = ATOMIC_LONG_INIT(0) \
  		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killa
- extern int mutex_trylock(struct mutex *lock);
- extern void mutex_unlock(struct mutex *lock);
+@@ -227,4 +231,6 @@ mutex_trylock_recursive(struct mutex *lo
+ 	return mutex_trylock(lock);
+ }
  
 +#endif /* !PREEMPT_RT_FULL */
 +
- extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
- 
  #endif /* __LINUX_MUTEX_H */
 --- /dev/null
 +++ b/include/linux/mutex_rt.h
-@@ -0,0 +1,84 @@
+@@ -0,0 +1,130 @@
 +#ifndef __LINUX_MUTEX_RT_H
 +#define __LINUX_MUTEX_RT_H
 +
@@ -149,6 +148,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
 +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
 +extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern void __lockfunc _mutex_lock_io(struct mutex *lock);
++extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass);
 +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
 +extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
 +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
@@ -164,7 +165,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#define mutex_lock_killable(l)		_mutex_lock_killable(l)
 +#define mutex_trylock(l)		_mutex_trylock(l)
 +#define mutex_unlock(l)			_mutex_unlock(l)
++#define mutex_lock_io(l)		_mutex_lock_io(l);
++
++#define __mutex_owner(l)		((l)->lock.owner)
++
++#ifdef CONFIG_DEBUG_MUTEXES
 +#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
++#else
++static inline void mutex_destroy(struct mutex *lock) {}
++#endif
 +
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
@@ -172,6 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +					_mutex_lock_interruptible_nested(l, s)
 +# define mutex_lock_killable_nested(l, s) \
 +					_mutex_lock_killable_nested(l, s)
++# define mutex_lock_io_nested(l, s)	_mutex_lock_io_nested(l, s)
 +
 +# define mutex_lock_nest_lock(lock, nest_lock)				\
 +do {									\
@@ -186,6 +196,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +# define mutex_lock_killable_nested(l, s) \
 +					_mutex_lock_killable(l)
 +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++# define mutex_lock_io_nested(l, s)	_mutex_lock_io(l)
 +#endif
 +
 +# define mutex_init(mutex)				\
@@ -202,6 +213,40 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	__mutex_do_init((mutex), name, key);		\
 +} while (0)
 +
++/**
++ * These values are chosen such that FAIL and SUCCESS match the
++ * values of the regular mutex_trylock().
++ */
++enum mutex_trylock_recursive_enum {
++	MUTEX_TRYLOCK_FAILED    = 0,
++	MUTEX_TRYLOCK_SUCCESS   = 1,
++	MUTEX_TRYLOCK_RECURSIVE,
++};
++/**
++ * mutex_trylock_recursive - trylock variant that allows recursive locking
++ * @lock: mutex to be locked
++ *
++ * This function should not be used, _ever_. It is purely for hysterical GEM
++ * raisins, and once those are gone this will be removed.
++ *
++ * Returns:
++ *  MUTEX_TRYLOCK_FAILED    - trylock failed,
++ *  MUTEX_TRYLOCK_SUCCESS   - lock acquired,
++ *  MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
++ */
++int __rt_mutex_owner_current(struct rt_mutex *lock);
++
++static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
++mutex_trylock_recursive(struct mutex *lock)
++{
++	if (unlikely(__rt_mutex_owner_current(&lock->lock)))
++		return MUTEX_TRYLOCK_RECURSIVE;
++
++	return mutex_trylock(lock);
++}
++
++extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
++
 +#endif
 --- a/include/linux/rtmutex.h
 +++ b/include/linux/rtmutex.h
@@ -601,27 +646,36 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -312,6 +312,11 @@ extern char ___assert_task_state[1 - 2*!
- 
- #endif
+@@ -123,6 +123,11 @@ struct task_group;
+ 		smp_store_mb(current->state, (state_value));	\
+ 	} while (0)
  
-+#define __set_current_state_no_track(state_value)	\
-+	do { current->state = (state_value); } while (0)
-+#define set_current_state_no_track(state_value)		\
-+	set_mb(current->state, (state_value))
++#define __set_current_state_no_track(state_value)		\
++		current->state = (state_value);
++#define set_current_state_no_track(state_value)			\
++		smp_store_mb(current->state, (state_value));
++
+ #else
+ /*
+  * set_current_state() includes a barrier so that the write of current->state
+@@ -160,6 +165,9 @@ struct task_group;
+  */
+ #define __set_current_state(state_value) do { current->state = (state_value); } while (0)
+ #define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
 +
- /* Task command name length */
- #define TASK_COMM_LEN 16
++#define __set_current_state_no_track(state_value)	__set_current_state(state_value)
++#define set_current_state_no_track(state_value)		set_current_state(state_value)
+ #endif
  
-@@ -1013,8 +1018,18 @@ struct wake_q_head {
- 	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+ /* Task command name length: */
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -48,6 +48,15 @@ static inline void wake_q_init(struct wa
  
  extern void wake_q_add(struct wake_q_head *head,
--		       struct task_struct *task);
+ 		       struct task_struct *task);
 -extern void wake_up_q(struct wake_q_head *head);
-+			      struct task_struct *task);
 +extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
-+
 +static inline void wake_up_q(struct wake_q_head *head)
 +{
 +	__wake_up_q(head, false);
@@ -632,11 +686,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	__wake_up_q(head, true);
 +}
  
- /*
-  * sched-domains (multiprocessor balancing) declarations:
+ #endif /* _LINUX_SCHED_WAKE_Q_H */
 --- a/include/linux/spinlock.h
 +++ b/include/linux/spinlock.h
-@@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(ra
+@@ -268,7 +268,11 @@ static inline void do_raw_spin_unlock(ra
  #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  
  /* Include rwlock functions */
@@ -649,7 +702,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(ra
+@@ -279,6 +283,10 @@ static inline void do_raw_spin_unlock(ra
  # include <linux/spinlock_api_up.h>
  #endif
  
@@ -660,7 +713,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
   */
-@@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t
+@@ -408,4 +416,6 @@ extern int _atomic_dec_and_lock(atomic_t
  #define atomic_dec_and_lock(atomic, lock) \
  		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
  
@@ -669,7 +722,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif /* __LINUX_SPINLOCK_H */
 --- a/include/linux/spinlock_api_smp.h
 +++ b/include/linux/spinlock_api_smp.h
-@@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(
  	return 0;
  }
  
@@ -692,7 +745,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#include <linux/bug.h>
 +
 +extern void
-+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key);
 +
 +#define spin_lock_init(slock)				\
 +do {							\
@@ -916,15 +969,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1398,6 +1398,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1400,6 +1400,7 @@ static int wake_futex_pi(u32 __user *uad
  	struct task_struct *new_owner;
  	bool postunlock = false;
- 	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
+ 	DEFINE_WAKE_Q(wake_q);
++	DEFINE_WAKE_Q(wake_sleeper_q);
  	int ret = 0;
  
  	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1459,13 +1460,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1461,13 +1462,13 @@ static int wake_futex_pi(u32 __user *uad
  	pi_state->owner = new_owner;
  	raw_spin_unlock(&new_owner->pi_lock);
  
@@ -941,7 +994,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return ret;
  }
-@@ -2666,7 +2667,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2668,7 +2669,7 @@ static int futex_lock_pi(u32 __user *uad
  		goto no_block;
  	}
  
@@ -950,7 +1003,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3033,7 +3034,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3035,7 +3036,7 @@ static int futex_wait_requeue_pi(u32 __u
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -982,7 +1035,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  obj-$(CONFIG_LOCKDEP) += lockdep.o
  ifeq ($(CONFIG_PROC_FS),y)
  obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -24,8 +28,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
  obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
@@ -993,9 +1046,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
  obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
 --- /dev/null
 +++ b/kernel/locking/rt.c
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,521 @@
 +/*
 + * kernel/rt.c
 + *
@@ -1091,6 +1145,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +}
 +EXPORT_SYMBOL(_mutex_lock);
 +
++void __lockfunc _mutex_lock_io(struct mutex *lock)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	_mutex_lock(lock);
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL_GPL(_mutex_lock_io);
++
 +int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
 +{
 +	int ret;
@@ -1123,6 +1187,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +}
 +EXPORT_SYMBOL(_mutex_lock_nested);
 +
++void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
++{
++	int token;
++
++	token = io_schedule_prepare();
++
++	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++	rt_mutex_lock(&lock->lock);
++
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
++
 +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 +{
 +	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
@@ -1508,16 +1585,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   *
   *  See Documentation/locking/rt-mutex-design.txt for details.
   */
-@@ -228,6 +233,8 @@ static inline bool unlock_rt_mutex_safe(
+@@ -230,6 +235,9 @@ static inline bool unlock_rt_mutex_safe(
  }
  #endif
  
 +#define STEAL_NORMAL  0
 +#define STEAL_LATERAL 1
++
  /*
   * Only use with rt_mutex_waiter_{less,equal}()
   */
-@@ -236,10 +243,15 @@ static inline bool unlock_rt_mutex_safe(
+@@ -238,11 +246,15 @@ static inline bool unlock_rt_mutex_safe(
  
  static inline int
  rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -1526,6 +1604,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 -	if (left->prio < right->prio)
 -		return 1;
+-
 +	if (mode == STEAL_NORMAL) {
 +		if (left->prio < right->prio)
 +			return 1;
@@ -1533,10 +1612,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		if (left->prio <= right->prio)
 +			return 1;
 +	}
- 
  	/*
  	 * If both waiters have dl_prio(), we check the deadlines of the
-@@ -283,7 +295,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
+ 	 * associated tasks.
+@@ -285,7 +297,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
  	while (*link) {
  		parent = *link;
  		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
@@ -1545,7 +1624,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			link = &parent->rb_left;
  		} else {
  			link = &parent->rb_right;
-@@ -322,7 +334,7 @@ rt_mutex_enqueue_pi(struct task_struct *
+@@ -324,7 +336,7 @@ rt_mutex_enqueue_pi(struct task_struct *
  	while (*link) {
  		parent = *link;
  		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
@@ -1554,7 +1633,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			link = &parent->rb_left;
  		} else {
  			link = &parent->rb_right;
-@@ -388,6 +400,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -390,6 +402,14 @@ static bool rt_mutex_cond_detect_deadloc
  	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
  }
  
@@ -1569,7 +1648,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Max number of times we'll walk the boosting chain:
   */
-@@ -713,13 +733,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -715,13 +735,16 @@ static int rt_mutex_adjust_prio_chain(st
  	 * follow here. This is the end of the chain we are walking.
  	 */
  	if (!rt_mutex_owner(lock)) {
@@ -1588,15 +1667,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		raw_spin_unlock_irq(&lock->wait_lock);
  		return 0;
  	}
-@@ -812,6 +835,7 @@ static int rt_mutex_adjust_prio_chain(st
- 	return ret;
- }
- 
-+
- /*
-  * Try to take an rt-mutex
-  *
-@@ -822,8 +846,9 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -824,8 +847,9 @@ static int rt_mutex_adjust_prio_chain(st
   * @waiter: The waiter that is queued to the lock's wait tree if the
   *	    callsite called task_blocked_on_lock(), otherwise NULL
   */
@@ -1608,7 +1679,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	lockdep_assert_held(&lock->wait_lock);
  
-@@ -862,8 +887,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -864,8 +888,10 @@ static int try_to_take_rt_mutex(struct r
  		 * If waiter is not the highest priority waiter of
  		 * @lock, give up.
  		 */
@@ -1620,7 +1691,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/*
  		 * We can acquire the lock. Remove the waiter from the
-@@ -881,15 +908,26 @@ static int try_to_take_rt_mutex(struct r
+@@ -883,15 +909,26 @@ static int try_to_take_rt_mutex(struct r
  		 * not need to be dequeued.
  		 */
  		if (rt_mutex_has_waiters(lock)) {
@@ -1649,7 +1720,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			/*
  			 * The current top waiter stays enqueued. We
  			 * don't have to change anything in the lock
-@@ -936,6 +974,339 @@ static int try_to_take_rt_mutex(struct r
+@@ -938,6 +975,339 @@ static int try_to_take_rt_mutex(struct r
  	return 1;
  }
  
@@ -1812,8 +1883,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 +{
 +	unsigned long flags;
-+	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
++	DEFINE_WAKE_Q(wake_q);
++	DEFINE_WAKE_Q(wake_sleeper_q);
 +	bool postunlock;
 +
 +	raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -1989,7 +2060,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * Task blocks on lock.
   *
-@@ -1051,6 +1422,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1053,6 +1423,7 @@ static int task_blocks_on_rt_mutex(struc
   * Called with lock->wait_lock held and interrupts disabled.
   */
  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -1997,7 +2068,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				    struct rt_mutex *lock)
  {
  	struct rt_mutex_waiter *waiter;
-@@ -1090,7 +1462,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1092,7 +1463,10 @@ static void mark_wakeup_next_waiter(stru
  	 * Pairs with preempt_enable() in rt_mutex_postunlock();
  	 */
  	preempt_disable();
@@ -2009,7 +2080,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	raw_spin_unlock(&current->pi_lock);
  }
  
-@@ -1174,21 +1549,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1176,21 +1550,22 @@ void rt_mutex_adjust_pi(struct task_stru
  		return;
  	}
  	next_lock = waiter->lock;
@@ -2034,7 +2105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -1268,7 +1644,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1270,7 +1645,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	unsigned long flags;
  	int ret = 0;
  
@@ -2043,7 +2114,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/*
  	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1363,7 +1739,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1365,7 +1740,8 @@ static inline int rt_mutex_slowtrylock(s
   * Return whether the current task needs to call rt_mutex_postunlock().
   */
  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -2053,7 +2124,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	unsigned long flags;
  
-@@ -1417,7 +1794,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1419,7 +1795,7 @@ static bool __sched rt_mutex_slowunlock(
  	 *
  	 * Queue the next waiter for wakeup once we release the wait_lock.
  	 */
@@ -2062,20 +2133,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  
  	return true; /* call rt_mutex_postunlock() */
-@@ -1469,9 +1846,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1471,9 +1847,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
  /*
  * Performs the wakeup of the top-waiter and re-enables preemption.
   */
 -void rt_mutex_postunlock(struct wake_q_head *wake_q)
 +void rt_mutex_postunlock(struct wake_q_head *wake_q,
-+			 struct wake_q_head *wq_sleeper)
++			 struct wake_q_head *wake_sleeper_q)
  {
  	wake_up_q(wake_q);
-+	wake_up_q_sleeper(wq_sleeper);
++	wake_up_q_sleeper(wake_sleeper_q);
  
  	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
  	preempt_enable();
-@@ -1480,15 +1859,17 @@ void rt_mutex_postunlock(struct wake_q_h
+@@ -1482,15 +1860,17 @@ void rt_mutex_postunlock(struct wake_q_h
  static inline void
  rt_mutex_fastunlock(struct rt_mutex *lock,
  		    bool (*slowfn)(struct rt_mutex *lock,
@@ -2083,20 +2154,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +				   struct wake_q_head *wqh,
 +				   struct wake_q_head *wq_sleeper))
  {
- 	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
+ 	DEFINE_WAKE_Q(wake_q);
++	DEFINE_WAKE_Q(wake_sleeper_q);
  
  	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
  		return;
  
 -	if (slowfn(lock, &wake_q))
 -		rt_mutex_postunlock(&wake_q);
-+	if (slowfn(lock, &wake_q,  &wake_sleeper_q))
++	if (slowfn(lock, &wake_q, &wake_sleeper_q))
 +		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
  }
  
  /**
-@@ -1607,12 +1988,9 @@ void __sched rt_mutex_unlock(struct rt_m
+@@ -1609,12 +1989,9 @@ void __sched rt_mutex_unlock(struct rt_m
  }
  EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  
@@ -2112,7 +2183,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	lockdep_assert_held(&lock->wait_lock);
  
-@@ -1629,22 +2007,34 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1631,22 +2008,34 @@ bool __sched __rt_mutex_futex_unlock(str
  	 * avoid inversion prior to the wakeup.  preempt_disable()
  	 * therein pairs with rt_mutex_postunlock().
  	 */
@@ -2135,8 +2206,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
  void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
  {
- 	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
+ 	DEFINE_WAKE_Q(wake_q);
++	DEFINE_WAKE_Q(wake_sleeper_q);
  	bool postunlock;
  
  	raw_spin_lock_irq(&lock->wait_lock);
@@ -2150,7 +2221,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -1677,13 +2067,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1679,13 +2068,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  void __rt_mutex_init(struct rt_mutex *lock, const char *name)
  {
  	lock->owner = NULL;
@@ -2165,7 +2236,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /**
   * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1698,7 +2087,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1704,7 +2092,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
  void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  				struct task_struct *proxy_owner)
  {
@@ -2174,7 +2245,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	debug_rt_mutex_proxy_lock(lock, proxy_owner);
  	rt_mutex_set_owner(lock, proxy_owner);
  }
-@@ -1916,3 +2305,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1926,3 +2314,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
  
  	return cleanup;
  }
@@ -2202,7 +2273,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/kernel/locking/rtmutex_common.h
 +++ b/kernel/locking/rtmutex_common.h
-@@ -27,6 +27,7 @@ struct rt_mutex_waiter {
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/rtmutex.h>
+ #include <linux/sched/wake_q.h>
++#include <linux/sched/debug.h>
+ 
+ /*
+  * This is the control structure for tasks blocked on a rt_mutex,
+@@ -28,6 +29,7 @@ struct rt_mutex_waiter {
  	struct rb_node          pi_tree_entry;
  	struct task_struct	*task;
  	struct rt_mutex		*lock;
@@ -2210,7 +2289,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_DEBUG_RT_MUTEXES
  	unsigned long		ip;
  	struct pid		*deadlock_task_pid;
-@@ -107,7 +108,7 @@ extern void rt_mutex_init_proxy_locked(s
+@@ -107,7 +109,7 @@ extern void rt_mutex_init_proxy_locked(s
  				       struct task_struct *proxy_owner);
  extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
  				  struct task_struct *proxy_owner);
@@ -2219,7 +2298,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
  				     struct rt_mutex_waiter *waiter,
  				     struct task_struct *task);
-@@ -124,9 +125,11 @@ extern int rt_mutex_futex_trylock(struct
+@@ -124,9 +126,11 @@ extern int rt_mutex_futex_trylock(struct
  
  extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
  extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -2229,7 +2308,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 -extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 +extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
-+				struct wake_q_head *wq_sleeper);
++				struct wake_q_head *wake_sleeper_q);
  
  #ifdef CONFIG_DEBUG_RT_MUTEXES
  # include "rtmutex-debug.h"
@@ -2283,7 +2362,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static void spin_dump(raw_spinlock_t *lock, const char *msg)
  {
-@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
+@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
  	arch_spin_unlock(&lock->raw_lock);
  }
  
@@ -2291,7 +2370,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void rwlock_bug(rwlock_t *lock, const char *msg)
  {
  	if (!debug_locks_off())
-@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
  	debug_write_unlock(lock);
  	arch_write_unlock(&lock->raw_lock);
  }
@@ -2299,7 +2378,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#endif
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -454,7 +454,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -461,7 +461,7 @@ void wake_q_add(struct wake_q_head *head
  	head->lastp = &node->next;
  }
  
@@ -2308,7 +2387,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	struct wake_q_node *node = head->first;
  
-@@ -471,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -478,7 +478,10 @@ void wake_up_q(struct wake_q_head *head)
  		 * wake_up_process() implies a wmb() to pair with the queueing
  		 * in wake_q_add() so as not to miss wakeups.
  		 */
diff --git a/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch b/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
deleted file mode 100644
index de86fcb..0000000
--- a/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Fri, 10 Feb 2017 18:21:04 +0100
-Subject: rt: Drop mutex_disable() on !DEBUG configs and the GPL suffix from export symbol
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Alex Goins reported that mutex_destroy() on RT will force a GPL only symbol
-which won't link and therefore fail on a non-GPL kernel module.
-This does not happen on !RT and is a regression on RT which we would like to
-avoid.
-I try here the easy thing and to not use rt_mutex_destroy() if
-CONFIG_DEBUG_MUTEXES is not enabled.
-
-Reported-by: Alex Goins <agoins at nvidia.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- include/linux/mutex_rt.h |    5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/include/linux/mutex_rt.h
-+++ b/include/linux/mutex_rt.h
-@@ -43,7 +43,12 @@ extern void __lockfunc _mutex_unlock(str
- #define mutex_lock_killable(l)		_mutex_lock_killable(l)
- #define mutex_trylock(l)		_mutex_trylock(l)
- #define mutex_unlock(l)			_mutex_unlock(l)
-+
-+#ifdef CONFIG_DEBUG_MUTEXES
- #define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
-+#else
-+static inline void mutex_destroy(struct mutex *lock) {}
-+#endif
- 
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- # define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
index eb05f88..f7e3603 100644
--- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: rt: Introduce cpu_chill()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/delay.h
 +++ b/include/linux/delay.h
-@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s
+@@ -63,4 +63,10 @@ static inline void ssleep(unsigned int s
  	msleep(seconds * 1000);
  }
  
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #endif /* defined(_LINUX_DELAY_H) */
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1768,6 +1768,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1741,6 +1741,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
  	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
  }
  
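
A minimal usage sketch of what cpu_chill() is for, assuming a made-up
try_get_state() predicate standing in for whatever condition the retry
loop polls (per the series, !RT maps cpu_chill() to cpu_relax(), while
RT sleeps for a tick so a preempted updater can run):

    #include <linux/delay.h>

    static void wait_for_state_change(struct some_state *s)
    {
            /*
             * Busy-waiting with cpu_relax() can livelock on RT when the
             * task that must change the state has been preempted;
             * cpu_chill() sleeps briefly instead of spinning.
             */
            while (!try_get_state(s))       /* hypothetical helper */
                    cpu_chill();
    }
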
diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch
index 9fc4f30..2d759bc 100644
--- a/debian/patches/features/all/rt/rt-local-irq-lock.patch
+++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
 Subject: rt: Add local irq locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Introduce locallock. For !RT this maps to preempt_disable()/
 local_irq_disable() so there is not much that changes. For RT this will
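
The hunk above cuts the description short, but the API this patch adds
is small; a rough usage sketch (lock and counter names are illustrative,
macro names per the RT series' locallock.h):

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
    static DEFINE_PER_CPU(unsigned long, stats);

    static void stats_inc(void)
    {
            unsigned long flags;

            /* !RT: local_irq_save(); RT: a per-CPU 'sleeping' spinlock */
            local_lock_irqsave(stats_lock, flags);
            this_cpu_inc(stats);
            local_unlock_irqrestore(stats_lock, flags);
    }
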
diff --git a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
index 9cca067..3fd027f 100644
--- a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 8 Feb 2016 16:15:28 +0100
 Subject: rt/locking: Reenable migration across schedule
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We currently disable migration across lock acquisition. That includes the part
 where we block on the lock and schedule out. We cannot disable migration after
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
+@@ -981,14 +981,19 @@ static int __try_to_take_rt_mutex(struct
   * preemptible spin_lock functions:
   */
  static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1046,7 +1051,8 @@ static int task_blocks_on_rt_mutex(struc
   * We store the current state under p->pi_lock in p->saved_state and
   * the try_to_wake_up() code handles this accordingly.
   */
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct task_struct *lock_owner, *self = current;
  	struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1089,8 +1095,13 @@ static void  noinline __sched rt_spin_lo
+@@ -1090,8 +1096,13 @@ static void  noinline __sched rt_spin_lo
  
  		debug_rt_mutex_print_deadlock(&waiter);
  
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
-@@ -1148,38 +1159,35 @@ static void  noinline __sched rt_spin_lo
+@@ -1149,38 +1160,35 @@ static void  noinline __sched rt_spin_lo
  
  void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
  {
diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch
index ed64104..57fe177 100644
--- a/debian/patches/features/all/rt/rt-preempt-base-config.patch
+++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch
@@ -1,7 +1,7 @@
 Subject: rt: Provide PREEMPT_RT_BASE config switch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Jun 2011 12:39:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Introduce PREEMPT_RT_BASE which enables parts of
 PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
index e9a9431..aa06d64 100644
--- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch
+++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
@@ -1,7 +1,7 @@
 Subject: rt: Improve the serial console PASS_LIMIT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Wed Dec 14 13:05:54 CET 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Beyond the warning:
 
diff --git a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 50815e7..5f93e2f 100644
--- a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle non-enqueued waiters gracefully
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 06 Nov 2015 18:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Yimin debugged that in case of a PI wakeup in progress when
 rt_mutex_start_proxy_lock() calls task_blocks_on_rt_mutex() the latter
@@ -22,7 +22,7 @@ Cc: stable-rt at vger.kernel.org
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1697,7 +1697,7 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1707,7 +1707,7 @@ int __rt_mutex_start_proxy_lock(struct r
  		ret = 0;
  	}
  
diff --git a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
index 676e9d2..792990a 100644
--- a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
+++ b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:50:59 +0200
 Subject: [PATCH] rtmutex: Make lock_killable work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Locking an rt mutex killable does not work because signal handling is
 restricted to TASK_INTERRUPTIBLE.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1213,18 +1213,13 @@ static int __sched
+@@ -1215,18 +1215,13 @@ static int __sched
  		if (try_to_take_rt_mutex(lock, current, waiter))
  			break;
  
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 -		 * TASK_INTERRUPTIBLE checks for signals and
 -		 * timeout. Ignored otherwise.
 -		 */
--		if (unlikely(state == TASK_INTERRUPTIBLE)) {
+-		if (likely(state == TASK_INTERRUPTIBLE)) {
 -			/* Signal pending? */
 -			if (signal_pending(current))
 -				ret = -EINTR;
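
The deleted special case only honoured signals for TASK_INTERRUPTIBLE
sleepers. A hedged sketch of the replacement shape (the actual hunk is
not shown above; signal_pending_state() also covers TASK_KILLABLE):

    /* in the __rt_mutex_slowlock() wait loop */
    if (timeout && !timeout->task) {
            ret = -ETIMEDOUT;
            break;
    }
    if (signal_pending_state(state, current)) {
            ret = -EINTR;
            break;
    }
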
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
index b3cc618..92e84bf 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:01 +0200
 Subject: [PATCH] rtmutex: Provide locked slowpath
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The new rt rwsem implementation needs rtmutex::wait_lock to protect struct
 rw_semaphore. Dropping the lock and reacquiring it for locking the rtmutex
@@ -14,12 +14,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  kernel/locking/rtmutex.c        |   72 +++++++++++++++++++++++-----------------
- kernel/locking/rtmutex_common.h |    9 +++++
- 2 files changed, 51 insertions(+), 30 deletions(-)
+ kernel/locking/rtmutex_common.h |    8 ++++
+ 2 files changed, 50 insertions(+), 30 deletions(-)
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1750,36 +1750,18 @@ static void ww_mutex_account_lock(struct
+@@ -1752,30 +1752,13 @@ static void ww_mutex_account_lock(struct
  }
  #endif
  
@@ -54,7 +54,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 -	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 +	int ret;
  
- 	/* Try to acquire the lock again: */
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ 	if (ww_ctx) {
+@@ -1791,7 +1774,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (try_to_take_rt_mutex(lock, current, NULL)) {
  		if (ww_ctx)
  			ww_mutex_account_lock(lock, ww_ctx);
@@ -62,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return 0;
  	}
  
-@@ -1789,13 +1771,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1801,13 +1783,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (unlikely(timeout))
  		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
  
@@ -80,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		/* ww_mutex received EDEADLK, let it become EALREADY */
  		ret = __mutex_lock_check_stamp(lock, ww_ctx);
  		BUG_ON(!ret);
-@@ -1804,10 +1786,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1816,10 +1798,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (unlikely(ret)) {
  		__set_current_state(TASK_RUNNING);
  		if (rt_mutex_has_waiters(lock))
@@ -93,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	} else if (ww_ctx) {
  		ww_mutex_account_lock(lock, ww_ctx);
  	}
-@@ -1817,6 +1799,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1829,6 +1811,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	 * unconditionally. We might have to fix that up.
  	 */
  	fixup_rt_mutex_waiters(lock);
@@ -132,10 +134,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/kernel/locking/rtmutex_common.h
 +++ b/kernel/locking/rtmutex_common.h
-@@ -131,6 +131,15 @@ extern bool __rt_mutex_futex_unlock(stru
- extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
- 				struct wake_q_head *wq_sleeper);
+@@ -131,6 +131,14 @@ extern bool __rt_mutex_futex_unlock(stru
  
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+ 				struct wake_q_head *wake_sleeper_q);
 +/* RW semaphore special interface */
 +struct ww_acquire_ctx;
 +
@@ -144,7 +146,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +				     enum rtmutex_chainwalk chwalk,
 +				     struct ww_acquire_ctx *ww_ctx,
 +				     struct rt_mutex_waiter *waiter);
-+
+ 
  #ifdef CONFIG_DEBUG_RT_MUTEXES
  # include "rtmutex-debug.h"
- #else
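
In outline, the split looks roughly like this (a sketch under the
assumption that the *_locked variant keeps the former slowlock body;
the rwsem code can then call it with wait_lock already held):

    int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
                                         struct hrtimer_sleeper *timeout,
                                         enum rtmutex_chainwalk chwalk,
                                         struct ww_acquire_ctx *ww_ctx,
                                         struct rt_mutex_waiter *waiter);

    static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                         struct hrtimer_sleeper *timeout,
                                         enum rtmutex_chainwalk chwalk,
                                         struct ww_acquire_ctx *ww_ctx)
    {
            struct rt_mutex_waiter waiter;
            unsigned long flags;
            int ret;

            rt_mutex_init_waiter(&waiter, false);

            /* wait_lock is taken once here, not inside the wait loop */
            raw_spin_lock_irqsave(&lock->wait_lock, flags);
            ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk,
                                           ww_ctx, &waiter);
            raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

            debug_rt_mutex_free_waiter(&waiter);
            return ret;
    }
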
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
index 126d80f..adb408c 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:00 +0200
 Subject: [PATCH] rtmutex: Provide rt_mutex_lock_state()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Allow rtmutex to be locked with arbitrary states. Preparatory patch for the
 rt rwsem rework.
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -2008,21 +2008,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -2020,21 +2020,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  }
  
  /**
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * @lock:		the rt_mutex to be locked
   *
   * Returns:
-@@ -2031,20 +2042,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -2043,20 +2054,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
   */
  int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
  {
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /**
   * rt_mutex_lock_killable - lock a rt_mutex killable
   *
-@@ -2054,16 +2055,21 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -2066,16 +2067,21 @@ int __sched rt_mutex_futex_trylock(struc
   * Returns:
   *  0          on success
   * -EINTR      when interrupted by a signal
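
Put differently, the consolidation amounts to the following sketch (the
extra NULL is the ww_ctx argument introduced by the ww_mutex patch in
this series):

    int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
    {
            might_sleep();
            return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
    }

    /* the existing variants become thin wrappers */
    int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
    {
            return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
    }

    int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
    {
            return rt_mutex_lock_state(lock, TASK_KILLABLE);
    }
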
diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 51811b7..c947dec 100644
--- a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 Date: Mon, 28 Oct 2013 09:36:37 +0100
 Subject: rtmutex: Add RT aware ww locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 lockdep says:
 | --------------------------------------------------------------------------
@@ -23,20 +23,31 @@ lockdep says:
 
 Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 ---
- kernel/locking/rtmutex.c |  248 ++++++++++++++++++++++++++++++++++++++++++-----
- 1 file changed, 224 insertions(+), 24 deletions(-)
+ kernel/locking/rtmutex.c |  273 ++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 247 insertions(+), 26 deletions(-)
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -21,6 +21,7 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
+@@ -23,6 +23,7 @@
+ #include <linux/sched/wake_q.h>
+ #include <linux/sched/debug.h>
  #include <linux/timer.h>
 +#include <linux/ww_mutex.h>
  
  #include "rtmutex_common.h"
  
-@@ -1300,6 +1301,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1286,8 +1287,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
+ }
+ EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+ 
+-	void
+-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++void
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ 	/*
+@@ -1301,6 +1302,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
  
  #endif /* PREEMPT_RT_FULL */
  
@@ -77,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  static inline int
  try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  		     struct rt_mutex_waiter *waiter)
-@@ -1580,7 +1615,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1581,7 +1616,8 @@ void rt_mutex_init_waiter(struct rt_mute
  static int __sched
  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
  		    struct hrtimer_sleeper *timeout,
@@ -87,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	int ret = 0;
  
-@@ -1598,6 +1634,12 @@ static int __sched
+@@ -1599,6 +1635,12 @@ static int __sched
  			break;
  		}
  
@@ -100,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irq(&lock->wait_lock);
  
  		debug_rt_mutex_print_deadlock(waiter);
-@@ -1632,13 +1674,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1633,13 +1675,91 @@ static void rt_mutex_handle_deadlock(int
  	}
  }
  
@@ -169,6 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 +		BUG_ON(waiter->lock != lock);
 +		rt_mutex_wake_waiter(waiter);
 +	}
++
 +}
 +
 +#else
@@ -192,8 +204,20 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	struct rt_mutex_waiter waiter;
  	unsigned long flags;
-@@ -1658,6 +1777,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1657,8 +1777,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ 	 */
+ 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (ww_ctx) {
++		struct ww_mutex *ww;
++
++		ww = container_of(lock, struct ww_mutex, base.lock);
++		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
++			return -EALREADY;
++	}
++#endif
++
  	/* Try to acquire the lock again: */
  	if (try_to_take_rt_mutex(lock, current, NULL)) {
 +		if (ww_ctx)
@@ -201,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  		return 0;
  	}
-@@ -1672,13 +1793,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1673,13 +1805,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  
  	if (likely(!ret))
  		/* sleep on the mutex */
@@ -227,7 +251,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  	}
  
  	/*
-@@ -1808,29 +1939,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1809,29 +1951,33 @@ static bool __sched rt_mutex_slowunlock(
   */
  static inline int
  rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -265,7 +289,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  
  static inline int
-@@ -1881,7 +2016,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1882,7 +2028,7 @@ void __sched rt_mutex_lock(struct rt_mut
  {
  	might_sleep();
  
@@ -274,7 +298,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock);
  
-@@ -1898,7 +2033,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1899,7 +2045,7 @@ int __sched rt_mutex_lock_interruptible(
  {
  	might_sleep();
  
@@ -283,7 +307,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
  
-@@ -1925,7 +2060,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1926,7 +2072,7 @@ int __sched rt_mutex_lock_killable(struc
  {
  	might_sleep();
  
@@ -292,7 +316,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
  
-@@ -1949,6 +2084,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1950,6 +2096,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
  
  	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
  				       RT_MUTEX_MIN_CHAINWALK,
@@ -300,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  				       rt_mutex_slowlock);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2239,7 +2375,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2248,7 +2395,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	raw_spin_lock_irq(&lock->wait_lock);
  	/* sleep on the mutex */
  	set_current_state(TASK_INTERRUPTIBLE);
@@ -309,20 +333,13 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  	/*
  	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
  	 * have to fix that up.
-@@ -2306,24 +2442,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2315,24 +2462,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
  	return cleanup;
  }
  
--#ifdef CONFIG_PREEMPT_RT_FULL
--struct ww_mutex {
--};
--struct ww_acquire_ctx {
--};
--int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
 +static inline int
 +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
- {
--	BUG();
++{
 +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 +	unsigned tmp;
 +
@@ -344,47 +361,58 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 +#endif
 +
 +	return 0;
- }
--EXPORT_SYMBOL_GPL(__ww_mutex_lock);
--int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++}
 +
-+#ifdef CONFIG_PREEMPT_RT_FULL
+ #ifdef CONFIG_PREEMPT_RT_FULL
+-struct ww_mutex {
+-};
+-struct ww_acquire_ctx {
+-};
+-int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
 +int __sched
-+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
 -	BUG();
 +	int ret;
 +
 +	might_sleep();
 +
-+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
++	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
++	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
++				ctx);
 +	if (ret)
 +		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+	else if (!ret && ww_ctx->acquired > 1)
-+		return ww_mutex_deadlock_injection(lock, ww_ctx);
++	else if (!ret && ctx && ctx->acquired > 1)
++		return ww_mutex_deadlock_injection(lock, ctx);
 +
 +	return ret;
  }
- EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+-EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+-int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 +
 +int __sched
-+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-+{
++ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ {
+-	BUG();
 +	int ret;
 +
 +	might_sleep();
 +
-+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
++	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
++	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
++				ctx);
 +	if (ret)
 +		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+	else if (!ret && ww_ctx->acquired > 1)
-+		return ww_mutex_deadlock_injection(lock, ww_ctx);
++	else if (!ret && ctx && ctx->acquired > 1)
++		return ww_mutex_deadlock_injection(lock, ctx);
 +
 +	return ret;
-+}
-+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+ }
+-EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
++EXPORT_SYMBOL_GPL(ww_mutex_lock);
 +
  void __sched ww_mutex_unlock(struct ww_mutex *lock)
  {
@@ -406,7 +434,13 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 +
 +	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
 +	rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++
++int __rt_mutex_owner_current(struct rt_mutex *lock)
++{
++	return rt_mutex_owner(lock) == current;
  }
 -EXPORT_SYMBOL_GPL(ww_mutex_unlock);
-+EXPORT_SYMBOL(ww_mutex_unlock);
++EXPORT_SYMBOL(__rt_mutex_owner_current);
  #endif
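
For reference, the -EALREADY check added in the slowlock above is
caller-visible; a short usage sketch against the mainline ww_mutex API
(class and function names here are illustrative):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(demo_ww_class);

    static void demo(struct ww_mutex *wwm)
    {
            struct ww_acquire_ctx ctx;

            ww_acquire_init(&ctx, &demo_ww_class);

            WARN_ON(ww_mutex_lock(wwm, &ctx) != 0);
            /* same context again: rejected instead of self-deadlocking */
            WARN_ON(ww_mutex_lock(wwm, &ctx) != -EALREADY);

            ww_mutex_unlock(wwm);
            ww_acquire_fini(&ctx);
    }
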
diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
index e6476be..03d5da5 100644
--- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
+++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Avoid include hell
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Include only the required raw types. This avoids pulling in the
 complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
index 07ab8fd..41b5dba 100644
--- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
+++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle the various new futex race conditions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT opens a few new interesting race conditions in the rtmutex/futex
 combo due to futex hash bucket lock being a 'sleeping' spinlock and
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -2011,6 +2011,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2013,6 +2013,16 @@ static int futex_requeue(u32 __user *uad
  				requeue_pi_wake_futex(this, &key2, hb2);
  				drop_count++;
  				continue;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			} else if (ret) {
  				/*
  				 * rt_mutex_start_proxy_lock() detected a
-@@ -2996,7 +3006,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2998,7 +3008,7 @@ static int futex_wait_requeue_pi(u32 __u
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct futex_pi_state *pi_state = NULL;
  	struct rt_mutex_waiter rt_waiter;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	union futex_key key2 = FUTEX_KEY_INIT;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -3052,20 +3062,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3054,20 +3064,55 @@ static int futex_wait_requeue_pi(u32 __u
  	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
  	futex_wait_queue_me(hb, &q, to);
  
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -3074,7 +3119,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3076,7 +3121,8 @@ static int futex_wait_requeue_pi(u32 __u
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			ret = fixup_pi_state_owner(uaddr2, &q, current);
  			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
  				pi_state = q.pi_state;
-@@ -3085,7 +3131,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3087,7 +3133,7 @@ static int futex_wait_requeue_pi(u32 __u
  			 * the requeue_pi() code acquired for us.
  			 */
  			put_pi_state(q.pi_state);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		}
  	} else {
  		struct rt_mutex *pi_mutex;
-@@ -3099,7 +3145,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3101,7 +3147,8 @@ static int futex_wait_requeue_pi(u32 __u
  		pi_mutex = &q.pi_state->pi_mutex;
  		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
  
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -133,6 +133,11 @@ static void fixup_rt_mutex_waiters(struc
+@@ -135,6 +135,11 @@ static void fixup_rt_mutex_waiters(struc
  		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
  }
  
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * We can speed up the acquire/release, if there's no debugging state to be
   * set up.
-@@ -389,7 +394,8 @@ int max_lock_depth = 1024;
+@@ -391,7 +396,8 @@ int max_lock_depth = 1024;
  
  static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  {
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -525,7 +531,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -527,7 +533,7 @@ static int rt_mutex_adjust_prio_chain(st
  	 * reached or the state of the chain has changed while we
  	 * dropped the locks.
  	 */
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		goto out_unlock_pi;
  
  	/*
-@@ -961,6 +967,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -963,6 +969,23 @@ static int task_blocks_on_rt_mutex(struc
  		return -EDEADLK;
  
  	raw_spin_lock(&task->pi_lock);
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	rt_mutex_adjust_prio(task);
  	waiter->task = task;
  	waiter->lock = lock;
-@@ -985,7 +1008,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -987,7 +1010,7 @@ static int task_blocks_on_rt_mutex(struc
  		rt_mutex_enqueue_pi(owner, waiter);
  
  		rt_mutex_adjust_prio(owner);
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			chain_walk = 1;
  	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
  		chain_walk = 1;
-@@ -1081,7 +1104,7 @@ static void remove_waiter(struct rt_mute
+@@ -1083,7 +1106,7 @@ static void remove_waiter(struct rt_mute
  {
  	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
  	struct task_struct *owner = rt_mutex_owner(lock);
@@ -213,7 +213,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	lockdep_assert_held(&lock->wait_lock);
  
-@@ -1107,7 +1130,8 @@ static void remove_waiter(struct rt_mute
+@@ -1109,7 +1132,8 @@ static void remove_waiter(struct rt_mute
  	rt_mutex_adjust_prio(owner);
  
  	/* Store the lock on which owner is blocked or NULL */
@@ -223,7 +223,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	raw_spin_unlock(&owner->pi_lock);
  
-@@ -1143,7 +1167,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1145,7 +1169,8 @@ void rt_mutex_adjust_pi(struct task_stru
  	raw_spin_lock_irqsave(&task->pi_lock, flags);
  
  	waiter = task->pi_blocked_on;
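
A condensed sketch of the mechanism these hunks rely on: while a PI
wakeup is in flight, ->pi_blocked_on holds a sentinel rather than a
real waiter, and chain walks stop at it (simplified from the series,
which also knows a second sentinel for requeue):

    /* rtmutex_common.h (RT) */
    #define PI_WAKEUP_INPROGRESS    ((struct rt_mutex_waiter *) 1)

    static inline int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
    {
            return waiter && waiter != PI_WAKEUP_INPROGRESS;
    }

    /* only real waiters link the chain walk to a next lock */
    static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
    {
            return rt_mutex_real_waiter(p->pi_blocked_on) ?
                    p->pi_blocked_on->lock : NULL;
    }
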
diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
index 56cf9f6..521d325 100644
--- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch
+++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Add rtmutex_lock_killable()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 09 Jun 2011 11:43:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Add a "killable" type to rtmutex. We need this since rtmutexes are used as
 "normal" mutexes which do use this type.
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1535,6 +1535,25 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -1537,6 +1537,25 @@ int __sched rt_mutex_futex_trylock(struc
  }
  
  /**
diff --git a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
index 637a412..f703085 100644
--- a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
+++ b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed 02 Dec 2015 11:34:07 +0100
 Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 A non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
 -RT we don't run softirqs in IRQ context but in thread context, so it is
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1545,7 +1545,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1547,7 +1547,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
   */
  int __sched rt_mutex_trylock(struct rt_mutex *lock)
  {
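
The guard added here (not visible in the context above) looks roughly
like this, per the series: on RT only hard interrupt and NMI context
stay forbidden, since softirqs run in threads:

    int __sched rt_mutex_trylock(struct rt_mutex *lock)
    {
    #ifdef CONFIG_PREEMPT_RT_FULL
            if (WARN_ON_ONCE(in_irq() || in_nmi()))
    #else
            if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
    #endif
                    return 0;

            return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
    }
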
diff --git a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
index be7709f..e967d5c 100644
--- a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
+++ b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
@@ -1,6 +1,6 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The RCU header pulls in spinlock.h and fails due to not-yet-defined types:
 
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +#endif
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -46,6 +46,7 @@
+@@ -45,6 +45,7 @@
  #include <linux/compiler.h>
  #include <linux/ktime.h>
  #include <linux/irqflags.h>
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include <asm/barrier.h>
  
-@@ -633,54 +634,6 @@ static inline void rcu_preempt_sleep_che
+@@ -593,54 +594,6 @@ static inline void rcu_preempt_sleep_che
  })
  
  /**
diff --git a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
index e1dbc4e..f721cf9 100644
--- a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
+++ b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:02 +0200
 Subject: [PATCH] rwsem/rt: Lift single reader restriction
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The RT specific R/W semaphore implementation restricts the number of readers
 to one because a writer cannot block on multiple readers and inherit its
@@ -46,8 +46,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  include/linux/rwsem_rt.h  |  166 +++++-----------------------
  kernel/locking/Makefile   |    4 
  kernel/locking/rt.c       |  167 ----------------------------
- kernel/locking/rwsem-rt.c |  268 ++++++++++++++++++++++++++++++++++++++++++++++
- 5 files changed, 310 insertions(+), 304 deletions(-)
+ kernel/locking/rwsem-rt.c |  269 ++++++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 311 insertions(+), 304 deletions(-)
  create mode 100644 kernel/locking/rwsem-rt.c
 
 --- a/include/linux/rwsem.h
@@ -284,7 +284,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  obj-$(CONFIG_LOCKDEP) += lockdep.o
  ifeq ($(CONFIG_PROC_FS),y)
  obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -32,6 +32,6 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+@@ -32,7 +32,7 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
  obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
  obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
  endif
@@ -292,9 +292,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
  obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
 --- a/kernel/locking/rt.c
 +++ b/kernel/locking/rt.c
-@@ -306,173 +306,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
+@@ -329,173 +329,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
  }
  EXPORT_SYMBOL(__rt_rwlock_init);
  
@@ -470,11 +471,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * @cnt: the atomic which we are to dec
 --- /dev/null
 +++ b/kernel/locking/rwsem-rt.c
-@@ -0,0 +1,268 @@
+@@ -0,0 +1,269 @@
 +/*
 + */
 +#include <linux/rwsem.h>
-+#include <linux/sched.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/signal.h>
 +#include <linux/export.h>
 +
 +#include "rtmutex_common.h"
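
A condensed sketch of the data structure the new rwsem-rt.c is built
around (lockdep fields omitted; the full file is 269 lines): readers
are counted atomically, and writers serialize on the underlying
rtmutex so priority inheritance still works for writers:

    /* include/linux/rwsem_rt.h (sketch) */
    struct rw_semaphore {
            atomic_t        readers;        /* reader count / writer bias */
            struct rt_mutex rtmutex;        /* serializes writers */
    };

    /* kernel/locking/rwsem-rt.c (sketch) */
    #define READER_BIAS     (1U << 31)
    #define WRITER_BIAS     (1U << 30)
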
diff --git a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
index 1045320..f5355a5 100644
--- a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
+++ b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Oct 2016 10:54:50 +0200
 Subject: [PATCH] rxrpc: remove unused static variables
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The rxrpc_security_methods and rxrpc_security_sem user has been removed
 in 648af7fca159 ("rxrpc: Absorb the rxkad security module"). This was
diff --git a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
index 70087e6..d931c72 100644
--- a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
+++ b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sat, 14 Feb 2015 11:01:16 -0500
 Subject: sas-ata/isci: don't disable interrupts in qc_issue handler
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On 3.14-rt we see the following trace on Canoe Pass for
 SCSI_ISCI "Intel(R) C600 Series Chipset SAS Controller"
diff --git a/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
new file mode 100644
index 0000000..2a21f77
--- /dev/null
+++ b/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -0,0 +1,78 @@
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 6 Jun 2017 14:20:37 +0200
+Subject: sched: Prevent task state corruption by spurious lock wakeup
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+Mathias and others reported GDB failures on RT.
+
+The following scenario leads to task state corruption:
+
+CPU0						CPU1
+
+T1->state = TASK_XXX;
+spin_lock(&lock)
+  rt_spin_lock_slowlock(&lock->rtmutex)
+    raw_spin_lock(&rtm->wait_lock);
+    T1->saved_state = current->state;
+    T1->state = TASK_UNINTERRUPTIBLE;
+						spin_unlock(&lock)
+    task_blocks_on_rt_mutex(rtm)  		  rt_spin_lock_slowunlock(&lock->rtmutex)
+      queue_waiter(rtm)				    raw_spin_lock(&rtm->wait_lock);
+      pi_chain_walk(rtm)
+        raw_spin_unlock(&rtm->wait_lock);
+						    wake_top_waiter(T1)
+
+      raw_spin_lock(&rtm->wait_lock);
+
+    for (;;) {
+      if (__try_to_take_rt_mutex())  <- Succeeds
+        break;
+      ...
+    }
+
+    T1->state = T1->saved_state;
+						     try_to_wake_up(T1)
+						       ttwu_do_wakeup(T1)
+						         T1->state = TASK_RUNNING;
+
+In most cases this is harmless because waiting for some event, which is the
+usual reason for TASK_[UN]INTERRUPTIBLE, has to be safe against other forms
+of spurious wakeups anyway.
+
+But in case of TASK_TRACED this is actually fatal, because the task loses
+the TASK_TRACED state. In consequence it fails to consume SIGSTOP which was
+sent from the debugger and actually delivers SIGSTOP to the task which
+breaks the ptrace mechanics and brings the debugger into an unexpected
+state.
+
+The TASK_TRACED state should prevent getting there due to the state
+matching logic in try_to_wake_up(). But that's not true because
+wake_up_lock_sleeper() uses TASK_ALL as state mask. That's bogus because
+lock sleepers always use TASK_UNINTERRUPTIBLE, so the wakeup should use
+that as well.
+
+The cure is way simpler than figuring it out:
+
+Change the mask used in wake_up_lock_sleeper() from TASK_ALL to
+TASK_UNINTERRUPTIBLE.
+
+Cc: stable-rt at vger.kernel.org
+Reported-by: Mathias Koehrer <mathias.koehrer at etas.com>
+Reported-by: David Hauck <davidh at netacquire.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/sched/core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2206,7 +2206,7 @@ EXPORT_SYMBOL(wake_up_process);
+  */
+ int wake_up_lock_sleeper(struct task_struct *p)
+ {
+-	return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++	return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
+ }
+ 
+ int wake_up_state(struct task_struct *p, unsigned int state)
diff --git a/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch b/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch
new file mode 100644
index 0000000..bae2e4f
--- /dev/null
+++ b/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch
@@ -0,0 +1,30 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Wed, 7 Jun 2017 10:12:45 +0200
+Subject: [PATCH] sched: Remove TASK_ALL
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+
+It's unused:
+
+$ git grep "\<TASK_ALL\>" | wc -l
+1
+
+And dangerous, kill the bugger.
+
+Cc: stable-rt at vger.kernel.org
+Acked-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/sched.h |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -94,7 +94,6 @@ struct task_group;
+ 
+ /* Convenience macros for the sake of wake_up(): */
+ #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+-#define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+ 
+ /* get_task_state(): */
+ #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
diff --git a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 021bd5a..60f6122 100644
--- a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Juri Lelli <juri.lelli at gmail.com>
 Date: Tue, 13 May 2014 15:30:20 +0200
 Subject: sched/deadline: dl_task_timer has to be irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 As for rt_period_timer, dl_task_timer has to be irqsafe.
 
@@ -13,11 +13,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
-@@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -693,6 +693,7 @@ void init_dl_task_timer(struct sched_dl_
  
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	timer->function = dl_task_timer;
 +	timer->irqsafe = 1;
  }
  
- static
+ /*
diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch
index 36a33b7..c9ce15b 100644
--- a/debian/patches/features/all/rt/sched-delay-put-task.patch
+++ b/debian/patches/features/all/rt/sched-delay-put-task.patch
@@ -1,31 +1,34 @@
 Subject: sched: Move task_struct cleanup to RCU
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 May 2011 16:59:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 __put_task_struct() does quite a lot of expensive work. We don't want to
 burden random tasks with that.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- include/linux/sched.h |   13 +++++++++++++
- kernel/fork.c         |   15 ++++++++++++++-
- 2 files changed, 27 insertions(+), 1 deletion(-)
+ include/linux/sched.h      |    3 +++
+ include/linux/sched/task.h |   10 ++++++++++
+ kernel/fork.c              |   15 ++++++++++++++-
+ 3 files changed, 27 insertions(+), 1 deletion(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1968,6 +1968,9 @@ struct task_struct {
- 	unsigned int	sequential_io;
- 	unsigned int	sequential_io_avg;
+@@ -1052,6 +1052,9 @@ struct task_struct {
+ 	unsigned int			sequential_io;
+ 	unsigned int			sequential_io_avg;
  #endif
 +#ifdef CONFIG_PREEMPT_RT_BASE
-+	struct rcu_head put_rcu;
++	struct rcu_head			put_rcu;
 +#endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- 	unsigned long	task_state_change;
+ 	unsigned long			task_state_change;
  #endif
-@@ -2225,6 +2228,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -86,6 +86,15 @@ extern void sched_exec(void);
+ 
  #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
  
 +#ifdef CONFIG_PREEMPT_RT_BASE
@@ -40,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  extern void __put_task_struct(struct task_struct *t);
  
  static inline void put_task_struct(struct task_struct *t)
-@@ -2232,6 +2244,7 @@ static inline void put_task_struct(struc
+@@ -93,6 +102,7 @@ static inline void put_task_struct(struc
  	if (atomic_dec_and_test(&t->usage))
  		__put_task_struct(t);
  }
@@ -50,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  struct task_struct *try_get_task_struct(struct task_struct **ptask);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -376,7 +376,9 @@ static inline void put_signal_struct(str
+@@ -389,7 +389,9 @@ static inline void put_signal_struct(str
  	if (atomic_dec_and_test(&sig->sigcnt))
  		free_signal_struct(sig);
  }
@@ -61,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void __put_task_struct(struct task_struct *tsk)
  {
  	WARN_ON(!tsk->exit_state);
-@@ -393,7 +395,18 @@ void __put_task_struct(struct task_struc
+@@ -406,7 +408,18 @@ void __put_task_struct(struct task_struc
  	if (!profile_handoff_task(tsk))
  		free_task(tsk);
  }
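
The visible hunks hide the added bodies, so as a sketch of the whole
(per the RT series): the last reference drop hands the expensive
teardown to RCU instead of running it in the caller's context:

    /* include/linux/sched/task.h (RT) */
    extern void __put_task_struct_cb(struct rcu_head *rhp);

    static inline void put_task_struct(struct task_struct *t)
    {
            if (atomic_dec_and_test(&t->usage))
                    call_rcu(&t->put_rcu, __put_task_struct_cb);
    }

    /* kernel/fork.c: the RCU callback unwraps the task and frees it */
    void __put_task_struct_cb(struct rcu_head *rhp)
    {
            struct task_struct *tsk =
                    container_of(rhp, struct task_struct, put_rcu);

            __put_task_struct(tsk);
    }
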
diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
index 30d07c8..52d5c61 100644
--- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Carsten reported problems when running:
 
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1051,6 +1051,7 @@ config CFS_BANDWIDTH
+@@ -1052,6 +1052,7 @@ config CFS_BANDWIDTH
  config RT_GROUP_SCHED
  	bool "Group scheduling for SCHED_RR/FIFO"
  	depends on CGROUP_SCHED
diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
index e819d3e..ccee435 100644
--- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable TTWU_QUEUE on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The queued remote wakeup mechanism can introduce rather large
 latencies if the number of migrated tasks is high. Disable it for RT.
@@ -28,5 +28,5 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  SCHED_FEAT(TTWU_QUEUE, true)
 +#endif
  
- #ifdef HAVE_RT_PUSH_IPI
  /*
+  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
index 09a4732..d8dfc59 100644
--- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
+++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
 Subject: sched: Limit the number of task migrations per batch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Put an upper limit on the number of tasks which are migrated per batch
 to avoid large latencies.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -58,7 +58,11 @@ const_debug unsigned int sysctl_sched_fe
   * Number of tasks to iterate in a single balance run.
   * Limited because this is done with IRQs disabled.
   */
diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
index d715f17..ef0bfde 100644
--- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
 Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT changes the rcu_preempt_depth semantics, so we cannot check for it
 in might_sleep().
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
-@@ -301,6 +301,11 @@ void synchronize_rcu(void);
+@@ -261,6 +261,11 @@ void synchronize_rcu(void);
   * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
   */
  #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #else /* #ifdef CONFIG_PREEMPT_RCU */
  
-@@ -326,6 +331,8 @@ static inline int rcu_preempt_depth(void
+@@ -286,6 +291,8 @@ static inline int rcu_preempt_depth(void
  	return 0;
  }
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Internal to kernel */
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7862,7 +7862,7 @@ void __init sched_init(void)
+@@ -6271,7 +6271,7 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
index a13e2ae..de6555f 100644
--- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
+++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
 Subject: sched: Move mmdrop to RCU on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Takes sleeping locks and calls into the memory allocator, so this is nothing
 we want to do in task switch and other atomic contexts.
@@ -9,7 +9,7 @@ we want to do in task switch and oder atomic contexts.
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/mm_types.h |    4 ++++
- include/linux/sched.h    |   11 +++++++++++
+ include/linux/sched/mm.h |   11 +++++++++++
  kernel/fork.c            |   13 +++++++++++++
  kernel/sched/core.c      |   19 +++++++++++++++++--
  4 files changed, 45 insertions(+), 2 deletions(-)
@@ -23,20 +23,20 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +#include <linux/rcupdate.h>
  #include <linux/page-flags-layout.h>
  #include <linux/workqueue.h>
- #include <asm/page.h>
-@@ -509,6 +510,9 @@ struct mm_struct {
+ 
+@@ -491,6 +492,9 @@ struct mm_struct {
  	bool tlb_flush_pending;
  #endif
  	struct uprobes_state uprobes_state;
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +	struct rcu_head delayed_drop;
 +#endif
- #ifdef CONFIG_X86_INTEL_MPX
- 	/* address of the bounds directory */
- 	void __user *bd_addr;
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -2912,6 +2912,17 @@ static inline void mmdrop(struct mm_stru
+ #ifdef CONFIG_HUGETLB_PAGE
+ 	atomic_long_t hugetlb_usage;
+ #endif
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -42,6 +42,17 @@ static inline void mmdrop(struct mm_stru
  		__mmdrop(mm);
  }
  
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -865,6 +865,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -885,6 +885,19 @@ void __mmdrop(struct mm_struct *mm)
  }
  EXPORT_SYMBOL_GPL(__mmdrop);
  
@@ -92,16 +92,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (unlikely(prev_state == TASK_DEAD)) {
  		if (prev->sched_class->task_dead)
  			prev->sched_class->task_dead(prev);
-@@ -5587,6 +5591,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5612,6 +5616,8 @@ void sched_setnuma(struct task_struct *p
  #endif /* CONFIG_NUMA_BALANCING */
  
  #ifdef CONFIG_HOTPLUG_CPU
 +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
 +
  /*
-  * Ensures that the idle task is using init_mm right before its cpu goes
+  * Ensure that the idle task is using init_mm right before its CPU goes
   * offline.
-@@ -5601,7 +5607,12 @@ void idle_task_exit(void)
+@@ -5626,7 +5632,12 @@ void idle_task_exit(void)
  		switch_mm_irqs_off(mm, &init_mm, current);
  		finish_arch_post_lock_switch();
  	}
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -7547,6 +7558,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5953,6 +5964,10 @@ int sched_cpu_dying(unsigned int cpu)
  	update_max_interval();
  	nohz_balance_exit_idle(cpu);
  	hrtick_clear(rq);
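
The mechanism, assembled from the hunks above into one hedged sketch
(error handling and the !RT fallback are condensed):

#ifdef CONFIG_PREEMPT_RT_BASE
/*
 * Defer the final drop to RCU: __mmdrop() takes sleeping locks and
 * calls into the allocator, which is not allowed in task switch.
 */
static void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

static inline void mmdrop_delayed(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
# define mmdrop_delayed(mm)	mmdrop(mm)
#endif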
diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
index 95ae8e3..2b91fa6 100644
--- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
+++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add saved_state for tasks blocked on sleeping locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 25 Jun 2011 09:21:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Spinlocks are state preserving in !RT. RT changes the state when a
 task gets blocked on a lock. So we need to remember the state before
@@ -11,32 +11,33 @@ sleep is done, the saved state is restored.
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- include/linux/sched.h |    2 ++
+ include/linux/sched.h |    3 +++
  kernel/sched/core.c   |   31 ++++++++++++++++++++++++++++++-
  kernel/sched/sched.h  |    1 +
- 3 files changed, 33 insertions(+), 1 deletion(-)
+ 3 files changed, 34 insertions(+), 1 deletion(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1481,6 +1481,7 @@ struct task_struct {
- 	struct thread_info thread_info;
+@@ -490,6 +490,8 @@ struct task_struct {
  #endif
- 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
-+	volatile long saved_state; /* saved state for "spinlock sleepers" */
- 	void *stack;
- 	atomic_t usage;
- 	unsigned int flags;	/* per process flags, defined below */
-@@ -2704,6 +2705,7 @@ extern void xtime_update(unsigned long t
+ 	/* -1 unrunnable, 0 runnable, >0 stopped: */
+ 	volatile long			state;
++	/* saved state for "spinlock sleepers" */
++	volatile long			saved_state;
+ 	void				*stack;
+ 	atomic_t			usage;
+ 	/* Per task flags (PF_*), defined further below: */
+@@ -1415,6 +1417,7 @@ extern struct task_struct *find_task_by_
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct * tsk);
++extern int wake_up_lock_sleeper(struct task_struct *tsk);
  extern void wake_up_new_task(struct task_struct *tsk);
+ 
  #ifdef CONFIG_SMP
-  extern void kick_process(struct task_struct *tsk);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2033,8 +2033,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2013,8 +2013,25 @@ try_to_wake_up(struct task_struct *p, un
  	 */
  	smp_mb__before_spinlock();
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -63,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	trace_sched_waking(p);
  
-@@ -2181,6 +2198,18 @@ int wake_up_process(struct task_struct *
+@@ -2180,6 +2197,18 @@ int wake_up_process(struct task_struct *
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -84,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return try_to_wake_up(p, state, 0);
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(st
+@@ -1293,6 +1293,7 @@ static inline void finish_lock_switch(st
  #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
  #define WF_FORK		0x02		/* child wakeup after fork */
  #define WF_MIGRATED	0x4		/* internal use, task got migrated */
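
Condensed into one hedged sketch of the wakeup side; the _sketch suffix
marks it as illustrative, locking and flag use follow the hunks, and the
early-exit structure is simplified:

static int try_to_wake_up_sketch(struct task_struct *p, unsigned int state,
				 int wake_flags)
{
	unsigned long flags;
	int success = 0;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state)) {
		/*
		 * Task blocked on a sleeping lock: if the wakeup matches
		 * the saved state, make it runnable once the lock sleep
		 * is done.
		 */
		if (p->saved_state & state) {
			p->saved_state = TASK_RUNNING;
			success = 1;
		}
		goto out;
	}
	/*
	 * A lock wakeup (WF_LOCK_SLEEPER) must leave saved_state alone
	 * so a later real wakeup still sees the original state.
	 */
	if (!(wake_flags & WF_LOCK_SLEEPER))
		p->saved_state = TASK_RUNNING;

	/* ... regular wakeup path continues here ... */
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	return success;
}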
diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
index fdd3f0c..84886c1 100644
--- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
@@ -1,7 +1,7 @@
 Subject: sched: ttwu: Return success when only changing the saved_state value
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Dec 2011 21:42:19 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When a task blocks on a rt lock, it saves the current state in
 p->saved_state, so a lock related wake up will not destroy the
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2040,8 +2040,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2020,8 +2020,10 @@ try_to_wake_up(struct task_struct *p, un
  		 * if the wakeup condition is true.
  		 */
  		if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index e5d637f..0f16e03 100644
--- a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 18 Mar 2013 15:12:49 -0400
 Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 In -rt, most spin_locks() turn into mutexes. One of these spin_lock
 conversions is performed on the workqueue gcwq->lock. When the idle
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3477,8 +3477,10 @@ static void __sched notrace __schedule(b
+@@ -3474,8 +3474,10 @@ static void __sched notrace __schedule(b
  			 * If a worker went to sleep, notify and ask workqueue
  			 * whether it wants to wake up a task to maintain
  			 * concurrency.
diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
index 7399920..2adcf21 100644
--- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: scsi/fcoe: Make RT aware.
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Do not disable preemption while taking sleeping locks. All users look safe
 for migrate_disable() only.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/scsi/fcoe/fcoe.c
 +++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1464,11 +1464,11 @@ static int fcoe_rcv(struct sk_buff *skb,
  static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
  {
  	struct fcoe_percpu_s *fps;
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return rc;
  }
-@@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(str
+@@ -1655,11 +1655,11 @@ static inline int fcoe_filter_frames(str
  		return 0;
  	}
  
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return -EINVAL;
  }
  
-@@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1702,7 +1702,7 @@ static void fcoe_recv_frame(struct sk_bu
  	 */
  	hp = (struct fcoe_hdr *) skb_network_header(skb);
  
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
  		if (stats->ErrorFrames < 5)
  			printk(KERN_WARNING "fcoe: FCoE version "
-@@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1734,13 +1734,13 @@ static void fcoe_recv_frame(struct sk_bu
  		goto drop;
  
  	if (!fcoe_filter_frames(lport, fp)) {
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -836,7 +836,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
  
  	INIT_LIST_HEAD(&del_list);
  
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
  		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -872,7 +872,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
  				sel_time = fcf->time;
  		}
  	}
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		/* Removes fcf from current list */
 --- a/drivers/scsi/libfc/fc_exch.c
 +++ b/drivers/scsi/libfc/fc_exch.c
-@@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(
+@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(
  	}
  	memset(ep, 0, sizeof(*ep));
  
diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
index d6d8f0f..7280dd1 100644
--- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll
 From: John Kacur <jkacur at redhat.com>
 Date: Fri, 27 Apr 2012 12:48:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 RT triggers the following:
 
diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
index 56cb803..11398d3 100644
--- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
+++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
@@ -1,7 +1,7 @@
 Subject: seqlock: Prevent rt starvation
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Feb 2012 12:03:30 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If a low prio writer gets preempted while holding the seqlock write
 locked, a high prio reader spins forever on RT.
@@ -23,9 +23,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
  include/linux/seqlock.h |   56 +++++++++++++++++++++++++++++++++++++-----------
- include/net/dst.h       |    2 -
- include/net/neighbour.h |    4 +--
- 3 files changed, 47 insertions(+), 15 deletions(-)
+ include/net/neighbour.h |    6 ++---
+ 2 files changed, 47 insertions(+), 15 deletions(-)
 
 --- a/include/linux/seqlock.h
 +++ b/include/linux/seqlock.h
@@ -158,17 +157,6 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	spin_unlock_irqrestore(&sl->lock, flags);
  }
  
---- a/include/net/dst.h
-+++ b/include/net/dst.h
-@@ -446,7 +446,7 @@ static inline void dst_confirm(struct ds
- static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
- 				   struct sk_buff *skb)
- {
--	const struct hh_cache *hh;
-+	struct hh_cache *hh;
- 
- 	if (dst->pending_confirm) {
- 		unsigned long now = jiffies;
 --- a/include/net/neighbour.h
 +++ b/include/net/neighbour.h
 @@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct
@@ -180,7 +168,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	unsigned int seq;
  	int hh_len;
-@@ -501,7 +501,7 @@ struct neighbour_cb {
+@@ -470,7 +470,7 @@ static inline int neigh_hh_output(const
+ 
+ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+ {
+-	const struct hh_cache *hh = &n->hh;
++	struct hh_cache *hh = &n->hh;
+ 
+ 	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ 		return neigh_hh_output(hh, skb);
+@@ -511,7 +511,7 @@ struct neighbour_cb {
  
  #define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
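
The reader side of the fix, as a hedged sketch; the lock/unlock pair as
the boosting mechanism is the idea, while the real patch spreads this
over the seqlock helpers shown above:

static inline unsigned read_seqbegin_rt(seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(sl->seqcount.sequence);
	if (unlikely(ret & 1)) {
		/*
		 * Instead of spinning on the odd count, acquire and drop
		 * the writer's lock: on RT that is an rtmutex, so the
		 * preempted low-prio writer gets boosted and can finish.
		 */
		spin_lock(&sl->lock);
		spin_unlock(&sl->lock);
		goto repeat;
	}
	return ret;
}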
  
diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
index 0cf77ab..88730ae 100644
--- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
+++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
@@ -1,7 +1,7 @@
 Subject: signal: Make __lock_task_sighand() RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 22 Jul 2011 08:07:08 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 local_irq_save() + spin_lock(&sighand->siglock) does not work on
 -RT. Use the nort variants.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1276,12 +1276,12 @@ struct sighand_struct *__lock_task_sigha
+@@ -1287,12 +1287,12 @@ struct sighand_struct *__lock_task_sigha
  		 * Disable interrupts early to avoid deadlocks.
  		 * See rcu_read_unlock() comment header for details.
  		 */
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			break;
  		}
  		/*
-@@ -1302,7 +1302,7 @@ struct sighand_struct *__lock_task_sigha
+@@ -1313,7 +1313,7 @@ struct sighand_struct *__lock_task_sigha
  		}
  		spin_unlock(&sighand->siglock);
  		rcu_read_unlock();
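
For readers new to the _nort helpers used here, a hedged sketch of the
usual RT-tree definitions (placement and exact bodies are assumptions):

/* Plain irq ops in !RT; reduced to flag bookkeeping on RT, where the
 * lock taken afterwards sleeps anyway. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif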
diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
index e55cee7..8507534 100644
--- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
 Subject: signal: Revert ptrace preempt magic
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
 than a bandaid around the ptrace design trainwreck. It's not a
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1846,15 +1846,7 @@ static void ptrace_stop(int exit_code, i
+@@ -1857,15 +1857,7 @@ static void ptrace_stop(int exit_code, i
  		if (gstop_done && ptrace_reparented(current))
  			do_notify_parent_cldstop(current, false, why);
  
diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 80a7420..d59de91 100644
--- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:56 -0500
 Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 To avoid an allocation, allow rt tasks to cache one sigqueue struct in
 the task struct.
@@ -9,26 +9,27 @@ task struct.
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 ---
- include/linux/sched.h  |    1 
+ include/linux/sched.h  |    2 +
  include/linux/signal.h |    1 
  kernel/exit.c          |    2 -
  kernel/fork.c          |    1 
  kernel/signal.c        |   69 ++++++++++++++++++++++++++++++++++++++++++++++---
- 5 files changed, 69 insertions(+), 5 deletions(-)
+ 5 files changed, 70 insertions(+), 5 deletions(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1689,6 +1689,7 @@ struct task_struct {
- /* signal handlers */
- 	struct signal_struct *signal;
- 	struct sighand_struct *sighand;
-+	struct sigqueue *sigqueue_cache;
- 
- 	sigset_t blocked, real_blocked;
- 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
+@@ -753,6 +753,8 @@ struct task_struct {
+ 	/* Signal handlers: */
+ 	struct signal_struct		*signal;
+ 	struct sighand_struct		*sighand;
++	struct sigqueue			*sigqueue_cache;
++
+ 	sigset_t			blocked;
+ 	sigset_t			real_blocked;
+ 	/* Restored if set_restore_sigmask() was used: */
 --- a/include/linux/signal.h
 +++ b/include/linux/signal.h
-@@ -233,6 +233,7 @@ static inline void init_sigpending(struc
+@@ -231,6 +231,7 @@ static inline void init_sigpending(struc
  }
  
  extern void flush_sigqueue(struct sigpending *queue);
@@ -38,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static inline int valid_signal(unsigned long sig)
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
-@@ -143,7 +143,7 @@ static void __exit_signal(struct task_st
+@@ -159,7 +159,7 @@ static void __exit_signal(struct task_st
  	 * Do this under ->siglock, we can race with another thread
  	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
  	 */
@@ -49,25 +50,25 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1553,6 +1553,7 @@ static __latent_entropy struct task_stru
+@@ -1607,6 +1607,7 @@ static __latent_entropy struct task_stru
  	spin_lock_init(&p->alloc_lock);
  
  	init_sigpending(&p->pending);
 +	p->sigqueue_cache = NULL;
  
  	p->utime = p->stime = p->gtime = 0;
- 	p->utimescaled = p->stimescaled = 0;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -14,6 +14,7 @@
- #include <linux/export.h>
- #include <linux/init.h>
- #include <linux/sched.h>
+@@ -19,6 +19,7 @@
+ #include <linux/sched/task.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/cputime.h>
 +#include <linux/sched/rt.h>
  #include <linux/fs.h>
  #include <linux/tty.h>
  #include <linux/binfmts.h>
-@@ -352,13 +353,30 @@ static bool task_participate_group_stop(
+@@ -357,13 +358,30 @@ static bool task_participate_group_stop(
  	return false;
  }
  
@@ -99,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	struct sigqueue *q = NULL;
  	struct user_struct *user;
-@@ -375,7 +393,10 @@ static struct sigqueue *
+@@ -380,7 +398,10 @@ static struct sigqueue *
  	if (override_rlimit ||
  	    atomic_read(&user->sigpending) <=
  			task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -111,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else {
  		print_dropped_signal(sig);
  	}
-@@ -392,6 +413,13 @@ static struct sigqueue *
+@@ -397,6 +418,13 @@ static struct sigqueue *
  	return q;
  }
  
@@ -125,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static void __sigqueue_free(struct sigqueue *q)
  {
  	if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqu
+@@ -406,6 +434,21 @@ static void __sigqueue_free(struct sigqu
  	kmem_cache_free(sigqueue_cachep, q);
  }
  
@@ -147,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void flush_sigqueue(struct sigpending *queue)
  {
  	struct sigqueue *q;
-@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *q
+@@ -419,6 +462,21 @@ void flush_sigqueue(struct sigpending *q
  }
  
  /*
@@ -169,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Flush all pending signals for this kthread.
   */
  void flush_signals(struct task_struct *t)
-@@ -525,7 +583,7 @@ static void collect_signal(int sig, stru
+@@ -532,7 +590,7 @@ static void collect_signal(int sig, stru
  still_pending:
  		list_del_init(&first->list);
  		copy_siginfo(info, &first->info);
@@ -178,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else {
  		/*
  		 * Ok, it wasn't in the queue.  This must be
-@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *t
+@@ -567,6 +625,8 @@ int dequeue_signal(struct task_struct *t
  {
  	int signr;
  
@@ -187,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/* We only dequeue private signals from ourselves, we don't let
  	 * signalfd steal them
  	 */
-@@ -1485,7 +1545,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1496,7 +1556,8 @@ EXPORT_SYMBOL(kill_pid);
   */
  struct sigqueue *sigqueue_alloc(void)
  {
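
The cache itself boils down to two lock-free helpers; a hedged sketch
matching the helpers added above (condensed, not verbatim):

static struct sigqueue *get_task_cache(struct task_struct *t)
{
	struct sigqueue *q = t->sigqueue_cache;

	/* cmpxchg() keeps the single slot safe against concurrent use. */
	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
		return NULL;
	return q;
}

static int put_task_cache(struct task_struct *t, struct sigqueue *q)
{
	/* Returns 0 if the slot took the entry, nonzero if it was full. */
	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
		return 0;
	return 1;
}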
diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
index a14afde..7923cd8 100644
--- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch
+++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 15:38:34 +0200
 Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the rps lock as a raw lock so we can keep irq-off regions. It looks low
 latency. However we can't kfree() from this context, therefore we defer this
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2855,6 +2855,7 @@ struct softnet_data {
+@@ -2767,6 +2767,7 @@ struct softnet_data {
  	unsigned int		dropped;
  	struct sk_buff_head	input_pkt_queue;
  	struct napi_struct	backlog;
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -284,6 +284,7 @@ struct sk_buff_head {
+@@ -285,6 +285,7 @@ struct sk_buff_head {
  
  	__u32		qlen;
  	spinlock_t	lock;
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  };
  
  struct sk_buff;
-@@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(s
+@@ -1587,6 +1588,12 @@ static inline void skb_queue_head_init(s
  	__skb_queue_head_init(list);
  }
  
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  }
  
-@@ -4337,7 +4337,7 @@ static void flush_backlog(struct work_st
+@@ -4318,7 +4318,7 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -4347,11 +4347,14 @@ static void flush_backlog(struct work_st
+@@ -4328,11 +4328,14 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->process_queue);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void flush_all_backlogs(void)
-@@ -4876,7 +4879,9 @@ static int process_backlog(struct napi_s
+@@ -4866,7 +4869,9 @@ static int process_backlog(struct napi_s
  	while (again) {
  		struct sk_buff *skb;
  
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			rcu_read_lock();
  			__netif_receive_skb(skb);
  			rcu_read_unlock();
-@@ -4884,9 +4889,9 @@ static int process_backlog(struct napi_s
+@@ -4874,9 +4879,9 @@ static int process_backlog(struct napi_s
  			if (++work >= quota)
  				return work;
  
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		rps_lock(sd);
  		if (skb_queue_empty(&sd->input_pkt_queue)) {
  			/*
-@@ -5228,13 +5233,21 @@ static __latent_entropy void net_rx_acti
+@@ -5317,13 +5322,21 @@ static __latent_entropy void net_rx_acti
  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  	unsigned long time_limit = jiffies + 2;
  	int budget = netdev_budget;
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	for (;;) {
  		struct napi_struct *n;
  
-@@ -8037,6 +8050,9 @@ static int dev_cpu_callback(struct notif
+@@ -8084,6 +8097,9 @@ static int dev_cpu_dead(unsigned int old
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
@@ -142,9 +142,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +		kfree_skb(skb);
 +	}
  
- 	return NOTIFY_OK;
+ 	return 0;
  }
-@@ -8341,8 +8357,9 @@ static int __init net_dev_init(void)
+@@ -8387,8 +8403,9 @@ static int __init net_dev_init(void)
  
  		INIT_WORK(flush, flush_backlog);
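
A hedged sketch of the raw-lock initializer this patch adds, assuming
the raw_lock field introduced in the sk_buff_head hunk above:

static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
{
	/* The raw lock may be taken in irq-off regions on RT, but then
	 * freeing must be deferred (see the tofree_queue above). */
	raw_spin_lock_init(&list->raw_lock);
	__skb_queue_head_init(list);
}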
  
diff --git a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
index 7a1e0a2..21ccce6 100644
--- a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 15 Apr 2015 19:00:47 +0200
 Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1799,7 +1799,7 @@ config SLAB_FREELIST_RANDOM
+@@ -1865,7 +1865,7 @@ config SLAB_FREELIST_RANDOM
  
  config SLUB_CPU_PARTIAL
  	default y
diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
index 7ca6270..efdb64b 100644
--- a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
 Subject: slub: Enable irqs for __GFP_WAIT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
 with __GFP_WAIT can happen before that. So use this as an indicator.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
+@@ -1538,14 +1538,17 @@ static struct page *allocate_slab(struct
  	void *start, *p;
  	int idx, order;
  	bool shuffle;
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	if (gfpflags_allow_blocking(flags))
 +		enableirqs = true;
  #ifdef CONFIG_PREEMPT_RT_FULL
- 	if (system_state == SYSTEM_RUNNING)
+ 	if (system_state > SYSTEM_BOOTING)
 -#else
 -	if (gfpflags_allow_blocking(flags))
 +		enableirqs = true;
@@ -33,12 +33,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
+@@ -1620,11 +1623,7 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
 -#ifdef CONFIG_PREEMPT_RT_FULL
--	if (system_state == SYSTEM_RUNNING)
+-	if (system_state > SYSTEM_BOOTING)
 -#else
 -	if (gfpflags_allow_blocking(flags))
 -#endif
diff --git a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
index 63167b1..6118a8e 100644
--- a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
+++ b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 15:09:23 +0100
 Subject: snd/pcm: fix snd_pcm_stream_lock*() irqs_disabled() splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Locking functions previously using read_lock_irq()/read_lock_irqsave() were
 changed to local_irq_disable/save(), leading to gripes.  Use nort variants.
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/sound/core/pcm_native.c
 +++ b/sound/core/pcm_native.c
-@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock)
+@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock)
  void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
  {
  	if (!substream->pcm->nonatomic)
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	snd_pcm_stream_lock(substream);
  }
  EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
-@@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct sn
+@@ -151,7 +151,7 @@ void snd_pcm_stream_unlock_irq(struct sn
  {
  	snd_pcm_stream_unlock(substream);
  	if (!substream->pcm->nonatomic)
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
  
-@@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsa
+@@ -159,7 +159,7 @@ unsigned long _snd_pcm_stream_lock_irqsa
  {
  	unsigned long flags = 0;
  	if (!substream->pcm->nonatomic)
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	snd_pcm_stream_lock(substream);
  	return flags;
  }
-@@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(st
+@@ -177,7 +177,7 @@ void snd_pcm_stream_unlock_irqrestore(st
  {
  	snd_pcm_stream_unlock(substream);
  	if (!substream->pcm->nonatomic)
diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
index bddc934..c90b53c 100644
--- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Disable softirq stacks for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Disable extra stacks for softirqs. We want to preempt softirqs and
 having them on a special IRQ stack does not make this easier.
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void fixup_irqs(void)
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -894,6 +894,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -892,6 +892,7 @@ EXPORT_SYMBOL(native_load_gs_index)
  	jmp	2b
  	.previous
  
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Call softirq on interrupt stack. Interrupts are off. */
  ENTRY(do_softirq_own_stack)
  	pushq	%rbp
-@@ -906,6 +907,7 @@ ENTRY(do_softirq_own_stack)
+@@ -904,6 +905,7 @@ ENTRY(do_softirq_own_stack)
  	decl	PER_CPU_VAR(irq_count)
  	ret
  END(do_softirq_own_stack)
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -472,7 +472,7 @@ struct softirq_action
+@@ -484,7 +484,7 @@ struct softirq_action
  asmlinkage void do_softirq(void);
  asmlinkage void __do_softirq(void);
  
diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
index d29afc0..19cf8fd 100644
--- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Check preemption after reenabling interrupts
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 raise_softirq_irqoff() disables interrupts and wakes the softirq
 daemon, but after reenabling interrupts there is no preemption check,
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/block/blk-softirq.c
 +++ b/block/blk-softirq.c
-@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
+@@ -52,6 +52,7 @@ static void trigger_softirq(void *data)
  		raise_softirq_irqoff(BLOCK_SOFTIRQ);
  
  	local_irq_restore(flags);
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned
+@@ -90,6 +91,7 @@ static int blk_softirq_cpu_dead(unsigned
  			 this_cpu_ptr(&blk_cpu_done));
  	raise_softirq_irqoff(BLOCK_SOFTIRQ);
  	local_irq_enable();
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return 0;
  }
-@@ -141,6 +143,7 @@ void __blk_complete_request(struct reque
+@@ -142,6 +144,7 @@ void __blk_complete_request(struct reque
  		goto do_local;
  
  	local_irq_restore(flags);
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
-@@ -160,8 +160,10 @@ do { \
+@@ -186,8 +186,10 @@ do { \
  
  #ifdef CONFIG_PREEMPT_RT_BASE
  # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -60,14 +60,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-@@ -232,6 +234,7 @@ do { \
+@@ -274,6 +276,7 @@ do { \
  #define preempt_disable_notrace()		barrier()
  #define preempt_enable_no_resched_notrace()	barrier()
  #define preempt_enable_notrace()		barrier()
 +#define preempt_check_resched_rt()		barrier()
  #define preemptible()				0
  
- #endif /* CONFIG_PREEMPT_COUNT */
+ #define migrate_disable()			barrier()
 --- a/lib/irq_poll.c
 +++ b/lib/irq_poll.c
 @@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -2285,6 +2285,7 @@ static void __netif_reschedule(struct Qd
+@@ -2403,6 +2403,7 @@ static void __netif_reschedule(struct Qd
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  void __netif_schedule(struct Qdisc *q)
-@@ -2366,6 +2367,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2465,6 +2466,7 @@ void __dev_kfree_skb_irq(struct sk_buff
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3785,6 +3787,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3772,6 +3774,7 @@ static int enqueue_to_backlog(struct sk_
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -4831,6 +4834,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4821,6 +4824,7 @@ static void net_rps_action_and_irq_enabl
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  		/* Send pending IPI's to kick RPS processing on remote cpus. */
  		while (remsd) {
-@@ -4844,6 +4848,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4834,6 +4838,7 @@ static void net_rps_action_and_irq_enabl
  	} else
  #endif
  		local_irq_enable();
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4921,6 +4926,7 @@ void __napi_schedule(struct napi_struct
+@@ -4911,6 +4916,7 @@ void __napi_schedule(struct napi_struct
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL(__napi_schedule);
  
-@@ -8022,6 +8028,7 @@ static int dev_cpu_callback(struct notif
+@@ -8069,6 +8075,7 @@ static int dev_cpu_dead(unsigned int old
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
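
The helper all of these call sites rely on, as a hedged sketch
consistent with the preempt.h hunk above:

/* Meaningful only on RT, where raise_softirq_irqoff() may have woken a
 * higher-priority softirq thread that should preempt us now. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_check_resched_rt()	preempt_check_resched()
#else
# define preempt_check_resched_rt()	barrier()
#endif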
diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch
index b635255..56df96c 100644
--- a/debian/patches/features/all/rt/softirq-split-locks.patch
+++ b/debian/patches/features/all/rt/softirq-split-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 04 Oct 2012 14:20:47 +0100
 Subject: softirq: Split softirq locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The 3.x RT series removed the split softirq implementation in favour
 of pushing softirq processing into the context of the thread which
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif /* _LINUX_BH_H */
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -469,10 +469,11 @@ struct softirq_action
+@@ -481,10 +481,11 @@ struct softirq_action
  	void	(*action)(struct softirq_action *);
  };
  
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void do_softirq_own_stack(void);
  #else
  static inline void do_softirq_own_stack(void)
-@@ -480,6 +481,9 @@ static inline void do_softirq_own_stack(
+@@ -492,6 +493,9 @@ static inline void do_softirq_own_stack(
  	__do_softirq();
  }
  #endif
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
-@@ -487,6 +491,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -499,6 +503,7 @@ extern void __raise_softirq_irqoff(unsig
  
  extern void raise_softirq_irqoff(unsigned int nr);
  extern void raise_softirq(unsigned int nr);
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  
-@@ -644,6 +649,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -656,6 +661,12 @@ void tasklet_hrtimer_cancel(struct taskl
  	tasklet_kill(&ttimer->tasklet);
  }
  
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /* We use the MSB mostly because its available */
  #define PREEMPT_NEED_RESCHED	0x80000000
-@@ -59,9 +63,15 @@
+@@ -80,9 +84,15 @@
  #include <asm/preempt.h>
  
  #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
@@ -163,36 +163,36 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  /*
   * Are we doing bottom half or hardware interrupt processing?
-@@ -72,7 +82,6 @@
+@@ -100,7 +110,6 @@
  #define in_irq()		(hardirq_count())
  #define in_softirq()		(softirq_count())
  #define in_interrupt()		(irq_count())
 -#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
- 
- /*
-  * Are we in NMI context?
+ #define in_nmi()		(preempt_count() & NMI_MASK)
+ #define in_task()		(!(preempt_count() & \
+ 				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1971,6 +1971,8 @@ struct task_struct {
+@@ -1055,6 +1055,8 @@ struct task_struct {
  #endif
  #ifdef CONFIG_PREEMPT_RT_BASE
- 	struct rcu_head put_rcu;
-+	int softirq_nestcnt;
-+	unsigned int softirqs_raised;
+ 	struct rcu_head			put_rcu;
++	int				softirq_nestcnt;
++	unsigned int			softirqs_raised;
  #endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- 	unsigned long	task_state_change;
-@@ -2287,6 +2289,7 @@ extern void thread_group_cputime_adjuste
+ 	unsigned long			task_state_change;
+@@ -1227,6 +1229,7 @@ extern struct pid *cad_pid;
  /*
   * Per process flags
   */
-+#define PF_IN_SOFTIRQ	0x00000001	/* Task is serving softirq */
- #define PF_EXITING	0x00000004	/* getting shut down */
- #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
- #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
++#define PF_IN_SOFTIRQ		0x00000001      /* Task is serving softirq */
+ #define PF_IDLE			0x00000002	/* I am an IDLE thread */
+ #define PF_EXITING		0x00000004	/* Getting shut down */
+ #define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
 --- a/init/main.c
 +++ b/init/main.c
-@@ -507,6 +507,7 @@ asmlinkage __visible void __init start_k
+@@ -537,6 +537,7 @@ asmlinkage __visible void __init start_k
  	setup_command_line(command_line);
  	setup_nr_cpu_ids();
  	setup_per_cpu_areas();
@@ -799,7 +799,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	.thread_comm		= "ksoftirqd/%u",
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -882,14 +882,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -881,14 +881,7 @@ static bool can_stop_idle_tick(int cpu,
  		return false;
  
  	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -817,7 +817,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3856,11 +3856,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3844,11 +3844,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
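
The per-task fields added above drive a softirq path that stays
preemptible; a heavily condensed, hedged sketch (helper names are
assumptions, and the real enable path also runs pending softirqs):

static void rt_local_bh_disable(void)
{
	/* Softirq-disabled state lives in the task, not in the preempt
	 * count, so bh-disabled sections remain preemptible. */
	if (current->softirq_nestcnt++ == 0)
		migrate_disable();
}

static void rt_local_bh_enable(void)
{
	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
		wakeup_softirqd();
	if (--current->softirq_nestcnt == 0)
		migrate_enable();
}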
  
diff --git a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 4051ee6..fceefae 100644
--- a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 16:34:17 +0100
 Subject: softirq: split timer softirqs out of ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
 timer wakeups, which cannot happen in hardirq context. The prio has been
diff --git a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
index 0afe6c9..87ebf81 100644
--- a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
+++ b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Fri, 20 Jan 2017 18:10:20 +0100
 Subject: [PATCH] softirq: wake the timer softirq if needed
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The irq-exit path checks only whether the "normal" softirq thread is
 running and ignores the state of the "timer" softirq thread. It is possible
@@ -25,12 +25,20 @@ Cc: stable-rt at vger.kernel.org
 Signed-off-by: Mike Galbraith <efault at gmx.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- kernel/softirq.c |   10 ++++------
- 1 file changed, 4 insertions(+), 6 deletions(-)
+ kernel/softirq.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
 
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -206,6 +206,7 @@ static void handle_softirq(unsigned int
+@@ -28,6 +28,7 @@
+ #include <linux/tick.h>
+ #include <linux/locallock.h>
+ #include <linux/irq.h>
++#include <linux/sched/types.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -206,6 +207,7 @@ static void handle_softirq(unsigned int
  	}
  }
  
@@ -38,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * If ksoftirqd is scheduled, we do not want to process pending softirqs
   * right now. Let ksoftirqd handle this at its own rate, to get fairness.
-@@ -217,7 +218,6 @@ static bool ksoftirqd_running(void)
+@@ -217,7 +219,6 @@ static bool ksoftirqd_running(void)
  	return tsk && (tsk->state == TASK_RUNNING);
  }
  
@@ -46,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static inline int ksoftirqd_softirq_pending(void)
  {
  	return local_softirq_pending();
-@@ -773,13 +773,10 @@ void irq_enter(void)
+@@ -773,13 +774,10 @@ void irq_enter(void)
  
  static inline void invoke_softirq(void)
  {
@@ -62,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (!force_irqthreads) {
  #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
  		/*
-@@ -800,6 +797,7 @@ static inline void invoke_softirq(void)
+@@ -800,6 +798,7 @@ static inline void invoke_softirq(void)
  		wakeup_softirqd();
  	}
  #else /* PREEMPT_RT_FULL */
diff --git a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 4ba595a..5eead40 100644
--- a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Allen Pais <allen.pais at oracle.com>
 Date: Fri, 13 Dec 2013 09:44:41 +0530
 Subject: sparc64: use generic rwsem spinlocks rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Signed-off-by: Allen Pais <allen.pais at oracle.com>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/arch/sparc/Kconfig
 +++ b/arch/sparc/Kconfig
-@@ -194,12 +194,10 @@ config NR_CPUS
+@@ -199,12 +199,10 @@ config NR_CPUS
  source kernel/Kconfig.hz
  
  config RWSEM_GENERIC_SPINLOCK
diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
index d10dd0a..e8c4bd9 100644
--- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
+++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
@@ -1,7 +1,7 @@
 Subject: spinlock: Split the lock types header
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 19:34:01 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Split raw_spinlock into its own file and the remaining spinlock_t into
 its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches/features/all/rt/stop-machine-raw-lock.patch b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
index bf19ce2..f9327fc 100644
--- a/debian/patches/features/all/rt/stop-machine-raw-lock.patch
+++ b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
@@ -1,7 +1,7 @@
 Subject: stop_machine: Use raw spinlocks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 11:01:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use raw locks in stop_machine() to allow locking in irq-off regions.
 
diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index d2444fe..196e529 100644
--- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:27 -0500
 Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Instead of playing with non-preemption, introduce explicit
 startup serialization. This is more robust and cleaner as
diff --git a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 8ee1922..5734feb 100644
--- a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 16:05:28 +0100
 Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
index 8e0b8cc..09972d8 100644
--- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
+++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 15 Jul 2010 10:29:00 +0200
 Subject: suspend: Prevent might sleep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 timekeeping suspend/resume calls read_persistent_clock() which takes
 rtc_lock. That results in might sleep warnings because at that point
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/kernel.h
 +++ b/include/linux/kernel.h
-@@ -488,6 +488,7 @@ extern enum system_states {
+@@ -499,6 +499,7 @@ extern enum system_states {
  	SYSTEM_HALT,
  	SYSTEM_POWER_OFF,
  	SYSTEM_RESTART,
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TAINT_PROPRIETARY_MODULE	0
 --- a/kernel/power/hibernate.c
 +++ b/kernel/power/hibernate.c
-@@ -286,6 +286,8 @@ static int create_image(int platform_mod
+@@ -287,6 +287,8 @@ static int create_image(int platform_mod
  
  	local_irq_disable();
  
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +
  	error = syscore_suspend();
  	if (error) {
- 		printk(KERN_ERR "PM: Some system devices failed to power down, "
+ 		pr_err("Some system devices failed to power down, aborting hibernation\n");
 @@ -317,6 +319,7 @@ static int create_image(int platform_mod
  	syscore_resume();
  
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_enable();
  
   Enable_cpus:
-@@ -446,6 +449,7 @@ static int resume_target_kernel(bool pla
+@@ -445,6 +448,7 @@ static int resume_target_kernel(bool pla
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	error = syscore_suspend();
  	if (error)
-@@ -479,6 +483,7 @@ static int resume_target_kernel(bool pla
+@@ -478,6 +482,7 @@ static int resume_target_kernel(bool pla
  	syscore_resume();
  
   Enable_irqs:
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_enable();
  
   Enable_cpus:
-@@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
+@@ -563,6 +568,7 @@ int hibernation_platform_enter(void)
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	syscore_suspend();
  	if (pm_wakeup_pending()) {
  		error = -EAGAIN;
-@@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
+@@ -575,6 +581,7 @@ int hibernation_platform_enter(void)
  
   Power_up:
  	syscore_resume();
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   Enable_cpus:
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t
+@@ -384,6 +384,8 @@ static int suspend_enter(suspend_state_t
  	arch_suspend_disable_irqs();
  	BUG_ON(!irqs_disabled());
  
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	error = syscore_suspend();
  	if (!error) {
  		*wakeup = pm_wakeup_pending();
-@@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t
+@@ -400,6 +402,8 @@ static int suspend_enter(suspend_state_t
  		syscore_resume();
  	}
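
Every hunk above follows the same bracket; one hedged, condensed sketch
(the function name is illustrative and the sleep itself is elided):

static int syscore_suspend_bracketed(void)
{
	int error;

	local_irq_disable();
	system_state = SYSTEM_SUSPEND;	/* stand down might_sleep() checks */

	error = syscore_suspend();
	if (!error) {
		/* ... enter the sleep state, then ... */
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;
	local_irq_enable();
	return error;
}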
  
diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
index ffb42b3..26b6bc8 100644
--- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch
+++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
 Subject: sysfs: Add /sys/kernel/realtime entry
 From: Clark Williams <williams at redhat.com>
 Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Add a /sys/kernel entry to indicate that the kernel is a
 realtime kernel.
diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 9e88420..52d7bff 100644
--- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -1,7 +1,7 @@
 Subject: tasklet: Prevent tasklets from going into infinite spin in RT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Tue Nov 29 20:18:22 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
 and spinlocks turn into mutexes. But this can cause issues with
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -508,8 +508,9 @@ static inline struct task_struct *this_c
+@@ -520,8 +520,9 @@ static inline struct task_struct *this_c
       to be executed on some cpu at least once after this.
     * If the tasklet is already scheduled, but its execution is still not
       started, it will be executed only once.
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
     * Tasklet is strictly serialized wrt itself, but not
       wrt another tasklets. If client needs some intertask synchronization,
       he makes it with spinlocks.
-@@ -534,27 +535,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -546,27 +547,36 @@ struct tasklet_struct name = { NULL, 0,
  enum
  {
  	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define tasklet_unlock_wait(t) do { } while (0)
  #define tasklet_unlock(t) do { } while (0)
  #endif
-@@ -603,12 +613,7 @@ static inline void tasklet_disable(struc
+@@ -615,12 +625,7 @@ static inline void tasklet_disable(struc
  	smp_mb();
  }
  
diff --git a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
index 4827b5e..9959f81 100644
--- a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <wagi at monom.org>
 Date: Tue, 17 Feb 2015 09:37:44 +0100
 Subject: thermal: Defer thermal wakups to threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 On RT the spin lock in pkg_temp_thermal_platform_thermal_notify will
 call schedule while we run in irq context.
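
The shape of the fix is the usual RT one: the hard-irq notifier shrinks to
a trampoline that queues a simple-work item, and the real handler runs in
kthread context where sleeping locks are legal. Condensed from the hunks
below:

    static struct swork_event notify_work;

    /* kthread context: taking (sleeping-on-RT) spin locks is fine here */
    static void pkg_thermal_notify_work(struct swork_event *event)
    {
            /* body of the old notifier callback moves here */
    }

    /* hard-irq context: never sleeps, just kicks the worker */
    static int pkg_thermal_notify(u64 msr_val)
    {
            swork_queue(&notify_work);
            return 0;
    }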
@@ -24,8 +24,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
 [bigeasy: reorder init/deinit position. TODO: flush swork on exit]
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- drivers/thermal/x86_pkg_temp_thermal.c |   50 +++++++++++++++++++++++++++++++--
- 1 file changed, 47 insertions(+), 3 deletions(-)
+ drivers/thermal/x86_pkg_temp_thermal.c |   52 +++++++++++++++++++++++++++++++--
+ 1 file changed, 49 insertions(+), 3 deletions(-)
 
 --- a/drivers/thermal/x86_pkg_temp_thermal.c
 +++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -37,34 +37,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #include <asm/cpu_device_id.h>
  #include <asm/mce.h>
  
-@@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_w
- 	}
+@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(in
+ 	schedule_delayed_work_on(cpu, work, ms);
  }
  
--static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+static void platform_thermal_notify_work(struct swork_event *event)
+-static int pkg_thermal_notify(u64 msr_val)
++static void pkg_thermal_notify_work(struct swork_event *event)
  {
- 	unsigned long flags;
  	int cpu = smp_processor_id();
-@@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_the
- 			pkg_work_scheduled[phy_id]) {
- 		disable_pkg_thres_interrupt();
- 		spin_unlock_irqrestore(&pkg_work_lock, flags);
--		return -EINVAL;
-+		return;
+ 	struct pkg_device *pkgdev;
+@@ -348,9 +349,47 @@ static int pkg_thermal_notify(u64 msr_va
  	}
- 	pkg_work_scheduled[phy_id] = 1;
- 	spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_the
- 	schedule_delayed_work_on(cpu,
- 				&per_cpu(pkg_temp_thermal_threshold_work, cpu),
- 				msecs_to_jiffies(notify_delay_ms));
+ 
+ 	spin_unlock_irqrestore(&pkg_temp_lock, flags);
 +}
 +
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +static struct swork_event notify_work;
 +
-+static int thermal_notify_work_init(void)
++static int pkg_thermal_notify_work_init(void)
 +{
 +	int err;
 +
@@ -72,16 +63,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +	if (err)
 +		return err;
 +
-+	INIT_SWORK(&notify_work, platform_thermal_notify_work);
++	INIT_SWORK(&notify_work, pkg_thermal_notify_work);
  	return 0;
  }
  
-+static void thermal_notify_work_cleanup(void)
++static void pkg_thermal_notify_work_cleanup(void)
 +{
 +	swork_put();
 +}
 +
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static int pkg_thermal_notify(u64 msr_val)
 +{
 +	swork_queue(&notify_work);
 +	return 0;
@@ -89,45 +80,51 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 +
 +#else  /* !CONFIG_PREEMPT_RT_FULL */
 +
-+static int thermal_notify_work_init(void) { return 0; }
++static int pkg_thermal_notify_work_init(void) { return 0; }
 +
-+static void thermal_notify_work_cleanup(void) {  }
++static void pkg_thermal_notify_work_cleanup(void) {  }
 +
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static int pkg_thermal_notify(u64 msr_val)
 +{
-+	platform_thermal_notify_work(NULL);
-+
++	pkg_thermal_notify_work(NULL);
 +	return 0;
 +}
 +#endif /* CONFIG_PREEMPT_RT_FULL */
 +
- static int find_siblings_cpu(int cpu)
+ static int pkg_temp_thermal_device_add(unsigned int cpu)
  {
- 	int i;
-@@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(
+ 	int pkgid = topology_logical_package_id(cpu);
+@@ -515,10 +554,15 @@ static int __init pkg_temp_thermal_init(
  	if (!x86_match_cpu(pkg_temp_thermal_ids))
  		return -ENODEV;
  
-+	if (!thermal_notify_work_init())
++	if (!pkg_thermal_notify_work_init())
 +		return -ENODEV;
 +
- 	spin_lock_init(&pkg_work_lock);
- 	platform_thermal_package_notify =
- 			pkg_temp_thermal_platform_thermal_notify;
-@@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(
- 	kfree(pkg_work_scheduled);
- 	platform_thermal_package_notify = NULL;
- 	platform_thermal_package_rate_control = NULL;
--
-+	thermal_notify_work_cleanup();
- 	return -ENODEV;
+ 	max_packages = topology_max_packages();
+ 	packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL);
+-	if (!packages)
+-		return -ENOMEM;
++	if (!packages) {
++		ret = -ENOMEM;
++		goto err;
++	}
+ 
+ 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
+ 				pkg_thermal_cpu_online,	pkg_thermal_cpu_offline);
+@@ -536,6 +580,7 @@ static int __init pkg_temp_thermal_init(
+ 	return 0;
+ 
+ err:
++	pkg_thermal_notify_work_cleanup();
+ 	kfree(packages);
+ 	return ret;
+ }
+@@ -549,6 +594,7 @@ static void __exit pkg_temp_thermal_exit
+ 	cpuhp_remove_state(pkg_thermal_hp_state);
+ 	debugfs_remove_recursive(debugfs);
+ 	kfree(packages);
++	pkg_thermal_notify_work_cleanup();
  }
+ module_exit(pkg_temp_thermal_exit)
  
-@@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit
- 	mutex_unlock(&phy_dev_list_mutex);
- 	platform_thermal_package_notify = NULL;
- 	platform_thermal_package_rate_control = NULL;
-+	thermal_notify_work_cleanup();
- 	for_each_online_cpu(i)
- 		cancel_delayed_work_sync(
- 			&per_cpu(pkg_temp_thermal_threshold_work, i));
diff --git a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
index ffc1f71..28e6c58 100644
--- a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 Subject: tick/broadcast: Make broadcast hrtimer irqsafe
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:47:10 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Otherwise we end up with the following:
 
diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
index 26597cc..152d18b 100644
--- a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
+++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
@@ -1,7 +1,7 @@
 Subject: timekeeping: Split jiffies seqlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Feb 2013 22:36:59 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
 it can be taken in atomic context on RT.
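
In short: the single seqlock becomes a raw spinlock for writers plus a
seqcount for readers, so the write side can nest in atomic context on RT
while readers stay lockless. Condensed from the hunks below:

    /* writer (tick path) */
    raw_spin_lock(&jiffies_lock);
    write_seqcount_begin(&jiffies_seq);
    do_timer(ticks);
    write_seqcount_end(&jiffies_seq);
    raw_spin_unlock(&jiffies_lock);

    /* reader: retry loop against the seqcount only */
    do {
            seq = read_seqcount_begin(&jiffies_seq);
            basemono = last_jiffies_update;
            basejiff = jiffies;
    } while (read_seqcount_retry(&jiffies_seq, seq));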
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti
+@@ -66,7 +66,8 @@ static void tick_do_update_jiffies64(kti
  		return;
  
  	/* Reevaluate with jiffies_lock held */
@@ -82,8 +82,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	write_seqcount_begin(&jiffies_seq);
  
  	delta = ktime_sub(now, last_jiffies_update);
- 	if (delta.tv64 >= tick_period.tv64) {
-@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(kti
+ 	if (delta >= tick_period) {
+@@ -89,10 +90,12 @@ static void tick_do_update_jiffies64(kti
  		/* Keep the tick_next_period variable up to date */
  		tick_next_period = ktime_add(last_jiffies_update, tick_period);
  	} else {
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	update_wall_time();
  }
  
-@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(vo
+@@ -103,12 +106,14 @@ static ktime_t tick_init_jiffy_update(vo
  {
  	ktime_t period;
  
@@ -106,7 +106,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	raw_spin_lock(&jiffies_lock);
 +	write_seqcount_begin(&jiffies_seq);
  	/* Did we start the jiffies update yet ? */
- 	if (last_jiffies_update.tv64 == 0)
+ 	if (last_jiffies_update == 0)
  		last_jiffies_update = tick_next_period;
  	period = last_jiffies_update;
 -	write_sequnlock(&jiffies_lock);
@@ -115,13 +115,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return period;
  }
  
-@@ -673,10 +678,10 @@ static ktime_t tick_nohz_stop_sched_tick
+@@ -672,10 +677,10 @@ static ktime_t tick_nohz_stop_sched_tick
  
  	/* Read jiffies and the time when jiffies were updated last */
  	do {
 -		seq = read_seqbegin(&jiffies_lock);
 +		seq = read_seqcount_begin(&jiffies_seq);
- 		basemono = last_jiffies_update.tv64;
+ 		basemono = last_jiffies_update;
  		basejiff = jiffies;
 -	} while (read_seqretry(&jiffies_lock, seq));
 +	} while (read_seqcount_retry(&jiffies_seq, seq));
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (rcu_needs_cpu(basemono, &next_rcu) ||
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
-@@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2302,8 +2302,10 @@ EXPORT_SYMBOL(hardpps);
   */
  void xtime_update(unsigned long ticks)
  {
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/time/timekeeping.h
 +++ b/kernel/time/timekeeping.h
-@@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
+@@ -17,7 +17,8 @@ extern void timekeeping_resume(void);
  extern void do_timer(unsigned long ticks);
  extern void update_wall_time(void);
  
diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index defe14d..c2a63f8 100644
--- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 21 Aug 2009 11:56:45 +0200
 Subject: timer: delay waking softirqs from the jiffy tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 People were complaining about broken balancing with the recent -rt
 series.
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1641,13 +1641,13 @@ void update_process_times(int user_tick)
+@@ -1601,13 +1601,13 @@ void update_process_times(int user_tick)
  
  	/* Note: this timer irq context must be accounted for as well. */
  	account_process_tick(p, user_tick);
@@ -71,6 +71,6 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		irq_work_tick();
  #endif
 -	scheduler_tick();
- 	run_posix_cpu_timers(p);
+ 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ 		run_posix_cpu_timers(p);
  }
- 
diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
index 5ae2694..d482e0e 100644
--- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
+++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
@@ -1,7 +1,7 @@
 Subject: timer-fd: Prevent live lock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 25 Jan 2012 11:08:40 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 If hrtimer_try_to_cancel() requires a retry, then depending on the
 priority setting the retry loop might prevent timer callback completion
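
The fix lets the canceling side sleep instead of busy-looping; roughly
(hrtimer_wait_for_timer() is the helper the RT series provides for this):

    for (;;) {
            /* returns < 0 only while the callback is running */
            if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
                    break;
            /* sleep until the callback has finished, don't spin */
            hrtimer_wait_for_timer(&ctx->t.tmr);
    }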
diff --git a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
index e295d6b..55cf686 100644
--- a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
+++ b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 1 Mar 2017 16:30:49 +0100
 Subject: [PATCH] timer/hrtimer: check properly for a running timer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 hrtimer_callback_running() checks only whether a timer is running on a
 CPU in hardirq-context. This is okay for !RT. For RT environment we move
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -455,7 +455,13 @@ static inline int hrtimer_is_queued(stru
+@@ -444,7 +444,13 @@ static inline int hrtimer_is_queued(stru
   */
  static inline int hrtimer_callback_running(const struct hrtimer *timer)
  {
diff --git a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
index af13f96..18090d4 100644
--- a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
+++ b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 13 Jul 2016 18:22:23 +0200
 Subject: [PATCH] timer: make the base lock raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The part where the base lock is held got more predictable / shorter after the
 timer rework. One reason is the lack of re-cascading.
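
The conversion itself is mechanical; the point is that a raw_spinlock_t
remains a true spinning lock on RT, which is acceptable now that the hold
times are short and bounded:

    struct timer_base {
            raw_spinlock_t          lock;   /* was spinlock_t */
            struct timer_list       *running_timer;
            unsigned long           clk;
            /* ... */
    };

    raw_spin_lock_irqsave(&base->lock, flags);
    /* short, bounded critical section */
    raw_spin_unlock_irqrestore(&base->lock, flags);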
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -193,7 +193,7 @@ EXPORT_SYMBOL(jiffies_64);
+@@ -195,7 +195,7 @@ EXPORT_SYMBOL(jiffies_64);
  #endif
  
  struct timer_base {
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	struct timer_list	*running_timer;
  	unsigned long		clk;
  	unsigned long		next_expiry;
-@@ -948,10 +948,10 @@ static struct timer_base *lock_timer_bas
+@@ -913,10 +913,10 @@ static struct timer_base *lock_timer_bas
  
  		if (!(tf & TIMER_MIGRATING)) {
  			base = get_timer_base(tf);
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  		cpu_relax();
  	}
-@@ -1023,9 +1023,9 @@ static inline int
+@@ -986,9 +986,9 @@ static inline int
  			/* See the comment in lock_timer_base() */
  			timer->flags |= TIMER_MIGRATING;
  
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  			WRITE_ONCE(timer->flags,
  				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
  		}
-@@ -1050,7 +1050,7 @@ static inline int
+@@ -1013,7 +1013,7 @@ static inline int
  	}
  
  out_unlock:
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return ret;
  }
-@@ -1144,16 +1144,16 @@ void add_timer_on(struct timer_list *tim
+@@ -1106,16 +1106,16 @@ void add_timer_on(struct timer_list *tim
  	if (base != new_base) {
  		timer->flags |= TIMER_MIGRATING;
  
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(add_timer_on);
  
-@@ -1180,7 +1180,7 @@ int del_timer(struct timer_list *timer)
+@@ -1141,7 +1141,7 @@ int del_timer(struct timer_list *timer)
  	if (timer_pending(timer)) {
  		base = lock_timer_base(timer, &flags);
  		ret = detach_if_pending(timer, base, true);
@@ -86,16 +86,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	}
  
  	return ret;
-@@ -1208,7 +1208,7 @@ int try_to_del_timer_sync(struct timer_l
- 		timer_stats_timer_clear_start_info(timer);
+@@ -1168,7 +1168,7 @@ int try_to_del_timer_sync(struct timer_l
+ 	if (base->running_timer != timer)
  		ret = detach_if_pending(timer, base, true);
- 	}
+ 
 -	spin_unlock_irqrestore(&base->lock, flags);
 +	raw_spin_unlock_irqrestore(&base->lock, flags);
  
  	return ret;
  }
-@@ -1340,13 +1340,13 @@ static void expire_timers(struct timer_b
+@@ -1299,13 +1299,13 @@ static void expire_timers(struct timer_b
  		data = timer->data;
  
  		if (timer->flags & TIMER_IRQSAFE) {
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		}
  	}
  }
-@@ -1515,7 +1515,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1474,7 +1474,7 @@ u64 get_next_timer_interrupt(unsigned lo
  	if (cpu_is_offline(smp_processor_id()))
  		return expires;
  
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	nextevt = __next_timer_interrupt(base);
  	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
  	base->next_expiry = nextevt;
-@@ -1543,7 +1543,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1502,7 +1502,7 @@ u64 get_next_timer_interrupt(unsigned lo
  		if ((expires - basem) > TICK_NSEC)
  			base->is_idle = true;
  	}
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return cmp_next_hrtimer_event(basem, expires);
  }
-@@ -1630,7 +1630,7 @@ static inline void __run_timers(struct t
+@@ -1590,7 +1590,7 @@ static inline void __run_timers(struct t
  	if (!time_after_eq(jiffies, base->clk))
  		return;
  
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	while (time_after_eq(jiffies, base->clk)) {
  
-@@ -1641,7 +1641,7 @@ static inline void __run_timers(struct t
+@@ -1601,7 +1601,7 @@ static inline void __run_timers(struct t
  			expire_timers(base, heads + levels);
  	}
  	base->running_timer = NULL;
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  /*
-@@ -1836,16 +1836,16 @@ int timers_dead_cpu(unsigned int cpu)
+@@ -1786,16 +1786,16 @@ int timers_dead_cpu(unsigned int cpu)
  		 * The caller is globally serialized and nobody else
  		 * takes two locks at once, deadlock is not possible.
  		 */
@@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		put_cpu_ptr(&timer_bases);
  	}
  	return 0;
-@@ -1861,7 +1861,7 @@ static void __init init_timer_cpu(int cp
+@@ -1811,7 +1811,7 @@ static void __init init_timer_cpu(int cp
  	for (i = 0; i < NR_BASES; i++) {
  		base = per_cpu_ptr(&timer_bases[i], cpu);
  		base->cpu = cpu;
diff --git a/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
deleted file mode 100644
index b886b1d..0000000
--- a/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+++ /dev/null
@@ -1,229 +0,0 @@
-From: Haris Okanovic <haris.okanovic at ni.com>
-Date: Fri, 3 Feb 2017 17:26:44 +0100
-Subject: [PATCH] timers: Don't wake ktimersoftd on every tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-We recently upgraded from 4.1 to 4.6 and noticed a minor latency
-regression caused by an additional thread wakeup (ktimersoftd) in
-interrupt context on every tick. The wakeups are from
-run_local_timers() raising TIMER_SOFTIRQ. Both TIMER and SCHED softirq
-coalesced into one ksoftirqd wakeup prior to Sebastian's change to split
-timers into their own thread.
-
-There's already logic in run_local_timers() to avoid some unnecessary
-wakeups of ksoftirqd, but it doesn't seems to catch them all. In
-particular, I've seen many unnecessary wakeups when jiffies increments
-prior to run_local_timers().
-
-Change the way timers are collected per Julia and Thomas'
-recommendation: Expired timers are now collected in interrupt context
-and fired in ktimersoftd to avoid double-walk of `pending_map`.
-
-Collect expired timers in interrupt context to avoid overhead of waking
-ktimersoftd on every tick. ktimersoftd now wakes only when one or more
-timers are ready, which yields a minor reduction in small latency spikes.
-
-This is implemented by storing lists of expired timers in timer_base,
-updated on each tick. Any addition to the lists wakes ktimersoftd
-(softirq) to process those timers.
-
-Signed-off-by: Haris Okanovic <haris.okanovic at ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- kernel/time/timer.c |   96 ++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 67 insertions(+), 29 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -206,6 +206,8 @@ struct timer_base {
- 	bool			is_idle;
- 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
- 	struct hlist_head	vectors[WHEEL_SIZE];
-+	struct hlist_head	expired_lists[LVL_DEPTH];
-+	int			expired_count;
- } ____cacheline_aligned;
- 
- static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-@@ -1353,7 +1355,8 @@ static void call_timer_fn(struct timer_l
- 	}
- }
- 
--static void expire_timers(struct timer_base *base, struct hlist_head *head)
-+static inline void __expire_timers(struct timer_base *base,
-+				   struct hlist_head *head)
- {
- 	while (!hlist_empty(head)) {
- 		struct timer_list *timer;
-@@ -1384,21 +1387,38 @@ static void expire_timers(struct timer_b
- 	}
- }
- 
--static int __collect_expired_timers(struct timer_base *base,
--				    struct hlist_head *heads)
-+static void expire_timers(struct timer_base *base)
-+{
-+	struct hlist_head *head;
-+
-+	while (base->expired_count--) {
-+		head = base->expired_lists + base->expired_count;
-+		__expire_timers(base, head);
-+	}
-+	base->expired_count = 0;
-+}
-+
-+static void __collect_expired_timers(struct timer_base *base)
- {
- 	unsigned long clk = base->clk;
- 	struct hlist_head *vec;
--	int i, levels = 0;
-+	int i;
- 	unsigned int idx;
- 
-+	/*
-+	 * expire_timers() must be called at least once before we can
-+	 * collect more timers
-+	 */
-+	if (WARN_ON(base->expired_count))
-+		return;
-+
- 	for (i = 0; i < LVL_DEPTH; i++) {
- 		idx = (clk & LVL_MASK) + i * LVL_SIZE;
- 
- 		if (__test_and_clear_bit(idx, base->pending_map)) {
- 			vec = base->vectors + idx;
--			hlist_move_list(vec, heads++);
--			levels++;
-+			hlist_move_list(vec,
-+				&base->expired_lists[base->expired_count++]);
- 		}
- 		/* Is it time to look at the next level? */
- 		if (clk & LVL_CLK_MASK)
-@@ -1406,7 +1426,6 @@ static int __collect_expired_timers(stru
- 		/* Shift clock for the next level granularity */
- 		clk >>= LVL_CLK_SHIFT;
- 	}
--	return levels;
- }
- 
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -1599,8 +1618,7 @@ void timer_clear_idle(void)
- 	base->is_idle = false;
- }
- 
--static int collect_expired_timers(struct timer_base *base,
--				  struct hlist_head *heads)
-+static void collect_expired_timers(struct timer_base *base)
- {
- 	/*
- 	 * NOHZ optimization. After a long idle sleep we need to forward the
-@@ -1617,20 +1635,49 @@ static int collect_expired_timers(struct
- 		if (time_after(next, jiffies)) {
- 			/* The call site will increment clock! */
- 			base->clk = jiffies - 1;
--			return 0;
-+			return;
- 		}
- 		base->clk = next;
- 	}
--	return __collect_expired_timers(base, heads);
-+	__collect_expired_timers(base);
- }
- #else
--static inline int collect_expired_timers(struct timer_base *base,
--					 struct hlist_head *heads)
-+static inline void collect_expired_timers(struct timer_base *base)
- {
--	return __collect_expired_timers(base, heads);
-+	__collect_expired_timers(base);
- }
- #endif
- 
-+static int find_expired_timers(struct timer_base *base)
-+{
-+	const unsigned long int end_clk = jiffies;
-+
-+	while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
-+		collect_expired_timers(base);
-+		base->clk++;
-+	}
-+
-+	return base->expired_count;
-+}
-+
-+/* Called from CPU tick routine to quickly collect expired timers */
-+static int tick_find_expired(struct timer_base *base)
-+{
-+	int count;
-+
-+	raw_spin_lock(&base->lock);
-+
-+	if (unlikely(time_after(jiffies, base->clk + HZ))) {
-+		/* defer to ktimersoftd; don't spend too long in irq context */
-+		count = -1;
-+	} else
-+		count = find_expired_timers(base);
-+
-+	raw_spin_unlock(&base->lock);
-+
-+	return count;
-+}
-+
- /*
-  * Called from the timer interrupt handler to charge one tick to the current
-  * process.  user_tick is 1 if the tick is user time, 0 for system.
-@@ -1657,22 +1704,11 @@ void update_process_times(int user_tick)
-  */
- static inline void __run_timers(struct timer_base *base)
- {
--	struct hlist_head heads[LVL_DEPTH];
--	int levels;
--
--	if (!time_after_eq(jiffies, base->clk))
--		return;
--
- 	raw_spin_lock_irq(&base->lock);
- 
--	while (time_after_eq(jiffies, base->clk)) {
--
--		levels = collect_expired_timers(base, heads);
--		base->clk++;
-+	while (find_expired_timers(base))
-+		expire_timers(base);
- 
--		while (levels--)
--			expire_timers(base, heads + levels);
--	}
- 	raw_spin_unlock_irq(&base->lock);
- 	wakeup_timer_waiters(base);
- }
-@@ -1698,12 +1734,12 @@ void run_local_timers(void)
- 
- 	hrtimer_run_queues();
- 	/* Raise the softirq only if required. */
--	if (time_before(jiffies, base->clk)) {
-+	if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
- 		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
- 			return;
- 		/* CPU is awake, so check the deferrable base. */
- 		base++;
--		if (time_before(jiffies, base->clk))
-+		if (time_before(jiffies, base->clk) || !tick_find_expired(base))
- 			return;
- 	}
- 	raise_softirq(TIMER_SOFTIRQ);
-@@ -1873,6 +1909,7 @@ int timers_dead_cpu(unsigned int cpu)
- 		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- 
- 		BUG_ON(old_base->running_timer);
-+		BUG_ON(old_base->expired_count);
- 
- 		for (i = 0; i < WHEEL_SIZE; i++)
- 			migrate_timer_list(new_base, old_base->vectors + i);
-@@ -1899,6 +1936,7 @@ static void __init init_timer_cpu(int cp
- #ifdef CONFIG_PREEMPT_RT_FULL
- 		init_swait_queue_head(&base->wait_for_running_timer);
- #endif
-+		base->expired_count = 0;
- 	}
- }
- 
diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
index e8f233a..280901a 100644
--- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
+++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: timers: Prepare for full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When softirqs can be preempted we need to make sure that cancelling
 the timer from the active thread can not deadlock vs. a running timer
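
On RT the canceling task must not spin while the (preemptible) callback
runs, so the timer base grows an swait queue for cancelers to sleep on. A
rough sketch of the idea (helper and field names as used in the RT series):

    /* canceler side, replaces cpu_relax() in the del_timer_sync() loop */
    static void wait_for_running_timer(struct timer_list *timer)
    {
            struct timer_base *base = get_timer_base(timer->flags);

            swait_event(base->wait_for_running_timer,
                        base->running_timer != timer);
    }

    /* softirq side, once the expired callbacks are done */
    base->running_timer = NULL;
    swake_up_all(&base->wait_for_running_timer);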
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
  include/linux/timer.h |    2 +-
  kernel/sched/core.c   |    9 +++++++--
- kernel/time/timer.c   |   44 ++++++++++++++++++++++++++++++++++++++++----
- 3 files changed, 48 insertions(+), 7 deletions(-)
+ kernel/time/timer.c   |   45 +++++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 49 insertions(+), 7 deletions(-)
 
 --- a/include/linux/timer.h
 +++ b/include/linux/timer.h
-@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list
+@@ -198,7 +198,7 @@ extern void add_timer(struct timer_list
  
  extern int try_to_del_timer_sync(struct timer_list *timer);
  
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define del_timer_sync(t)		del_timer(t)
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -525,11 +525,14 @@ void resched_cpu(int cpu)
+@@ -532,11 +532,14 @@ void resched_cpu(int cpu)
   */
  int get_nohz_timer_target(void)
  {
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	rcu_read_lock();
  	for_each_domain(cpu, sd) {
-@@ -548,6 +551,8 @@ int get_nohz_timer_target(void)
+@@ -555,6 +558,8 @@ int get_nohz_timer_target(void)
  		cpu = housekeeping_any_cpu();
  unlock:
  	rcu_read_unlock();
@@ -54,10 +54,18 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	preempt_enable_rt();
  	return cpu;
  }
- /*
+ 
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -195,6 +195,9 @@ EXPORT_SYMBOL(jiffies_64);
+@@ -44,6 +44,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/swait.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -197,6 +198,9 @@ EXPORT_SYMBOL(jiffies_64);
  struct timer_base {
  	raw_spinlock_t		lock;
  	struct timer_list	*running_timer;
@@ -67,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	unsigned long		clk;
  	unsigned long		next_expiry;
  	unsigned int		cpu;
-@@ -1157,6 +1160,33 @@ void add_timer_on(struct timer_list *tim
+@@ -1119,6 +1123,33 @@ void add_timer_on(struct timer_list *tim
  }
  EXPORT_SYMBOL_GPL(add_timer_on);
  
@@ -101,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * del_timer - deactive a timer.
   * @timer: the timer to be deactivated
-@@ -1214,7 +1244,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1174,7 +1205,7 @@ int try_to_del_timer_sync(struct timer_l
  }
  EXPORT_SYMBOL(try_to_del_timer_sync);
  
@@ -110,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * del_timer_sync - deactivate a timer and wait for the handler to finish.
   * @timer: the timer to be deactivated
-@@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1234,7 +1265,7 @@ int del_timer_sync(struct timer_list *ti
  		int ret = try_to_del_timer_sync(timer);
  		if (ret >= 0)
  			return ret;
@@ -119,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  EXPORT_SYMBOL(del_timer_sync);
-@@ -1339,13 +1369,16 @@ static void expire_timers(struct timer_b
+@@ -1298,13 +1329,16 @@ static void expire_timers(struct timer_b
  		fn = timer->function;
  		data = timer->data;
  
@@ -137,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			raw_spin_lock_irq(&base->lock);
  		}
  	}
-@@ -1640,8 +1673,8 @@ static inline void __run_timers(struct t
+@@ -1600,8 +1634,8 @@ static inline void __run_timers(struct t
  		while (levels--)
  			expire_timers(base, heads + levels);
  	}
@@ -147,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1863,6 +1896,9 @@ static void __init init_timer_cpu(int cp
+@@ -1813,6 +1847,9 @@ static void __init init_timer_cpu(int cp
  		base->cpu = cpu;
  		raw_spin_lock_init(&base->lock);
  		base->clk = jiffies;
diff --git a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
index 867d681..5fbc0ef 100644
--- a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
+++ b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
@@ -2,7 +2,7 @@ From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 5 Jan 2016 10:21:59 +0100
 Subject: trace/latency-hist: Consider new argument when probing the
  sched_switch tracer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The sched_switch tracer has got a new argument. Fix the latency tracer
 accordingly.
diff --git a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
index 3ce95fc..1e17736 100644
--- a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
@@ -1,7 +1,7 @@
 Subject: trace: Use rcuidle version for preemptoff_hist trace point
 From: Yang Shi <yang.shi at windriver.com>
 Date: Tue, 23 Feb 2016 13:23:23 -0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When running a -rt kernel with both PREEMPT_OFF_HIST and LOCKDEP enabled,
 the below error is reported:
@@ -55,7 +55,7 @@ in 4.4-rt. It looks such fix is still needed.
 
 --- a/kernel/trace/trace_irqsoff.c
 +++ b/kernel/trace/trace_irqsoff.c
-@@ -425,13 +425,13 @@ void start_critical_timings(void)
+@@ -437,13 +437,13 @@ void start_critical_timings(void)
  {
  	if (preempt_trace() || irq_trace())
  		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -71,7 +71,7 @@ in 4.4-rt. It looks such fix is still needed.
  	if (preempt_trace() || irq_trace())
  		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
  }
-@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -453,7 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
  #ifdef CONFIG_PROVE_LOCKING
  void time_hardirqs_on(unsigned long a0, unsigned long a1)
  {
@@ -80,7 +80,7 @@ in 4.4-rt. It looks such fix is still needed.
  	if (!preempt_trace() && irq_trace())
  		stop_critical_timing(a0, a1);
  }
-@@ -450,7 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -462,7 +462,7 @@ void time_hardirqs_off(unsigned long a0,
  {
  	if (!preempt_trace() && irq_trace())
  		start_critical_timing(a0, a1);
diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
index e842e15..bd30cef 100644
--- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 29 Sep 2011 12:24:30 -0500
 Subject: tracing: Account for preempt off in preempt_schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 The preempt_schedule() uses the preempt_disable_notrace() version
 because it can cause infinite recursion by the function tracer as
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3583,7 +3583,16 @@ asmlinkage __visible void __sched notrac
+@@ -3654,7 +3654,16 @@ asmlinkage __visible void __sched notrac
  		 * an infinite recursion.
  		 */
  		prev_ctx = exception_enter();
diff --git a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 3a702d0..4039bad 100644
--- a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Apr 2016 16:55:02 +0200
 Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 An oops with irqs off (panic() from irqsafe hrtimer like the watchdog
 timer) will lead to a lockdep warning on each invocation and as such
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3144,10 +3144,8 @@ void serial8250_console_write(struct uar
+@@ -3179,10 +3179,8 @@ void serial8250_console_write(struct uar
  
  	serial8250_rpm_get(up);
  
diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index ccea3f7..2d416c7 100644
--- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
 Subject: net: Remove preemption disabling in netif_rx()
 From: Priyanka Jain <Priyanka.Jain at freescale.com>
 Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 1)enqueue_to_backlog() (called from netif_rx) should be
   bound to a particular CPU. This can be achieved by
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3811,7 +3811,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3798,7 +3798,7 @@ static int netif_rx_internal(struct sk_b
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3821,13 +3821,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3808,14 +3808,14 @@ static int netif_rx_internal(struct sk_b
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
@@ -57,6 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  	{
  		unsigned int qtail;
+ 
 -		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
 -		put_cpu();
 +		ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
index 48ecb34..d32de09 100644
--- a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
+++ b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 8 Nov 2013 17:34:54 +0100
 Subject: usb: Use _nort in giveback function
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
 context") I see
diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
index 18a471c..33afe1e 100644
--- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 23:06:05 +0200
 Subject: core: Do not disable interrupts on RT in kernel/users.c
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use the local_irq_*_nort variants to reduce latencies in RT. The code
 is serialized by the locks. No need to disable interrupts.
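
The _nort variants compile to the real local_irq_save()/restore() on !RT
and to (almost) nothing on RT, where the lock, a sleeping lock there,
already provides the serialization. The pattern, as in free_uid():

    local_irq_save_nort(flags);     /* irqs really off only on !RT */
    if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
            free_user(up, flags);   /* drops uidhash_lock */
    else
            local_irq_restore_nort(flags);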
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/user.c
 +++ b/kernel/user.c
-@@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
+@@ -162,11 +162,11 @@ void free_uid(struct user_struct *up)
  	if (!up)
  		return;
  
diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
index 64d2354..75d7e19 100644
--- a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 28 Oct 2013 12:19:57 +0100
 Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 |  CC      init/main.o
 |In file included from include/linux/mmzone.h:9:0,
@@ -23,8 +23,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/wait.h
 +++ b/include/linux/wait.h
-@@ -8,6 +8,7 @@
- #include <linux/spinlock.h>
+@@ -9,6 +9,7 @@
+ 
  #include <asm/current.h>
  #include <uapi/linux/wait.h>
 +#include <linux/atomic.h>
diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
index 6883546..4ddead6 100644
--- a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 01 Jul 2013 11:02:42 +0200
 Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 An Intel i7 system regularly detected rcu_preempt stalls after the kernel
 was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #include "workqueue_internal.h"
  
-@@ -1279,7 +1280,7 @@ static int try_to_grab_pending(struct wo
+@@ -1281,7 +1282,7 @@ static int try_to_grab_pending(struct wo
  	local_unlock_irqrestore(pendingb_lock, *flags);
  	if (work_is_canceling(work))
  		return -ENOENT;
diff --git a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
index 18136bd..3e757aa 100644
--- a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
+++ b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <daniel.wagner at bmw-carit.de>
 Date: Fri, 11 Jul 2014 15:26:11 +0200
 Subject: work-simple: Simple work queue implemenation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Provides a framework for enqueuing callbacks from irq context in a
 PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
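
Typical use of the API this adds, the same pattern the thermal patch above
relies on (my_event/my_handler are illustrative names):

    static struct swork_event my_event;

    static void my_handler(struct swork_event *ev)
    {
            /* kthread context: sleeping is fine */
    }

    /* setup, process context: ref-count the worker kthread */
    err = swork_get();
    if (!err)
            INIT_SWORK(&my_event, my_handler);

    /* hot path: safe from irq context on PREEMPT_RT_FULL */
    swork_queue(&my_event);

    /* teardown */
    swork_put();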
@@ -51,8 +51,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 -obj-y += wait.o swait.o completion.o idle.o
 +obj-y += wait.o swait.o swork.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
  obj-$(CONFIG_SCHEDSTATS) += stats.o
 --- /dev/null
 +++ b/kernel/sched/swork.c
diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
index 383a9f2..20ffd76 100644
--- a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
+++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
@@ -22,28 +22,28 @@ Cc: Jens Axboe <axboe at kernel.dk>
 Cc: Linus Torvalds <torvalds at linux-foundation.org>
 Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 ---
- kernel/sched/core.c         |   81 ++++++++------------------------------------
- kernel/workqueue.c          |   52 ++++++++++++----------------
+ kernel/sched/core.c         |   86 +++++++-------------------------------------
+ kernel/workqueue.c          |   52 +++++++++++---------------
  kernel/workqueue_internal.h |    5 +-
- 3 files changed, 41 insertions(+), 97 deletions(-)
+ 3 files changed, 41 insertions(+), 102 deletions(-)
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1711,10 +1711,6 @@ static inline void ttwu_activate(struct
+@@ -1690,10 +1690,6 @@ static inline void ttwu_activate(struct
  {
  	activate_task(rq, p, en_flags);
  	p->on_rq = TASK_ON_RQ_QUEUED;
 -
--	/* if a worker is waking up, notify workqueue */
+-	/* If a worker is waking up, notify the workqueue: */
 -	if (p->flags & PF_WQ_WORKER)
 -		wq_worker_waking_up(p, cpu_of(rq));
  }
  
  /*
-@@ -2152,53 +2148,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2146,58 +2142,6 @@ try_to_wake_up(struct task_struct *p, un
  }
  
  /**
@@ -55,7 +55,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
 - * ensure that this_rq() is locked, @p is bound to this_rq() and not
 - * the current task.
 - */
--static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+-static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 -{
 -	struct rq *rq = task_rq(p);
 -
@@ -72,11 +72,11 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
 -		 * disabled avoiding further scheduler activity on it and we've
 -		 * not yet picked a replacement task.
 -		 */
--		lockdep_unpin_lock(&rq->lock, cookie);
+-		rq_unpin_lock(rq, rf);
 -		raw_spin_unlock(&rq->lock);
 -		raw_spin_lock(&p->pi_lock);
 -		raw_spin_lock(&rq->lock);
--		lockdep_repin_lock(&rq->lock, cookie);
+-		rq_repin_lock(rq, rf);
 -	}
 -
 -	if (!(p->state & TASK_NORMAL))
@@ -84,10 +84,15 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
 -
 -	trace_sched_waking(p);
 -
--	if (!task_on_rq_queued(p))
+-	if (!task_on_rq_queued(p)) {
+-		if (p->in_iowait) {
+-			delayacct_blkio_end();
+-			atomic_dec(&rq->nr_iowait);
+-		}
 -		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+-	}
 -
--	ttwu_do_wakeup(rq, p, 0, cookie);
+-	ttwu_do_wakeup(rq, p, 0, rf);
 -	ttwu_stat(p, smp_processor_id(), 0);
 -out:
 -	raw_spin_unlock(&p->pi_lock);
@@ -97,10 +102,10 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -3494,21 +3443,6 @@ static void __sched notrace __schedule(b
- 		} else {
- 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
- 			prev->on_rq = 0;
+@@ -3485,21 +3429,6 @@ static void __sched notrace __schedule(b
+ 				atomic_inc(&rq->nr_iowait);
+ 				delayacct_blkio_start();
+ 			}
 -
 -			/*
 -			 * If a worker went to sleep, notify and ask workqueue
@@ -114,12 +119,12 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
 -
 -				to_wakeup = wq_worker_sleeping(prev);
 -				if (to_wakeup)
--					try_to_wake_up_local(to_wakeup, cookie);
+-					try_to_wake_up_local(to_wakeup, &rf);
 -			}
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -3567,6 +3501,14 @@ static inline void sched_submit_work(str
+@@ -3564,6 +3493,14 @@ static inline void sched_submit_work(str
  {
  	if (!tsk->state || tsk_is_pi_blocked(tsk))
  		return;
@@ -134,7 +139,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -3575,6 +3517,12 @@ static inline void sched_submit_work(str
+@@ -3572,6 +3509,12 @@ static inline void sched_submit_work(str
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -147,7 +152,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
  asmlinkage __visible void __sched schedule(void)
  {
  	struct task_struct *tsk = current;
-@@ -3585,6 +3533,7 @@ asmlinkage __visible void __sched schedu
+@@ -3582,6 +3525,7 @@ asmlinkage __visible void __sched schedu
  		__schedule(false);
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
@@ -157,7 +162,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
  
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
-@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker
+@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker
  }
  
  /**
@@ -212,7 +217,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
  	struct worker_pool *pool;
  
  	/*
-@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(s
  	 * checking NOT_RUNNING.
  	 */
  	if (worker->flags & WORKER_NOT_RUNNING)
@@ -232,7 +237,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.
  
  	/*
  	 * The counterpart of the following dec_and_test, implied mb,
-@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(s
  	 * lock is safe.
  	 */
  	if (atomic_dec_and_test(&pool->nr_running) &&
diff --git a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
index acc0878..44df623 100644
--- a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
+++ b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Prevent deadlock/stall on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Austin reported an XFS deadlock/stall on RT where scheduled work never
 gets executed and tasks wait for each other forever.
@@ -80,7 +80,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
   * A: pool->attach_mutex protected.
   *
   * PL: wq_pool_mutex protected.
-@@ -428,6 +433,31 @@ static void workqueue_sysfs_unregister(s
+@@ -430,6 +435,31 @@ static void workqueue_sysfs_unregister(s
  		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
  		else
  
@@ -112,7 +112,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  #ifdef CONFIG_DEBUG_OBJECTS_WORK
  
  static struct debug_obj_descr work_debug_descr;
-@@ -834,10 +864,16 @@ static struct worker *first_idle_worker(
+@@ -836,10 +866,16 @@ static struct worker *first_idle_worker(
   */
  static void wake_up_worker(struct worker_pool *pool)
  {
@@ -130,7 +130,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  /**
-@@ -866,7 +902,7 @@ void wq_worker_running(struct task_struc
+@@ -868,7 +904,7 @@ void wq_worker_running(struct task_struc
   */
  void wq_worker_sleeping(struct task_struct *task)
  {
@@ -139,7 +139,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  	struct worker_pool *pool;
  
  	/*
-@@ -883,26 +919,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -885,26 +921,18 @@ void wq_worker_sleeping(struct task_stru
  		return;
  
  	worker->sleeping = 1;
@@ -169,7 +169,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  /**
-@@ -1631,7 +1659,9 @@ static void worker_enter_idle(struct wor
+@@ -1635,7 +1663,9 @@ static void worker_enter_idle(struct wor
  	worker->last_active = jiffies;
  
  	/* idle_list is LIFO */
@@ -179,7 +179,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  
  	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
  		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1664,7 +1694,9 @@ static void worker_leave_idle(struct wor
+@@ -1668,7 +1698,9 @@ static void worker_leave_idle(struct wor
  		return;
  	worker_clr_flags(worker, WORKER_IDLE);
  	pool->nr_idle--;
@@ -189,7 +189,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  }
  
  static struct worker *alloc_worker(int node)
-@@ -1830,7 +1862,9 @@ static void destroy_worker(struct worker
+@@ -1834,7 +1866,9 @@ static void destroy_worker(struct worker
  	pool->nr_workers--;
  	pool->nr_idle--;
  
diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch
index e4410f9..8efb3e0 100644
--- a/debian/patches/features/all/rt/workqueue-use-locallock.patch
+++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch
@@ -1,15 +1,15 @@
 Subject: workqueue: Use local irq lock instead of irq disable regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:42:26 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Use a local_irq_lock as a replacement for irq off regions. We keep the
 semantics of irq-off with regard to the pool->lock and remain preemptible.
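
A local_irq_lock is per-CPU: on !RT it degrades to plain local_irq_save(),
on RT it is a sleeping per-CPU lock, so the section keeps the old exclusion
guarantees but stays preemptible. The pattern used throughout the hunks
below:

    static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

    local_lock_irqsave(pendingb_lock, flags);
    /* manipulate the per-CPU pending-work state */
    local_unlock_irqrestore(pendingb_lock, flags);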
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- kernel/workqueue.c |   33 +++++++++++++++++++--------------
- 1 file changed, 19 insertions(+), 14 deletions(-)
+ kernel/workqueue.c |   36 ++++++++++++++++++++++--------------
+ 1 file changed, 22 insertions(+), 14 deletions(-)
 
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #include "workqueue_internal.h"
  
-@@ -348,6 +349,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
+@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
  struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
  EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
  
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  static int worker_thread(void *__worker);
  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  
-@@ -1101,9 +1104,11 @@ static void put_pwq_unlocked(struct pool
+@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool
  		 * As both pwqs and pools are RCU protected, the
  		 * following lock operations are safe.
  		 */
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  
-@@ -1207,7 +1212,7 @@ static int try_to_grab_pending(struct wo
+@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct wo
  	struct worker_pool *pool;
  	struct pool_workqueue *pwq;
  
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	/* try to steal the timer if it exists */
  	if (is_dwork) {
-@@ -1271,7 +1276,7 @@ static int try_to_grab_pending(struct wo
+@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct wo
  	spin_unlock(&pool->lock);
  fail:
  	rcu_read_unlock();
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (work_is_canceling(work))
  		return -ENOENT;
  	cpu_relax();
-@@ -1376,7 +1381,7 @@ static void __queue_work(int cpu, struct
+@@ -1378,7 +1383,7 @@ static void __queue_work(int cpu, struct
  	 * queued or lose PENDING.  Grabbing PENDING and queueing should
  	 * happen with IRQ disabled.
  	 */
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	debug_work_activate(work);
  
-@@ -1482,14 +1487,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1484,14 +1489,14 @@ bool queue_work_on(int cpu, struct workq
  	bool ret = false;
  	unsigned long flags;
  
@@ -88,7 +88,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  EXPORT_SYMBOL(queue_work_on);
-@@ -1556,14 +1561,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1500,8 +1505,11 @@ void delayed_work_timer_fn(unsigned long
+ {
+ 	struct delayed_work *dwork = (struct delayed_work *)__data;
+ 
++	/* XXX */
++	/* local_lock(pendingb_lock); */
+ 	/* should have been called from irqsafe timer with irq already off */
+ 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
++	/* local_unlock(pendingb_lock); */
+ }
+ EXPORT_SYMBOL(delayed_work_timer_fn);
+ 
+@@ -1557,14 +1565,14 @@ bool queue_delayed_work_on(int cpu, stru
  	unsigned long flags;
  
  	/* read the comment in __queue_work() */
@@ -105,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1598,7 +1603,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1599,7 +1607,7 @@ bool mod_delayed_work_on(int cpu, struct
  
  	if (likely(ret >= 0)) {
  		__queue_delayed_work(cpu, wq, dwork, delay);
@@ -114,16 +126,16 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2916,7 +2921,7 @@ static bool __cancel_work_timer(struct w
+@@ -2923,7 +2931,7 @@ static bool __cancel_work_timer(struct w
  
  	/* tell other tasks trying to grab @work to back off */
  	mark_work_canceling(work);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pendingb_lock, flags);
  
- 	flush_work(work);
- 	clear_work_data(work);
-@@ -2971,10 +2976,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+ 	/*
+ 	 * This allows canceling during early boot.  We know that @work
+@@ -2984,10 +2992,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
   */
  bool flush_delayed_work(struct delayed_work *dwork)
  {
@@ -136,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return flush_work(&dwork->work);
  }
  EXPORT_SYMBOL(flush_delayed_work);
-@@ -2992,7 +2997,7 @@ static bool __cancel_work(struct work_st
+@@ -3005,7 +3013,7 @@ static bool __cancel_work(struct work_st
  		return false;
  
  	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
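
For readers who want the idiom in isolation: a minimal sketch of the local-lock pattern the hunks above apply, assuming an RT-patched tree that provides <linux/locallock.h>. The pendingb_lock name and the local_lock_irqsave()/local_unlock_irqrestore() calls are taken from the hunks; the function body is an illustrative stand-in. On a !RT build these macros fall back to plain local_irq_save()/restore().

    #include <linux/locallock.h>	/* RT-only header */

    static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

    static void pending_update_sketch(void)
    {
            unsigned long flags;

            /*
             * On RT this takes a per-CPU sleeping lock: the section is
             * still serialized against other local users of pendingb_lock
             * but stays preemptible.  On !RT it compiles down to
             * local_irq_save().
             */
            local_lock_irqsave(pendingb_lock, flags);
            /* ... manipulate the per-CPU PENDING state ... */
            local_unlock_irqrestore(pendingb_lock, flags);
    }
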
diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch
index eab4783..fc340b7 100644
--- a/debian/patches/features/all/rt/workqueue-use-rcu.patch
+++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Use normal rcu
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 24 Jul 2013 15:26:54 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 There is no need for sched_rcu. The undocumented reason why sched_rcu
 is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	 * determined without grabbing wq->mutex.
  	 */
  	struct work_struct	unbound_release_work;
-@@ -355,20 +355,20 @@ static void workqueue_sysfs_unregister(s
+@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(s
  #include <trace/events/workqueue.h>
  
  #define assert_rcu_or_pool_mutex()					\
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  #define for_each_cpu_worker_pool(pool, cpu)				\
  	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
-@@ -380,7 +380,7 @@ static void workqueue_sysfs_unregister(s
+@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(s
   * @pool: iteration cursor
   * @pi: integer used for iteration
   *
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * locked.  If the pool needs to be used beyond the locking in effect, the
   * caller is responsible for guaranteeing that the pool stays online.
   *
-@@ -412,7 +412,7 @@ static void workqueue_sysfs_unregister(s
+@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(s
   * @pwq: iteration cursor
   * @wq: the target workqueue
   *
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * If the pwq needs to be used beyond the locking in effect, the caller is
   * responsible for guaranteeing that the pwq stays online.
   *
-@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct
+@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct
   * @wq: the target workqueue
   * @node: the node ID
   *
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * read locked.
   * If the pwq needs to be used beyond the locking in effect, the caller is
   * responsible for guaranteeing that the pwq stays online.
-@@ -692,8 +692,8 @@ static struct pool_workqueue *get_work_p
+@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_p
   * @work: the work item of interest
   *
   * Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   *
   * All fields of the returned pool are accessible as long as the above
   * mentioned locking is in effect.  If the returned pool needs to be used
-@@ -1098,7 +1098,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool
  {
  	if (pwq) {
  		/*
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		 * following lock operations are safe.
  		 */
  		spin_lock_irq(&pwq->pool->lock);
-@@ -1226,6 +1226,7 @@ static int try_to_grab_pending(struct wo
+@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct wo
  	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
  		return 0;
  
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * The queueing is in progress, or it is already queued. Try to
  	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1264,10 +1265,12 @@ static int try_to_grab_pending(struct wo
+@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct wo
  		set_work_pool_and_keep_pending(work, pool->id);
  
  		spin_unlock(&pool->lock);
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	local_irq_restore(*flags);
  	if (work_is_canceling(work))
  		return -ENOENT;
-@@ -1381,6 +1384,7 @@ static void __queue_work(int cpu, struct
+@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct
  	if (unlikely(wq->flags & __WQ_DRAINING) &&
  	    WARN_ON_ONCE(!is_chained_work(wq)))
  		return;
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  retry:
  	if (req_cpu == WORK_CPU_UNBOUND)
  		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1437,10 +1441,8 @@ static void __queue_work(int cpu, struct
+@@ -1439,10 +1443,8 @@ static void __queue_work(int cpu, struct
  	/* pwq determined, queue */
  	trace_workqueue_queue_work(req_cpu, pwq, work);
  
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	pwq->nr_in_flight[pwq->work_color]++;
  	work_flags = work_color_to_flags(pwq->work_color);
-@@ -1458,7 +1460,9 @@ static void __queue_work(int cpu, struct
+@@ -1460,7 +1462,9 @@ static void __queue_work(int cpu, struct
  
  	insert_work(pwq, work, worklist, work_flags);
  
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -2785,14 +2789,14 @@ static bool start_flush_work(struct work
+@@ -2789,14 +2793,14 @@ static bool start_flush_work(struct work
  
  	might_sleep();
  
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/* see the comment in try_to_grab_pending() with the same code */
  	pwq = get_work_pwq(work);
  	if (pwq) {
-@@ -2821,10 +2825,11 @@ static bool start_flush_work(struct work
+@@ -2825,10 +2829,11 @@ static bool start_flush_work(struct work
  	else
  		lock_map_acquire_read(&pwq->wq->lockdep_map);
  	lock_map_release(&pwq->wq->lockdep_map);
@@ -208,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return false;
  }
  
-@@ -3245,7 +3250,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3258,7 +3263,7 @@ static void rcu_free_pool(struct rcu_hea
   * put_unbound_pool - put a worker_pool
   * @pool: worker_pool to put
   *
@@ -217,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * safe manner.  get_unbound_pool() calls this function on its failure path
   * and this function should be able to release pools which went through,
   * successfully or not, init_worker_pool().
-@@ -3299,8 +3304,8 @@ static void put_unbound_pool(struct work
+@@ -3312,8 +3317,8 @@ static void put_unbound_pool(struct work
  	del_timer_sync(&pool->idle_timer);
  	del_timer_sync(&pool->mayday_timer);
  
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -3407,14 +3412,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3420,14 +3425,14 @@ static void pwq_unbound_release_workfn(s
  	put_unbound_pool(pool);
  	mutex_unlock(&wq_pool_mutex);
  
@@ -245,7 +245,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /**
-@@ -4064,7 +4069,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4081,7 +4086,7 @@ void destroy_workqueue(struct workqueue_
  		 * The base ref is never dropped on per-cpu pwqs.  Directly
  		 * schedule RCU free.
  		 */
@@ -254,7 +254,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	} else {
  		/*
  		 * We're the sole accessor of @wq at this point.  Directly
-@@ -4157,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4174,7 +4179,8 @@ bool workqueue_congested(int cpu, struct
  	struct pool_workqueue *pwq;
  	bool ret;
  
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (cpu == WORK_CPU_UNBOUND)
  		cpu = smp_processor_id();
-@@ -4168,7 +4174,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4185,7 +4191,8 @@ bool workqueue_congested(int cpu, struct
  		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
  
  	ret = !list_empty(&pwq->delayed_works);
@@ -274,7 +274,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return ret;
  }
-@@ -4194,15 +4201,15 @@ unsigned int work_busy(struct work_struc
+@@ -4211,15 +4218,15 @@ unsigned int work_busy(struct work_struc
  	if (work_pending(work))
  		ret |= WORK_BUSY_PENDING;
  
@@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	return ret;
  }
-@@ -4391,7 +4398,7 @@ void show_workqueue_state(void)
+@@ -4408,7 +4415,7 @@ void show_workqueue_state(void)
  	unsigned long flags;
  	int pi;
  
@@ -303,7 +303,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	pr_info("Showing busy workqueues and worker pools:\n");
  
-@@ -4444,7 +4451,7 @@ void show_workqueue_state(void)
+@@ -4461,7 +4468,7 @@ void show_workqueue_state(void)
  		spin_unlock_irqrestore(&pool->lock, flags);
  	}
  
@@ -312,7 +312,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -4782,16 +4789,16 @@ bool freeze_workqueues_busy(void)
+@@ -4822,16 +4829,16 @@ bool freeze_workqueues_busy(void)
  		 * nr_active is monotonically decreasing.  It's safe
  		 * to peek without lock.
  		 */
@@ -332,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  out_unlock:
  	mutex_unlock(&wq_pool_mutex);
-@@ -4981,7 +4988,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5021,7 +5028,8 @@ static ssize_t wq_pool_ids_show(struct d
  	const char *delim = "";
  	int node, written = 0;
  
@@ -342,7 +342,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	for_each_node(node) {
  		written += scnprintf(buf + written, PAGE_SIZE - written,
  				     "%s%d:%d", delim, node,
-@@ -4989,7 +4997,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5029,7 +5037,8 @@ static ssize_t wq_pool_ids_show(struct d
  		delim = " ";
  	}
  	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
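
The resulting read-side shape, reduced to a minimal sketch (struct and function names here are illustrative stand-ins; in the real hunks the same shape wraps the get_work_pool() and pwq lookups). Note that rcu_read_lock() sections are preemptible on RT, so taking the sleeping pool->lock inside one is legal, which is the point of dropping sched_rcu:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct pool_sketch {
            spinlock_t lock;
            /* ... */
    };

    static void inspect_pool(struct pool_sketch __rcu **slot)
    {
            struct pool_sketch *pool;

            rcu_read_lock();	/* pins the pool against RCU-deferred free */
            pool = rcu_dereference(*slot);
            if (pool) {
                    /* sleeping lock on RT; fine under preemptible RCU */
                    spin_lock_irq(&pool->lock);
                    /* ... read pool state ... */
                    spin_unlock_irq(&pool->lock);
            }
            rcu_read_unlock();
    }
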
diff --git a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
index 5563990..d834094 100644
--- a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
+++ b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 2 Nov 2014 08:31:37 +0100
 Subject: x86: UV: raw_spinlock conversion
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Shrug.  Lots of hobbyists have a beast in their basement, right?
 
@@ -11,8 +11,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
  arch/x86/include/asm/uv/uv_bau.h |   14 +++++++-------
  arch/x86/platform/uv/tlb_uv.c    |   26 +++++++++++++-------------
- arch/x86/platform/uv/uv_time.c   |   21 +++++++++++++--------
- 3 files changed, 33 insertions(+), 28 deletions(-)
+ arch/x86/platform/uv/uv_time.c   |   20 ++++++++++++--------
+ 3 files changed, 32 insertions(+), 28 deletions(-)
 
 --- a/arch/x86/include/asm/uv/uv_bau.h
 +++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
 --- a/arch/x86/platform/uv/tlb_uv.c
 +++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -748,9 +748,9 @@ static void destination_plugged(struct b
+@@ -747,9 +747,9 @@ static void destination_plugged(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -770,9 +770,9 @@ static void destination_timeout(struct b
+@@ -769,9 +769,9 @@ static void destination_timeout(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -793,7 +793,7 @@ static void disable_for_period(struct ba
+@@ -792,7 +792,7 @@ static void disable_for_period(struct ba
  	cycles_t tm1;
  
  	hmaster = bcp->uvhub_master;
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (!bcp->baudisabled) {
  		stat->s_bau_disabled++;
  		tm1 = get_cycles();
-@@ -806,7 +806,7 @@ static void disable_for_period(struct ba
+@@ -805,7 +805,7 @@ static void disable_for_period(struct ba
  			}
  		}
  	}
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -869,7 +869,7 @@ static void record_send_stats(cycles_t t
+@@ -868,7 +868,7 @@ static void record_send_stats(cycles_t t
   */
  static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
  {
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	atomic_t *v;
  
  	v = &hmaster->active_descriptor_count;
-@@ -1002,7 +1002,7 @@ static int check_enable(struct bau_contr
+@@ -1001,7 +1001,7 @@ static int check_enable(struct bau_contr
  	struct bau_control *hmaster;
  
  	hmaster = bcp->uvhub_master;
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
  		stat->s_bau_reenabled++;
  		for_each_present_cpu(tcpu) {
-@@ -1014,10 +1014,10 @@ static int check_enable(struct bau_contr
+@@ -1013,10 +1013,10 @@ static int check_enable(struct bau_contr
  				tbcp->period_giveups = 0;
  			}
  		}
@@ -124,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	return -1;
  }
  
-@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables
+@@ -1938,9 +1938,9 @@ static void __init init_per_cpu_tunables
  		bcp->cong_reps			= congested_reps;
  		bcp->disabled_period		= sec_2_cycles(disabled_period);
  		bcp->giveup_limit		= giveup_limit;
@@ -199,11 +199,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  	return rc;
  }
-@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, i
- static cycle_t uv_read_rtc(struct clocksource *cs)
+@@ -299,13 +299,17 @@ static int uv_rtc_unset_timer(int cpu, i
+ static u64 uv_read_rtc(struct clocksource *cs)
  {
  	unsigned long offset;
-+	cycle_t cycles;
++	u64 cycles;
  
 +	preempt_disable();
  	if (uv_get_min_hub_revision_id() == 1)
@@ -211,10 +211,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	else
  		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
  
--	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
-+	cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+-	return (u64)uv_read_local_mmr(UVH_RTC | offset);
++	cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
 +	preempt_enable();
-+
 +	return cycles;
  }
  
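The conversion follows the standard RT recipe: spinlock_t becomes a sleeping rt_mutex on RT, so a lock taken from interrupt or otherwise non-preemptible paths must be switched to raw_spinlock_t, which keeps spinning semantics. A minimal sketch of the pattern, with the struct and field layout as illustrative stand-ins for the uvhub master state (only bcp->baudisabled appears literally in the hunks above):

    #include <linux/spinlock.h>

    struct hub_sketch {
            raw_spinlock_t  disable_lock;	/* was spinlock_t before */
            int             baudisabled;
    };

    static void set_bau_disabled(struct hub_sketch *h, int on)
    {
            unsigned long flags;

            /*
             * raw_spinlock_t keeps true spinning semantics on RT, so this
             * is safe from the non-preemptible paths the BAU code runs in.
             */
            raw_spin_lock_irqsave(&h->disable_lock, flags);
            h->baudisabled = on;
            raw_spin_unlock_irqrestore(&h->disable_lock, flags);
    }
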
diff --git a/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch b/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
deleted file mode 100644
index 29012dd..0000000
--- a/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Fri, 21 Oct 2016 10:29:11 +0200
-Subject: [PATCH] x86/apic: get rid of "warning: 'acpi_ioapic_lock' defined but
- not used"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-kbuild test robot reported this against the -RT tree:
-
-|   In file included from include/linux/mutex.h:30:0,
-|                    from include/linux/notifier.h:13,
-|                    from include/linux/memory_hotplug.h:6,
-|                    from include/linux/mmzone.h:777,
-|                    from include/linux/gfp.h:5,
-|                    from include/linux/slab.h:14,
-|                    from include/linux/resource_ext.h:19,
-|                    from include/linux/acpi.h:26,
-|                    from arch/x86/kernel/acpi/boot.c:27:
-|>> arch/x86/kernel/acpi/boot.c:90:21: warning: 'acpi_ioapic_lock' defined but not used [-Wunused-variable]
-|    static DEFINE_MUTEX(acpi_ioapic_lock);
-|                        ^
-|   include/linux/mutex_rt.h:27:15: note: in definition of macro 'DEFINE_MUTEX'
-|     struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-                  ^~~~~~~~~
-which is also true (as in non-used) for !RT but the compiler does not
-emit a warning.
-
-Reported-by: kbuild test robot <fengguang.wu at intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- arch/x86/kernel/acpi/boot.c |    2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/x86/kernel/acpi/boot.c
-+++ b/arch/x86/kernel/acpi/boot.c
-@@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata =
-  *		->ioapic_mutex
-  *			->ioapic_lock
-  */
-+#ifdef CONFIG_X86_IO_APIC
- static DEFINE_MUTEX(acpi_ioapic_lock);
-+#endif
- 
- /* --------------------------------------------------------------------------
-                               Boot-time Configuration
diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
index aae9a68..9d23b42 100644
--- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: x86: crypto: Reduce preempt disabled regions
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Restrict the preempt disabled regions to the actual floating point
 operations and enable preemption for the administrative actions.
@@ -14,32 +14,31 @@ Signed-off-by: Peter Zijlstra <peterz at infradead.org>
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
- arch/x86/crypto/aesni-intel_glue.c |   24 +++++++++++++-----------
- 1 file changed, 13 insertions(+), 11 deletions(-)
+ arch/x86/crypto/aesni-intel_glue.c |   22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
 
 --- a/arch/x86/crypto/aesni-intel_glue.c
 +++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -374,14 +374,14 @@ static int ecb_encrypt(struct skcipher_r
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
 +		kernel_fpu_begin();
  		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
--			      nbytes & AES_BLOCK_MASK);
-+				nbytes & AES_BLOCK_MASK);
+ 			      nbytes & AES_BLOCK_MASK);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -396,14 +396,14 @@ static int ecb_decrypt(struct skcipher_r
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -48,15 +47,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			      nbytes & AES_BLOCK_MASK);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -418,14 +418,14 @@ static int cbc_encrypt(struct skcipher_r
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -65,15 +64,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			      nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -440,14 +440,14 @@ static int cbc_decrypt(struct skcipher_r
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -82,15 +81,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			      nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_de
- 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -497,18 +497,20 @@ static int ctr_crypt(struct skcipher_req
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
@@ -99,13 +98,13 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			              nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
  	if (walk.nbytes) {
 +		kernel_fpu_begin();
  		ctr_crypt_final(ctx, &walk);
 +		kernel_fpu_end();
- 		err = blkcipher_walk_done(desc, &walk, 0);
+ 		err = skcipher_walk_done(&walk, 0);
  	}
 -	kernel_fpu_end();
  
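The reshaped loop, condensed into a sketch: each kernel_fpu_begin()/kernel_fpu_end() pair now brackets only the actual AES work, so the walk bookkeeping, which may sleep, runs preemptibly. Here do_aes_block() is a stand-in for the aesni_ecb_enc()-style helpers, and AES_BLOCK_MASK is the file-local mask from the glue code as in the hunks:

    #include <crypto/aes.h>
    #include <crypto/internal/skcipher.h>
    #include <asm/fpu/api.h>	/* x86: kernel_fpu_begin()/end() */

    static int ecb_crypt_sketch(struct skcipher_request *req,
                                struct crypto_aes_ctx *ctx)
    {
            struct skcipher_walk walk;
            unsigned int nbytes;
            int err;

            err = skcipher_walk_virt(&walk, req, true);
            while ((nbytes = walk.nbytes)) {
                    kernel_fpu_begin();	/* preempt off only for FPU work */
                    do_aes_block(ctx, walk.dst.virt.addr,
                                 walk.src.virt.addr,
                                 nbytes & AES_BLOCK_MASK);
                    kernel_fpu_end();	/* preemptible before walk_done() */
                    nbytes &= AES_BLOCK_SIZE - 1;
                    err = skcipher_walk_done(&walk, nbytes);
            }
            return err;
    }
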
diff --git a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
index 7718665..9c59d28 100644
--- a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 17:09:55 +0100
 Subject: x86/highmem: Add a "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 This is a copy from kmap_atomic_prot().
 
diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
index e751d4a..dd5c3b7 100644
--- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
+++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:27 -0500
 Subject: x86/ioapic: Do not unmask io_apic when interrupt is in progress
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 With threaded interrupts we might see an interrupt in progress on
 migration. Do not unmask it when this is the case.
@@ -16,7 +16,7 @@ xXx
 
 --- a/arch/x86/kernel/apic/io_apic.c
 +++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(st
+@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st
  static inline bool ioapic_irqd_mask(struct irq_data *data)
  {
  	/* If we are moving the irq we need to mask it */
diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
index 79390c1..5509b53 100644
--- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: x86: kvm Require const tsc for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Non constant TSC is a nightmare on bare metal already, but with
 virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -5958,6 +5958,13 @@ int kvm_arch_init(void *opaque)
+@@ -6105,6 +6105,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
index 5a16bb9..566c969 100644
--- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
+++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 13 Dec 2010 16:33:39 +0100
 Subject: x86: Convert mce timer to hrtimer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 mce_timer is started in atomic contexts of cpu bringup. This results
 in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
@@ -22,8 +22,8 @@ fold in:
 |[bigeasy: use ULL instead of u64 cast]
 |Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
- arch/x86/kernel/cpu/mcheck/mce.c |   52 +++++++++++++++------------------------
- 1 file changed, 20 insertions(+), 32 deletions(-)
+ arch/x86/kernel/cpu/mcheck/mce.c |   54 ++++++++++++++++++---------------------
+ 1 file changed, 26 insertions(+), 28 deletions(-)
 
 --- a/arch/x86/kernel/cpu/mcheck/mce.c
 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -34,8 +34,8 @@ fold in:
 +#include <linux/jiffies.h>
  #include <linux/jump_label.h>
  
- #include <asm/processor.h>
-@@ -1307,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 sta
+ #include <asm/intel-family.h>
+@@ -1315,7 +1316,7 @@ int memory_failure(unsigned long pfn, in
  static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
  
  static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -44,35 +44,30 @@ fold in:
  
  static unsigned long mce_adjust_timer_default(unsigned long interval)
  {
-@@ -1316,32 +1317,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1324,27 +1325,19 @@ static unsigned long mce_adjust_timer_de
  
  static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
  
--static void __restart_timer(struct timer_list *t, unsigned long interval)
-+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
+-static void __start_timer(struct timer_list *t, unsigned long interval)
++static void __start_timer(struct hrtimer *t, unsigned long iv)
  {
 -	unsigned long when = jiffies + interval;
 -	unsigned long flags;
 -
 -	local_irq_save(flags);
 -
--	if (timer_pending(t)) {
--		if (time_before(when, t->expires))
--			mod_timer(t, when);
--	} else {
--		t->expires = round_jiffies(when);
--		add_timer_on(t, smp_processor_id());
--	}
--
+-	if (!timer_pending(t) || time_before(when, t->expires))
+-		mod_timer(t, round_jiffies(when));
++	if (!iv)
++		return;
+ 
 -	local_irq_restore(flags);
-+	if (!interval)
-+		return HRTIMER_NORESTART;
-+	hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
-+	return HRTIMER_RESTART;
++	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++			       0, HRTIMER_MODE_REL_PINNED);
  }
  
 -static void mce_timer_fn(unsigned long data)
-+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
++static  enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
  {
 -	struct timer_list *t = this_cpu_ptr(&mce_timer);
 -	int cpu = smp_processor_id();
@@ -83,16 +78,20 @@ fold in:
  	iv = __this_cpu_read(mce_next_interval);
  
  	if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1364,7 +1351,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1367,7 +1360,11 @@ static void mce_timer_fn(unsigned long d
  
  done:
  	__this_cpu_write(mce_next_interval, iv);
--	__restart_timer(t, iv);
-+	return __restart_timer(timer, iv);
+-	__start_timer(t, iv);
++	if (!iv)
++		return HRTIMER_NORESTART;
++
++	hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
++	return HRTIMER_RESTART;
  }
  
  /*
-@@ -1372,7 +1359,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1375,7 +1372,7 @@ static void mce_timer_fn(unsigned long d
   */
  void mce_timer_kick(unsigned long interval)
  {
@@ -100,8 +99,8 @@ fold in:
 +	struct hrtimer *t = this_cpu_ptr(&mce_timer);
  	unsigned long iv = __this_cpu_read(mce_next_interval);
  
- 	__restart_timer(t, interval);
-@@ -1387,7 +1374,7 @@ static void mce_timer_delete_all(void)
+ 	__start_timer(t, interval);
+@@ -1390,7 +1387,7 @@ static void mce_timer_delete_all(void)
  	int cpu;
  
  	for_each_online_cpu(cpu)
@@ -110,71 +109,60 @@ fold in:
  }
  
  static void mce_do_trigger(struct work_struct *work)
-@@ -1722,7 +1709,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1725,7 +1722,7 @@ static void __mcheck_cpu_clear_vendor(st
  	}
  }
  
--static void mce_start_timer(unsigned int cpu, struct timer_list *t)
-+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+-static void mce_start_timer(struct timer_list *t)
++static void mce_start_timer(struct hrtimer *t)
  {
  	unsigned long iv = check_interval * HZ;
  
-@@ -1731,16 +1718,17 @@ static void mce_start_timer(unsigned int
+@@ -1738,18 +1735,19 @@ static void mce_start_timer(struct timer
  
- 	per_cpu(mce_next_interval, cpu) = iv;
+ static void __mcheck_cpu_setup_timer(void)
+ {
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	unsigned int cpu = smp_processor_id();
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
  
--	t->expires = round_jiffies(jiffies + iv);
--	add_timer_on(t, cpu);
-+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+			0, HRTIMER_MODE_REL_PINNED);
+-	setup_pinned_timer(t, mce_timer_fn, cpu);
++	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	t->function = mce_timer_fn;
  }
  
  static void __mcheck_cpu_init_timer(void)
  {
 -	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	unsigned int cpu = smp_processor_id();
 +	struct hrtimer *t = this_cpu_ptr(&mce_timer);
- 	unsigned int cpu = smp_processor_id();
- 
--	setup_pinned_timer(t, mce_timer_fn, cpu);
++
 +	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +	t->function = mce_timer_fn;
- 	mce_start_timer(cpu, t);
+ 
+-	setup_pinned_timer(t, mce_timer_fn, cpu);
+ 	mce_start_timer(t);
  }
  
-@@ -2465,6 +2453,8 @@ static void mce_disable_cpu(void *h)
- 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
- 		return;
+@@ -2509,7 +2507,7 @@ static int mce_cpu_dead(unsigned int cpu
  
-+	hrtimer_cancel(this_cpu_ptr(&mce_timer));
-+
- 	if (!(action & CPU_TASKS_FROZEN))
- 		cmci_clear();
+ static int mce_cpu_online(unsigned int cpu)
+ {
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ 	int ret;
  
-@@ -2487,6 +2477,7 @@ static void mce_reenable_cpu(void *h)
- 		if (b->init)
- 			wrmsrl(msr_ops.ctl(i), b->ctl);
- 	}
-+	__mcheck_cpu_init_timer();
- }
+ 	mce_device_create(cpu);
+@@ -2526,10 +2524,10 @@ static int mce_cpu_online(unsigned int c
  
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2494,7 +2485,6 @@ static int
- mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ static int mce_cpu_pre_down(unsigned int cpu)
  {
- 	unsigned int cpu = (unsigned long)hcpu;
--	struct timer_list *t = &per_cpu(mce_timer, cpu);
- 
- 	switch (action & ~CPU_TASKS_FROZEN) {
- 	case CPU_ONLINE:
-@@ -2514,11 +2504,9 @@ mce_cpu_callback(struct notifier_block *
- 		break;
- 	case CPU_DOWN_PREPARE:
- 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
--		del_timer_sync(t);
- 		break;
- 	case CPU_DOWN_FAILED:
- 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
--		mce_start_timer(cpu, t);
- 		break;
- 	}
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
  
+ 	mce_disable_cpu();
+-	del_timer_sync(t);
++	hrtimer_cancel(t);
+ 	mce_threshold_remove_device(cpu);
+ 	mce_device_remove(cpu);
+ 	return 0;
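
Stripped of the MCE specifics, this is the stock timer_list-to-hrtimer recipe used throughout these patches: hrtimer_init() plus ->function instead of setup_pinned_timer(), hrtimer_start_range_ns() instead of mod_timer(), and rearming from the callback with hrtimer_forward_now() plus HRTIMER_RESTART. A condensed sketch assembled from the hunks (names carrying _sketch are placeholders; interval handling is simplified to a fixed HZ):

    #include <linux/hrtimer.h>
    #include <linux/jiffies.h>
    #include <linux/ktime.h>

    static struct hrtimer mce_timer_sketch;

    static enum hrtimer_restart mce_timer_sketch_fn(struct hrtimer *t)
    {
            unsigned long iv = HZ;	/* next poll interval, in jiffies */

            /* ... periodic polling work would go here ... */

            if (!iv)
                    return HRTIMER_NORESTART;	/* polling switched off */
            hrtimer_forward_now(t, ns_to_ktime(jiffies_to_nsecs(iv)));
            return HRTIMER_RESTART;
    }

    static void mce_timer_sketch_setup(void)
    {
            hrtimer_init(&mce_timer_sketch, CLOCK_MONOTONIC,
                         HRTIMER_MODE_REL);
            mce_timer_sketch.function = mce_timer_sketch_fn;
            hrtimer_start_range_ns(&mce_timer_sketch,
                                   ns_to_ktime(jiffies_to_nsecs(HZ)), 0,
                                   HRTIMER_MODE_REL_PINNED);
    }
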
diff --git a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
index f3d0cba..88f7072 100644
--- a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -1,7 +1,7 @@
 Subject: x86/mce: use swait queue for mce wakeups
 From: Steven Rostedt <rostedt at goodmis.org>
 Date:	Fri, 27 Feb 2015 15:20:37 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 We had a customer report a lockup on a 3.0-rt kernel that had the
 following backtrace:
@@ -68,8 +68,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
 +#include <linux/swork.h>
  #include <linux/jump_label.h>
  
- #include <asm/processor.h>
-@@ -1384,6 +1385,56 @@ static void mce_do_trigger(struct work_s
+ #include <asm/intel-family.h>
+@@ -1397,6 +1398,56 @@ static void mce_do_trigger(struct work_s
  
  static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  
@@ -126,7 +126,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  /*
   * Notify the user(s) about new machine check events.
   * Can be called from interrupt context, but not from machine check/NMI
-@@ -1391,19 +1442,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1404,19 +1455,8 @@ static DECLARE_WORK(mce_trigger_work, mc
   */
  int mce_notify_irq(void)
  {
@@ -147,7 +147,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
  		return 1;
  	}
  	return 0;
-@@ -2545,6 +2585,10 @@ static __init int mcheck_init_device(voi
+@@ -2561,6 +2601,10 @@ static __init int mcheck_init_device(voi
  		goto err_out;
  	}
  
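The bulk of the added code is elided in the hunks above; it builds on the simple-wait API. A minimal sketch of the wake side, assuming the 4.11-era <linux/swait.h> interface (the queue name is hypothetical, and swake_up() was later renamed swake_up_one()). Swait queues take a raw spinlock internally and wake a single waiter per call, which is why they remain usable where a regular wake_up() is not RT-safe:

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(mce_event_wait);	/* hypothetical */

    static void notify_waiters(void)
    {
            /* raw-lock based and O(1): safe from restricted contexts */
            if (swait_active(&mce_event_wait))
                    swake_up(&mce_event_wait);
    }
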
diff --git a/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch b/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
deleted file mode 100644
index 6dc764e..0000000
--- a/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: John Ogness <john.ogness at linutronix.de>
-Date: Mon, 30 Jan 2017 09:41:21 +0100
-Subject: [PATCH] x86/mm/cpa: avoid wbinvd() for PREEMPT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
-
-Although wbinvd() is faster than flushing many individual pages, it
-blocks the memory bus for "long" periods of time (>100us), thus
-directly causing unusually large latencies on all CPUs, regardless
-of any CPU isolation features that may be active.
-
-For 1024 pages, flushing those pages individually can take up to
-2200us, but the task remains fully preemptible during that time.
-
-Cc: stable-rt at vger.kernel.org
-Acked-by: Peter Zijlstra (Intel) <peterz at infradead.org>
-Signed-off-by: John Ogness <john.ogness at linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- arch/x86/mm/pageattr.c |    8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned lon
- 			    int in_flags, struct page **pages)
- {
- 	unsigned int i, level;
-+#ifdef CONFIG_PREEMPT
-+	/*
-+	 * Avoid wbinvd() because it causes latencies on all CPUs,
-+	 * regardless of any CPU isolation that may be in effect.
-+	 */
-+	unsigned long do_wbinvd = 0;
-+#else
- 	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
-+#endif
- 
- 	BUG_ON(irqs_disabled());
- 
diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch
index 96e5aa5..0d76b6b 100644
--- a/debian/patches/features/all/rt/x86-preempt-lazy.patch
+++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
 Subject: x86: Support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Implement the x86 pieces for lazy preempt.
 
@@ -18,17 +18,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -17,6 +17,7 @@ config X86_64
- ### Arch settings
- config X86
- 	def_bool y
+@@ -160,6 +160,7 @@ config X86
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
 +	select HAVE_PREEMPT_LAZY
- 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
- 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
- 	select ANON_INODES
+ 	select HAVE_STACK_VALIDATION		if X86_64
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select HAVE_UNSTABLE_SCHED_CLOCK
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -129,7 +129,7 @@ static long syscall_trace_enter(struct p
+@@ -130,7 +130,7 @@ static long syscall_trace_enter(struct p
  
  #define EXIT_TO_USERMODE_LOOP_FLAGS				\
  	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
  {
-@@ -145,7 +145,7 @@ static void exit_to_usermode_loop(struct
+@@ -146,7 +146,7 @@ static void exit_to_usermode_loop(struct
  		/* We have work to do. */
  		local_irq_enable();
  
@@ -48,10 +48,10 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -308,8 +308,25 @@ END(ret_from_exception)
+@@ -332,8 +332,25 @@ END(ret_from_exception)
  ENTRY(resume_kernel)
  	DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
+ .Lneed_resched:
 +	# preempt count == 0 + NEED_RS set?
  	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
@@ -63,12 +63,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 +	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 +	jne restore_all
 +
-+	movl    PER_CPU_VAR(current_task), %ebp
-+	cmpl $0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
-+	jnz restore_all
++	movl	PER_CPU_VAR(current_task), %ebp
++	cmpl	$0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
++	jnz	restore_all
 +
-+	testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+	jz restore_all
++	testl	$_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++	jz	restore_all
 +test_int_off:
 +#endif
  	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	call	preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -546,7 +546,23 @@ GLOBAL(retint_user)
+@@ -544,7 +544,23 @@ GLOBAL(retint_user)
  	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
  	jnc	1f
  0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  1:
 --- a/arch/x86/include/asm/preempt.h
 +++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,46 @@ static __always_inline void __preempt_co
+@@ -85,17 +85,46 @@ static __always_inline void __preempt_co
   * a decrement which hits zero means we have no preempt_count and should
   * reschedule.
   */
@@ -213,7 +213,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
  
  	BLANK();
-@@ -91,4 +92,5 @@ void common(void) {
+@@ -92,4 +93,5 @@ void common(void) {
  
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
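
For readability, the decision the entry-code hunks implement in assembly is roughly the following C (a sketch only: TIF_NEED_RESCHED_LAZY and the preempt_lazy_count field exist only in RT trees, and the function name is invented):

    #include <linux/preempt.h>
    #include <linux/thread_info.h>

    /* Mirrors the resume_kernel/retint checks added above. */
    static bool want_kernel_preempt(struct thread_info *ti)
    {
            if (preempt_count())
                    return false;	/* hard preempt_disable() blocks both */
            if (test_ti_thread_flag(ti, TIF_NEED_RESCHED))
                    return true;	/* ordinary reschedule request */
            if (ti->preempt_lazy_count)
                    return false;	/* inside a lazy-preempt-off section */
            return test_ti_thread_flag(ti, TIF_NEED_RESCHED_LAZY);
    }
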
diff --git a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
index 8643aa7..8768e14 100644
--- a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at linaro.org>
 Date: Thu, 10 Dec 2015 10:58:51 -0800
 Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 When running some ptrace single step tests on x86-32 machine, the below problem
 is triggered:
diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
index ef5c799..3580bfb 100644
--- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 16 Dec 2010 14:25:18 +0100
 Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 CPU bringup calls into the random pool to initialize the stack
 canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
index b753c58..039513c 100644
--- a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 02:21:32 +0200
 Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
 
 Simplifies the separation of anon_rw_semaphores and rw_semaphores for
 -rt.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -232,8 +232,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -242,8 +242,11 @@ config ARCH_MAY_HAVE_PC_FDC
  	def_bool y
  	depends on ISA_DMA_API
  
diff --git a/debian/patches/series-rt b/debian/patches/series-rt
index 880a5f8..78cf92e 100644
--- a/debian/patches/series-rt
+++ b/debian/patches/series-rt
@@ -46,6 +46,77 @@ features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
 features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
 features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
 
+###
+# get_online_cpus() rework.
+# cpus_allowed queue from sched/core
+features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
+features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
+features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
+features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
+features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
+features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
+features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
+features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
+features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
+features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
+features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
+features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
+features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
+
+# a few patches from tip's sched/core
+features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
+features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
+features/all/rt/0002-arm-Adjust-system_state-check.patch
+features/all/rt/0003-arm64-Adjust-system_state-check.patch
+features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
+features/all/rt/0005-metag-Adjust-system_state-check.patch
+features/all/rt/0006-powerpc-Adjust-system_state-check.patch
+features/all/rt/0007-ACPI-Adjust-system_state-check.patch
+features/all/rt/0008-mm-Adjust-system_state-check.patch
+features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
+features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
+features/all/rt/0012-async-Adjust-system_state-checks.patch
+features/all/rt/0013-extable-Adjust-system_state-checks.patch
+features/all/rt/0014-printk-Adjust-system_state-checks.patch
+features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
+features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
+features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
+
+# recursive get_online_cpus() invocations from smp/hotplug
+#0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
+#0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
+#0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
+#0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
+#0005-stop_machine-Provide-stop_machine_cpuslocked.patch
+#0006-padata-Make-padata_alloc-static.patch
+#0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
+#0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
+#0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
+#0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
+#0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
+#0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
+#0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
+#0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
+#0015-s390-kernel-Use-stop_machine_cpuslocked.patch
+#0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
+#0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
+#0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
+#0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
+#0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
+#0021-PCI-Replace-the-racy-recursion-prevention.patch
+#0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
+#0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
+#0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
+#0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
+#0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
+#0027-arm-Prevent-hotplug-rwsem-recursion.patch
+#0028-s390-Prevent-hotplug-rwsem-recursion.patch
+#0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
+#0030-sched-Provide-is_percpu_thread-helper.patch
+#0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
+#0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
+###
+
 # Those two should vanish soon (not use PIT during bootup)
 features/all/rt/at91_dont_enable_disable_clock.patch
 
@@ -59,17 +130,11 @@ features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-inva
 # Stuff broken upstream, need to be sent
 ############################################################
 features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
-features/all/rt/fs-dcache-include-wait.h.patch
 features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
 features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
 features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
 features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
-features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
 features/all/rt/rxrpc-remove-unused-static-variables.patch
-features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
-features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
-features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
-features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
 
 # Wants a different fix for upstream
 features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -111,6 +176,9 @@ features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
 ############################################################
 # Stuff which should go upstream ASAP
 ############################################################
+features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
+features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+features/all/rt/add_migrate_disable.patch
 
 # SCHED BLOCK/WQ
 features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -157,6 +225,8 @@ features/all/rt/suspend-prevernt-might-sleep-splats.patch
 # NETWORKING
 features/all/rt/net-prevent-abba-deadlock.patch
 features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
+features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
 
 # X86
 features/all/rt/x86-io-apic-migra-no-unmask.patch
@@ -165,12 +235,8 @@ features/all/rt/x86-io-apic-migra-no-unmask.patch
 
 # LOCKING INIT FIXES
 
-# PCI
-features/all/rt/pci-access-use-__wake_up_all_locked.patch
-
 # WORKQUEUE
 
-
 #####################################################
 # Stuff which should go mainline, but wants some care
 #####################################################
@@ -210,7 +276,7 @@ features/all/rt/local-irq-rt-depending-variants.patch
 features/all/rt/preempt-nort-rt-variants.patch
 
 # local locks & migrate disable
-features/all/rt/introduce_migrate_disable_cpu_light.patch
+#introduce_migrate_disable_cpu_light.patch
 features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
 features/all/rt/rt-local-irq-lock.patch
 features/all/rt/locallock-add-local_lock_on.patch
@@ -246,6 +312,9 @@ features/all/rt/genirq-force-threading.patch
 # DRIVERS NET
 features/all/rt/drivers-net-vortex-fix-locking-issues.patch
 
+# ACCT
+features/all/rt/delayacct-use-raw_spinlocks.patch
+
 # MM PAGE_ALLOC
 features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
 features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
@@ -305,6 +374,8 @@ features/all/rt/sched-limit-nr-migrate.patch
 features/all/rt/sched-mmdrop-delayed.patch
 features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
 features/all/rt/sched-rt-mutex-wakeup.patch
+features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+features/all/rt/sched-Remove-TASK_ALL.patch
 features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
 features/all/rt/cond-resched-softirq-rt.patch
 features/all/rt/cond-resched-lock-rt-tweak.patch
@@ -318,6 +389,7 @@ features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
 features/all/rt/stop-machine-raw-lock.patch
 
 # MIGRATE DISABLE AND PER CPU
+# XXX redo
 features/all/rt/hotplug-light-get-online-cpus.patch
 features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
 features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -336,16 +408,12 @@ features/all/rt/softirq-preempt-fix-3-re.patch
 features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
 features/all/rt/softirq-split-locks.patch
 features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
-features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
 features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
 features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
 features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
-features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
-features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
 features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
 
 # compile fix due to rtmutex locks
-features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
 features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
 
 # FUTEX/RTMUTEX
@@ -363,7 +431,6 @@ features/all/rt/spinlock-types-separate-raw.patch
 features/all/rt/rtmutex-avoid-include-hell.patch
 features/all/rt/rtmutex_dont_include_rcu.patch
 features/all/rt/rt-add-rt-locks.patch
-features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
 features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
 features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
 features/all/rt/rtmutex-Provide-locked-slowpath.patch
@@ -436,8 +503,7 @@ features/all/rt/workqueue-use-locallock.patch
 features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
 features/all/rt/workqueue-distangle-from-rq-lock.patch
 
-# IDR
-features/all/rt/idr-use-local-lock-for-protection.patch
+# IDA
 features/all/rt/percpu_ida-use-locklocks.patch
 
 # DEBUGOBJECTS
@@ -506,9 +572,6 @@ features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
 features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
 features/all/rt/arm-enable-highmem-for-rt.patch
 
-# IPC
-features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
-
 # SYSRQ
 
 # KVM require constant freq TSC (smp function call -> cpufreq)
@@ -533,10 +596,13 @@ features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
 features/all/rt/cpumask-disable-offstack-on-rt.patch
 
 # RANDOM
+features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
 features/all/rt/random-make-it-work-on-rt.patch
 features/all/rt/random-avoid-preempt_disable-ed-section.patch
+features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
 
 # HOTPLUG
+# XXX
 features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
 features/all/rt/cpu-rt-rework-cpu-down.patch
 features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -544,7 +610,7 @@ features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
 features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
 features/all/rt/cpu_down_move_migrate_enable_back.patch
 features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
-
+#
 features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
 
 # SCSCI QLA2xxx
@@ -597,6 +663,7 @@ features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
 features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
 features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
 features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
 
 # CGROUPS
 features/all/rt/cgroups-use-simple-wait-in-css_release.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git