[linux] 03/03: [rt] Update to 4.11.8-rt5

debian-kernel at lists.debian.org debian-kernel at lists.debian.org
Sun Jul 9 12:50:32 UTC 2017


This is an automated email from the git hooks/post-receive script.

carnil pushed a commit to branch sid
in repository linux.

commit 5cea93cf9c0f6a0157476c3d512b4ee6eb050668
Author: Salvatore Bonaccorso <carnil at debian.org>
Date:   Sun Jul 9 13:42:39 2017 +0200

    [rt] Update to 4.11.8-rt5
---
 debian/changelog                                   |    1 +
 .../0001-futex-Avoid-freeing-an-active-timer.patch |    3 +-
 ...eanup-variable-names-for-futex_top_waiter.patch |    2 +-
 ...topology-Remove-cpus_allowed-manipulation.patch |    3 +-
 ...t-Pin-init-task-to-the-boot-CPU-initially.patch |    3 +-
 ...x-Deboost-before-waking-up-the-top-waiter.patch |    3 +-
 ...-Fix-early-boot-preempt-assumption-in-__s.patch |    3 +-
 ...0001-tracing-Add-hist_field_name-accessor.patch |  176 ++
 .../rt/0002-arm-Adjust-system_state-check.patch    |    3 +-
 ...mall-and-harmless-looking-inconsistencies.patch |    3 +-
 ...-Use-smp_store_release-in-mark_wake_futex.patch |    2 +-
 ...ex-deadline-Fix-a-PI-crash-for-deadline-t.patch |    3 +-
 .../all/rt/0002-tracing-Reimplement-log2.patch     |  115 ++
 .../0002-workqueue-Provide-work_on_cpu_safe.patch  |    3 +-
 .../rt/0003-arm64-Adjust-system_state-check.patch  |    3 +-
 ...rify-mark_wake_futex-memory-barrier-usage.patch |    3 +-
 ...3-futex-Remove-rt_mutex_deadlock_account_.patch |    2 +-
 ...-salinfo-Replace-racy-task-affinity-logic.patch |    3 +-
 ...-Add-interface-for-setting-absolute-time-.patch |  115 ++
 ...ine-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch |    3 +-
 .../rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch  |    3 +-
 ...mutex-Provide-futex-specific-rt_mutex-API.patch |    2 +-
 ...n-hwperf-Replace-racy-task-affinity-logic.patch |    3 +-
 ...-Redefine-the-unimplemented-RINGBUF_TIME_.patch |  331 ++++
 .../features/all/rt/0004-rtmutex-Clean-up.patch    |    3 +-
 .../0004-x86-smp-Adjust-system_state-check.patch   |    3 +-
 .../all/rt/0005-futex-Change-locking-rules.patch   |    2 +-
 .../rt/0005-metag-Adjust-system_state-check.patch  |    3 +-
 ...mp-Replace-open-coded-task-affinity-logic.patch |    3 +-
 ...5-sched-rtmutex-Refactor-rt_mutex_setprio.patch |    3 +-
 ...e-event-triggers-access-to-ring_buffer_ev.patch |  299 ++++
 .../all/rt/0006-futex-Cleanup-refcounting.patch    |    2 +-
 .../0006-powerpc-Adjust-system_state-check.patch   |    3 +-
 ...hed-tracing-Update-trace_sched_pi_setprio.patch |    3 +-
 ...rc-sysfs-Replace-racy-task-affinity-logic.patch |    3 +-
 ...-ring-buffer-event-param-to-hist-field-fu.patch |  140 ++
 .../rt/0007-ACPI-Adjust-system_state-check.patch   |    3 +-
 ...sor-Fix-error-handling-in-__acpi_processo.patch |    3 +-
 ...ework-inconsistent-rt_mutex-futex_q-state.patch |    2 +-
 ...0007-rtmutex-Fix-PI-chain-order-integrity.patch |    3 +-
 ...racing-Increase-tracing-map-KEYS_MAX-size.patch |   25 +
 ...rocessor-Replace-racy-task-affinity-logic.patch |    3 +-
 ...rt_mutex_futex_unlock-out-from-under-hb-l.patch |    2 +-
 .../all/rt/0008-mm-Adjust-system_state-check.patch |    3 +-
 .../0008-rtmutex-Fix-more-prio-comparisons.patch   |    3 +-
 ...Break-out-hist-trigger-assignment-parsing.patch |   92 +
 ...req-ia64-Replace-racy-task-affinity-logic.patch |    3 +-
 ...-cpufreq-pasemi-Adjust-system_state-check.patch |    3 +-
 ...x-rt_mutex-Introduce-rt_mutex_init_waiter.patch |    2 +-
 ...g-preempt-count-leak-in-rt_mutex_futex_un.patch |    3 +-
 ...ing-Make-traceprobe-parsing-code-reusable.patch |  318 ++++
 ...ufreq-sh-Replace-racy-task-affinity-logic.patch |    3 +-
 ...tex-Restructure-rt_mutex_finish_proxy_loc.patch |    2 +-
 ...010-iommu-vt-d-Adjust-system_state-checks.patch |    3 +-
 ...10-tracing-Add-NO_DISCARD-event-file-flag.patch |  106 ++
 ...parc-us3-Replace-racy-task-affinity-logic.patch |    3 +-
 ...k-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch |    2 +-
 ...-post-trigger-flag-to-hist-trigger-comman.patch |   29 +
 .../rt/0012-async-Adjust-system_state-checks.patch |    3 +-
 ...arc-us2e-Replace-racy-task-affinity-logic.patch |    3 +-
 .../0012-futex-Futex_unlock_pi-determinism.patch   |    2 +-
 ...racing-Add-hist-trigger-timestamp-support.patch |  232 +++
 ...rypto-N2-Replace-racy-task-affinity-logic.patch |    3 +-
 .../0013-extable-Adjust-system_state-checks.patch  |    3 +-
 ...-hb-lock-before-enqueueing-on-the-rtmutex.patch |    2 +-
 ...-per-element-variable-support-to-tracing_.patch |  233 +++
 .../0014-printk-Adjust-system_state-checks.patch   |    3 +-
 ...racing-Add-hist_data-member-to-hist_field.patch |   79 +
 ...0015-mm-vmscan-Adjust-system_state-checks.patch |    3 +-
 ...-usecs-modifier-for-hist-trigger-timestam.patch |  131 ++
 ...16-init-Introduce-SYSTEM_SCHEDULING-state.patch |    3 +-
 ...ing-Add-variable-support-to-hist-triggers.patch |  692 ++++++++
 ...Enable-might_sleep-and-smp_processor_id-c.patch |    3 +-
 ...ount-for-variables-in-named-trigger-compa.patch |   43 +
 ...-simple-expression-support-to-hist-trigge.patch |  603 +++++++
 ...-variable-reference-handling-to-hist-trig.patch | 1123 ++++++++++++
 ...acing-Add-support-for-dynamic-tracepoints.patch |  196 +++
 ...0021-tracing-Add-hist-trigger-action-hook.patch |  228 +++
 ...-tracing-Add-support-for-synthetic-events.patch |  822 +++++++++
 ...g-Add-onmatch-hist-trigger-action-support.patch | 1269 ++++++++++++++
 ...ing-Add-onmax-hist-trigger-action-support.patch |  456 +++++
 ...ow-whitespace-to-surround-hist-trigger-fi.patch |   58 +
 ...e-duplicate-count-from-tracing_map-availa.patch |  125 ++
 ...7-tracing-Add-cpu-field-for-hist-triggers.patch |  133 ++
 ...-hist-trigger-support-for-variable-refere.patch |  106 ++
 ...-last-error-error-facility-for-hist-trigg.patch |  500 ++++++
 ...dd-inter-event-hist-trigger-Documentation.patch |  403 +++++
 ...tracing-Make-tracing_set_clock-non-static.patch |   40 +
 ...g-Add-a-clock-attribute-for-hist-triggers.patch |  116 ++
 ...irq-in-translation-section-permission-fau.patch |    2 +-
 ...UFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch |    3 +-
 ...CK-printk-drop-the-logbuf_lock-more-often.patch |    2 +-
 ...64-downgrade-preempt_disable-d-region-to-.patch |    2 +-
 ...lapic-mark-LAPIC-timer-handler-as-irqsafe.patch |    2 +-
 ...NFSv4-replace-seqcount_t-with-a-seqlock_t.patch |    2 +-
 ...om-invalidate-batched-entropy-after-crng-.patch |  162 --
 ...vert-acpi_gbl_hardware-lock-back-to-a-raw.patch |    2 +-
 .../features/all/rt/add_migrate_disable.patch      |    2 +-
 .../rt/arch-arm64-Add-lazy-preempt-support.patch   |    2 +-
 ...t-remove-irq-handler-when-clock-is-unused.patch |    2 +-
 ...-at91-tclib-default-to-tclib-timer-for-rt.patch |    2 +-
 .../all/rt/arm-convert-boot-lock-to-raw.patch      |    2 +-
 .../all/rt/arm-enable-highmem-for-rt.patch         |    2 +-
 .../all/rt/arm-highmem-flush-tlb-on-unmap.patch    |    2 +-
 .../rt/arm-include-definition-for-cpumask_t.patch  |    2 +-
 ...arm-kprobe-replace-patch_lock-to-raw-lock.patch |    2 +-
 .../features/all/rt/arm-preempt-lazy-support.patch |    2 +-
 .../features/all/rt/arm-unwind-use_raw_lock.patch  |    2 +-
 ...pufeature-don-t-use-mutex-in-bringup-path.patch |  170 ++
 .../rt/arm64-xen--Make-XEN-depend-on-non-rt.patch  |    2 +-
 .../all/rt/at91_dont_enable_disable_clock.patch    |    2 +-
 .../all/rt/ata-disable-interrupts-if-non-rt.patch  |    2 +-
 .../features/all/rt/block-blk-mq-use-swait.patch   |    2 +-
 .../block-mq-don-t-complete-requests-via-IPI.patch |    2 +-
 .../all/rt/block-mq-drop-preempt-disable.patch     |    2 +-
 .../features/all/rt/block-mq-use-cpu_light.patch   |    2 +-
 .../block-shorten-interrupt-disabled-regions.patch |    2 +-
 .../features/all/rt/block-use-cpu-chill.patch      |    2 +-
 .../all/rt/bug-rt-dependend-variants.patch         |    2 +-
 ...ps-scheduling-while-atomic-in-cgroup-code.patch |    2 +-
 .../cgroups-use-simple-wait-in-css_release.patch   |    2 +-
 ...-random-don-t-print-that-the-init-is-done.patch |   12 +-
 ...-drivers-timer-atmel-pit-fix-double-free_.patch |    2 +-
 ...clocksource-tclib-allow-higher-clockrates.patch |    2 +-
 .../all/rt/completion-use-simple-wait-queues.patch |    6 +-
 .../all/rt/cond-resched-lock-rt-tweak.patch        |    2 +-
 .../features/all/rt/cond-resched-softirq-rt.patch  |    4 +-
 ...n_proc-Protect-send_msg-with-a-local-lock.patch |    2 +-
 ...g-Document-why-PREEMPT_RT-uses-a-spinlock.patch |    2 +-
 ...ke-hotplug-lock-a-sleeping-spinlock-on-rt.patch |    2 +-
 .../features/all/rt/cpu-rt-rework-cpu-down.patch   |    8 +-
 ...l-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch |   14 +-
 .../all/rt/cpu_down_move_migrate_enable_back.patch |    2 +-
 ...req-drop-K8-s-driver-from-beeing-selected.patch |    2 +-
 .../all/rt/cpumask-disable-offstack-on-rt.patch    |    2 +-
 ...t-Convert-callback_lock-to-raw_spinlock_t.patch |    2 +-
 ...educe-preempt-disabled-regions-more-algos.patch |    2 +-
 .../patches/features/all/rt/debugobjects-rt.patch  |    2 +-
 .../all/rt/delayacct-use-raw_spinlocks.patch       |    3 +-
 .../patches/features/all/rt/dm-make-rt-aware.patch |    2 +-
 ...ck-zram-Replace-bit-spinlocks-with-rtmute.patch |    2 +-
 .../rt/drivers-net-8139-disable-irq-nosync.patch   |    2 +-
 .../rt/drivers-net-vortex-fix-locking-issues.patch |    2 +-
 ...ers-random-reduce-preempt-disabled-region.patch |    2 +-
 .../all/rt/drivers-tty-fix-omap-lock-crap.patch    |    2 +-
 .../rt/drivers-tty-pl011-irq-disable-madness.patch |    2 +-
 ...m-Don-t-disable-preemption-in-zcomp_strea.patch |    2 +-
 ...15-drop-trace_i915_gem_ring_dispatch-onrt.patch |    2 +-
 .../rt/drm-i915-init-spinlock-properly-on-RT.patch |    2 +-
 ...ock_irq()_in_intel_pipe_update_startend().patch |    8 +-
 ...empt_disableenable_rt()_where_recommended.patch |    2 +-
 .../features/all/rt/epoll-use-get-cpu-light.patch  |    2 +-
 .../all/rt/fs-aio-simple-simple-work.patch         |    2 +-
 .../features/all/rt/fs-block-rt-support.patch      |    2 +-
 .../rt/fs-dcache-init-in_lookup_hashtable.patch    |    2 +-
 .../fs-dcache-use-cpu-chill-in-trylock-loops.patch |    2 +-
 ...ache-use-swait_queue-instead-of-waitqueue.patch |    2 +-
 .../all/rt/fs-jbd-replace-bh_state-lock.patch      |    2 +-
 ...bd2-pull-your-plug-when-waiting-for-space.patch |    2 +-
 .../all/rt/fs-namespace-preemption-fix.patch       |    2 +-
 .../fs-nfs-turn-rmdir_sem-into-a-semaphore.patch   |    2 +-
 .../all/rt/fs-ntfs-disable-interrupt-non-rt.patch  |    2 +-
 .../rt/fs-replace-bh_uptodate_lock-for-rt.patch    |    2 +-
 .../all/rt/ftrace-Fix-trace-header-alignment.patch |    6 +-
 .../all/rt/ftrace-migrate-disable-tracing.patch    |    6 +-
 ...e-lock-unlock-symetry-versus-pi_lock-and-.patch |    2 +-
 .../features/all/rt/futex-requeue-pi-fix.patch     |    2 +-
 ...-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch |    2 +-
 ...tex-rtmutex-Cure-RT-double-blocking-issue.patch |    7 +-
 ...round-migrate_disable-enable-in-different.patch |    2 +-
 .../all/rt/genirq-disable-irqpoll-on-rt.patch      |    2 +-
 ...ot-invoke-the-affinity-callback-via-a-wor.patch |    2 +-
 .../features/all/rt/genirq-force-threading.patch   |    2 +-
 ...pdate-irq_set_irqchip_state-documentation.patch |    4 +-
 ...-set_cpus_allowed_ptr-in-sync_unplug_thre.patch |    2 +-
 .../all/rt/hotplug-light-get-online-cpus.patch     |    2 +-
 ...lug-sync_unplug-no-27-5cn-27-in-task-name.patch |    2 +-
 .../all/rt/hotplug-use-migrate-disable.patch       |    2 +-
 ...-Move-schedule_work-call-to-helper-thread.patch |    6 +-
 .../all/rt/hrtimer-enfore-64byte-alignment.patch   |    4 +-
 ...up-hrtimer-callback-changes-for-preempt-r.patch |   48 +-
 .../all/rt/hrtimers-prepare-full-preemption.patch  |   16 +-
 ...warning-from-i915-when-running-on-PREEMPT.patch |    4 +-
 .../all/rt/ide-use-nort-local-irq-variants.patch   |    2 +-
 .../rt/infiniband-mellanox-ib-use-nort-irq.patch   |    2 +-
 .../all/rt/inpt-gameport-use-local-irq-nort.patch  |    2 +-
 .../all/rt/iommu-amd--Use-WARN_ON_NORT.patch       |    2 +-
 ...don-t-disable-preempt-around-this_cpu_ptr.patch |    2 +-
 ...don-t-disable-preemption-while-accessing-.patch |    2 +-
 ...-softirq-processing-in-irq-thread-context.patch |    4 +-
 ...irqwork-Move-irq-safe-work-to-irq-context.patch |    2 +-
 ...qwork-push_most_work_into_softirq_context.patch |    2 +-
 debian/patches/features/all/rt/jump-label-rt.patch |    2 +-
 .../all/rt/kconfig-disable-a-few-options-rt.patch  |    2 +-
 .../features/all/rt/kconfig-preempt-rt-full.patch  |    2 +-
 .../kernel-SRCU-provide-a-static-initializer.patch |    2 +-
 ...fix-cpu-down-problem-if-kthread-s-cpu-is-.patch |    2 +-
 ...plug-restore-original-cpu-mask-oncpu-down.patch |    2 +-
 ...cking-use-an-exclusive-wait_q-for-sleeper.patch |  142 ++
 ...-mark-perf_cpu_context-s-timer-as-irqsafe.patch |    2 +-
 ...tk-Don-t-try-to-print-from-IRQ-NMI-region.patch |    2 +-
 ...d-Provide-a-pointer-to-the-valid-CPU-mask.patch |    3 +-
 ...d-move-stack-kprobe-clean-up-to-__put_tas.patch |    2 +-
 .../rt/kernel-softirq-unlock-with-irqs-on.patch    |    2 +-
 .../features/all/rt/kgb-serial-hackaround.patch    |    2 +-
 debian/patches/features/all/rt/latency-hist.patch  | 1819 --------------------
 .../latency_hist-update-sched_wakeup-probe.patch   |   41 -
 .../all/rt/latencyhist-disable-jump-labels.patch   |   62 -
 .../leds-trigger-disable-CPU-trigger-on-RT.patch   |    2 +-
 .../rt/list_bl-fixup-bogus-lockdep-warning.patch   |    2 +-
 .../list_bl.h-make-list-head-locking-RT-safe.patch |    2 +-
 .../all/rt/local-irq-rt-depending-variants.patch   |    2 +-
 .../all/rt/locallock-add-local_lock_on.patch       |    2 +-
 debian/patches/features/all/rt/localversion.patch  |    4 +-
 ...-compilation-error-for-CONFIG_MODULES-and.patch |    2 +-
 .../rt/lockdep-Fix-per-cpu-static-objects.patch    |    3 +-
 ...dle-statically-initialized-PER_CPU-locks-.patch |    2 +-
 .../rt/lockdep-no-softirq-accounting-on-rt.patch   |    2 +-
 ...ftest-fix-warnings-due-to-missing-PREEMPT.patch |    2 +-
 ...-do-hardirq-context-test-for-raw-spinlock.patch |    2 +-
 ...ktorture-Do-NOT-include-rwlock.h-directly.patch |    2 +-
 .../features/all/rt/md-disable-bcache.patch        |    2 +-
 .../all/rt/md-raid5-percpu-handling-rt-aware.patch |    2 +-
 .../all/rt/mips-disable-highmem-on-rt.patch        |    2 +-
 .../mm--rt--Fix-generic-kmap_atomic-for-RT.patch   |    2 +-
 ...dev-don-t-disable-IRQs-in-wb_congested_pu.patch |    2 +-
 .../all/rt/mm-bounce-local-irq-save-nort.patch     |    2 +-
 .../all/rt/mm-convert-swap-to-percpu-locked.patch  |    2 +-
 .../features/all/rt/mm-disable-sloub-rt.patch      |    2 +-
 .../patches/features/all/rt/mm-enable-slub.patch   |    2 +-
 .../features/all/rt/mm-make-vmstat-rt-aware.patch  |    2 +-
 ...ol-Don-t-call-schedule_work_on-in-preempt.patch |    2 +-
 .../all/rt/mm-memcontrol-do_not_disable_irq.patch  |    2 +-
 ...ol-mem_cgroup_migrate-replace-another-loc.patch |    2 +-
 ...m-page-alloc-use-local-lock-on-target-cpu.patch |    2 +-
 ...m-page_alloc-reduce-lock-sections-further.patch |    2 +-
 .../mm-page_alloc-rt-friendly-per-cpu-pages.patch  |    2 +-
 .../rt/mm-perform-lru_add_drain_all-remotely.patch |    2 +-
 .../all/rt/mm-protect-activate-switch-mm.patch     |    4 +-
 .../all/rt/mm-rt-kmap-atomic-scheduling.patch      |    4 +-
 .../mm-scatterlist-dont-disable-irqs-on-RT.patch   |    2 +-
 ...-t-disable-preemption-while-taking-the-pe.patch |   46 +
 .../all/rt/mm-vmalloc-use-get-cpu-light.patch      |    2 +-
 ...et-do-not-protect-workingset_shadow_nodes.patch |    2 +-
 ...smalloc_copy_with_get_cpu_var_and_locking.patch |    2 +-
 .../all/rt/mmci-remove-bogus-irq-save.patch        |    2 +-
 .../all/rt/move_sched_delayed_work_to_helper.patch |    2 +-
 .../features/all/rt/mutex-no-spin-on-rt.patch      |    2 +-
 ...napi_schedule_irqoff-disable-interrupts-o.patch |    2 +-
 .../net-Qdisc-use-a-seqlock-instead-seqcount.patch |    2 +-
 .../all/rt/net-add-a-lock-around-icmp_sk.patch     |    8 +-
 ...k-the-missing-serialization-in-ip_send_un.patch |    2 +-
 ...r-local-irq-disable-alloc-atomic-headache.patch |    2 +-
 ...cpuhotplug-drain-input_pkt_queue-lockless.patch |    2 +-
 ...otect-users-of-napi_alloc_cache-against-r.patch |    2 +-
 ...move-explicit-do_softirq-from-busy_poll_s.patch |    2 +-
 ...ays-take-qdisc-s-busylock-in-__dev_xmit_s.patch |    2 +-
 ...-iptable-xt-write-recseq-begin-rt-fallout.patch |    2 +-
 .../rt/net-make-devnet_rename_seq-a-mutex.patch    |    2 +-
 ...xmit_recursion-to-per-task-variable-on-RT.patch |    4 +-
 .../all/rt/net-prevent-abba-deadlock.patch         |    2 +-
 ...-a-way-to-delegate-processing-a-softirq-t.patch |    2 +-
 ...ev_deactivate_many-use-msleep-1-instead-o.patch |    2 +-
 .../features/all/rt/net-use-cpu-chill.patch        |    2 +-
 .../features/all/rt/net-wireless-warn-nort.patch   |    4 +-
 .../all/rt/net_disable_NET_RX_BUSY_POLL.patch      |    2 +-
 .../features/all/rt/oleg-signal-rt-fix.patch       |    6 +-
 .../all/rt/panic-disable-random-on-rt.patch        |    2 +-
 ...troduce-rcu-bh-qs-where-safe-from-softirq.patch |    2 +-
 .../features/all/rt/percpu_ida-use-locklocks.patch |    2 +-
 .../all/rt/perf-make-swevent-hrtimer-irqsafe.patch |    2 +-
 .../features/all/rt/peter_zijlstra-frob-rcu.patch  |    2 +-
 .../features/all/rt/peterz-percpu-rwsem-rt.patch   |    2 +-
 .../features/all/rt/peterz-srcu-crypto-chain.patch |    2 +-
 .../features/all/rt/pid.h-include-atomic.h.patch   |    2 +-
 debian/patches/features/all/rt/ping-sysrq.patch    |    2 +-
 .../all/rt/posix-timers-no-broadcast.patch         |    2 +-
 ...osix-timers-thread-posix-cpu-timers-on-rt.patch |    4 +-
 .../all/rt/power-disable-highmem-on-rt.patch       |    2 +-
 .../all/rt/power-use-generic-rwsem-on-rt.patch     |    2 +-
 ...-Disable-in-kernel-MPIC-emulation-for-PRE.patch |    2 +-
 .../all/rt/powerpc-preempt-lazy-support.patch      |    2 +-
 ...-device-init.c-adapt-to-completions-using.patch |    2 +-
 .../features/all/rt/preempt-lazy-support.patch     |   32 +-
 .../features/all/rt/preempt-nort-rt-variants.patch |    2 +-
 ...intk-27-boot-param-to-help-with-debugging.patch |    2 +-
 debian/patches/features/all/rt/printk-kill.patch   |    2 +-
 .../patches/features/all/rt/printk-rt-aware.patch  |    2 +-
 .../ptrace-fix-ptrace-vs-tasklist_lock-race.patch  |   10 +-
 .../all/rt/radix-tree-use-local-locks.patch        |    2 +-
 .../random-avoid-preempt_disable-ed-section.patch  |   27 +-
 .../all/rt/random-make-it-work-on-rt.patch         |    4 +-
 .../rbtree-include-rcu.h-because-we-use-it.patch   |    2 +-
 ...Eliminate-softirq-processing-from-rcutree.patch |    2 +-
 .../all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch  |    2 +-
 ...e-rcu_normal_after_boot-by-default-for-RT.patch |    2 +-
 .../all/rt/rcu-make-RCU_BOOST-default-on-RT.patch  |    2 +-
 .../rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch |    2 +-
 ..._bh_qs-disable-irq-while-calling-rcu_pree.patch |    2 +-
 ...-migrate_disable-race-with-cpu-hotplug-3f.patch |    2 +-
 ...t_full-arm-coredump-fails-for-cpu-3e-3d-4.patch |    2 +-
 ...ping-function-called-from-invalid-context.patch |    2 +-
 .../patches/features/all/rt/rt-add-rt-locks.patch  |    2 +-
 .../features/all/rt/rt-introduce-cpu-chill.patch   |    4 +-
 .../features/all/rt/rt-local-irq-lock.patch        |    2 +-
 ...cking-Reenable-migration-accross-schedule.patch |   10 +-
 .../features/all/rt/rt-preempt-base-config.patch   |    2 +-
 .../features/all/rt/rt-serial-warn-fix.patch       |    2 +-
 ...x--Handle-non-enqueued-waiters-gracefully.patch |    2 +-
 .../all/rt/rtmutex-Fix-lock-stealing-logic.patch   |  162 ++
 .../all/rt/rtmutex-Make-lock_killable-work.patch   |    2 +-
 .../all/rt/rtmutex-Provide-locked-slowpath.patch   |   12 +-
 .../rt/rtmutex-Provide-rt_mutex_lock_state.patch   |    8 +-
 .../rt/rtmutex-add-a-first-shot-of-ww_mutex.patch  |   30 +-
 .../all/rt/rtmutex-avoid-include-hell.patch        |    2 +-
 .../features/all/rt/rtmutex-futex-prepare-rt.patch |    2 +-
 .../features/all/rt/rtmutex-lock-killable.patch    |    2 +-
 .../all/rt/rtmutex-trylock-is-okay-on-RT.patch     |    2 +-
 .../features/all/rt/rtmutex_dont_include_rcu.patch |    2 +-
 .../rwsem-rt-Lift-single-reader-restriction.patch  |    2 +-
 .../rt/rxrpc-remove-unused-static-variables.patch  |    2 +-
 ...i-dont-t-disable-interrupts-in-qc_issue-h.patch |    2 +-
 ...nt-task-state-corruption-by-spurious-lock.patch |    2 +-
 .../features/all/rt/sched-Remove-TASK_ALL.patch    |    2 +-
 ...-deadline-dl_task_timer-has-to-be-irqsafe.patch |    2 +-
 .../features/all/rt/sched-delay-put-task.patch     |    4 +-
 .../rt/sched-disable-rt-group-sched-on-rt.patch    |    2 +-
 .../features/all/rt/sched-disable-ttwu-queue.patch |    2 +-
 .../features/all/rt/sched-limit-nr-migrate.patch   |    2 +-
 ...ched-might-sleep-do-not-account-rcu-depth.patch |    2 +-
 .../features/all/rt/sched-mmdrop-delayed.patch     |    2 +-
 .../features/all/rt/sched-rt-mutex-wakeup.patch    |    4 +-
 ...hed-ttwu-ensure-success-return-is-correct.patch |    2 +-
 ...ueue-Only-wake-up-idle-workers-if-not-blo.patch |    2 +-
 .../features/all/rt/scsi-fcoe-rt-aware.patch       |    2 +-
 ...ping-function-called-from-invalid-context.patch |    2 +-
 .../all/rt/seqlock-prevent-rt-starvation.patch     |    2 +-
 .../all/rt/signal-fix-up-rcu-wreckage.patch        |    6 +-
 .../rt/signal-revert-ptrace-preempt-magic.patch    |    4 +-
 ...low-rt-tasks-to-cache-one-sigqueue-struct.patch |   16 +-
 .../features/all/rt/skbufhead-raw-lock.patch       |    2 +-
 .../all/rt/slub-disable-SLUB_CPU_PARTIAL.patch     |    2 +-
 .../all/rt/slub-enable-irqs-for-no-wait.patch      |    2 +-
 ...-snd_pcm_stream_lock-irqs_disabled-splats.patch |    2 +-
 .../rt/softirq-disable-softirq-stacks-for-rt.patch |    2 +-
 .../features/all/rt/softirq-preempt-fix-3-re.patch |    2 +-
 .../features/all/rt/softirq-split-locks.patch      |    6 +-
 ...irq-split-timer-softirqs-out-of-ksoftirqd.patch |    2 +-
 .../softirq-wake-the-timer-softirq-if-needed.patch |    2 +-
 .../sparc64-use-generic-rwsem-spinlocks-rt.patch   |    2 +-
 .../all/rt/spinlock-types-separate-raw.patch       |    2 +-
 .../features/all/rt/stop-machine-raw-lock.patch    |    2 +-
 ...ne-convert-stop_machine_run-to-PREEMPT_RT.patch |    2 +-
 ...ake-svc_xprt_do_enqueue-use-get_cpu_light.patch |    2 +-
 .../rt/suspend-prevernt-might-sleep-splats.patch   |    2 +-
 .../features/all/rt/sysfs-realtime-entry.patch     |    2 +-
 ...klets-from-going-into-infinite-spin-in-rt.patch |    2 +-
 .../thermal-Defer-thermal-wakups-to-threads.patch  |    2 +-
 .../rt/tick-broadcast--Make-hrtimer-irqsafe.patch  |    2 +-
 .../all/rt/timekeeping-split-jiffies-lock.patch    |    4 +-
 ...delay-waking-softirqs-from-the-jiffy-tick.patch |    2 +-
 .../features/all/rt/timer-fd-avoid-live-lock.patch |    2 +-
 ...rtimer-check-properly-for-a-running-timer.patch |    4 +-
 .../all/rt/timer-make-the-base-lock-raw.patch      |    2 +-
 .../rt/timers-prepare-for-full-preemption.patch    |    2 +-
 ...cy-hist-Consider-new-argument-when-probin.patch |   38 -
 ...e_version_for_preemptoff_hist_trace_point.patch |   91 -
 ...count-for-preempt-off-in-preempt_schedule.patch |    2 +-
 ...l-8250-don-t-take-the-trylock-during-oops.patch |    2 +-
 ...t-remove-preemption-disabling-in-netif_rx.patch |    2 +-
 .../all/rt/usb-use-_nort-in-giveback.patch         |    2 +-
 .../features/all/rt/user-use-local-irq-nort.patch  |    2 +-
 .../features/all/rt/wait.h-include-atomic.h.patch  |    2 +-
 ...ue-work-around-irqsafe-timer-optimization.patch |    2 +-
 ...rk-simple-Simple-work-queue-implemenation.patch |    2 +-
 .../all/rt/workqueue-distangle-from-rq-lock.patch  |   14 +-
 .../all/rt/workqueue-prevent-deadlock-stall.patch  |    6 +-
 .../features/all/rt/workqueue-use-locallock.patch  |    2 +-
 .../features/all/rt/workqueue-use-rcu.patch        |    2 +-
 .../all/rt/x86-UV-raw_spinlock-conversion.patch    |    2 +-
 ...86-crypto-reduce-preempt-disabled-regions.patch |    2 +-
 .../x86-highmem-add-a-already-used-pte-check.patch |    2 +-
 .../all/rt/x86-io-apic-migra-no-unmask.patch       |    2 +-
 .../all/rt/x86-kvm-require-const-tsc-for-rt.patch  |    4 +-
 .../features/all/rt/x86-mce-timer-hrtimer.patch    |    2 +-
 .../x86-mce-use-swait-queue-for-mce-wakeups.patch  |    2 +-
 .../patches/features/all/rt/x86-preempt-lazy.patch |    2 +-
 ...x86-signal-delay-calling-signals-on-32bit.patch |    2 +-
 .../all/rt/x86-stackprot-no-random-on-rt.patch     |    2 +-
 .../all/rt/x86-use-gen-rwsem-spinlocks-rt.patch    |    2 +-
 debian/patches/series-rt                           |   47 +-
 391 files changed, 10399 insertions(+), 2773 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 5508116..6834557 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -235,6 +235,7 @@ linux (4.11.9-1) UNRELEASED; urgency=medium
 
   [ Salvatore Bonaccorso ]
   * Bump ABI to 2
+  * [rt] Update to 4.11.8-rt5
 
  -- Ben Hutchings <ben at decadent.org.uk>  Tue, 20 Jun 2017 19:18:44 +0100
 
diff --git a/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
index 171dd5e..7a6bdc6 100644
--- a/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
+++ b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
@@ -1,8 +1,7 @@
-From 97181f9bd57405b879403763284537e27d46963d Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 10 Apr 2017 18:03:36 +0200
 Subject: [PATCH 1/4] futex: Avoid freeing an active timer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Alexander reported a hrtimer debug_object splat:
 
diff --git a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
index 4c58bbd..9a246ea 100644
--- a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
+++ b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:48 +0100
 Subject: [PATCH] futex: Cleanup variable names for futex_top_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 499f5aca2cdd5e958b27e2655e7e7f82524f46b1
 
diff --git a/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch b/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
index d192b76..f628f23 100644
--- a/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
+++ b/debian/patches/features/all/rt/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
@@ -1,8 +1,7 @@
-From 048c9b954e20396e0c45ee778466994d1be2e612 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:27 +0200
 Subject: [PATCH 01/13] ia64/topology: Remove cpus_allowed manipulation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The CPU hotplug callback fiddles with the cpus_allowed pointer to pin the
 calling thread on the plugged CPU. That's already guaranteed by the hotplug
diff --git a/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch b/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
index aecafe3..c432123 100644
--- a/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
+++ b/debian/patches/features/all/rt/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
@@ -1,8 +1,7 @@
-From 8fb12156b8db61af3d49f3e5e104568494581d1f Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:32 +0200
 Subject: [PATCH 01/17] init: Pin init task to the boot CPU, initially
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Some of the boot code in init_kernel_freeable() which runs before SMP
 bringup assumes (rightfully) that it runs on the boot CPU and therefore can
diff --git a/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
index c88ea6d..ace9620 100644
--- a/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
+++ b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
@@ -1,8 +1,7 @@
-From 2a1c6029940675abb2217b590512dbf691867ec4 Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:07 +0100
 Subject: [PATCH 1/9] rtmutex: Deboost before waking up the top waiter
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We should deboost before waking the high-priority task, such that we
 don't run two tasks with the same "state" (priority, deadline,
diff --git a/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch b/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
index 3a4f7b4..da56a44 100644
--- a/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
+++ b/debian/patches/features/all/rt/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
@@ -1,9 +1,8 @@
-From 45aea321678856687927c53972321ebfab77759a Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 24 May 2017 08:52:02 +0200
 Subject: [PATCH] sched/clock: Fix early boot preempt assumption in
  __set_sched_clock_stable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The more strict early boot preemption warnings found that
 __set_sched_clock_stable() was incorrectly assuming we'd still be
diff --git a/debian/patches/features/all/rt/0001-tracing-Add-hist_field_name-accessor.patch b/debian/patches/features/all/rt/0001-tracing-Add-hist_field_name-accessor.patch
new file mode 100644
index 0000000..7643fd0
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-tracing-Add-hist_field_name-accessor.patch
@@ -0,0 +1,176 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:02 -0500
+Subject: [PATCH 01/32] tracing: Add hist_field_name() accessor
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+In preparation for hist_fields that won't be strictly based on
+trace_event_fields, add a new hist_field_name() accessor to allow that
+flexibility and update associated users.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   68 ++++++++++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 22 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -146,6 +146,23 @@ struct hist_trigger_data {
+ 	struct tracing_map		*map;
+ };
+ 
++static const char *hist_field_name(struct hist_field *field,
++				   unsigned int level)
++{
++	const char *field_name = "";
++
++	if (level > 1)
++		return field_name;
++
++	if (field->field)
++		field_name = field->field->name;
++
++	if (field_name == NULL)
++		field_name = "";
++
++	return field_name;
++}
++
+ static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
+ {
+ 	hist_field_fn_t fn = NULL;
+@@ -653,7 +670,6 @@ static int is_descending(const char *str
+ static int create_sort_keys(struct hist_trigger_data *hist_data)
+ {
+ 	char *fields_str = hist_data->attrs->sort_key_str;
+-	struct ftrace_event_field *field = NULL;
+ 	struct tracing_map_sort_key *sort_key;
+ 	int descending, ret = 0;
+ 	unsigned int i, j;
+@@ -670,7 +686,9 @@ static int create_sort_keys(struct hist_
+ 	}
+ 
+ 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
++		struct hist_field *hist_field;
+ 		char *field_str, *field_name;
++		const char *test_name;
+ 
+ 		sort_key = &hist_data->sort_keys[i];
+ 
+@@ -703,8 +721,11 @@ static int create_sort_keys(struct hist_
+ 		}
+ 
+ 		for (j = 1; j < hist_data->n_fields; j++) {
+-			field = hist_data->fields[j]->field;
+-			if (field && (strcmp(field_name, field->name) == 0)) {
++			hist_field = hist_data->fields[j];
++			test_name = hist_field_name(hist_field, 0);
++			if (test_name == NULL)
++				continue;
++			if (strcmp(field_name, test_name) == 0) {
+ 				sort_key->field_idx = j;
+ 				descending = is_descending(field_str);
+ 				if (descending < 0) {
+@@ -952,6 +973,7 @@ hist_trigger_entry_print(struct seq_file
+ 	struct hist_field *key_field;
+ 	char str[KSYM_SYMBOL_LEN];
+ 	bool multiline = false;
++	const char *field_name;
+ 	unsigned int i;
+ 	u64 uval;
+ 
+@@ -963,26 +985,27 @@ hist_trigger_entry_print(struct seq_file
+ 		if (i > hist_data->n_vals)
+ 			seq_puts(m, ", ");
+ 
++		field_name = hist_field_name(key_field, 0);
++
+ 		if (key_field->flags & HIST_FIELD_FL_HEX) {
+ 			uval = *(u64 *)(key + key_field->offset);
+-			seq_printf(m, "%s: %llx",
+-				   key_field->field->name, uval);
++			seq_printf(m, "%s: %llx", field_name, uval);
+ 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
+ 			uval = *(u64 *)(key + key_field->offset);
+ 			sprint_symbol_no_offset(str, uval);
+-			seq_printf(m, "%s: [%llx] %-45s",
+-				   key_field->field->name, uval, str);
++			seq_printf(m, "%s: [%llx] %-45s", field_name,
++				   uval, str);
+ 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
+ 			uval = *(u64 *)(key + key_field->offset);
+ 			sprint_symbol(str, uval);
+-			seq_printf(m, "%s: [%llx] %-55s",
+-				   key_field->field->name, uval, str);
++			seq_printf(m, "%s: [%llx] %-55s", field_name,
++				   uval, str);
+ 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+ 			char *comm = elt->private_data;
+ 
+ 			uval = *(u64 *)(key + key_field->offset);
+-			seq_printf(m, "%s: %-16s[%10llu]",
+-				   key_field->field->name, comm, uval);
++			seq_printf(m, "%s: %-16s[%10llu]", field_name,
++				   comm, uval);
+ 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
+ 			const char *syscall_name;
+ 
+@@ -991,8 +1014,8 @@ hist_trigger_entry_print(struct seq_file
+ 			if (!syscall_name)
+ 				syscall_name = "unknown_syscall";
+ 
+-			seq_printf(m, "%s: %-30s[%3llu]",
+-				   key_field->field->name, syscall_name, uval);
++			seq_printf(m, "%s: %-30s[%3llu]", field_name,
++				   syscall_name, uval);
+ 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
+ 			seq_puts(m, "stacktrace:\n");
+ 			hist_trigger_stacktrace_print(m,
+@@ -1000,15 +1023,14 @@ hist_trigger_entry_print(struct seq_file
+ 						      HIST_STACKTRACE_DEPTH);
+ 			multiline = true;
+ 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
+-			seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
++			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
+ 				   *(u64 *)(key + key_field->offset));
+ 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
+-			seq_printf(m, "%s: %-50s", key_field->field->name,
++			seq_printf(m, "%s: %-50s", field_name,
+ 				   (char *)(key + key_field->offset));
+ 		} else {
+ 			uval = *(u64 *)(key + key_field->offset);
+-			seq_printf(m, "%s: %10llu", key_field->field->name,
+-				   uval);
++			seq_printf(m, "%s: %10llu", field_name, uval);
+ 		}
+ 	}
+ 
+@@ -1021,13 +1043,13 @@ hist_trigger_entry_print(struct seq_file
+ 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
+ 
+ 	for (i = 1; i < hist_data->n_vals; i++) {
++		field_name = hist_field_name(hist_data->fields[i], 0);
++
+ 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
+-			seq_printf(m, "  %s: %10llx",
+-				   hist_data->fields[i]->field->name,
++			seq_printf(m, "  %s: %10llx", field_name,
+ 				   tracing_map_read_sum(elt, i));
+ 		} else {
+-			seq_printf(m, "  %s: %10llu",
+-				   hist_data->fields[i]->field->name,
++			seq_printf(m, "  %s: %10llu", field_name,
+ 				   tracing_map_read_sum(elt, i));
+ 		}
+ 	}
+@@ -1142,7 +1164,9 @@ static const char *get_hist_field_flags(
+ 
+ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
+ {
+-	seq_printf(m, "%s", hist_field->field->name);
++	const char *field_name = hist_field_name(hist_field, 0);
++
++	seq_printf(m, "%s", field_name);
+ 	if (hist_field->flags) {
+ 		const char *flags_str = get_hist_field_flags(hist_field);
+ 
diff --git a/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch
index 32bf4c4..ebd3484 100644
--- a/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0002-arm-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From 5976a66913a8bf42465d96776fd37fb5631edc19 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:33 +0200
 Subject: [PATCH 02/17] arm: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
index c2efd94..6211b64 100644
--- a/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
+++ b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
@@ -1,8 +1,7 @@
-From 94ffac5d847cfd790bb37b7cef1cad803743985e Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 7 Apr 2017 09:04:07 +0200
 Subject: [PATCH 2/4] futex: Fix small (and harmless looking) inconsistencies
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 During (post-commit) review Darren spotted a few minor things. One
 (harmless AFAICT) type inconsistency and a comment that wasn't as
diff --git a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
index 9720454..4d53263 100644
--- a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
+++ b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:49 +0100
 Subject: [PATCH] futex: Use smp_store_release() in mark_wake_futex()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit  1b367ece0d7e696cab1c8501bab282cc6a538b3f
 
diff --git a/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
index d234319..eec7585 100644
--- a/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
+++ b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
@@ -1,8 +1,7 @@
-From e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22 Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:08 +0100
 Subject: [PATCH 2/9] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 A crash happened while I was playing with deadline PI rtmutex.
 
diff --git a/debian/patches/features/all/rt/0002-tracing-Reimplement-log2.patch b/debian/patches/features/all/rt/0002-tracing-Reimplement-log2.patch
new file mode 100644
index 0000000..00f17a5
--- /dev/null
+++ b/debian/patches/features/all/rt/0002-tracing-Reimplement-log2.patch
@@ -0,0 +1,115 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:03 -0500
+Subject: [PATCH 02/32] tracing: Reimplement log2
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+log2 as currently implemented applies only to u64 trace_event_field
+derived fields, and assumes that anything it's applied to is a u64
+field.
+
+To prepare for synthetic fields like latencies, log2 should be
+applicable to those as well, so take the opportunity now to fix the
+current problems as well as expand to more general uses.
+
+log2 should be thought of as a chaining function rather than a field
+type.  To enable this as well as possible future function
+implementations, add a hist_field operand array into the hist_field
+definition for this purpose, and make use of it to implement the log2
+'function'.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   31 +++++++++++++++++++++++++++----
+ 1 file changed, 27 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -28,12 +28,16 @@ struct hist_field;
+ 
+ typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
+ 
++#define HIST_FIELD_OPERANDS_MAX	2
++
+ struct hist_field {
+ 	struct ftrace_event_field	*field;
+ 	unsigned long			flags;
+ 	hist_field_fn_t			fn;
+ 	unsigned int			size;
+ 	unsigned int			offset;
++	unsigned int                    is_signed;
++	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
+ };
+ 
+ static u64 hist_field_none(struct hist_field *field, void *event)
+@@ -71,7 +75,9 @@ static u64 hist_field_pstring(struct his
+ 
+ static u64 hist_field_log2(struct hist_field *hist_field, void *event)
+ {
+-	u64 val = *(u64 *)(event + hist_field->field->offset);
++	struct hist_field *operand = hist_field->operands[0];
++
++	u64 val = operand->fn(operand, event);
+ 
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+@@ -156,6 +162,8 @@ static const char *hist_field_name(struc
+ 
+ 	if (field->field)
+ 		field_name = field->field->name;
++	else if (field->flags & HIST_FIELD_FL_LOG2)
++		field_name = hist_field_name(field->operands[0], ++level);
+ 
+ 	if (field_name == NULL)
+ 		field_name = "";
+@@ -357,8 +365,20 @@ static const struct tracing_map_ops hist
+ 	.elt_init	= hist_trigger_elt_comm_init,
+ };
+ 
+-static void destroy_hist_field(struct hist_field *hist_field)
++static void destroy_hist_field(struct hist_field *hist_field,
++			       unsigned int level)
+ {
++	unsigned int i;
++
++	if (level > 2)
++		return;
++
++	if (!hist_field)
++		return;
++
++	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
++		destroy_hist_field(hist_field->operands[i], ++level);
++
+ 	kfree(hist_field);
+ }
+ 
+@@ -385,7 +405,10 @@ static struct hist_field *create_hist_fi
+ 	}
+ 
+ 	if (flags & HIST_FIELD_FL_LOG2) {
++		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
+ 		hist_field->fn = hist_field_log2;
++		hist_field->operands[0] = create_hist_field(field, fl);
++		hist_field->size = hist_field->operands[0]->size;
+ 		goto out;
+ 	}
+ 
+@@ -405,7 +428,7 @@ static struct hist_field *create_hist_fi
+ 		hist_field->fn = select_value_fn(field->size,
+ 						 field->is_signed);
+ 		if (!hist_field->fn) {
+-			destroy_hist_field(hist_field);
++			destroy_hist_field(hist_field, 0);
+ 			return NULL;
+ 		}
+ 	}
+@@ -422,7 +445,7 @@ static void destroy_hist_fields(struct h
+ 
+ 	for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
+ 		if (hist_data->fields[i]) {
+-			destroy_hist_field(hist_data->fields[i]);
++			destroy_hist_field(hist_data->fields[i], 0);
+ 			hist_data->fields[i] = NULL;
+ 		}
+ 	}
diff --git a/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch b/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
index a9e089d..55545da 100644
--- a/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
+++ b/debian/patches/features/all/rt/0002-workqueue-Provide-work_on_cpu_safe.patch
@@ -1,8 +1,7 @@
-From 0e8d6a9336b487a1dd6f1991ff376e669d4c87c6 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:28 +0200
 Subject: [PATCH 02/13] workqueue: Provide work_on_cpu_safe()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 work_on_cpu() is not protected against CPU hotplug. For code which requires
 to be either executed on an online CPU or to fail if the CPU is not
diff --git a/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch
index 1567152..7a871cc 100644
--- a/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0003-arm64-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From ef284f5ca5f102bf855e599305c0c16d6e844635 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:34 +0200
 Subject: [PATCH 03/17] arm64: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
index 276b7d3..b327e51 100644
--- a/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
+++ b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
@@ -1,8 +1,7 @@
-From 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd Mon Sep 17 00:00:00 2001
 From: "Darren Hart (VMware)" <dvhart at infradead.org>
 Date: Fri, 14 Apr 2017 15:31:38 -0700
 Subject: [PATCH 3/4] futex: Clarify mark_wake_futex memory barrier usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Clarify the scenario described in mark_wake_futex requiring the
 smp_store_release(). Update the comment to explicitly refer to the
diff --git a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
index 1cdf2e0..ef4b256 100644
--- a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
+++ b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:50 +0100
 Subject: [PATCH] futex: Remove rt_mutex_deadlock_account_*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit fffa954fb528963c2fb7b0c0084eb77e2be7ab52
 
diff --git a/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
index 812746a..0108f6a 100644
--- a/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 67cb85fdcee7fbc61c09c00360d1a4ae37641db4 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:29 +0200
 Subject: [PATCH 03/13] ia64/salinfo: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Some of the file operations in /proc/sal require to run code on the
 requested cpu. This is achieved by temporarily setting the affinity of the
diff --git a/debian/patches/features/all/rt/0003-ring-buffer-Add-interface-for-setting-absolute-time-.patch b/debian/patches/features/all/rt/0003-ring-buffer-Add-interface-for-setting-absolute-time-.patch
new file mode 100644
index 0000000..7e3ee4c
--- /dev/null
+++ b/debian/patches/features/all/rt/0003-ring-buffer-Add-interface-for-setting-absolute-time-.patch
@@ -0,0 +1,115 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:04 -0500
+Subject: [PATCH 03/32] ring-buffer: Add interface for setting absolute time
+ stamps
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Define a new function, tracing_set_time_stamp_abs(), which can be used
+to enable or disable the use of absolute timestamps rather than time
+deltas for a trace array.
+
+This resets the buffer to prevent a mix of time deltas and absolute
+timestamps.
+
+Only the interface is added here; a subsequent patch will add the
+underlying implementation.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/ring_buffer.h |    2 ++
+ kernel/trace/ring_buffer.c  |   11 +++++++++++
+ kernel/trace/trace.c        |   25 ++++++++++++++++++++++++-
+ kernel/trace/trace.h        |    2 ++
+ 4 files changed, 39 insertions(+), 1 deletion(-)
+
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -180,6 +180,8 @@ void ring_buffer_normalize_time_stamp(st
+ 				      int cpu, u64 *ts);
+ void ring_buffer_set_clock(struct ring_buffer *buffer,
+ 			   u64 (*clock)(void));
++void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
++bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+ 
+ size_t ring_buffer_page_len(void *page);
+ 
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -484,6 +484,7 @@ struct ring_buffer {
+ 	u64				(*clock)(void);
+ 
+ 	struct rb_irq_work		irq_work;
++	bool				time_stamp_abs;
+ };
+ 
+ struct ring_buffer_iter {
+@@ -1378,6 +1379,16 @@ void ring_buffer_set_clock(struct ring_b
+ 	buffer->clock = clock;
+ }
+ 
++void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
++{
++	buffer->time_stamp_abs = abs;
++}
++
++bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
++{
++	return buffer->time_stamp_abs;
++}
++
+ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+ 
+ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2082,7 +2082,7 @@ trace_event_buffer_lock_reserve(struct r
+ 
+ 	*current_rb = trace_file->tr->trace_buffer.buffer;
+ 
+-	if ((trace_file->flags &
++	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
+ 	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
+ 	    (entry = this_cpu_read(trace_buffered_event))) {
+ 		/* Try to use the per cpu buffer first */
+@@ -5959,6 +5959,29 @@ static int tracing_clock_open(struct ino
+ 	return ret;
+ }
+ 
++int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
++{
++	mutex_lock(&trace_types_lock);
++
++	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
++
++	/*
++	 * New timestamps may not be consistent with the previous setting.
++	 * Reset the buffer so that it doesn't have incomparable timestamps.
++	 */
++	tracing_reset_online_cpus(&tr->trace_buffer);
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
++		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
++	tracing_reset_online_cpus(&tr->max_buffer);
++#endif
++
++	mutex_unlock(&trace_types_lock);
++
++	return 0;
++}
++
+ struct ftrace_buffer_info {
+ 	struct trace_iterator	iter;
+ 	void			*spare;
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -278,6 +278,8 @@ extern struct mutex trace_types_lock;
+ extern int trace_array_get(struct trace_array *tr);
+ extern void trace_array_put(struct trace_array *tr);
+ 
++extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
++
+ /*
+  * The global tracer (top) should be the first trace array added,
+  * but we check the flag anyway.
diff --git a/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
index cba5c56..70b8599 100644
--- a/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
+++ b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
@@ -1,9 +1,8 @@
-From 85e2d4f992868ad78dc8bb2c077b652fcfb3661a Mon Sep 17 00:00:00 2001
 From: Xunlei Pang <xlpang at redhat.com>
 Date: Thu, 23 Mar 2017 15:56:09 +0100
 Subject: [PATCH 3/9] sched/deadline/rtmutex: Dont miss the
  dl_runtime/dl_period update
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Currently dl tasks will actually return at the very beginning
 of rt_mutex_adjust_prio_chain() in !detect_deadlock cases:
diff --git a/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
index e8568e5..38b2f66 100644
--- a/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
+++ b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
@@ -1,8 +1,7 @@
-From 59cd42c29618c45cd3c56da43402b14f611888dd Mon Sep 17 00:00:00 2001
 From: "Darren Hart (VMware)" <dvhart at infradead.org>
 Date: Fri, 14 Apr 2017 15:46:08 -0700
 Subject: [PATCH 4/4] MAINTAINERS: Add FUTEX SUBSYSTEM
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Add a MAINTAINERS block for the FUTEX SUBSYSTEM which includes the core
 kernel code, include headers, testing code, and Documentation. Excludes
diff --git a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
index ef04066..7f9af84 100644
--- a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
+++ b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:51 +0100
 Subject: [PATCH] futex,rt_mutex: Provide futex specific rt_mutex API
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 5293c2efda37775346885c7e924d4ef7018ea60b
 
diff --git a/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
index e0b3249..0ba29a6 100644
--- a/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 9feb42ac88b516e378b9782e82b651ca5bed95c4 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 6 Apr 2017 14:56:18 +0200
 Subject: [PATCH 04/13] ia64/sn/hwperf: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 sn_hwperf_op_cpu() which is invoked from an ioctl requires to run code on
 the requested cpu. This is achieved by temporarily setting the affinity of
diff --git a/debian/patches/features/all/rt/0004-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch b/debian/patches/features/all/rt/0004-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
new file mode 100644
index 0000000..d49c102
--- /dev/null
+++ b/debian/patches/features/all/rt/0004-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
@@ -0,0 +1,331 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:05 -0500
+Subject: [PATCH 04/32] ring-buffer: Redefine the unimplemented
+ RINGBUF_TYPE_TIME_STAMP
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+RINGBUF_TYPE_TIME_STAMP is defined but not used, and from what I can
+gather was reserved for something like an absolute timestamp feature
+for the ring buffer, if not a complete replacement of the current
+time_delta scheme.
+
+This code redefines RINGBUF_TYPE_TIME_STAMP to implement absolute time
+stamps.  Another way to look at it is that it essentially forces
+extended time_deltas for all events.
+
+The motivation for doing this is to enable time_deltas that aren't
+dependent on previous events in the ring buffer, making it feasible to
+use the ring_buffer_event timestamps in a more random-access way, for
+purposes other than serial event printing.
+
+To set/reset this mode, use tracing_set_timestamp_abs() from the
+previous interface patch.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/ring_buffer.h |   12 ++--
+ kernel/trace/ring_buffer.c  |  107 +++++++++++++++++++++++++++++++-------------
+ 2 files changed, 83 insertions(+), 36 deletions(-)
+
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -36,10 +36,12 @@ struct ring_buffer_event {
+  *				 array[0] = time delta (28 .. 59)
+  *				 size = 8 bytes
+  *
+- * @RINGBUF_TYPE_TIME_STAMP:	Sync time stamp with external clock
+- *				 array[0]    = tv_nsec
+- *				 array[1..2] = tv_sec
+- *				 size = 16 bytes
++ * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
++ *				 Same format as TIME_EXTEND except that the
++ *				 value is an absolute timestamp, not a delta
++ *				 event.time_delta contains bottom 27 bits
++ *				 array[0] = top (28 .. 59) bits
++ *				 size = 8 bytes
+  *
+  * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
+  *				Data record
+@@ -56,12 +58,12 @@ enum ring_buffer_type {
+ 	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
+ 	RINGBUF_TYPE_PADDING,
+ 	RINGBUF_TYPE_TIME_EXTEND,
+-	/* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
+ 	RINGBUF_TYPE_TIME_STAMP,
+ };
+ 
+ unsigned ring_buffer_event_length(struct ring_buffer_event *event);
+ void *ring_buffer_event_data(struct ring_buffer_event *event);
++u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
+ 
+ /*
+  * ring_buffer_discard_commit will remove an event that has not
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -42,6 +42,8 @@ int ring_buffer_print_entry_header(struc
+ 			 RINGBUF_TYPE_PADDING);
+ 	trace_seq_printf(s, "\ttime_extend : type == %d\n",
+ 			 RINGBUF_TYPE_TIME_EXTEND);
++	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
++			 RINGBUF_TYPE_TIME_STAMP);
+ 	trace_seq_printf(s, "\tdata max type_len  == %d\n",
+ 			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+ 
+@@ -147,6 +149,9 @@ enum {
+ #define skip_time_extend(event) \
+ 	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
+ 
++#define extended_time(event) \
++	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
++
+ static inline int rb_null_event(struct ring_buffer_event *event)
+ {
+ 	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
+@@ -187,10 +192,8 @@ rb_event_length(struct ring_buffer_event
+ 		return  event->array[0] + RB_EVNT_HDR_SIZE;
+ 
+ 	case RINGBUF_TYPE_TIME_EXTEND:
+-		return RB_LEN_TIME_EXTEND;
+-
+ 	case RINGBUF_TYPE_TIME_STAMP:
+-		return RB_LEN_TIME_STAMP;
++		return RB_LEN_TIME_EXTEND;
+ 
+ 	case RINGBUF_TYPE_DATA:
+ 		return rb_event_data_length(event);
+@@ -210,7 +213,7 @@ rb_event_ts_length(struct ring_buffer_ev
+ {
+ 	unsigned len = 0;
+ 
+-	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
++	if (extended_time(event)) {
+ 		/* time extends include the data event after it */
+ 		len = RB_LEN_TIME_EXTEND;
+ 		event = skip_time_extend(event);
+@@ -232,7 +235,7 @@ unsigned ring_buffer_event_length(struct
+ {
+ 	unsigned length;
+ 
+-	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
++	if (extended_time(event))
+ 		event = skip_time_extend(event);
+ 
+ 	length = rb_event_length(event);
+@@ -249,7 +252,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_leng
+ static __always_inline void *
+ rb_event_data(struct ring_buffer_event *event)
+ {
+-	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
++	if (extended_time(event))
+ 		event = skip_time_extend(event);
+ 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+ 	/* If length is in len field, then array[0] has the data */
+@@ -276,6 +279,27 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data
+ #define TS_MASK		((1ULL << TS_SHIFT) - 1)
+ #define TS_DELTA_TEST	(~TS_MASK)
+ 
++/**
++ * ring_buffer_event_time_stamp - return the event's extended timestamp
++ * @event: the event to get the timestamp of
++ *
++ * Returns the extended timestamp associated with a data event.
++ * An extended time_stamp is a 64-bit timestamp represented
++ * internally in a special way that makes the best use of space
++ * contained within a ring buffer event.  This function decodes
++ * it and maps it to a straight u64 value.
++ */
++u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
++{
++	u64 ts;
++
++	ts = event->array[0];
++	ts <<= TS_SHIFT;
++	ts += event->time_delta;
++
++	return ts;
++}
++
+ /* Flag when events were overwritten */
+ #define RB_MISSED_EVENTS	(1 << 31)
+ /* Missed count stored at end */
+@@ -2219,13 +2243,16 @@ rb_move_tail(struct ring_buffer_per_cpu
+ }
+ 
+ /* Slow path, do not inline */
+-static noinline struct ring_buffer_event *
+-rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
++static noinline struct ring_buffer_event *
++rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
+ {
+-	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
++	if (abs)
++		event->type_len = RINGBUF_TYPE_TIME_STAMP;
++	else
++		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+ 
+-	/* Not the first event on the page? */
+-	if (rb_event_index(event)) {
++	/* Not the first event on the page, or not delta? */
++	if (abs || rb_event_index(event)) {
+ 		event->time_delta = delta & TS_MASK;
+ 		event->array[0] = delta >> TS_SHIFT;
+ 	} else {
+@@ -2268,7 +2295,9 @@ rb_update_event(struct ring_buffer_per_c
+ 	 * add it to the start of the resevered space.
+ 	 */
+ 	if (unlikely(info->add_timestamp)) {
+-		event = rb_add_time_stamp(event, delta);
++		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
++
++		event = rb_add_time_stamp(event, info->delta, abs);
+ 		length -= RB_LEN_TIME_EXTEND;
+ 		delta = 0;
+ 	}
+@@ -2456,7 +2485,7 @@ static __always_inline void rb_end_commi
+ 
+ static inline void rb_event_discard(struct ring_buffer_event *event)
+ {
+-	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
++	if (extended_time(event))
+ 		event = skip_time_extend(event);
+ 
+ 	/* array[0] holds the actual length for the discarded event */
+@@ -2487,6 +2516,10 @@ rb_update_write_stamp(struct ring_buffer
+ {
+ 	u64 delta;
+ 
++	/* In TIME_STAMP mode, write_stamp is unused, nothing to do */
++	if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
++		return;
++
+ 	/*
+ 	 * The event first in the commit queue updates the
+ 	 * time stamp.
+@@ -2500,9 +2533,7 @@ rb_update_write_stamp(struct ring_buffer
+ 			cpu_buffer->write_stamp =
+ 				cpu_buffer->commit_page->page->time_stamp;
+ 		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+-			delta = event->array[0];
+-			delta <<= TS_SHIFT;
+-			delta += event->time_delta;
++			delta = ring_buffer_event_time_stamp(event);
+ 			cpu_buffer->write_stamp += delta;
+ 		} else
+ 			cpu_buffer->write_stamp += event->time_delta;
+@@ -2686,7 +2717,7 @@ static struct ring_buffer_event *
+ 	 * If this is the first commit on the page, then it has the same
+ 	 * timestamp as the page itself.
+ 	 */
+-	if (!tail)
++	if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
+ 		info->delta = 0;
+ 
+ 	/* See if we shot pass the end of this buffer page */
+@@ -2764,8 +2795,11 @@ rb_reserve_next_event(struct ring_buffer
+ 	/* make sure this diff is calculated here */
+ 	barrier();
+ 
+-	/* Did the write stamp get updated already? */
+-	if (likely(info.ts >= cpu_buffer->write_stamp)) {
++	if (ring_buffer_time_stamp_abs(buffer)) {
++		info.delta = info.ts;
++		rb_handle_timestamp(cpu_buffer, &info);
++	} else /* Did the write stamp get updated already? */
++		if (likely(info.ts >= cpu_buffer->write_stamp)) {
+ 		info.delta = diff;
+ 		if (unlikely(test_time_stamp(info.delta)))
+ 			rb_handle_timestamp(cpu_buffer, &info);
+@@ -3447,14 +3481,12 @@ rb_update_read_stamp(struct ring_buffer_
+ 		return;
+ 
+ 	case RINGBUF_TYPE_TIME_EXTEND:
+-		delta = event->array[0];
+-		delta <<= TS_SHIFT;
+-		delta += event->time_delta;
++		delta = ring_buffer_event_time_stamp(event);
+ 		cpu_buffer->read_stamp += delta;
+ 		return;
+ 
+ 	case RINGBUF_TYPE_TIME_STAMP:
+-		/* FIXME: not implemented */
++		/* In TIME_STAMP mode, write_stamp is unused, nothing to do */
+ 		return;
+ 
+ 	case RINGBUF_TYPE_DATA:
+@@ -3478,14 +3510,12 @@ rb_update_iter_read_stamp(struct ring_bu
+ 		return;
+ 
+ 	case RINGBUF_TYPE_TIME_EXTEND:
+-		delta = event->array[0];
+-		delta <<= TS_SHIFT;
+-		delta += event->time_delta;
++		delta = ring_buffer_event_time_stamp(event);
+ 		iter->read_stamp += delta;
+ 		return;
+ 
+ 	case RINGBUF_TYPE_TIME_STAMP:
+-		/* FIXME: not implemented */
++		/* In TIME_STAMP mode, write_stamp is unused, nothing to do */
+ 		return;
+ 
+ 	case RINGBUF_TYPE_DATA:
+@@ -3709,6 +3739,8 @@ rb_buffer_peek(struct ring_buffer_per_cp
+ 	struct buffer_page *reader;
+ 	int nr_loops = 0;
+ 
++	if (ts)
++		*ts = 0;
+  again:
+ 	/*
+ 	 * We repeat when a time extend is encountered.
+@@ -3745,12 +3777,17 @@ rb_buffer_peek(struct ring_buffer_per_cp
+ 		goto again;
+ 
+ 	case RINGBUF_TYPE_TIME_STAMP:
+-		/* FIXME: not implemented */
++		if (ts) {
++			*ts = ring_buffer_event_time_stamp(event);
++			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
++							 cpu_buffer->cpu, ts);
++		}
++		/* Internal data, OK to advance */
+ 		rb_advance_reader(cpu_buffer);
+ 		goto again;
+ 
+ 	case RINGBUF_TYPE_DATA:
+-		if (ts) {
++		if (ts && !(*ts)) {
+ 			*ts = cpu_buffer->read_stamp + event->time_delta;
+ 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
+ 							 cpu_buffer->cpu, ts);
+@@ -3775,6 +3812,9 @@ rb_iter_peek(struct ring_buffer_iter *it
+ 	struct ring_buffer_event *event;
+ 	int nr_loops = 0;
+ 
++	if (ts)
++		*ts = 0;
++
+ 	cpu_buffer = iter->cpu_buffer;
+ 	buffer = cpu_buffer->buffer;
+ 
+@@ -3827,12 +3867,17 @@ rb_iter_peek(struct ring_buffer_iter *it
+ 		goto again;
+ 
+ 	case RINGBUF_TYPE_TIME_STAMP:
+-		/* FIXME: not implemented */
++		if (ts) {
++			*ts = ring_buffer_event_time_stamp(event);
++			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
++							 cpu_buffer->cpu, ts);
++		}
++		/* Internal data, OK to advance */
+ 		rb_advance_iter(iter);
+ 		goto again;
+ 
+ 	case RINGBUF_TYPE_DATA:
+-		if (ts) {
++		if (ts && !(*ts)) {
+ 			*ts = iter->read_stamp + event->time_delta;
+ 			ring_buffer_normalize_time_stamp(buffer,
+ 							 cpu_buffer->cpu, ts);
diff --git a/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
index ae6ea1f..636921a 100644
--- a/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
+++ b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
@@ -1,8 +1,7 @@
-From aa2bfe55366552cb7e93e8709d66e698d79ccc47 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:10 +0100
 Subject: [PATCH 4/9] rtmutex: Clean up
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Previous patches changed the meaning of the return value of
 rt_mutex_slowunlock(); update comments and code to reflect this.
diff --git a/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
index 792544b..52ca99a 100644
--- a/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0004-x86-smp-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From 719b3680d1f789c1e3054e3fcb26bfff07c3c623 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:35 +0200
 Subject: [PATCH 04/17] x86/smp: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
index eaf05bb..cbc5ec1 100644
--- a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
+++ b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:52 +0100
 Subject: [PATCH] futex: Change locking rules
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 734009e96d1983ad739e5b656e03430b3660c913
 
diff --git a/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch
index 0e57c9c..408b66a 100644
--- a/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0005-metag-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From dcd2e4734b428709984e2fa35ebbd6cccc246d47 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:36 +0200
 Subject: [PATCH 05/17] metag: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch b/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
index 720cdc9..8b6196a 100644
--- a/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 6d11b87d55eb75007a3721c2de5938f5bbf607fb Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:31 +0200
 Subject: [PATCH 05/13] powerpc/smp: Replace open coded task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Init task invokes smp_ops->setup_cpu() from smp_cpus_done(). Init task can
 run on any online CPU at this point, but the setup_cpu() callback requires
diff --git a/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
index 4487c6f..91800e1 100644
--- a/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
+++ b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
@@ -1,8 +1,7 @@
-From acd58620e415aee4a43a808d7d2fd87259ee0001 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:11 +0100
 Subject: [PATCH 5/9] sched/rtmutex: Refactor rt_mutex_setprio()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 With the introduction of SCHED_DEADLINE the whole notion that priority
 is a single number is gone, therefore the @prio argument to
diff --git a/debian/patches/features/all/rt/0005-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch b/debian/patches/features/all/rt/0005-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
new file mode 100644
index 0000000..a9e4c41
--- /dev/null
+++ b/debian/patches/features/all/rt/0005-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
@@ -0,0 +1,299 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:06 -0500
+Subject: [PATCH 05/32] tracing: Give event triggers access to
+ ring_buffer_event
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+The ring_buffer event can provide a timestamp that may be useful to
+various triggers - pass it into the handlers for that purpose.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/trace_events.h        |   14 ++++++----
+ kernel/trace/trace.h                |    9 +++---
+ kernel/trace/trace_events_hist.c    |   11 +++++---
+ kernel/trace/trace_events_trigger.c |   47 ++++++++++++++++++++++--------------
+ 4 files changed, 49 insertions(+), 32 deletions(-)
+
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -400,11 +400,13 @@ enum event_trigger_type {
+ 
+ extern int filter_match_preds(struct event_filter *filter, void *rec);
+ 
+-extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
+-						   void *rec);
+-extern void event_triggers_post_call(struct trace_event_file *file,
+-				     enum event_trigger_type tt,
+-				     void *rec);
++extern enum event_trigger_type
++event_triggers_call(struct trace_event_file *file, void *rec,
++		    struct ring_buffer_event *event);
++extern void
++event_triggers_post_call(struct trace_event_file *file,
++			 enum event_trigger_type tt,
++			 void *rec, struct ring_buffer_event *event);
+ 
+ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+ 
+@@ -424,7 +426,7 @@ trace_trigger_soft_disabled(struct trace
+ 
+ 	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ 		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+-			event_triggers_call(file, NULL);
++			event_triggers_call(file, NULL, NULL);
+ 		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ 			return true;
+ 		if (eflags & EVENT_FILE_FL_PID_FILTER)
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1189,7 +1189,7 @@ static inline bool
+ 	unsigned long eflags = file->flags;
+ 
+ 	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+-		*tt = event_triggers_call(file, entry);
++		*tt = event_triggers_call(file, entry, event);
+ 
+ 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+ 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+@@ -1226,7 +1226,7 @@ event_trigger_unlock_commit(struct trace
+ 		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+ 
+ 	if (tt)
+-		event_triggers_post_call(file, tt, entry);
++		event_triggers_post_call(file, tt, entry, event);
+ }
+ 
+ /**
+@@ -1259,7 +1259,7 @@ event_trigger_unlock_commit_regs(struct
+ 						irq_flags, pc, regs);
+ 
+ 	if (tt)
+-		event_triggers_post_call(file, tt, entry);
++		event_triggers_post_call(file, tt, entry, event);
+ }
+ 
+ #define FILTER_PRED_INVALID	((unsigned short)-1)
+@@ -1482,7 +1482,8 @@ extern int register_trigger_hist_enable_
+  */
+ struct event_trigger_ops {
+ 	void			(*func)(struct event_trigger_data *data,
+-					void *rec);
++					void *rec,
++					struct ring_buffer_event *rbe);
+ 	int			(*init)(struct event_trigger_ops *ops,
+ 					struct event_trigger_data *data);
+ 	void			(*free)(struct event_trigger_ops *ops,
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -921,7 +921,8 @@ static inline void add_to_key(char *comp
+ 	memcpy(compound_key + key_field->offset, key, size);
+ }
+ 
+-static void event_hist_trigger(struct event_trigger_data *data, void *rec)
++static void event_hist_trigger(struct event_trigger_data *data, void *rec,
++			       struct ring_buffer_event *event)
+ {
+ 	struct hist_trigger_data *hist_data = data->private_data;
+ 	bool use_compound_key = (hist_data->n_keys > 1);
+@@ -1672,7 +1673,8 @@ static struct event_command trigger_hist
+ }
+ 
+ static void
+-hist_enable_trigger(struct event_trigger_data *data, void *rec)
++hist_enable_trigger(struct event_trigger_data *data, void *rec,
++		    struct ring_buffer_event *event)
+ {
+ 	struct enable_trigger_data *enable_data = data->private_data;
+ 	struct event_trigger_data *test;
+@@ -1688,7 +1690,8 @@ hist_enable_trigger(struct event_trigger
+ }
+ 
+ static void
+-hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
++hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
++			  struct ring_buffer_event *event)
+ {
+ 	if (!data->count)
+ 		return;
+@@ -1696,7 +1699,7 @@ hist_enable_count_trigger(struct event_t
+ 	if (data->count != -1)
+ 		(data->count)--;
+ 
+-	hist_enable_trigger(data, rec);
++	hist_enable_trigger(data, rec, event);
+ }
+ 
+ static struct event_trigger_ops hist_enable_trigger_ops = {
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -63,7 +63,8 @@ void trigger_data_free(struct event_trig
+  * any trigger that should be deferred, ETT_NONE if nothing to defer.
+  */
+ enum event_trigger_type
+-event_triggers_call(struct trace_event_file *file, void *rec)
++event_triggers_call(struct trace_event_file *file, void *rec,
++		    struct ring_buffer_event *event)
+ {
+ 	struct event_trigger_data *data;
+ 	enum event_trigger_type tt = ETT_NONE;
+@@ -76,7 +77,7 @@ event_triggers_call(struct trace_event_f
+ 		if (data->paused)
+ 			continue;
+ 		if (!rec) {
+-			data->ops->func(data, rec);
++			data->ops->func(data, rec, event);
+ 			continue;
+ 		}
+ 		filter = rcu_dereference_sched(data->filter);
+@@ -86,7 +87,7 @@ event_triggers_call(struct trace_event_f
+ 			tt |= data->cmd_ops->trigger_type;
+ 			continue;
+ 		}
+-		data->ops->func(data, rec);
++		data->ops->func(data, rec, event);
+ 	}
+ 	return tt;
+ }
+@@ -108,7 +109,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
+ void
+ event_triggers_post_call(struct trace_event_file *file,
+ 			 enum event_trigger_type tt,
+-			 void *rec)
++			 void *rec, struct ring_buffer_event *event)
+ {
+ 	struct event_trigger_data *data;
+ 
+@@ -116,7 +117,7 @@ event_triggers_post_call(struct trace_ev
+ 		if (data->paused)
+ 			continue;
+ 		if (data->cmd_ops->trigger_type & tt)
+-			data->ops->func(data, rec);
++			data->ops->func(data, rec, event);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(event_triggers_post_call);
+@@ -909,7 +910,8 @@ void set_named_trigger_data(struct event
+ }
+ 
+ static void
+-traceon_trigger(struct event_trigger_data *data, void *rec)
++traceon_trigger(struct event_trigger_data *data, void *rec,
++		struct ring_buffer_event *event)
+ {
+ 	if (tracing_is_on())
+ 		return;
+@@ -918,7 +920,8 @@ traceon_trigger(struct event_trigger_dat
+ }
+ 
+ static void
+-traceon_count_trigger(struct event_trigger_data *data, void *rec)
++traceon_count_trigger(struct event_trigger_data *data, void *rec,
++		      struct ring_buffer_event *event)
+ {
+ 	if (tracing_is_on())
+ 		return;
+@@ -933,7 +936,8 @@ traceon_count_trigger(struct event_trigg
+ }
+ 
+ static void
+-traceoff_trigger(struct event_trigger_data *data, void *rec)
++traceoff_trigger(struct event_trigger_data *data, void *rec,
++		 struct ring_buffer_event *event)
+ {
+ 	if (!tracing_is_on())
+ 		return;
+@@ -942,7 +946,8 @@ traceoff_trigger(struct event_trigger_da
+ }
+ 
+ static void
+-traceoff_count_trigger(struct event_trigger_data *data, void *rec)
++traceoff_count_trigger(struct event_trigger_data *data, void *rec,
++		       struct ring_buffer_event *event)
+ {
+ 	if (!tracing_is_on())
+ 		return;
+@@ -1039,13 +1044,15 @@ static struct event_command trigger_trac
+ 
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ static void
+-snapshot_trigger(struct event_trigger_data *data, void *rec)
++snapshot_trigger(struct event_trigger_data *data, void *rec,
++		 struct ring_buffer_event *event)
+ {
+ 	tracing_snapshot();
+ }
+ 
+ static void
+-snapshot_count_trigger(struct event_trigger_data *data, void *rec)
++snapshot_count_trigger(struct event_trigger_data *data, void *rec,
++		       struct ring_buffer_event *event)
+ {
+ 	if (!data->count)
+ 		return;
+@@ -1053,7 +1060,7 @@ snapshot_count_trigger(struct event_trig
+ 	if (data->count != -1)
+ 		(data->count)--;
+ 
+-	snapshot_trigger(data, rec);
++	snapshot_trigger(data, rec, event);
+ }
+ 
+ static int
+@@ -1132,13 +1139,15 @@ static __init int register_trigger_snaps
+ #define STACK_SKIP 3
+ 
+ static void
+-stacktrace_trigger(struct event_trigger_data *data, void *rec)
++stacktrace_trigger(struct event_trigger_data *data, void *rec,
++		   struct ring_buffer_event *event)
+ {
+ 	trace_dump_stack(STACK_SKIP);
+ }
+ 
+ static void
+-stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
++stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
++			 struct ring_buffer_event *event)
+ {
+ 	if (!data->count)
+ 		return;
+@@ -1146,7 +1155,7 @@ stacktrace_count_trigger(struct event_tr
+ 	if (data->count != -1)
+ 		(data->count)--;
+ 
+-	stacktrace_trigger(data, rec);
++	stacktrace_trigger(data, rec, event);
+ }
+ 
+ static int
+@@ -1208,7 +1217,8 @@ static __init void unregister_trigger_tr
+ }
+ 
+ static void
+-event_enable_trigger(struct event_trigger_data *data, void *rec)
++event_enable_trigger(struct event_trigger_data *data, void *rec,
++		     struct ring_buffer_event *event)
+ {
+ 	struct enable_trigger_data *enable_data = data->private_data;
+ 
+@@ -1219,7 +1229,8 @@ event_enable_trigger(struct event_trigge
+ }
+ 
+ static void
+-event_enable_count_trigger(struct event_trigger_data *data, void *rec)
++event_enable_count_trigger(struct event_trigger_data *data, void *rec,
++			   struct ring_buffer_event *event)
+ {
+ 	struct enable_trigger_data *enable_data = data->private_data;
+ 
+@@ -1233,7 +1244,7 @@ event_enable_count_trigger(struct event_
+ 	if (data->count != -1)
+ 		(data->count)--;
+ 
+-	event_enable_trigger(data, rec);
++	event_enable_trigger(data, rec, event);
+ }
+ 
+ int event_enable_trigger_print(struct seq_file *m,
diff --git a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
index 3193905..fcc8d81 100644
--- a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
+++ b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:53 +0100
 Subject: [PATCH] futex: Cleanup refcounting
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit bf92cf3a5100f5a0d5f9834787b130159397cb22
 
diff --git a/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch
index 7a1087e..49d9a65 100644
--- a/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0006-powerpc-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From a8fcfc1917681ba1ccc23a429543a67aad8bfd00 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:37 +0200
 Subject: [PATCH 06/17] powerpc: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
index ba0bfe1..a31b2af 100644
--- a/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
+++ b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
@@ -1,8 +1,7 @@
-From b91473ff6e979c0028f02f90e40c844959c736d8 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:12 +0100
 Subject: [PATCH 6/9] sched,tracing: Update trace_sched_pi_setprio()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Pass the PI donor task, instead of a numerical priority.
 
diff --git a/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
index 3fadb94..e77cd22 100644
--- a/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From ea875ec94eafb858990f3fe9528501f983105653 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 13 Apr 2017 10:17:07 +0200
 Subject: [PATCH 06/13] sparc/sysfs: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The mmustat_enable sysfs file accessor functions must run code on the
 target CPU. This is achieved by temporarily setting the affinity of the
diff --git a/debian/patches/features/all/rt/0006-tracing-Add-ring-buffer-event-param-to-hist-field-fu.patch b/debian/patches/features/all/rt/0006-tracing-Add-ring-buffer-event-param-to-hist-field-fu.patch
new file mode 100644
index 0000000..88aa0d2
--- /dev/null
+++ b/debian/patches/features/all/rt/0006-tracing-Add-ring-buffer-event-param-to-hist-field-fu.patch
@@ -0,0 +1,140 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:07 -0500
+Subject: [PATCH 06/32] tracing: Add ring buffer event param to hist field
+ functions
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Some events such as timestamps require access to a ring_buffer_event
+struct; add a param so that hist field functions can access that.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   39 ++++++++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 15 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -26,7 +26,8 @@
+ 
+ struct hist_field;
+ 
+-typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
++typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event,
++				struct ring_buffer_event *rbe);
+ 
+ #define HIST_FIELD_OPERANDS_MAX	2
+ 
+@@ -40,24 +41,28 @@ struct hist_field {
+ 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
+ };
+ 
+-static u64 hist_field_none(struct hist_field *field, void *event)
++static u64 hist_field_none(struct hist_field *field, void *event,
++			   struct ring_buffer_event *rbe)
+ {
+ 	return 0;
+ }
+ 
+-static u64 hist_field_counter(struct hist_field *field, void *event)
++static u64 hist_field_counter(struct hist_field *field, void *event,
++			      struct ring_buffer_event *rbe)
+ {
+ 	return 1;
+ }
+ 
+-static u64 hist_field_string(struct hist_field *hist_field, void *event)
++static u64 hist_field_string(struct hist_field *hist_field, void *event,
++			     struct ring_buffer_event *rbe)
+ {
+ 	char *addr = (char *)(event + hist_field->field->offset);
+ 
+ 	return (u64)(unsigned long)addr;
+ }
+ 
+-static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
++static u64 hist_field_dynstring(struct hist_field *hist_field, void *event,
++				struct ring_buffer_event *rbe)
+ {
+ 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
+ 	int str_loc = str_item & 0xffff;
+@@ -66,24 +71,28 @@ static u64 hist_field_dynstring(struct h
+ 	return (u64)(unsigned long)addr;
+ }
+ 
+-static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
++static u64 hist_field_pstring(struct hist_field *hist_field, void *event,
++			      struct ring_buffer_event *rbe)
+ {
+ 	char **addr = (char **)(event + hist_field->field->offset);
+ 
+ 	return (u64)(unsigned long)*addr;
+ }
+ 
+-static u64 hist_field_log2(struct hist_field *hist_field, void *event)
++static u64 hist_field_log2(struct hist_field *hist_field, void *event,
++			   struct ring_buffer_event *rbe)
+ {
+ 	struct hist_field *operand = hist_field->operands[0];
+ 
+-	u64 val = operand->fn(operand, event);
++	u64 val = operand->fn(operand, event, rbe);
+ 
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+ 
+ #define DEFINE_HIST_FIELD_FN(type)					\
+-static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
++	static u64 hist_field_##type(struct hist_field *hist_field,	\
++				     void *event,			\
++				     struct ring_buffer_event *rbe)	\
+ {									\
+ 	type *addr = (type *)(event + hist_field->field->offset);	\
+ 									\
+@@ -883,8 +892,8 @@ create_hist_data(unsigned int map_bits,
+ }
+ 
+ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
+-				    struct tracing_map_elt *elt,
+-				    void *rec)
++				    struct tracing_map_elt *elt, void *rec,
++				    struct ring_buffer_event *rbe)
+ {
+ 	struct hist_field *hist_field;
+ 	unsigned int i;
+@@ -892,7 +901,7 @@ static void hist_trigger_elt_update(stru
+ 
+ 	for_each_hist_val_field(i, hist_data) {
+ 		hist_field = hist_data->fields[i];
+-		hist_val = hist_field->fn(hist_field, rec);
++		hist_val = hist_field->fn(hist_field, rec, rbe);
+ 		tracing_map_update_sum(elt, i, hist_val);
+ 	}
+ }
+@@ -922,7 +931,7 @@ static inline void add_to_key(char *comp
+ }
+ 
+ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
+-			       struct ring_buffer_event *event)
++			       struct ring_buffer_event *rbe)
+ {
+ 	struct hist_trigger_data *hist_data = data->private_data;
+ 	bool use_compound_key = (hist_data->n_keys > 1);
+@@ -951,7 +960,7 @@ static void event_hist_trigger(struct ev
+ 
+ 			key = entries;
+ 		} else {
+-			field_contents = key_field->fn(key_field, rec);
++			field_contents = key_field->fn(key_field, rec, rbe);
+ 			if (key_field->flags & HIST_FIELD_FL_STRING) {
+ 				key = (void *)(unsigned long)field_contents;
+ 				use_compound_key = true;
+@@ -968,7 +977,7 @@ static void event_hist_trigger(struct ev
+ 
+ 	elt = tracing_map_insert(hist_data->map, key);
+ 	if (elt)
+-		hist_trigger_elt_update(hist_data, elt, rec);
++		hist_trigger_elt_update(hist_data, elt, rec, rbe);
+ }
+ 
+ static void hist_trigger_stacktrace_print(struct seq_file *m,
diff --git a/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch
index e38e86e..3eb19d6 100644
--- a/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0007-ACPI-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From 9762b33dc31c67e34b36ba4e787e64084b3136ff Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:38 +0200
 Subject: [PATCH 07/17] ACPI: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch b/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
index cf7c935..98f1f15 100644
--- a/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
+++ b/debian/patches/features/all/rt/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
@@ -1,9 +1,8 @@
-From a5cbdf693a60d5b86d4d21dfedd90f17754eb273 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:33 +0200
 Subject: [PATCH 07/13] ACPI/processor: Fix error handling in
  __acpi_processor_start()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When acpi_install_notify_handler() fails the cooling device stays
 registered and the sysfs files created via acpi_pss_perf_init() are
diff --git a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
index 724046c..2c7f9a6 100644
--- a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
+++ b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:54 +0100
 Subject: [PATCH] futex: Rework inconsistent rt_mutex/futex_q state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 73d786bd043ebc855f349c81ea805f6b11cbf2aa
 
diff --git a/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
index 6875fb7..fc4c51a 100644
--- a/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
+++ b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
@@ -1,8 +1,7 @@
-From e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:13 +0100
 Subject: [PATCH 7/9] rtmutex: Fix PI chain order integrity
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 rt_mutex_waiter::prio is a copy of task_struct::prio which is updated
 during the PI chain walk, such that the PI chain order isn't messed up
diff --git a/debian/patches/features/all/rt/0007-tracing-Increase-tracing-map-KEYS_MAX-size.patch b/debian/patches/features/all/rt/0007-tracing-Increase-tracing-map-KEYS_MAX-size.patch
new file mode 100644
index 0000000..910f3db
--- /dev/null
+++ b/debian/patches/features/all/rt/0007-tracing-Increase-tracing-map-KEYS_MAX-size.patch
@@ -0,0 +1,25 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:08 -0500
+Subject: [PATCH 07/32] tracing: Increase tracing map KEYS_MAX size
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+The current default for the number of subkeys in a compound key is 2,
+which is too restrictive.  Increase it to a more realistic value of 3.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/tracing_map.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/tracing_map.h
++++ b/kernel/trace/tracing_map.h
+@@ -5,7 +5,7 @@
+ #define TRACING_MAP_BITS_MAX		17
+ #define TRACING_MAP_BITS_MIN		7
+ 
+-#define TRACING_MAP_KEYS_MAX		2
++#define TRACING_MAP_KEYS_MAX		3
+ #define TRACING_MAP_VALS_MAX		3
+ #define TRACING_MAP_FIELDS_MAX		(TRACING_MAP_KEYS_MAX + \
+ 					 TRACING_MAP_VALS_MAX)
diff --git a/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
index 04b040c..4534086 100644
--- a/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 8153f9ac43897f9f4786b30badc134fcc1a4fb11 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:34 +0200
 Subject: [PATCH 08/13] ACPI/processor: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 acpi_processor_get_throttling() requires to invoke the getter function on
 the target CPU. This is achieved by temporarily setting the affinity of the
diff --git a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
index 8b10d4f..1b5f07e 100644
--- a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
+++ b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:55 +0100
 Subject: [PATCH] futex: Pull rt_mutex_futex_unlock() out from under hb->lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 16ffa12d742534d4ff73e8b3a4e81c1de39196f0
 
diff --git a/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch
index 4b0ae88..1b184e8 100644
--- a/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0008-mm-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From 8cdde385c7a33afbe13fd71351da0968540fa566 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:39 +0200
 Subject: [PATCH 08/17] mm: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
index 959c633..7078d0d 100644
--- a/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
+++ b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
@@ -1,8 +1,7 @@
-From 19830e55247cddb3f46f1bf60b8e245593491bea Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 23 Mar 2017 15:56:14 +0100
 Subject: [PATCH 8/9] rtmutex: Fix more prio comparisons
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There was a pure ->prio comparison left in try_to_wake_rt_mutex(),
 convert it to use rt_mutex_waiter_less(), noting that greater-or-equal
diff --git a/debian/patches/features/all/rt/0008-tracing-Break-out-hist-trigger-assignment-parsing.patch b/debian/patches/features/all/rt/0008-tracing-Break-out-hist-trigger-assignment-parsing.patch
new file mode 100644
index 0000000..18feb49
--- /dev/null
+++ b/debian/patches/features/all/rt/0008-tracing-Break-out-hist-trigger-assignment-parsing.patch
@@ -0,0 +1,92 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:09 -0500
+Subject: [PATCH 08/32] tracing: Break out hist trigger assignment parsing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+This will make it easier to add variables, and makes the parsing code
+cleaner regardless.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   56 ++++++++++++++++++++++++---------------
+ 1 file changed, 35 insertions(+), 21 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -251,6 +251,35 @@ static void destroy_hist_trigger_attrs(s
+ 	kfree(attrs);
+ }
+ 
++static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
++{
++	int ret = 0;
++
++	if ((strncmp(str, "key=", strlen("key=")) == 0) ||
++	    (strncmp(str, "keys=", strlen("keys=")) == 0))
++		attrs->keys_str = kstrdup(str, GFP_KERNEL);
++	else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
++		 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
++		 (strncmp(str, "values=", strlen("values=")) == 0))
++		attrs->vals_str = kstrdup(str, GFP_KERNEL);
++	else if (strncmp(str, "sort=", strlen("sort=")) == 0)
++		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
++	else if (strncmp(str, "name=", strlen("name=")) == 0)
++		attrs->name = kstrdup(str, GFP_KERNEL);
++	else if (strncmp(str, "size=", strlen("size=")) == 0) {
++		int map_bits = parse_map_size(str);
++
++		if (map_bits < 0) {
++			ret = map_bits;
++			goto out;
++		}
++		attrs->map_bits = map_bits;
++	} else
++		ret = -EINVAL;
++ out:
++	return ret;
++}
++
+ static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
+ {
+ 	struct hist_trigger_attrs *attrs;
+@@ -263,33 +292,18 @@ static struct hist_trigger_attrs *parse_
+ 	while (trigger_str) {
+ 		char *str = strsep(&trigger_str, ":");
+ 
+-		if ((strncmp(str, "key=", strlen("key=")) == 0) ||
+-		    (strncmp(str, "keys=", strlen("keys=")) == 0))
+-			attrs->keys_str = kstrdup(str, GFP_KERNEL);
+-		else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
+-			 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
+-			 (strncmp(str, "values=", strlen("values=")) == 0))
+-			attrs->vals_str = kstrdup(str, GFP_KERNEL);
+-		else if (strncmp(str, "sort=", strlen("sort=")) == 0)
+-			attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+-		else if (strncmp(str, "name=", strlen("name=")) == 0)
+-			attrs->name = kstrdup(str, GFP_KERNEL);
+-		else if (strcmp(str, "pause") == 0)
++		if (strchr(str, '=')) {
++			ret = parse_assignment(str, attrs);
++			if (ret)
++				goto free;
++		} else if (strcmp(str, "pause") == 0)
+ 			attrs->pause = true;
+ 		else if ((strcmp(str, "cont") == 0) ||
+ 			 (strcmp(str, "continue") == 0))
+ 			attrs->cont = true;
+ 		else if (strcmp(str, "clear") == 0)
+ 			attrs->clear = true;
+-		else if (strncmp(str, "size=", strlen("size=")) == 0) {
+-			int map_bits = parse_map_size(str);
+-
+-			if (map_bits < 0) {
+-				ret = map_bits;
+-				goto free;
+-			}
+-			attrs->map_bits = map_bits;
+-		} else {
++		else {
+ 			ret = -EINVAL;
+ 			goto free;
+ 		}
diff --git a/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
index df3dfdc..efd52dd 100644
--- a/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 38f05ed04beb276f780fcd2b5c0b78c76d0b3c0c Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:55:03 +0200
 Subject: [PATCH 09/13] cpufreq/ia64: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The get() and target() callbacks must run on the affected cpu. This is
 achieved by temporarily setting the affinity of the calling thread to the
diff --git a/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch b/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
index 1f02514..fb7a5a7 100644
--- a/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
+++ b/debian/patches/features/all/rt/0009-cpufreq-pasemi-Adjust-system_state-check.patch
@@ -1,8 +1,7 @@
-From d04e31a23c3c828456cb5613f391ce4ac4e5765f Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:40 +0200
 Subject: [PATCH 09/17] cpufreq/pasemi: Adjust system_state check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
index 62afc78..5b30457 100644
--- a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
+++ b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:56 +0100
 Subject: [PATCH] futex,rt_mutex: Introduce rt_mutex_init_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 50809358dd7199aa7ce232f6877dd09ec30ef374
 
diff --git a/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
index 5b09b18..b18205e 100644
--- a/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
+++ b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
@@ -1,9 +1,8 @@
-From def34eaae5ce04b324e48e1bfac873091d945213 Mon Sep 17 00:00:00 2001
 From: Mike Galbraith <efault at gmx.de>
 Date: Wed, 5 Apr 2017 10:08:27 +0200
 Subject: [PATCH 9/9] rtmutex: Plug preempt count leak in
  rt_mutex_futex_unlock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 mark_wakeup_next_waiter() already disables preemption, doing so again
 leaves us with an unpaired preempt_disable().
diff --git a/debian/patches/features/all/rt/0009-tracing-Make-traceprobe-parsing-code-reusable.patch b/debian/patches/features/all/rt/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
new file mode 100644
index 0000000..8f8542c
--- /dev/null
+++ b/debian/patches/features/all/rt/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
@@ -0,0 +1,318 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:10 -0500
+Subject: [PATCH 09/32] tracing: Make traceprobe parsing code reusable
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+traceprobe_probes_write() and traceprobe_command() actually contain
+nothing that ties them to kprobes - the code is generically useful for
+similar types of parsing elsewhere, so separate it out and move it to
+trace.c/trace.h.
+
+Other than moving it, the only change is in naming:
+traceprobe_probes_write() becomes trace_parse_run_command() and
+traceprobe_command() becomes trace_run_command().
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace.c        |   86 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/trace/trace.h        |    7 +++
+ kernel/trace/trace_kprobe.c |   18 ++++-----
+ kernel/trace/trace_probe.c  |   86 --------------------------------------------
+ kernel/trace/trace_probe.h  |    7 ---
+ kernel/trace/trace_uprobe.c |    2 -
+ 6 files changed, 103 insertions(+), 103 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7907,6 +7907,92 @@ void ftrace_dump(enum ftrace_dump_mode o
+ }
+ EXPORT_SYMBOL_GPL(ftrace_dump);
+ 
++int trace_run_command(const char *buf, int (*createfn)(int, char **))
++{
++	char **argv;
++	int argc, ret;
++
++	argc = 0;
++	ret = 0;
++	argv = argv_split(GFP_KERNEL, buf, &argc);
++	if (!argv)
++		return -ENOMEM;
++
++	if (argc)
++		ret = createfn(argc, argv);
++
++	argv_free(argv);
++
++	return ret;
++}
++
++#define WRITE_BUFSIZE  4096
++
++ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
++				size_t count, loff_t *ppos,
++				int (*createfn)(int, char **))
++{
++	char *kbuf, *buf, *tmp;
++	int ret = 0;
++	size_t done = 0;
++	size_t size;
++
++	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
++	if (!kbuf)
++		return -ENOMEM;
++
++	while (done < count) {
++		size = count - done;
++
++		if (size >= WRITE_BUFSIZE)
++			size = WRITE_BUFSIZE - 1;
++
++		if (copy_from_user(kbuf, buffer + done, size)) {
++			ret = -EFAULT;
++			goto out;
++		}
++		kbuf[size] = '\0';
++		buf = kbuf;
++		do {
++			tmp = strchr(buf, '\n');
++			if (tmp) {
++				*tmp = '\0';
++				size = tmp - buf + 1;
++			} else {
++				size = strlen(buf);
++				if (done + size < count) {
++					if (buf != kbuf)
++						break;
++					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
++					pr_warn("Line length is too long: Should be less than %d\n",
++						WRITE_BUFSIZE - 2);
++					ret = -EINVAL;
++					goto out;
++				}
++			}
++			done += size;
++
++			/* Remove comments */
++			tmp = strchr(buf, '#');
++
++			if (tmp)
++				*tmp = '\0';
++
++			ret = trace_run_command(buf, createfn);
++			if (ret)
++				goto out;
++			buf += size;
++
++		} while (done < count);
++	}
++	ret = done;
++
++out:
++	kfree(kbuf);
++
++	return ret;
++}
++
+ __init static int tracer_alloc_buffers(void)
+ {
+ 	int ring_buf_size;
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1650,6 +1650,13 @@ void trace_printk_start_comm(void);
+ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+ 
++#define MAX_EVENT_NAME_LEN	64
++
++extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
++extern ssize_t trace_parse_run_command(struct file *file,
++		const char __user *buffer, size_t count, loff_t *ppos,
++		int (*createfn)(int, char**));
++
+ /*
+  * Normal trace_printk() and friends allocates special buffers
+  * to do the manipulation, as well as saves the print formats
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -878,8 +878,8 @@ static int probes_open(struct inode *ino
+ static ssize_t probes_write(struct file *file, const char __user *buffer,
+ 			    size_t count, loff_t *ppos)
+ {
+-	return traceprobe_probes_write(file, buffer, count, ppos,
+-			create_trace_kprobe);
++	return trace_parse_run_command(file, buffer, count, ppos,
++				       create_trace_kprobe);
+ }
+ 
+ static const struct file_operations kprobe_events_ops = {
+@@ -1404,9 +1404,9 @@ static __init int kprobe_trace_self_test
+ 
+ 	pr_info("Testing kprobe tracing: ");
+ 
+-	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
+-				  "$stack $stack0 +0($stack)",
+-				  create_trace_kprobe);
++	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
++				"$stack $stack0 +0($stack)",
++				create_trace_kprobe);
+ 	if (WARN_ON_ONCE(ret)) {
+ 		pr_warn("error on probing function entry.\n");
+ 		warn++;
+@@ -1426,8 +1426,8 @@ static __init int kprobe_trace_self_test
+ 		}
+ 	}
+ 
+-	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
+-				  "$retval", create_trace_kprobe);
++	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
++				"$retval", create_trace_kprobe);
+ 	if (WARN_ON_ONCE(ret)) {
+ 		pr_warn("error on probing function return.\n");
+ 		warn++;
+@@ -1497,13 +1497,13 @@ static __init int kprobe_trace_self_test
+ 			disable_trace_kprobe(tk, file);
+ 	}
+ 
+-	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
++	ret = trace_run_command("-:testprobe", create_trace_kprobe);
+ 	if (WARN_ON_ONCE(ret)) {
+ 		pr_warn("error on deleting a probe.\n");
+ 		warn++;
+ 	}
+ 
+-	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
++	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
+ 	if (WARN_ON_ONCE(ret)) {
+ 		pr_warn("error on deleting a probe.\n");
+ 		warn++;
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct pr
+ 	kfree(arg->comm);
+ }
+ 
+-int traceprobe_command(const char *buf, int (*createfn)(int, char **))
+-{
+-	char **argv;
+-	int argc, ret;
+-
+-	argc = 0;
+-	ret = 0;
+-	argv = argv_split(GFP_KERNEL, buf, &argc);
+-	if (!argv)
+-		return -ENOMEM;
+-
+-	if (argc)
+-		ret = createfn(argc, argv);
+-
+-	argv_free(argv);
+-
+-	return ret;
+-}
+-
+-#define WRITE_BUFSIZE  4096
+-
+-ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
+-				size_t count, loff_t *ppos,
+-				int (*createfn)(int, char **))
+-{
+-	char *kbuf, *buf, *tmp;
+-	int ret = 0;
+-	size_t done = 0;
+-	size_t size;
+-
+-	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
+-	if (!kbuf)
+-		return -ENOMEM;
+-
+-	while (done < count) {
+-		size = count - done;
+-
+-		if (size >= WRITE_BUFSIZE)
+-			size = WRITE_BUFSIZE - 1;
+-
+-		if (copy_from_user(kbuf, buffer + done, size)) {
+-			ret = -EFAULT;
+-			goto out;
+-		}
+-		kbuf[size] = '\0';
+-		buf = kbuf;
+-		do {
+-			tmp = strchr(buf, '\n');
+-			if (tmp) {
+-				*tmp = '\0';
+-				size = tmp - buf + 1;
+-			} else {
+-				size = strlen(buf);
+-				if (done + size < count) {
+-					if (buf != kbuf)
+-						break;
+-					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
+-					pr_warn("Line length is too long: Should be less than %d\n",
+-						WRITE_BUFSIZE - 2);
+-					ret = -EINVAL;
+-					goto out;
+-				}
+-			}
+-			done += size;
+-
+-			/* Remove comments */
+-			tmp = strchr(buf, '#');
+-
+-			if (tmp)
+-				*tmp = '\0';
+-
+-			ret = traceprobe_command(buf, createfn);
+-			if (ret)
+-				goto out;
+-			buf += size;
+-
+-		} while (done < count);
+-	}
+-	ret = done;
+-
+-out:
+-	kfree(kbuf);
+-
+-	return ret;
+-}
+-
+ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
+ 			   bool is_return)
+ {
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -42,7 +42,6 @@
+ 
+ #define MAX_TRACE_ARGS		128
+ #define MAX_ARGSTR_LEN		63
+-#define MAX_EVENT_NAME_LEN	64
+ #define MAX_STRING_SIZE		PATH_MAX
+ 
+ /* Reserved field names */
+@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(st
+ 
+ extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
+ 
+-extern ssize_t traceprobe_probes_write(struct file *file,
+-		const char __user *buffer, size_t count, loff_t *ppos,
+-		int (*createfn)(int, char**));
+-
+-extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
+-
+ /* Sum up total data length for dynamic arraies (strings) */
+ static nokprobe_inline int
+ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -651,7 +651,7 @@ static int probes_open(struct inode *ino
+ static ssize_t probes_write(struct file *file, const char __user *buffer,
+ 			    size_t count, loff_t *ppos)
+ {
+-	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
++	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
+ }
+ 
+ static const struct file_operations uprobe_events_ops = {
diff --git a/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
index c52f354..05c00ba 100644
--- a/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 205dcc1ecbc566cbc20acf246e68de3b080b3ecf Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:36 +0200
 Subject: [PATCH 10/13] cpufreq/sh: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The target() callback must run on the affected cpu. This is achieved by
 temporarily setting the affinity of the calling thread to the requested CPU
diff --git a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
index 2387996..866dc8b 100644
--- a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
+++ b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:57 +0100
 Subject: [PATCH] futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 38d589f2fd08f1296aea3ce62bebd185125c6d81
 
diff --git a/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
index 1b52e1f..94b4557 100644
--- a/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
+++ b/debian/patches/features/all/rt/0010-iommu-vt-d-Adjust-system_state-checks.patch
@@ -1,8 +1,7 @@
-From b608fe356fe8328665445a26ec75dfac918c8c5d Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:41 +0200
 Subject: [PATCH 10/17] iommu/vt-d: Adjust system_state checks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0010-tracing-Add-NO_DISCARD-event-file-flag.patch b/debian/patches/features/all/rt/0010-tracing-Add-NO_DISCARD-event-file-flag.patch
new file mode 100644
index 0000000..7e71ef0
--- /dev/null
+++ b/debian/patches/features/all/rt/0010-tracing-Add-NO_DISCARD-event-file-flag.patch
@@ -0,0 +1,106 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:11 -0500
+Subject: [PATCH 10/32] tracing: Add NO_DISCARD event file flag
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Whenever an event_command has a post-trigger that needs access to the
+event record, the event record can't be discarded, or the post-trigger
+will eventually see bogus data.
+
+In order to allow the discard check to treat this case separately, add
+an EVENT_FILE_FL_NO_DISCARD flag to the event file flags, along with
+code in the discard check that checks the flag and avoids the discard
+when the flag is set.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/trace_events.h        |    3 +++
+ kernel/trace/trace.h                |   13 ++++++++++---
+ kernel/trace/trace_events_trigger.c |   16 +++++++++++++---
+ 3 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -306,6 +306,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_MODE_BIT,
+ 	EVENT_FILE_FL_TRIGGER_COND_BIT,
+ 	EVENT_FILE_FL_PID_FILTER_BIT,
++	EVENT_FILE_FL_NO_DISCARD_BIT,
+ };
+ 
+ /*
+@@ -320,6 +321,7 @@ enum {
+  *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
+  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
+  *  PID_FILTER    - When set, the event is filtered based on pid
++ *  NO_DISCARD    - When set, do not discard events, something needs them later
+  */
+ enum {
+ 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -331,6 +333,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+ 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
++	EVENT_FILE_FL_NO_DISCARD	= (1 << EVENT_FILE_FL_NO_DISCARD_BIT),
+ };
+ 
+ struct trace_event_file {
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1191,9 +1191,16 @@ static inline bool
+ 	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ 		*tt = event_triggers_call(file, entry, event);
+ 
+-	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+-	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+-	     !filter_match_preds(file->filter, entry))) {
++	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
++	    !filter_match_preds(file->filter, entry)) {
++		__trace_event_discard_commit(buffer, event);
++		return true;
++	}
++
++	if (test_bit(EVENT_FILE_FL_NO_DISCARD_BIT, &file->flags))
++		return false;
++
++	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags)) {
+ 		__trace_event_discard_commit(buffer, event);
+ 		return true;
+ 	}
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -505,20 +505,30 @@ clear_event_triggers(struct trace_array
+ void update_cond_flag(struct trace_event_file *file)
+ {
+ 	struct event_trigger_data *data;
+-	bool set_cond = false;
++	bool set_cond = false, set_no_discard = false;
+ 
+ 	list_for_each_entry_rcu(data, &file->triggers, list) {
+ 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
+-		    event_command_needs_rec(data->cmd_ops)) {
++		    event_command_needs_rec(data->cmd_ops))
+ 			set_cond = true;
++
++		if (event_command_post_trigger(data->cmd_ops) &&
++		    event_command_needs_rec(data->cmd_ops))
++			set_no_discard = true;
++
++		if (set_cond && set_no_discard)
+ 			break;
+-		}
+ 	}
+ 
+ 	if (set_cond)
+ 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
+ 	else
+ 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
++
++	if (set_no_discard)
++		set_bit(EVENT_FILE_FL_NO_DISCARD_BIT, &file->flags);
++	else
++		clear_bit(EVENT_FILE_FL_NO_DISCARD_BIT, &file->flags);
+ }
+ 
+ /**
diff --git a/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
index 4aa4b90..d07473e 100644
--- a/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 9fe24c4e92d3963d92d7d383e28ed098bd5689d8 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 12 Apr 2017 22:07:37 +0200
 Subject: [PATCH 11/13] cpufreq/sparc-us3: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The access to the safari config register in the CPU frequency functions
 must be executed on the target CPU. This is achieved by temporarily setting
diff --git a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
index 50e8ea9..ae2f76f 100644
--- a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
+++ b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:58 +0100
 Subject: [PATCH] futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit cfafcd117da0216520568c195cb2f6cd1980c4bb
 
diff --git a/debian/patches/features/all/rt/0011-tracing-Add-post-trigger-flag-to-hist-trigger-comman.patch b/debian/patches/features/all/rt/0011-tracing-Add-post-trigger-flag-to-hist-trigger-comman.patch
new file mode 100644
index 0000000..a5599af
--- /dev/null
+++ b/debian/patches/features/all/rt/0011-tracing-Add-post-trigger-flag-to-hist-trigger-comman.patch
@@ -0,0 +1,29 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:12 -0500
+Subject: [PATCH 11/32] tracing: Add post-trigger flag to hist trigger command
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add EVENT_CMD_FL_POST_TRIGGER to the hist trigger cmd - it doesn't
+affect the hist trigger results, and allows further events such as
+synthetic events to be generated from a hist trigger.
+
+Without this change, generating an event from a hist trigger will
+cause the generated event to fail a ring buffer trace_recursive_lock()
+check and return without actually logging the event.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1676,7 +1676,7 @@ static int event_hist_trigger_func(struc
+ static struct event_command trigger_hist_cmd = {
+ 	.name			= "hist",
+ 	.trigger_type		= ETT_EVENT_HIST,
+-	.flags			= EVENT_CMD_FL_NEEDS_REC,
++	.flags			= EVENT_CMD_FL_NEEDS_REC | EVENT_CMD_FL_POST_TRIGGER,
+ 	.func			= event_hist_trigger_func,
+ 	.reg			= hist_register_trigger,
+ 	.unreg			= hist_unregister_trigger,
diff --git a/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch
index a3ab8ed..18ec3bf 100644
--- a/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch
+++ b/debian/patches/features/all/rt/0012-async-Adjust-system_state-checks.patch
@@ -1,8 +1,7 @@
-From b4def42724594cd399cfee365221f5b38639711d Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:43 +0200
 Subject: [PATCH 12/17] async: Adjust system_state checks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
index 4326566..40a19ed 100644
--- a/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 12699ac53a2e5fbd1fd7c164b11685d55c8aa28b Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 13 Apr 2017 10:22:43 +0200
 Subject: [PATCH 12/13] cpufreq/sparc-us2e: Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The access to the HBIRD_ESTAR_MODE register in the cpu frequency control
 functions must happen on the target CPU. This is achieved by temporarily
diff --git a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
index 1e2c49d..8c3b664 100644
--- a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
+++ b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:35:59 +0100
 Subject: [PATCH] futex: Futex_unlock_pi() determinism
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit bebe5b514345f09be2c15e414d076b02ecb9cce8
 
diff --git a/debian/patches/features/all/rt/0012-tracing-Add-hist-trigger-timestamp-support.patch b/debian/patches/features/all/rt/0012-tracing-Add-hist-trigger-timestamp-support.patch
new file mode 100644
index 0000000..6725c60
--- /dev/null
+++ b/debian/patches/features/all/rt/0012-tracing-Add-hist-trigger-timestamp-support.patch
@@ -0,0 +1,232 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:13 -0500
+Subject: [PATCH 12/32] tracing: Add hist trigger timestamp support
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add support for a timestamp event field.  This is actually a 'pseudo-'
+event field in that it behaves like it's part of the event record, but
+is really part of the corresponding ring buffer event.
+
+To make use of the timestamp field, users can specify
+"$common_timestamp" as a field name for any histogram.  Note that this
+doesn't make much sense on its own either as either a key or value,
+but needs to be supported even so, since follow-on patches will add
+support for making use of this field in time deltas.  The '$' is used
+as a prefix on the variable name to indicate that it's not a bona fide
+event field - so you won't find it in the event description - but
+rather it's a synthetic field that can be used like a real field).
+
+Note that the use of this field requires the ring buffer be put into
+TIME_EXTEND_ABS mode, which saves the complete timestamp for each
+event rather than an offset.  This mode will be enabled if and only if
+a histogram makes use of the "$common_timestamp" field.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   90 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 66 insertions(+), 24 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -89,6 +89,12 @@ static u64 hist_field_log2(struct hist_f
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+ 
++static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
++				struct ring_buffer_event *rbe)
++{
++	return ring_buffer_event_time_stamp(rbe);
++}
++
+ #define DEFINE_HIST_FIELD_FN(type)					\
+ 	static u64 hist_field_##type(struct hist_field *hist_field,	\
+ 				     void *event,			\
+@@ -135,6 +141,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_SYSCALL		= 128,
+ 	HIST_FIELD_FL_STACKTRACE	= 256,
+ 	HIST_FIELD_FL_LOG2		= 512,
++	HIST_FIELD_FL_TIMESTAMP		= 1024,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -159,6 +166,7 @@ struct hist_trigger_data {
+ 	struct trace_event_file		*event_file;
+ 	struct hist_trigger_attrs	*attrs;
+ 	struct tracing_map		*map;
++	bool				enable_timestamps;
+ };
+ 
+ static const char *hist_field_name(struct hist_field *field,
+@@ -173,6 +181,8 @@ static const char *hist_field_name(struc
+ 		field_name = field->field->name;
+ 	else if (field->flags & HIST_FIELD_FL_LOG2)
+ 		field_name = hist_field_name(field->operands[0], ++level);
++	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
++		field_name = "$common_timestamp";
+ 
+ 	if (field_name == NULL)
+ 		field_name = "";
+@@ -435,6 +445,12 @@ static struct hist_field *create_hist_fi
+ 		goto out;
+ 	}
+ 
++	if (flags & HIST_FIELD_FL_TIMESTAMP) {
++		hist_field->fn = hist_field_timestamp;
++		hist_field->size = sizeof(u64);
++		goto out;
++	}
++
+ 	if (WARN_ON_ONCE(!field))
+ 		goto out;
+ 
+@@ -512,10 +528,15 @@ static int create_val_field(struct hist_
+ 		}
+ 	}
+ 
+-	field = trace_find_event_field(file->event_call, field_name);
+-	if (!field) {
+-		ret = -EINVAL;
+-		goto out;
++	if (strcmp(field_name, "$common_timestamp") == 0) {
++		flags |= HIST_FIELD_FL_TIMESTAMP;
++		hist_data->enable_timestamps = true;
++	} else {
++		field = trace_find_event_field(file->event_call, field_name);
++		if (!field) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 	}
+ 
+ 	hist_data->fields[val_idx] = create_hist_field(field, flags);
+@@ -610,16 +631,22 @@ static int create_key_field(struct hist_
+ 			}
+ 		}
+ 
+-		field = trace_find_event_field(file->event_call, field_name);
+-		if (!field) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++		if (strcmp(field_name, "$common_timestamp") == 0) {
++			flags |= HIST_FIELD_FL_TIMESTAMP;
++			hist_data->enable_timestamps = true;
++			key_size = sizeof(u64);
++		} else {
++			field = trace_find_event_field(file->event_call, field_name);
++			if (!field) {
++				ret = -EINVAL;
++				goto out;
++			}
+ 
+-		if (is_string_field(field))
+-			key_size = MAX_FILTER_STR_VAL;
+-		else
+-			key_size = field->size;
++			if (is_string_field(field))
++				key_size = MAX_FILTER_STR_VAL;
++			else
++				key_size = field->size;
++		}
+ 	}
+ 
+ 	hist_data->fields[key_idx] = create_hist_field(field, flags);
+@@ -756,7 +783,7 @@ static int create_sort_keys(struct hist_
+ 			break;
+ 		}
+ 
+-		if (strcmp(field_name, "hitcount") == 0) {
++		if ((strcmp(field_name, "hitcount") == 0)) {
+ 			descending = is_descending(field_str);
+ 			if (descending < 0) {
+ 				ret = descending;
+@@ -816,6 +843,9 @@ static int create_tracing_map_fields(str
+ 
+ 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
+ 				cmp_fn = tracing_map_cmp_none;
++			else if (!field)
++				cmp_fn = tracing_map_cmp_num(hist_field->size,
++							     hist_field->is_signed);
+ 			else if (is_string_field(field))
+ 				cmp_fn = tracing_map_cmp_string;
+ 			else
+@@ -1213,7 +1243,11 @@ static void hist_field_print(struct seq_
+ {
+ 	const char *field_name = hist_field_name(hist_field, 0);
+ 
+-	seq_printf(m, "%s", field_name);
++	if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
++		seq_puts(m, "$common_timestamp");
++	else if (field_name)
++		seq_printf(m, "%s", field_name);
++
+ 	if (hist_field->flags) {
+ 		const char *flags_str = get_hist_field_flags(hist_field);
+ 
+@@ -1264,27 +1298,25 @@ static int event_hist_trigger_print(stru
+ 
+ 	for (i = 0; i < hist_data->n_sort_keys; i++) {
+ 		struct tracing_map_sort_key *sort_key;
++		unsigned int idx;
+ 
+ 		sort_key = &hist_data->sort_keys[i];
++		idx = sort_key->field_idx;
++
++		if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
++			return -EINVAL;
+ 
+ 		if (i > 0)
+ 			seq_puts(m, ",");
+ 
+-		if (sort_key->field_idx == HITCOUNT_IDX)
++		if (idx == HITCOUNT_IDX)
+ 			seq_puts(m, "hitcount");
+-		else {
+-			unsigned int idx = sort_key->field_idx;
+-
+-			if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
+-				return -EINVAL;
+-
++		else
+ 			hist_field_print(m, hist_data->fields[idx]);
+-		}
+ 
+ 		if (sort_key->descending)
+ 			seq_puts(m, ".descending");
+ 	}
+-
+ 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
+ 
+ 	if (data->filter_str)
+@@ -1452,6 +1484,10 @@ static bool hist_trigger_match(struct ev
+ 			return false;
+ 		if (key_field->offset != key_field_test->offset)
+ 			return false;
++		if (key_field->size != key_field_test->size)
++			return false;
++		if (key_field->is_signed != key_field_test->is_signed)
++			return false;
+ 	}
+ 
+ 	for (i = 0; i < hist_data->n_sort_keys; i++) {
+@@ -1534,6 +1570,9 @@ static int hist_register_trigger(char *g
+ 
+ 	update_cond_flag(file);
+ 
++	if (hist_data->enable_timestamps)
++		tracing_set_time_stamp_abs(file->tr, true);
++
+ 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ 		list_del_rcu(&data->list);
+ 		update_cond_flag(file);
+@@ -1568,6 +1607,9 @@ static void hist_unregister_trigger(char
+ 
+ 	if (unregistered && test->ops->free)
+ 		test->ops->free(test->ops, test);
++
++	if (hist_data->enable_timestamps)
++		tracing_set_time_stamp_abs(file->tr, false);
+ }
+ 
+ static void hist_unreg_all(struct trace_event_file *file)
diff --git a/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch b/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
index 794f273..a68042c 100644
--- a/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
+++ b/debian/patches/features/all/rt/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
@@ -1,8 +1,7 @@
-From 73810a069120aa831debb4d967310ab900f628ad Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 13 Apr 2017 10:20:23 +0200
 Subject: [PATCH 13/13] crypto: N2 - Replace racy task affinity logic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 spu_queue_register() needs to invoke setup functions on a particular
 CPU. This is achieved by temporarily setting the affinity of the
diff --git a/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch
index 562d63a..0d740f9 100644
--- a/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch
+++ b/debian/patches/features/all/rt/0013-extable-Adjust-system_state-checks.patch
@@ -1,8 +1,7 @@
-From 0594729c24d846889408a07057b5cc9e8d931419 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:44 +0200
 Subject: [PATCH 13/17] extable: Adjust system_state checks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
index 9b5c128..988b58b 100644
--- a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
+++ b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 22 Mar 2017 11:36:00 +0100
 Subject: [PATCH] futex: Drop hb->lock before enqueueing on the rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit 56222b212e8edb1cf51f5dd73ff645809b082b40
 
diff --git a/debian/patches/features/all/rt/0013-tracing-Add-per-element-variable-support-to-tracing_.patch b/debian/patches/features/all/rt/0013-tracing-Add-per-element-variable-support-to-tracing_.patch
new file mode 100644
index 0000000..d94d990
--- /dev/null
+++ b/debian/patches/features/all/rt/0013-tracing-Add-per-element-variable-support-to-tracing_.patch
@@ -0,0 +1,233 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:14 -0500
+Subject: [PATCH 13/32] tracing: Add per-element variable support to
+ tracing_map
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+In order to allow information to be passed between trace events, add
+support for per-element variables to tracing_map.  This provides a
+means for histograms to associate a value or values with an entry when
+it's saved or updated, and retrieved upon a subsequent event occurrence.
+
+Variables can be set using tracing_map_set_var() and read using
+tracing_map_read_var().  tracing_map_var_set() returns true or false
+depending on whether or not the variable has been set or not, which is
+important for event-matching applications.
+
+tracing_map_read_var_once() reads the variable and resets it to the
+'unset' state, implementing read-once variables, which are also
+important for event-matching uses.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/tracing_map.c |  113 +++++++++++++++++++++++++++++++++++++++++++++
+ kernel/trace/tracing_map.h |   11 ++++
+ 2 files changed, 124 insertions(+)
+
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -66,6 +66,73 @@ u64 tracing_map_read_sum(struct tracing_
+ 	return (u64)atomic64_read(&elt->fields[i].sum);
+ }
+ 
++/**
++ * tracing_map_set_var - Assign a tracing_map_elt's variable field
++ * @elt: The tracing_map_elt
++ * @i: The index of the given variable associated with the tracing_map_elt
++ * @n: The value to assign
++ *
++ * Assign n to variable i associated with the specified tracing_map_elt
++ * instance.  The index i is the index returned by the call to
++ * tracing_map_add_var() when the tracing map was set up.
++ */
++void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)
++{
++	atomic64_set(&elt->vars[i], n);
++	elt->var_set[i] = true;
++}
++
++/**
++ * tracing_map_var_set - Return whether or not a variable has been set
++ * @elt: The tracing_map_elt
++ * @i: The index of the given variable associated with the tracing_map_elt
++ *
++ * Return true if the variable has been set, false otherwise.  The
++ * index i is the index returned by the call to tracing_map_add_var()
++ * when the tracing map was set up.
++ */
++bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)
++{
++	return elt->var_set[i];
++}
++
++/**
++ * tracing_map_read_var - Return the value of a tracing_map_elt's variable field
++ * @elt: The tracing_map_elt
++ * @i: The index of the given variable associated with the tracing_map_elt
++ *
++ * Retrieve the value of the variable i associated with the specified
++ * tracing_map_elt instance.  The index i is the index returned by the
++ * call to tracing_map_add_var() when the tracing map was set
++ * up.
++ *
++ * Return: The variable value associated with field i for elt.
++ */
++u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)
++{
++	return (u64)atomic64_read(&elt->vars[i]);
++}
++
++/**
++ * tracing_map_read_var_once - Return and reset a tracing_map_elt's variable field
++ * @elt: The tracing_map_elt
++ * @i: The index of the given variable associated with the tracing_map_elt
++ *
++ * Retrieve the value of the variable i associated with the specified
++ * tracing_map_elt instance, and reset the variable to the 'not set'
++ * state.  The index i is the index returned by the call to
++ * tracing_map_add_var() when the tracing map was set up.  The reset
++ * essentially makes the variable a read-once variable if it's only
++ * accessed using this function.
++ *
++ * Return: The variable value associated with field i for elt.
++ */
++u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i)
++{
++	elt->var_set[i] = false;
++	return (u64)atomic64_read(&elt->vars[i]);
++}
++
+ int tracing_map_cmp_string(void *val_a, void *val_b)
+ {
+ 	char *a = val_a;
+@@ -171,6 +238,28 @@ int tracing_map_add_sum_field(struct tra
+ }
+ 
+ /**
++ * tracing_map_add_var - Add a field describing a tracing_map var
++ * @map: The tracing_map
++ *
++ * Add a var to the map and return the index identifying it in the map
++ * and associated tracing_map_elts.  This is the index used for
++ * instance to update a var for a particular tracing_map_elt using
++ * tracing_map_update_var() or reading it via tracing_map_read_var().
++ *
++ * Return: The index identifying the var in the map and associated
++ * tracing_map_elts, or -EINVAL on error.
++ */
++int tracing_map_add_var(struct tracing_map *map)
++{
++	int ret = -EINVAL;
++
++	if (map->n_vars < TRACING_MAP_VARS_MAX)
++		ret = map->n_vars++;
++
++	return ret;
++}
++
++/**
+  * tracing_map_add_key_field - Add a field describing a tracing_map key
+  * @map: The tracing_map
+  * @offset: The offset within the key
+@@ -277,6 +366,11 @@ static void tracing_map_elt_clear(struct
+ 		if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
+ 			atomic64_set(&elt->fields[i].sum, 0);
+ 
++	for (i = 0; i < elt->map->n_vars; i++) {
++		atomic64_set(&elt->vars[i], 0);
++		elt->var_set[i] = false;
++	}
++
+ 	if (elt->map->ops && elt->map->ops->elt_clear)
+ 		elt->map->ops->elt_clear(elt);
+ }
+@@ -303,6 +397,8 @@ static void tracing_map_elt_free(struct
+ 	if (elt->map->ops && elt->map->ops->elt_free)
+ 		elt->map->ops->elt_free(elt);
+ 	kfree(elt->fields);
++	kfree(elt->vars);
++	kfree(elt->var_set);
+ 	kfree(elt->key);
+ 	kfree(elt);
+ }
+@@ -330,6 +426,18 @@ static struct tracing_map_elt *tracing_m
+ 		goto free;
+ 	}
+ 
++	elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL);
++	if (!elt->vars) {
++		err = -ENOMEM;
++		goto free;
++	}
++
++	elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL);
++	if (!elt->var_set) {
++		err = -ENOMEM;
++		goto free;
++	}
++
+ 	tracing_map_elt_init_fields(elt);
+ 
+ 	if (map->ops && map->ops->elt_alloc) {
+@@ -833,6 +941,11 @@ static struct tracing_map_elt *copy_elt(
+ 		dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
+ 	}
+ 
++	for (i = 0; i < elt->map->n_vars; i++) {
++		atomic64_set(&dup_elt->vars[i], atomic64_read(&elt->vars[i]));
++		dup_elt->var_set[i] = elt->var_set[i];
++	}
++
+ 	return dup_elt;
+ }
+ 
+--- a/kernel/trace/tracing_map.h
++++ b/kernel/trace/tracing_map.h
+@@ -9,6 +9,7 @@
+ #define TRACING_MAP_VALS_MAX		3
+ #define TRACING_MAP_FIELDS_MAX		(TRACING_MAP_KEYS_MAX + \
+ 					 TRACING_MAP_VALS_MAX)
++#define TRACING_MAP_VARS_MAX		16
+ #define TRACING_MAP_SORT_KEYS_MAX	2
+ 
+ typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
+@@ -136,6 +137,8 @@ struct tracing_map_field {
+ struct tracing_map_elt {
+ 	struct tracing_map		*map;
+ 	struct tracing_map_field	*fields;
++	atomic64_t			*vars;
++	bool				*var_set;
+ 	void				*key;
+ 	void				*private_data;
+ };
+@@ -191,6 +194,7 @@ struct tracing_map {
+ 	int				key_idx[TRACING_MAP_KEYS_MAX];
+ 	unsigned int			n_keys;
+ 	struct tracing_map_sort_key	sort_key;
++	unsigned int			n_vars;
+ 	atomic64_t			hits;
+ 	atomic64_t			drops;
+ };
+@@ -247,6 +251,7 @@ tracing_map_create(unsigned int map_bits
+ extern int tracing_map_init(struct tracing_map *map);
+ 
+ extern int tracing_map_add_sum_field(struct tracing_map *map);
++extern int tracing_map_add_var(struct tracing_map *map);
+ extern int tracing_map_add_key_field(struct tracing_map *map,
+ 				     unsigned int offset,
+ 				     tracing_map_cmp_fn_t cmp_fn);
+@@ -266,7 +271,13 @@ extern int tracing_map_cmp_none(void *va
+ 
+ extern void tracing_map_update_sum(struct tracing_map_elt *elt,
+ 				   unsigned int i, u64 n);
++extern void tracing_map_set_var(struct tracing_map_elt *elt,
++				unsigned int i, u64 n);
++extern bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i);
+ extern u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i);
++extern u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i);
++extern u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i);
++
+ extern void tracing_map_set_field_descr(struct tracing_map *map,
+ 					unsigned int i,
+ 					unsigned int key_offset,
diff --git a/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch
index 8cbedb7..eb8d9df 100644
--- a/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch
+++ b/debian/patches/features/all/rt/0014-printk-Adjust-system_state-checks.patch
@@ -1,8 +1,7 @@
-From ff48cd26fc4889b9deb5f9333d3c61746e450b7f Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:45 +0200
 Subject: [PATCH 14/17] printk: Adjust system_state checks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0014-tracing-Add-hist_data-member-to-hist_field.patch b/debian/patches/features/all/rt/0014-tracing-Add-hist_data-member-to-hist_field.patch
new file mode 100644
index 0000000..01570c9
--- /dev/null
+++ b/debian/patches/features/all/rt/0014-tracing-Add-hist_data-member-to-hist_field.patch
@@ -0,0 +1,79 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:15 -0500
+Subject: [PATCH 14/32] tracing: Add hist_data member to hist_field
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Allow hist_data access via hist_field.  Some users of hist_fields
+require or will require more access to the associated hist_data.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -39,6 +39,7 @@ struct hist_field {
+ 	unsigned int			offset;
+ 	unsigned int                    is_signed;
+ 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
++	struct hist_trigger_data	*hist_data;
+ };
+ 
+ static u64 hist_field_none(struct hist_field *field, void *event,
+@@ -415,7 +416,8 @@ static void destroy_hist_field(struct hi
+ 	kfree(hist_field);
+ }
+ 
+-static struct hist_field *create_hist_field(struct ftrace_event_field *field,
++static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
++					    struct ftrace_event_field *field,
+ 					    unsigned long flags)
+ {
+ 	struct hist_field *hist_field;
+@@ -427,6 +429,8 @@ static struct hist_field *create_hist_fi
+ 	if (!hist_field)
+ 		return NULL;
+ 
++	hist_field->hist_data = hist_data;
++
+ 	if (flags & HIST_FIELD_FL_HITCOUNT) {
+ 		hist_field->fn = hist_field_counter;
+ 		goto out;
+@@ -440,7 +444,7 @@ static struct hist_field *create_hist_fi
+ 	if (flags & HIST_FIELD_FL_LOG2) {
+ 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
+ 		hist_field->fn = hist_field_log2;
+-		hist_field->operands[0] = create_hist_field(field, fl);
++		hist_field->operands[0] = create_hist_field(hist_data, field, fl);
+ 		hist_field->size = hist_field->operands[0]->size;
+ 		goto out;
+ 	}
+@@ -493,7 +497,7 @@ static void destroy_hist_fields(struct h
+ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ {
+ 	hist_data->fields[HITCOUNT_IDX] =
+-		create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
++		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT);
+ 	if (!hist_data->fields[HITCOUNT_IDX])
+ 		return -ENOMEM;
+ 
+@@ -539,7 +543,7 @@ static int create_val_field(struct hist_
+ 		}
+ 	}
+ 
+-	hist_data->fields[val_idx] = create_hist_field(field, flags);
++	hist_data->fields[val_idx] = create_hist_field(hist_data, field, flags);
+ 	if (!hist_data->fields[val_idx]) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -649,7 +653,7 @@ static int create_key_field(struct hist_
+ 		}
+ 	}
+ 
+-	hist_data->fields[key_idx] = create_hist_field(field, flags);
++	hist_data->fields[key_idx] = create_hist_field(hist_data, field, flags);
+ 	if (!hist_data->fields[key_idx]) {
+ 		ret = -ENOMEM;
+ 		goto out;
diff --git a/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch b/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
index 80cf671..d09586e 100644
--- a/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
+++ b/debian/patches/features/all/rt/0015-mm-vmscan-Adjust-system_state-checks.patch
@@ -1,8 +1,7 @@
-From c6202adf3a0969514299cf10ff07376a84ad09bb Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:46 +0200
 Subject: [PATCH 15/17] mm/vmscan: Adjust system_state checks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To enable smp_processor_id() and might_sleep() debug checks earlier, it's
 required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
diff --git a/debian/patches/features/all/rt/0015-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch b/debian/patches/features/all/rt/0015-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
new file mode 100644
index 0000000..efe1b7b
--- /dev/null
+++ b/debian/patches/features/all/rt/0015-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
@@ -0,0 +1,131 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:16 -0500
+Subject: [PATCH 15/32] tracing: Add usecs modifier for hist trigger timestamps
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Appending .usecs onto a common_timestamp field will cause the
+timestamp value to be in microseconds instead of the default
+nanoseconds.  A typical latency histogram using usecs would look like
+this:
+
+   # echo 'hist:keys=pid,prio:ts0=$common_timestamp.usecs ...
+   # echo 'hist:keys=next_pid:wakeup_lat=$common_timestamp.usecs-$ts0 ...
+
+This also adds an external trace_clock_in_ns() to trace.c for the
+timestamp conversion.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace.c             |    8 ++++++++
+ kernel/trace/trace.h             |    2 ++
+ kernel/trace/trace_events_hist.c |   28 ++++++++++++++++++++++------
+ 3 files changed, 32 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1164,6 +1164,14 @@ static struct {
+ 	ARCH_TRACE_CLOCKS
+ };
+ 
++bool trace_clock_in_ns(struct trace_array *tr)
++{
++	if (trace_clocks[tr->clock_id].in_ns)
++		return true;
++
++	return false;
++}
++
+ /*
+  * trace_parser_get_init - gets the buffer for trace parser
+  */
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -280,6 +280,8 @@ extern void trace_array_put(struct trace
+ 
+ extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
+ 
++extern bool trace_clock_in_ns(struct trace_array *tr);
++
+ /*
+  * The global tracer (top) should be the first trace array added,
+  * but we check the flag anyway.
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -90,12 +90,6 @@ static u64 hist_field_log2(struct hist_f
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+ 
+-static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
+-				struct ring_buffer_event *rbe)
+-{
+-	return ring_buffer_event_time_stamp(rbe);
+-}
+-
+ #define DEFINE_HIST_FIELD_FN(type)					\
+ 	static u64 hist_field_##type(struct hist_field *hist_field,	\
+ 				     void *event,			\
+@@ -143,6 +137,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_STACKTRACE	= 256,
+ 	HIST_FIELD_FL_LOG2		= 512,
+ 	HIST_FIELD_FL_TIMESTAMP		= 1024,
++	HIST_FIELD_FL_TIMESTAMP_USECS	= 2048,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -153,6 +148,7 @@ struct hist_trigger_attrs {
+ 	bool		pause;
+ 	bool		cont;
+ 	bool		clear;
++	bool		ts_in_usecs;
+ 	unsigned int	map_bits;
+ };
+ 
+@@ -170,6 +166,20 @@ struct hist_trigger_data {
+ 	bool				enable_timestamps;
+ };
+ 
++static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
++				struct ring_buffer_event *rbe)
++{
++	struct hist_trigger_data *hist_data = hist_field->hist_data;
++	struct trace_array *tr = hist_data->event_file->tr;
++
++	u64 ts = ring_buffer_event_time_stamp(rbe);
++
++	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
++		ts = ns2usecs(ts);
++
++	return ts;
++}
++
+ static const char *hist_field_name(struct hist_field *field,
+ 				   unsigned int level)
+ {
+@@ -629,6 +639,8 @@ static int create_key_field(struct hist_
+ 				flags |= HIST_FIELD_FL_SYSCALL;
+ 			else if (strcmp(field_str, "log2") == 0)
+ 				flags |= HIST_FIELD_FL_LOG2;
++			else if (strcmp(field_str, "usecs") == 0)
++				flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
+ 			else {
+ 				ret = -EINVAL;
+ 				goto out;
+@@ -638,6 +650,8 @@ static int create_key_field(struct hist_
+ 		if (strcmp(field_name, "$common_timestamp") == 0) {
+ 			flags |= HIST_FIELD_FL_TIMESTAMP;
+ 			hist_data->enable_timestamps = true;
++			if (flags & HIST_FIELD_FL_TIMESTAMP_USECS)
++				hist_data->attrs->ts_in_usecs = true;
+ 			key_size = sizeof(u64);
+ 		} else {
+ 			field = trace_find_event_field(file->event_call, field_name);
+@@ -1239,6 +1253,8 @@ static const char *get_hist_field_flags(
+ 		flags_str = "syscall";
+ 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
+ 		flags_str = "log2";
++	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
++		flags_str = "usecs";
+ 
+ 	return flags_str;
+ }
diff --git a/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch b/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
index 51e443a..ab1774c 100644
--- a/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
+++ b/debian/patches/features/all/rt/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
@@ -1,8 +1,7 @@
-From 69a78ff226fe0241ab6cb9dd961667be477e3cf7 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:47 +0200
 Subject: [PATCH 16/17] init: Introduce SYSTEM_SCHEDULING state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 might_sleep() debugging and smp_processor_id() debugging should be active
 right after the scheduler starts working. The init task can invoke
diff --git a/debian/patches/features/all/rt/0016-tracing-Add-variable-support-to-hist-triggers.patch b/debian/patches/features/all/rt/0016-tracing-Add-variable-support-to-hist-triggers.patch
new file mode 100644
index 0000000..59392d4
--- /dev/null
+++ b/debian/patches/features/all/rt/0016-tracing-Add-variable-support-to-hist-triggers.patch
@@ -0,0 +1,692 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:17 -0500
+Subject: [PATCH 16/32] tracing: Add variable support to hist triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add support for saving the value of a current event's event field by
+assigning it to a variable that can be read by a subsequent event.
+
+The basic syntax for saving a variable is to simply prefix a unique
+variable name not corresponding to any keyword along with an '=' sign
+to any event field.
+
+Both keys and values can be saved and retrieved in this way:
+
+    # echo 'hist:keys=next_pid:vals=ts0=common_timestamp ...
+    # echo 'hist:key=timer_pid=common_pid ...'
+
+If a variable isn't a key variable or prefixed with 'vals=', the
+associated event field will be saved in a variable but won't be summed
+as a value:
+
+    # echo 'hist:keys=next_pid:ts1=common_timestamp:...
+
+Multiple variables can be assigned at the same time:
+
+    # echo 'hist:keys=pid:vals=ts0=common_timestamp,b=field1,field2 ...
+
+Multiple (or single) variables can also be assigned at the same time
+using separate assignments:
+
+    # echo 'hist:keys=pid:vals=ts0=common_timestamp:b=field1:c=field2 ...
+
+Variables set as above can be used by being referenced from another
+event, as described in a subsequent patch.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  299 ++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 264 insertions(+), 35 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -30,6 +30,13 @@ typedef u64 (*hist_field_fn_t) (struct h
+ 				struct ring_buffer_event *rbe);
+ 
+ #define HIST_FIELD_OPERANDS_MAX	2
++#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
++
++struct hist_var {
++	char				*name;
++	struct hist_trigger_data	*hist_data;
++	unsigned int			idx;
++};
+ 
+ struct hist_field {
+ 	struct ftrace_event_field	*field;
+@@ -40,6 +47,7 @@ struct hist_field {
+ 	unsigned int                    is_signed;
+ 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
+ 	struct hist_trigger_data	*hist_data;
++	struct hist_var			var;
+ };
+ 
+ static u64 hist_field_none(struct hist_field *field, void *event,
+@@ -138,6 +146,8 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_LOG2		= 512,
+ 	HIST_FIELD_FL_TIMESTAMP		= 1024,
+ 	HIST_FIELD_FL_TIMESTAMP_USECS	= 2048,
++	HIST_FIELD_FL_VAR		= 4096,
++	HIST_FIELD_FL_VAR_ONLY		= 8192,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -150,13 +160,18 @@ struct hist_trigger_attrs {
+ 	bool		clear;
+ 	bool		ts_in_usecs;
+ 	unsigned int	map_bits;
++
++	char		*assignment_str[TRACING_MAP_VARS_MAX];
++	unsigned int	n_assignments;
+ };
+ 
+ struct hist_trigger_data {
+-	struct hist_field               *fields[TRACING_MAP_FIELDS_MAX];
++	struct hist_field               *fields[HIST_FIELDS_MAX];
+ 	unsigned int			n_vals;
+ 	unsigned int			n_keys;
+ 	unsigned int			n_fields;
++	unsigned int			n_vars;
++	unsigned int			n_var_only;
+ 	unsigned int			key_size;
+ 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
+ 	unsigned int			n_sort_keys;
+@@ -164,6 +179,7 @@ struct hist_trigger_data {
+ 	struct hist_trigger_attrs	*attrs;
+ 	struct tracing_map		*map;
+ 	bool				enable_timestamps;
++	bool				remove;
+ };
+ 
+ static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
+@@ -262,9 +278,14 @@ static int parse_map_size(char *str)
+ 
+ static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
+ {
++	unsigned int i;
++
+ 	if (!attrs)
+ 		return;
+ 
++	for (i = 0; i < attrs->n_assignments; i++)
++		kfree(attrs->assignment_str[i]);
++
+ 	kfree(attrs->name);
+ 	kfree(attrs->sort_key_str);
+ 	kfree(attrs->keys_str);
+@@ -295,8 +316,22 @@ static int parse_assignment(char *str, s
+ 			goto out;
+ 		}
+ 		attrs->map_bits = map_bits;
+-	} else
+-		ret = -EINVAL;
++	} else {
++		char *assignment;
++
++		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		assignment = kstrdup(str, GFP_KERNEL);
++		if (!assignment) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		attrs->assignment_str[attrs->n_assignments++] = assignment;
++	}
+  out:
+ 	return ret;
+ }
+@@ -423,12 +458,15 @@ static void destroy_hist_field(struct hi
+ 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+ 		destroy_hist_field(hist_field->operands[i], ++level);
+ 
++	kfree(hist_field->var.name);
++
+ 	kfree(hist_field);
+ }
+ 
+ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ 					    struct ftrace_event_field *field,
+-					    unsigned long flags)
++					    unsigned long flags,
++					    char *var_name)
+ {
+ 	struct hist_field *hist_field;
+ 
+@@ -454,7 +492,7 @@ static struct hist_field *create_hist_fi
+ 	if (flags & HIST_FIELD_FL_LOG2) {
+ 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
+ 		hist_field->fn = hist_field_log2;
+-		hist_field->operands[0] = create_hist_field(hist_data, field, fl);
++		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+ 		hist_field->size = hist_field->operands[0]->size;
+ 		goto out;
+ 	}
+@@ -489,14 +527,23 @@ static struct hist_field *create_hist_fi
+ 	hist_field->field = field;
+ 	hist_field->flags = flags;
+ 
++	if (var_name) {
++		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
++		if (!hist_field->var.name)
++			goto free;
++	}
++
+ 	return hist_field;
++ free:
++	destroy_hist_field(hist_field, 0);
++	return NULL;
+ }
+ 
+ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
+ {
+ 	unsigned int i;
+ 
+-	for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
++	for (i = 0; i < HIST_FIELDS_MAX; i++) {
+ 		if (hist_data->fields[i]) {
+ 			destroy_hist_field(hist_data->fields[i], 0);
+ 			hist_data->fields[i] = NULL;
+@@ -507,11 +554,12 @@ static void destroy_hist_fields(struct h
+ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ {
+ 	hist_data->fields[HITCOUNT_IDX] =
+-		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT);
++		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
+ 	if (!hist_data->fields[HITCOUNT_IDX])
+ 		return -ENOMEM;
+ 
+ 	hist_data->n_vals++;
++	hist_data->n_fields++;
+ 
+ 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
+ 		return -EINVAL;
+@@ -519,19 +567,81 @@ static int create_hitcount_val(struct hi
+ 	return 0;
+ }
+ 
++static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
++					 const char *var_name)
++{
++	struct hist_field *hist_field, *found = NULL;
++	int i;
++
++	for_each_hist_field(i, hist_data) {
++		hist_field = hist_data->fields[i];
++		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
++		    strcmp(hist_field->var.name, var_name) == 0) {
++			found = hist_field;
++			break;
++		}
++	}
++
++	return found;
++}
++
++static struct hist_field *find_var(struct trace_event_file *file,
++				   const char *var_name)
++{
++	struct hist_trigger_data *hist_data;
++	struct event_trigger_data *test;
++	struct hist_field *hist_field;
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			hist_data = test->private_data;
++			hist_field = find_var_field(hist_data, var_name);
++			if (hist_field)
++				return hist_field;
++		}
++	}
++
++	return NULL;
++}
++
+ static int create_val_field(struct hist_trigger_data *hist_data,
+ 			    unsigned int val_idx,
+ 			    struct trace_event_file *file,
+-			    char *field_str)
++			    char *field_str, bool var_only)
+ {
+ 	struct ftrace_event_field *field = NULL;
++	char *field_name, *var_name;
+ 	unsigned long flags = 0;
+-	char *field_name;
+ 	int ret = 0;
+ 
+-	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
++	if (WARN_ON(!var_only && val_idx >= TRACING_MAP_VALS_MAX))
+ 		return -EINVAL;
+ 
++	var_name = strsep(&field_str, "=");
++	if (field_str && var_name) {
++		if (find_var(file, var_name) &&
++		    !hist_data->remove) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		flags |= HIST_FIELD_FL_VAR;
++		hist_data->n_vars++;
++		if (hist_data->n_vars > TRACING_MAP_VARS_MAX) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		if (var_only)
++			flags |= HIST_FIELD_FL_VAR_ONLY;
++	} else if (!var_only && var_name != NULL && field_str == NULL) {
++		field_str = var_name;
++		var_name = NULL;
++	} else {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	field_name = strsep(&field_str, ".");
+ 	if (field_str) {
+ 		if (strcmp(field_str, "hex") == 0)
+@@ -553,15 +663,19 @@ static int create_val_field(struct hist_
+ 		}
+ 	}
+ 
+-	hist_data->fields[val_idx] = create_hist_field(hist_data, field, flags);
++	hist_data->fields[val_idx] = create_hist_field(hist_data, field, flags, var_name);
+ 	if (!hist_data->fields[val_idx]) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+ 	++hist_data->n_vals;
++	++hist_data->n_fields;
+ 
+-	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
++	if (hist_data->fields[val_idx]->flags & HIST_FIELD_FL_VAR_ONLY)
++		hist_data->n_var_only++;
++
++	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
+ 		ret = -EINVAL;
+  out:
+ 	return ret;
+@@ -571,7 +685,7 @@ static int create_val_fields(struct hist
+ 			     struct trace_event_file *file)
+ {
+ 	char *fields_str, *field_str;
+-	unsigned int i, j;
++	unsigned int i, j = 1;
+ 	int ret;
+ 
+ 	ret = create_hitcount_val(hist_data);
+@@ -591,12 +705,15 @@ static int create_val_fields(struct hist
+ 		field_str = strsep(&fields_str, ",");
+ 		if (!field_str)
+ 			break;
++
+ 		if (strcmp(field_str, "hitcount") == 0)
+ 			continue;
+-		ret = create_val_field(hist_data, j++, file, field_str);
++
++		ret = create_val_field(hist_data, j++, file, field_str, false);
+ 		if (ret)
+ 			goto out;
+ 	}
++
+ 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
+ 		ret = -EINVAL;
+  out:
+@@ -610,18 +727,32 @@ static int create_key_field(struct hist_
+ 			    char *field_str)
+ {
+ 	struct ftrace_event_field *field = NULL;
++	struct hist_field *hist_field = NULL;
+ 	unsigned long flags = 0;
+ 	unsigned int key_size;
++	char *var_name;
+ 	int ret = 0;
+ 
+-	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
++	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
+ 		return -EINVAL;
+ 
+ 	flags |= HIST_FIELD_FL_KEY;
+ 
++	var_name = strsep(&field_str, "=");
++	if (field_str) {
++		if (find_var(file, var_name) &&
++		    !hist_data->remove)
++			return -EINVAL;
++		flags |= HIST_FIELD_FL_VAR;
++	} else {
++		field_str = var_name;
++		var_name = NULL;
++	}
++
+ 	if (strcmp(field_str, "stacktrace") == 0) {
+ 		flags |= HIST_FIELD_FL_STACKTRACE;
+ 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
++		hist_field = create_hist_field(hist_data, NULL, flags, var_name);
+ 	} else {
+ 		char *field_name = strsep(&field_str, ".");
+ 
+@@ -667,7 +798,7 @@ static int create_key_field(struct hist_
+ 		}
+ 	}
+ 
+-	hist_data->fields[key_idx] = create_hist_field(hist_data, field, flags);
++	hist_data->fields[key_idx] = create_hist_field(hist_data, field, flags, var_name);
+ 	if (!hist_data->fields[key_idx]) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -683,6 +814,7 @@ static int create_key_field(struct hist_
+ 	}
+ 
+ 	hist_data->n_keys++;
++	hist_data->n_fields++;
+ 
+ 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
+ 		return -EINVAL;
+@@ -726,6 +858,29 @@ static int create_key_fields(struct hist
+ 	return ret;
+ }
+ 
++static int create_var_fields(struct hist_trigger_data *hist_data,
++			     struct trace_event_file *file)
++{
++	unsigned int i, j, k = hist_data->n_vals;
++	char *str, *field_str;
++	int ret = 0;
++
++	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
++		str = hist_data->attrs->assignment_str[i];
++
++		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
++			field_str = strsep(&str, ",");
++			if (!field_str)
++				break;
++			ret = create_val_field(hist_data, k++, file, field_str, true);
++			if (ret)
++				goto out;
++		}
++	}
++ out:
++	return ret;
++}
++
+ static int create_hist_fields(struct hist_trigger_data *hist_data,
+ 			      struct trace_event_file *file)
+ {
+@@ -735,11 +890,13 @@ static int create_hist_fields(struct his
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = create_key_fields(hist_data, file);
++	ret = create_var_fields(hist_data, file);
+ 	if (ret)
+ 		goto out;
+ 
+-	hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
++	ret = create_key_fields(hist_data, file);
++	if (ret)
++		goto out;
+  out:
+ 	return ret;
+ }
+@@ -763,7 +920,7 @@ static int create_sort_keys(struct hist_
+ 	char *fields_str = hist_data->attrs->sort_key_str;
+ 	struct tracing_map_sort_key *sort_key;
+ 	int descending, ret = 0;
+-	unsigned int i, j;
++	unsigned int i, j, k;
+ 
+ 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
+ 
+@@ -811,13 +968,21 @@ static int create_sort_keys(struct hist_
+ 			continue;
+ 		}
+ 
+-		for (j = 1; j < hist_data->n_fields; j++) {
++		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
++			unsigned idx;
++
+ 			hist_field = hist_data->fields[j];
++			if (hist_field->flags & HIST_FIELD_FL_VAR_ONLY)
++				continue;
++
++			idx = k++;
++
+ 			test_name = hist_field_name(hist_field, 0);
++
+ 			if (test_name == NULL)
+ 				continue;
+ 			if (strcmp(field_name, test_name) == 0) {
+-				sort_key->field_idx = j;
++				sort_key->field_idx = idx;
+ 				descending = is_descending(field_str);
+ 				if (descending < 0) {
+ 					ret = descending;
+@@ -832,6 +997,7 @@ static int create_sort_keys(struct hist_
+ 			break;
+ 		}
+ 	}
++
+ 	hist_data->n_sort_keys = i;
+  out:
+ 	return ret;
+@@ -872,12 +1038,19 @@ static int create_tracing_map_fields(str
+ 			idx = tracing_map_add_key_field(map,
+ 							hist_field->offset,
+ 							cmp_fn);
+-
+-		} else
++		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
+ 			idx = tracing_map_add_sum_field(map);
+ 
+ 		if (idx < 0)
+ 			return idx;
++
++		if (hist_field->flags & HIST_FIELD_FL_VAR) {
++			idx = tracing_map_add_var(map);
++			if (idx < 0)
++				return idx;
++			hist_field->var.idx = idx;
++			hist_field->var.hist_data = hist_data;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -901,7 +1074,8 @@ static bool need_tracing_map_ops(struct
+ static struct hist_trigger_data *
+ create_hist_data(unsigned int map_bits,
+ 		 struct hist_trigger_attrs *attrs,
+-		 struct trace_event_file *file)
++		 struct trace_event_file *file,
++		 bool remove)
+ {
+ 	const struct tracing_map_ops *map_ops = NULL;
+ 	struct hist_trigger_data *hist_data;
+@@ -912,6 +1086,7 @@ create_hist_data(unsigned int map_bits,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	hist_data->attrs = attrs;
++	hist_data->remove = remove;
+ 
+ 	ret = create_hist_fields(hist_data, file);
+ 	if (ret)
+@@ -958,14 +1133,29 @@ static void hist_trigger_elt_update(stru
+ 				    struct ring_buffer_event *rbe)
+ {
+ 	struct hist_field *hist_field;
+-	unsigned int i;
++	unsigned int i, var_idx;
+ 	u64 hist_val;
+ 
+ 	for_each_hist_val_field(i, hist_data) {
+ 		hist_field = hist_data->fields[i];
+-		hist_val = hist_field->fn(hist_field, rec, rbe);
++		hist_val = hist_field->fn(hist_field, rbe, rec);
++		if (hist_field->flags & HIST_FIELD_FL_VAR) {
++			var_idx = hist_field->var.idx;
++			tracing_map_set_var(elt, var_idx, hist_val);
++			if (hist_field->flags & HIST_FIELD_FL_VAR_ONLY)
++				continue;
++		}
+ 		tracing_map_update_sum(elt, i, hist_val);
+ 	}
++
++	for_each_hist_key_field(i, hist_data) {
++		hist_field = hist_data->fields[i];
++		if (hist_field->flags & HIST_FIELD_FL_VAR) {
++			hist_val = hist_field->fn(hist_field, rbe, rec);
++			var_idx = hist_field->var.idx;
++			tracing_map_set_var(elt, var_idx, hist_val);
++		}
++	}
+ }
+ 
+ static inline void add_to_key(char *compound_key, void *key,
+@@ -1140,6 +1330,9 @@ hist_trigger_entry_print(struct seq_file
+ 	for (i = 1; i < hist_data->n_vals; i++) {
+ 		field_name = hist_field_name(hist_data->fields[i], 0);
+ 
++		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR)
++			continue;
++
+ 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
+ 			seq_printf(m, "  %s: %10llx", field_name,
+ 				   tracing_map_read_sum(elt, i));
+@@ -1263,6 +1456,9 @@ static void hist_field_print(struct seq_
+ {
+ 	const char *field_name = hist_field_name(hist_field, 0);
+ 
++	if (hist_field->var.name)
++		seq_printf(m, "%s=", hist_field->var.name);
++
+ 	if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		seq_puts(m, "$common_timestamp");
+ 	else if (field_name)
+@@ -1281,7 +1477,8 @@ static int event_hist_trigger_print(stru
+ 				    struct event_trigger_data *data)
+ {
+ 	struct hist_trigger_data *hist_data = data->private_data;
+-	struct hist_field *key_field;
++	bool have_var_only = false;
++	struct hist_field *field;
+ 	unsigned int i;
+ 
+ 	seq_puts(m, "hist:");
+@@ -1292,25 +1489,47 @@ static int event_hist_trigger_print(stru
+ 	seq_puts(m, "keys=");
+ 
+ 	for_each_hist_key_field(i, hist_data) {
+-		key_field = hist_data->fields[i];
++		field = hist_data->fields[i];
+ 
+ 		if (i > hist_data->n_vals)
+ 			seq_puts(m, ",");
+ 
+-		if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
++		if (field->flags & HIST_FIELD_FL_STACKTRACE)
+ 			seq_puts(m, "stacktrace");
+ 		else
+-			hist_field_print(m, key_field);
++			hist_field_print(m, field);
+ 	}
+ 
+ 	seq_puts(m, ":vals=");
+ 
+ 	for_each_hist_val_field(i, hist_data) {
++		field = hist_data->fields[i];
++		if (field->flags & HIST_FIELD_FL_VAR_ONLY) {
++			have_var_only = true;
++			continue;
++		}
++
+ 		if (i == HITCOUNT_IDX)
+ 			seq_puts(m, "hitcount");
+ 		else {
+ 			seq_puts(m, ",");
+-			hist_field_print(m, hist_data->fields[i]);
++			hist_field_print(m, field);
++		}
++	}
++
++	if (have_var_only) {
++		unsigned int n = 0;
++
++		seq_puts(m, ":");
++
++		for_each_hist_val_field(i, hist_data) {
++			field = hist_data->fields[i];
++
++			if (field->flags & HIST_FIELD_FL_VAR_ONLY) {
++				if (n++)
++					seq_puts(m, ",");
++				hist_field_print(m, field);
++			}
+ 		}
+ 	}
+ 
+@@ -1318,7 +1537,10 @@ static int event_hist_trigger_print(stru
+ 
+ 	for (i = 0; i < hist_data->n_sort_keys; i++) {
+ 		struct tracing_map_sort_key *sort_key;
+-		unsigned int idx;
++		unsigned int idx, first_key_idx;
++
++		/* skip VAR_ONLY vals */
++		first_key_idx = hist_data->n_vals - hist_data->n_var_only;
+ 
+ 		sort_key = &hist_data->sort_keys[i];
+ 		idx = sort_key->field_idx;
+@@ -1331,8 +1553,11 @@ static int event_hist_trigger_print(stru
+ 
+ 		if (idx == HITCOUNT_IDX)
+ 			seq_puts(m, "hitcount");
+-		else
++		else {
++			if (idx >= first_key_idx)
++				idx += hist_data->n_var_only;
+ 			hist_field_print(m, hist_data->fields[idx]);
++		}
+ 
+ 		if (sort_key->descending)
+ 			seq_puts(m, ".descending");
+@@ -1656,12 +1881,16 @@ static int event_hist_trigger_func(struc
+ 	struct hist_trigger_attrs *attrs;
+ 	struct event_trigger_ops *trigger_ops;
+ 	struct hist_trigger_data *hist_data;
++	bool remove = false;
+ 	char *trigger;
+ 	int ret = 0;
+ 
+ 	if (!param)
+ 		return -EINVAL;
+ 
++	if (glob[0] == '!')
++		remove = true;
++
+ 	/* separate the trigger from the filter (k:v [if filter]) */
+ 	trigger = strsep(&param, " \t");
+ 	if (!trigger)
+@@ -1674,7 +1903,7 @@ static int event_hist_trigger_func(struc
+ 	if (attrs->map_bits)
+ 		hist_trigger_bits = attrs->map_bits;
+ 
+-	hist_data = create_hist_data(hist_trigger_bits, attrs, file);
++	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
+ 	if (IS_ERR(hist_data)) {
+ 		destroy_hist_trigger_attrs(attrs);
+ 		return PTR_ERR(hist_data);
+@@ -1703,7 +1932,7 @@ static int event_hist_trigger_func(struc
+ 			goto out_free;
+ 	}
+ 
+-	if (glob[0] == '!') {
++	if (remove) {
+ 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ 		ret = 0;
+ 		goto out_free;
diff --git a/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch b/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
index 6c44c4a..a2dbe3a 100644
--- a/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
+++ b/debian/patches/features/all/rt/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
@@ -1,9 +1,8 @@
-From 1c3c5eab171590f86edd8d31389d61dd1efe3037 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 16 May 2017 20:42:48 +0200
 Subject: [PATCH 17/17] sched/core: Enable might_sleep() and smp_processor_id()
  checks early
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 might_sleep() and smp_processor_id() checks are enabled after the boot
 process is done. That hides bugs in the SMP bringup and driver
diff --git a/debian/patches/features/all/rt/0017-tracing-Account-for-variables-in-named-trigger-compa.patch b/debian/patches/features/all/rt/0017-tracing-Account-for-variables-in-named-trigger-compa.patch
new file mode 100644
index 0000000..36d7a2f
--- /dev/null
+++ b/debian/patches/features/all/rt/0017-tracing-Account-for-variables-in-named-trigger-compa.patch
@@ -0,0 +1,43 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:18 -0500
+Subject: [PATCH 17/32] tracing: Account for variables in named trigger
+ compatibility
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Named triggers must also have the same set of variables in order to be
+considered compatible - update the trigger match test to account for
+that.
+
+The reason for this requirement is that named triggers with variables
+are meant to allow one or more events to set the same variable.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1545,7 +1545,7 @@ static int event_hist_trigger_print(stru
+ 		sort_key = &hist_data->sort_keys[i];
+ 		idx = sort_key->field_idx;
+ 
+-		if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
++		if (WARN_ON(idx >= HIST_FIELDS_MAX))
+ 			return -EINVAL;
+ 
+ 		if (i > 0)
+@@ -1733,6 +1733,12 @@ static bool hist_trigger_match(struct ev
+ 			return false;
+ 		if (key_field->is_signed != key_field_test->is_signed)
+ 			return false;
++		if ((key_field->var.name && !key_field_test->var.name) ||
++		    (!key_field->var.name && key_field_test->var.name))
++			return false;
++		if ((key_field->var.name && key_field_test->var.name) &&
++		     strcmp(key_field->var.name, key_field_test->var.name) != 0)
++			return false;
+ 	}
+ 
+ 	for (i = 0; i < hist_data->n_sort_keys; i++) {
diff --git a/debian/patches/features/all/rt/0018-tracing-Add-simple-expression-support-to-hist-trigge.patch b/debian/patches/features/all/rt/0018-tracing-Add-simple-expression-support-to-hist-trigge.patch
new file mode 100644
index 0000000..1f6a8de
--- /dev/null
+++ b/debian/patches/features/all/rt/0018-tracing-Add-simple-expression-support-to-hist-trigge.patch
@@ -0,0 +1,603 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:19 -0500
+Subject: [PATCH 18/32] tracing: Add simple expression support to hist triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add support for simple addition, subtraction, and unary expressions
+(-(expr) and expr, where expr = b-a, a+b, a+b+c) to hist triggers, in
+order to support a minimal set of useful inter-event calculations.
+
+These operations are needed for calculating latencies between events
+(timestamp1-timestamp0) and for combined latencies (latencies over 3
+or more events).
+
+In the process, factor out some common code from key and value
+parsing.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  457 +++++++++++++++++++++++++++++++++------
+ 1 file changed, 390 insertions(+), 67 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -32,6 +32,13 @@ typedef u64 (*hist_field_fn_t) (struct h
+ #define HIST_FIELD_OPERANDS_MAX	2
+ #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
+ 
++enum field_op_id {
++	FIELD_OP_NONE,
++	FIELD_OP_PLUS,
++	FIELD_OP_MINUS,
++	FIELD_OP_UNARY_MINUS,
++};
++
+ struct hist_var {
+ 	char				*name;
+ 	struct hist_trigger_data	*hist_data;
+@@ -48,6 +55,8 @@ struct hist_field {
+ 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
+ 	struct hist_trigger_data	*hist_data;
+ 	struct hist_var			var;
++	enum field_op_id		operator;
++	char				*name;
+ };
+ 
+ static u64 hist_field_none(struct hist_field *field, void *event,
+@@ -98,6 +107,41 @@ static u64 hist_field_log2(struct hist_f
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+ 
++static u64 hist_field_plus(struct hist_field *hist_field, void *event,
++			   struct ring_buffer_event *rbe)
++{
++	struct hist_field *operand1 = hist_field->operands[0];
++	struct hist_field *operand2 = hist_field->operands[1];
++
++	u64 val1 = operand1->fn(operand1, event, rbe);
++	u64 val2 = operand2->fn(operand2, event, rbe);
++
++	return val1 + val2;
++}
++
++static u64 hist_field_minus(struct hist_field *hist_field, void *event,
++			    struct ring_buffer_event *rbe)
++{
++	struct hist_field *operand1 = hist_field->operands[0];
++	struct hist_field *operand2 = hist_field->operands[1];
++
++	u64 val1 = operand1->fn(operand1, event, rbe);
++	u64 val2 = operand2->fn(operand2, event, rbe);
++
++	return val1 - val2;
++}
++
++static u64 hist_field_unary_minus(struct hist_field *hist_field, void *event,
++				  struct ring_buffer_event *rbe)
++{
++	struct hist_field *operand = hist_field->operands[0];
++
++	s64 sval = (s64)operand->fn(operand, event, rbe);
++	u64 val = (u64)-sval;
++
++	return val;
++}
++
+ #define DEFINE_HIST_FIELD_FN(type)					\
+ 	static u64 hist_field_##type(struct hist_field *hist_field,	\
+ 				     void *event,			\
+@@ -148,6 +192,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_TIMESTAMP_USECS	= 2048,
+ 	HIST_FIELD_FL_VAR		= 4096,
+ 	HIST_FIELD_FL_VAR_ONLY		= 8192,
++	HIST_FIELD_FL_EXPR		= 16384,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -210,6 +255,8 @@ static const char *hist_field_name(struc
+ 		field_name = hist_field_name(field->operands[0], ++level);
+ 	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		field_name = "$common_timestamp";
++	else if (field->flags & HIST_FIELD_FL_EXPR)
++		field_name = field->name;
+ 
+ 	if (field_name == NULL)
+ 		field_name = "";
+@@ -444,6 +491,73 @@ static const struct tracing_map_ops hist
+ 	.elt_init	= hist_trigger_elt_comm_init,
+ };
+ 
++static char *expr_str(struct hist_field *field, unsigned int level)
++{
++	char *expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
++
++	if (!expr || level > 1)
++		return NULL;
++
++	if (field->operator == FIELD_OP_UNARY_MINUS) {
++		char *subexpr;
++
++		strcat(expr, "-(");
++		subexpr = expr_str(field->operands[0], ++level);
++		if (!subexpr) {
++			kfree(expr);
++			return NULL;
++		}
++		strcat(expr, subexpr);
++		strcat(expr, ")");
++
++		return expr;
++	}
++
++	strcat(expr, hist_field_name(field->operands[0], 0));
++
++	switch (field->operator) {
++	case FIELD_OP_MINUS:
++		strcat(expr, "-");
++		break;
++	case FIELD_OP_PLUS:
++		strcat(expr, "+");
++		break;
++	default:
++		kfree(expr);
++		return NULL;
++	}
++
++	strcat(expr, hist_field_name(field->operands[1], 0));
++
++	return expr;
++}
++
++static int contains_operator(char *str)
++{
++	enum field_op_id field_op = FIELD_OP_NONE;
++	char *op;
++
++	op = strpbrk(str, "+-");
++	if (!op)
++		return FIELD_OP_NONE;
++
++	switch (*op) {
++	case '-':
++		if (*str == '-')
++			field_op = FIELD_OP_UNARY_MINUS;
++		else
++			field_op = FIELD_OP_MINUS;
++		break;
++	case '+':
++		field_op = FIELD_OP_PLUS;
++		break;
++	default:
++		break;
++	}
++
++	return field_op;
++}
++
+ static void destroy_hist_field(struct hist_field *hist_field,
+ 			       unsigned int level)
+ {
+@@ -459,6 +573,7 @@ static void destroy_hist_field(struct hi
+ 		destroy_hist_field(hist_field->operands[i], ++level);
+ 
+ 	kfree(hist_field->var.name);
++	kfree(hist_field->name);
+ 
+ 	kfree(hist_field);
+ }
+@@ -479,6 +594,9 @@ static struct hist_field *create_hist_fi
+ 
+ 	hist_field->hist_data = hist_data;
+ 
++	if (flags & HIST_FIELD_FL_EXPR)
++		goto out; /* caller will populate */
++
+ 	if (flags & HIST_FIELD_FL_HITCOUNT) {
+ 		hist_field->fn = hist_field_counter;
+ 		goto out;
+@@ -551,6 +669,247 @@ static void destroy_hist_fields(struct h
+ 	}
+ }
+ 
++static struct ftrace_event_field *
++parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
++	    char *field_str, unsigned long *flags)
++{
++	struct ftrace_event_field *field = NULL;
++	char *field_name;
++
++	field_name = strsep(&field_str, ".");
++	if (field_str) {
++		if (strcmp(field_str, "hex") == 0)
++			*flags |= HIST_FIELD_FL_HEX;
++		else if (strcmp(field_str, "sym") == 0)
++			*flags |= HIST_FIELD_FL_SYM;
++		else if (strcmp(field_str, "sym-offset") == 0)
++			*flags |= HIST_FIELD_FL_SYM_OFFSET;
++		else if ((strcmp(field_str, "execname") == 0) &&
++			 (strcmp(field_name, "common_pid") == 0))
++			*flags |= HIST_FIELD_FL_EXECNAME;
++		else if (strcmp(field_str, "syscall") == 0)
++			*flags |= HIST_FIELD_FL_SYSCALL;
++		else if (strcmp(field_str, "log2") == 0)
++			*flags |= HIST_FIELD_FL_LOG2;
++		else if (strcmp(field_str, "usecs") == 0)
++			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
++		else
++			return ERR_PTR(-EINVAL);
++	}
++
++	if (strcmp(field_name, "$common_timestamp") == 0) {
++		*flags |= HIST_FIELD_FL_TIMESTAMP;
++		hist_data->enable_timestamps = true;
++		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
++			hist_data->attrs->ts_in_usecs = true;
++	} else {
++		field = trace_find_event_field(file->event_call, field_name);
++		if (!field)
++			return ERR_PTR(-EINVAL);
++	}
++
++	return field;
++}
++
++struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
++			      struct trace_event_file *file, char *str,
++			      unsigned long *flags, char *var_name)
++{
++	struct ftrace_event_field *field = NULL;
++	struct hist_field *hist_field = NULL;
++	int ret = 0;
++
++	field = parse_field(hist_data, file, str, flags);
++	if (IS_ERR(field)) {
++		ret = PTR_ERR(field);
++		goto out;
++	}
++
++	hist_field = create_hist_field(hist_data, field, *flags, var_name);
++	if (!hist_field) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	return hist_field;
++ out:
++	return ERR_PTR(ret);
++}
++
++static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
++				     struct trace_event_file *file,
++				     char *str, unsigned long flags,
++				     char *var_name, unsigned int level);
++
++static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
++				      struct trace_event_file *file,
++				      char *str, unsigned long flags,
++				      char *var_name, unsigned int level)
++{
++	struct hist_field *operand1, *expr = NULL;
++	unsigned long operand_flags;
++	char *operand1_str;
++	int ret = 0;
++	char *s;
++
++	// we support only -(xxx) i.e. explicit parens required
++
++	if (level > 2) {
++		ret = -EINVAL;
++		goto free;
++	}
++
++	str++; // skip leading '-'
++
++	s = strchr(str, '(');
++	if (s)
++		str++;
++	else {
++		ret = -EINVAL;
++		goto free;
++	}
++
++	s = strchr(str, ')');
++	if (s)
++		*s = '\0';
++	else {
++		ret = -EINVAL; // no closing ')'
++		goto free;
++	}
++
++	operand1_str = strsep(&str, "(");
++	if (!operand1_str)
++		goto free;
++
++	flags |= HIST_FIELD_FL_EXPR;
++	expr = create_hist_field(hist_data, NULL, flags, var_name);
++	if (!expr) {
++		ret = -ENOMEM;
++		goto free;
++	}
++
++	operand_flags = 0;
++	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
++	if (IS_ERR(operand1)) {
++		ret = PTR_ERR(operand1);
++		goto free;
++	}
++
++	if (operand1 == NULL) {
++		operand_flags = 0;
++		operand1 = parse_atom(hist_data, file, operand1_str,
++				      &operand_flags, NULL);
++		if (IS_ERR(operand1)) {
++			ret = PTR_ERR(operand1);
++			goto free;
++		}
++	}
++
++	expr->fn = hist_field_unary_minus;
++	expr->operands[0] = operand1;
++	expr->operator = FIELD_OP_UNARY_MINUS;
++	expr->name = expr_str(expr, 0);
++
++	return expr;
++ free:
++	return ERR_PTR(ret);
++}
++
++static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
++				     struct trace_event_file *file,
++				     char *str, unsigned long flags,
++				     char *var_name, unsigned int level)
++{
++	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
++	unsigned long operand_flags;
++	int field_op, ret = -EINVAL;
++	char *sep, *operand1_str;
++
++	if (level > 2)
++		return NULL;
++
++	field_op = contains_operator(str);
++	if (field_op == FIELD_OP_NONE)
++		return NULL;
++
++	if (field_op == FIELD_OP_UNARY_MINUS)
++		return parse_unary(hist_data, file, str, flags, var_name, ++level);
++
++	switch (field_op) {
++	case FIELD_OP_MINUS:
++		sep = "-";
++		break;
++	case FIELD_OP_PLUS:
++		sep = "+";
++		break;
++	default:
++		goto free;
++	}
++
++	operand1_str = strsep(&str, sep);
++	if (!operand1_str || !str)
++		goto free;
++
++	operand_flags = 0;
++	operand1 = parse_atom(hist_data, file, operand1_str,
++			      &operand_flags, NULL);
++	if (IS_ERR(operand1)) {
++		ret = PTR_ERR(operand1);
++		operand1 = NULL;
++		goto free;
++	}
++
++	// rest of string could be another expression e.g. b+c in a+b+c
++	operand_flags = 0;
++	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
++	if (IS_ERR(operand2)) {
++		ret = PTR_ERR(operand2);
++		operand2 = NULL;
++		goto free;
++	}
++	if (!operand2) {
++		operand_flags = 0;
++		operand2 = parse_atom(hist_data, file, str,
++				      &operand_flags, NULL);
++		if (IS_ERR(operand2)) {
++			ret = PTR_ERR(operand2);
++			operand2 = NULL;
++			goto free;
++		}
++	}
++
++	flags |= HIST_FIELD_FL_EXPR;
++	expr = create_hist_field(hist_data, NULL, flags, var_name);
++	if (!expr) {
++		ret = -ENOMEM;
++		goto free;
++	}
++
++	expr->operands[0] = operand1;
++	expr->operands[1] = operand2;
++	expr->operator = field_op;
++	expr->name = expr_str(expr, 0);
++
++	switch (field_op) {
++	case FIELD_OP_MINUS:
++		expr->fn = hist_field_minus;
++		break;
++	case FIELD_OP_PLUS:
++		expr->fn = hist_field_plus;
++		break;
++	default:
++		goto free;
++	}
++
++	return expr;
++ free:
++	destroy_hist_field(operand1, 0);
++	destroy_hist_field(operand2, 0);
++	destroy_hist_field(expr, 0);
++
++	return ERR_PTR(ret);
++}
++
+ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ {
+ 	hist_data->fields[HITCOUNT_IDX] =
+@@ -609,9 +968,9 @@ static int create_val_field(struct hist_
+ 			    struct trace_event_file *file,
+ 			    char *field_str, bool var_only)
+ {
+-	struct ftrace_event_field *field = NULL;
+-	char *field_name, *var_name;
++	struct hist_field *hist_field;
+ 	unsigned long flags = 0;
++	char *var_name;
+ 	int ret = 0;
+ 
+ 	if (WARN_ON(!var_only && val_idx >= TRACING_MAP_VALS_MAX))
+@@ -642,37 +1001,27 @@ static int create_val_field(struct hist_
+ 		goto out;
+ 	}
+ 
+-	field_name = strsep(&field_str, ".");
+-	if (field_str) {
+-		if (strcmp(field_str, "hex") == 0)
+-			flags |= HIST_FIELD_FL_HEX;
+-		else {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
++	if (IS_ERR(hist_field)) {
++		ret = PTR_ERR(hist_field);
++		goto out;
+ 	}
+ 
+-	if (strcmp(field_name, "$common_timestamp") == 0) {
+-		flags |= HIST_FIELD_FL_TIMESTAMP;
+-		hist_data->enable_timestamps = true;
+-	} else {
+-		field = trace_find_event_field(file->event_call, field_name);
+-		if (!field) {
+-			ret = -EINVAL;
++	if (!hist_field) {
++		hist_field = parse_atom(hist_data, file, field_str,
++					&flags, var_name);
++		if (IS_ERR(hist_field)) {
++			ret = PTR_ERR(hist_field);
+ 			goto out;
+ 		}
+ 	}
+ 
+-	hist_data->fields[val_idx] = create_hist_field(hist_data, field, flags, var_name);
+-	if (!hist_data->fields[val_idx]) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
++	hist_data->fields[val_idx] = hist_field;
+ 
+ 	++hist_data->n_vals;
+ 	++hist_data->n_fields;
+ 
+-	if (hist_data->fields[val_idx]->flags & HIST_FIELD_FL_VAR_ONLY)
++	if (hist_field->flags & HIST_FIELD_FL_VAR_ONLY)
+ 		hist_data->n_var_only++;
+ 
+ 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
+@@ -726,8 +1075,8 @@ static int create_key_field(struct hist_
+ 			    struct trace_event_file *file,
+ 			    char *field_str)
+ {
+-	struct ftrace_event_field *field = NULL;
+ 	struct hist_field *hist_field = NULL;
++
+ 	unsigned long flags = 0;
+ 	unsigned int key_size;
+ 	char *var_name;
+@@ -754,60 +1103,33 @@ static int create_key_field(struct hist_
+ 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
+ 		hist_field = create_hist_field(hist_data, NULL, flags, var_name);
+ 	} else {
+-		char *field_name = strsep(&field_str, ".");
+-
+-		if (field_str) {
+-			if (strcmp(field_str, "hex") == 0)
+-				flags |= HIST_FIELD_FL_HEX;
+-			else if (strcmp(field_str, "sym") == 0)
+-				flags |= HIST_FIELD_FL_SYM;
+-			else if (strcmp(field_str, "sym-offset") == 0)
+-				flags |= HIST_FIELD_FL_SYM_OFFSET;
+-			else if ((strcmp(field_str, "execname") == 0) &&
+-				 (strcmp(field_name, "common_pid") == 0))
+-				flags |= HIST_FIELD_FL_EXECNAME;
+-			else if (strcmp(field_str, "syscall") == 0)
+-				flags |= HIST_FIELD_FL_SYSCALL;
+-			else if (strcmp(field_str, "log2") == 0)
+-				flags |= HIST_FIELD_FL_LOG2;
+-			else if (strcmp(field_str, "usecs") == 0)
+-				flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
+-			else {
+-				ret = -EINVAL;
+-				goto out;
+-			}
++		hist_field = parse_expr(hist_data, file, field_str, flags,
++					var_name, 0);
++		if (IS_ERR(hist_field)) {
++			ret = PTR_ERR(hist_field);
++			goto out;
+ 		}
+ 
+-		if (strcmp(field_name, "$common_timestamp") == 0) {
+-			flags |= HIST_FIELD_FL_TIMESTAMP;
+-			hist_data->enable_timestamps = true;
+-			if (flags & HIST_FIELD_FL_TIMESTAMP_USECS)
+-				hist_data->attrs->ts_in_usecs = true;
+-			key_size = sizeof(u64);
+-		} else {
+-			field = trace_find_event_field(file->event_call, field_name);
+-			if (!field) {
+-				ret = -EINVAL;
++		if (!hist_field) {
++			hist_field = parse_atom(hist_data, file, field_str,
++						&flags, var_name);
++			if (IS_ERR(hist_field)) {
++				ret = PTR_ERR(hist_field);
+ 				goto out;
+ 			}
+-
+-			if (is_string_field(field))
+-				key_size = MAX_FILTER_STR_VAL;
+-			else
+-				key_size = field->size;
+ 		}
+-	}
+ 
+-	hist_data->fields[key_idx] = create_hist_field(hist_data, field, flags, var_name);
+-	if (!hist_data->fields[key_idx]) {
+-		ret = -ENOMEM;
+-		goto out;
++		key_size = hist_field->size;
+ 	}
+ 
++	hist_data->fields[key_idx] = hist_field;
++
+ 	key_size = ALIGN(key_size, sizeof(u64));
+ 	hist_data->fields[key_idx]->size = key_size;
+ 	hist_data->fields[key_idx]->offset = key_offset;
++
+ 	hist_data->key_size += key_size;
++
+ 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -1330,7 +1652,8 @@ hist_trigger_entry_print(struct seq_file
+ 	for (i = 1; i < hist_data->n_vals; i++) {
+ 		field_name = hist_field_name(hist_data->fields[i], 0);
+ 
+-		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR)
++		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
++		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
+ 			continue;
+ 
+ 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
diff --git a/debian/patches/features/all/rt/0019-tracing-Add-variable-reference-handling-to-hist-trig.patch b/debian/patches/features/all/rt/0019-tracing-Add-variable-reference-handling-to-hist-trig.patch
new file mode 100644
index 0000000..6918dda
--- /dev/null
+++ b/debian/patches/features/all/rt/0019-tracing-Add-variable-reference-handling-to-hist-trig.patch
@@ -0,0 +1,1123 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:20 -0500
+Subject: [PATCH 19/32] tracing: Add variable reference handling to hist
+ triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add the necessary infrastructure to allow the variables defined on one
+event to be referenced in another.  This allows variables set by a
+previous event to be referenced and used in expressions combining the
+variable values saved by that previous event and the event fields of
+the current event.  For example, here's how a latency can be
+calculated and saved into yet another variable named 'wakeup_lat':
+
+    # echo 'hist:keys=pid,prio:ts0=common_timestamp ...
+    # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...
+
+In the first event, the event's timestamp is saved into the variable
+ts0.  In the next line, ts0 is subtracted from the second event's
+timestamp to produce the latency.
+
+Further users of variable references will be described in subsequent
+patches, such as for instance how the 'wakeup_lat' variable above can
+be displayed in a latency histogram.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace.h                |    2 
+ kernel/trace/trace_events_hist.c    |  719 +++++++++++++++++++++++++++++-------
+ kernel/trace/trace_events_trigger.c |    6 
+ 3 files changed, 604 insertions(+), 123 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1448,6 +1448,8 @@ extern void pause_named_trigger(struct e
+ extern void unpause_named_trigger(struct event_trigger_data *data);
+ extern void set_named_trigger_data(struct event_trigger_data *data,
+ 				   struct event_trigger_data *named_data);
++extern struct event_trigger_data *
++get_named_trigger_data(struct event_trigger_data *data);
+ extern int register_event_command(struct event_command *cmd);
+ extern int unregister_event_command(struct event_command *cmd);
+ extern int register_trigger_hist_enable_disable_cmds(void);
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -26,8 +26,10 @@
+ 
+ struct hist_field;
+ 
+-typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event,
+-				struct ring_buffer_event *rbe);
++typedef u64 (*hist_field_fn_t) (struct hist_field *field,
++				struct tracing_map_elt *elt,
++				struct ring_buffer_event *rbe,
++				void *event);
+ 
+ #define HIST_FIELD_OPERANDS_MAX	2
+ #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
+@@ -57,30 +59,41 @@ struct hist_field {
+ 	struct hist_var			var;
+ 	enum field_op_id		operator;
+ 	char				*name;
++	unsigned int			var_idx;
++	unsigned int			var_ref_idx;
++	bool                            read_once;
+ };
+ 
+-static u64 hist_field_none(struct hist_field *field, void *event,
+-			   struct ring_buffer_event *rbe)
++static u64 hist_field_none(struct hist_field *field,
++			   struct tracing_map_elt *elt,
++			   struct ring_buffer_event *rbe,
++			   void *event)
+ {
+ 	return 0;
+ }
+ 
+-static u64 hist_field_counter(struct hist_field *field, void *event,
+-			      struct ring_buffer_event *rbe)
++static u64 hist_field_counter(struct hist_field *field,
++			      struct tracing_map_elt *elt,
++			      struct ring_buffer_event *rbe,
++			      void *event)
+ {
+ 	return 1;
+ }
+ 
+-static u64 hist_field_string(struct hist_field *hist_field, void *event,
+-			     struct ring_buffer_event *rbe)
++static u64 hist_field_string(struct hist_field *hist_field,
++			     struct tracing_map_elt *elt,
++			     struct ring_buffer_event *rbe,
++			     void *event)
+ {
+ 	char *addr = (char *)(event + hist_field->field->offset);
+ 
+ 	return (u64)(unsigned long)addr;
+ }
+ 
+-static u64 hist_field_dynstring(struct hist_field *hist_field, void *event,
+-				struct ring_buffer_event *rbe)
++static u64 hist_field_dynstring(struct hist_field *hist_field,
++				struct tracing_map_elt *elt,
++				struct ring_buffer_event *rbe,
++				void *event)
+ {
+ 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
+ 	int str_loc = str_item & 0xffff;
+@@ -89,54 +102,64 @@ static u64 hist_field_dynstring(struct h
+ 	return (u64)(unsigned long)addr;
+ }
+ 
+-static u64 hist_field_pstring(struct hist_field *hist_field, void *event,
+-			      struct ring_buffer_event *rbe)
++static u64 hist_field_pstring(struct hist_field *hist_field,
++			      struct tracing_map_elt *elt,
++			      struct ring_buffer_event *rbe,
++			      void *event)
+ {
+ 	char **addr = (char **)(event + hist_field->field->offset);
+ 
+ 	return (u64)(unsigned long)*addr;
+ }
+ 
+-static u64 hist_field_log2(struct hist_field *hist_field, void *event,
+-			   struct ring_buffer_event *rbe)
++static u64 hist_field_log2(struct hist_field *hist_field,
++			   struct tracing_map_elt *elt,
++			   struct ring_buffer_event *rbe,
++			   void *event)
+ {
+ 	struct hist_field *operand = hist_field->operands[0];
+ 
+-	u64 val = operand->fn(operand, event, rbe);
++	u64 val = operand->fn(operand, elt, rbe, event);
+ 
+ 	return (u64) ilog2(roundup_pow_of_two(val));
+ }
+ 
+-static u64 hist_field_plus(struct hist_field *hist_field, void *event,
+-			   struct ring_buffer_event *rbe)
++static u64 hist_field_plus(struct hist_field *hist_field,
++			   struct tracing_map_elt *elt,
++			   struct ring_buffer_event *rbe,
++			   void *event)
+ {
+ 	struct hist_field *operand1 = hist_field->operands[0];
+ 	struct hist_field *operand2 = hist_field->operands[1];
+ 
+-	u64 val1 = operand1->fn(operand1, event, rbe);
+-	u64 val2 = operand2->fn(operand2, event, rbe);
++	u64 val1 = operand1->fn(operand1, elt, rbe, event);
++	u64 val2 = operand2->fn(operand2, elt, rbe, event);
+ 
+ 	return val1 + val2;
+ }
+ 
+-static u64 hist_field_minus(struct hist_field *hist_field, void *event,
+-			    struct ring_buffer_event *rbe)
++static u64 hist_field_minus(struct hist_field *hist_field,
++			    struct tracing_map_elt *elt,
++			    struct ring_buffer_event *rbe,
++			    void *event)
+ {
+ 	struct hist_field *operand1 = hist_field->operands[0];
+ 	struct hist_field *operand2 = hist_field->operands[1];
+ 
+-	u64 val1 = operand1->fn(operand1, event, rbe);
+-	u64 val2 = operand2->fn(operand2, event, rbe);
++	u64 val1 = operand1->fn(operand1, elt, rbe, event);
++	u64 val2 = operand2->fn(operand2, elt, rbe, event);
+ 
+ 	return val1 - val2;
+ }
+ 
+-static u64 hist_field_unary_minus(struct hist_field *hist_field, void *event,
+-				  struct ring_buffer_event *rbe)
++static u64 hist_field_unary_minus(struct hist_field *hist_field,
++				  struct tracing_map_elt *elt,
++				  struct ring_buffer_event *rbe,
++				  void *event)
+ {
+ 	struct hist_field *operand = hist_field->operands[0];
+ 
+-	s64 sval = (s64)operand->fn(operand, event, rbe);
++	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
+ 	u64 val = (u64)-sval;
+ 
+ 	return val;
+@@ -144,8 +167,9 @@ static u64 hist_field_unary_minus(struct
+ 
+ #define DEFINE_HIST_FIELD_FN(type)					\
+ 	static u64 hist_field_##type(struct hist_field *hist_field,	\
+-				     void *event,			\
+-				     struct ring_buffer_event *rbe)	\
++				     struct tracing_map_elt *elt,	\
++				     struct ring_buffer_event *rbe,	\
++				     void *event)			\
+ {									\
+ 	type *addr = (type *)(event + hist_field->field->offset);	\
+ 									\
+@@ -193,6 +217,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_VAR		= 4096,
+ 	HIST_FIELD_FL_VAR_ONLY		= 8192,
+ 	HIST_FIELD_FL_EXPR		= 16384,
++	HIST_FIELD_FL_VAR_REF		= 32768,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -225,10 +250,14 @@ struct hist_trigger_data {
+ 	struct tracing_map		*map;
+ 	bool				enable_timestamps;
+ 	bool				remove;
++	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
++	unsigned int			n_var_refs;
+ };
+ 
+-static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
+-				struct ring_buffer_event *rbe)
++static u64 hist_field_timestamp(struct hist_field *hist_field,
++				struct tracing_map_elt *elt,
++				struct ring_buffer_event *rbe,
++				void *event)
+ {
+ 	struct hist_trigger_data *hist_data = hist_field->hist_data;
+ 	struct trace_array *tr = hist_data->event_file->tr;
+@@ -241,6 +270,324 @@ static u64 hist_field_timestamp(struct h
+ 	return ts;
+ }
+ 
++static LIST_HEAD(hist_var_list);
++
++struct hist_var_data {
++	struct list_head list;
++	struct hist_trigger_data *hist_data;
++};
++
++static struct hist_field *check_var_ref(struct hist_field *hist_field,
++					struct hist_trigger_data *var_data,
++					unsigned int var_idx)
++{
++	struct hist_field *found = NULL;
++
++	if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF) {
++		if (hist_field->var.idx == var_idx &&
++		    hist_field->var.hist_data == var_data) {
++			found = hist_field;
++		}
++	}
++
++	return found;
++}
++
++static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
++				       struct hist_trigger_data *var_data,
++				       unsigned int var_idx)
++{
++	struct hist_field *hist_field, *found = NULL;
++	unsigned int i, j;
++
++	for_each_hist_field(i, hist_data) {
++		hist_field = hist_data->fields[i];
++		found = check_var_ref(hist_field, var_data, var_idx);
++		if (found)
++			return found;
++
++		for (j = 0; j < HIST_FIELD_OPERANDS_MAX; j++) {
++			struct hist_field *operand;
++
++			operand = hist_field->operands[j];
++			found = check_var_ref(operand, var_data, var_idx);
++			if (found)
++				return found;
++		}
++	}
++
++	return found;
++}
++
++static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
++					   unsigned int var_idx)
++{
++	struct hist_field *found = NULL;
++	struct hist_var_data *var_data;
++
++	list_for_each_entry(var_data, &hist_var_list, list) {
++		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
++		if (found)
++			break;
++	}
++
++	return found;
++}
++
++static bool check_var_refs(struct hist_trigger_data *hist_data)
++{
++	struct hist_field *field;
++	bool found = false;
++	int i;
++
++	for_each_hist_field(i, hist_data) {
++		field = hist_data->fields[i];
++		if (field && field->flags & HIST_FIELD_FL_VAR) {
++			if (find_any_var_ref(hist_data, field->var.idx)) {
++				found = true;
++				break;
++			}
++		}
++	}
++
++	return found;
++}
++
++static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data, *found = NULL;
++
++	list_for_each_entry(var_data, &hist_var_list, list) {
++		if (var_data->hist_data == hist_data) {
++			found = var_data;
++			break;
++		}
++	}
++
++	return found;
++}
++
++static bool has_hist_vars(struct hist_trigger_data *hist_data)
++{
++	struct hist_field *hist_field;
++	bool found = false;
++	int i;
++
++	for_each_hist_field(i, hist_data) {
++		hist_field = hist_data->fields[i];
++		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR) {
++			found = true;
++			break;
++		}
++	}
++
++	return found;
++}
++
++static int save_hist_vars(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data;
++
++	var_data = find_hist_vars(hist_data);
++	if (var_data)
++		return 0;
++
++	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
++	if (!var_data)
++		return -ENOMEM;
++
++	var_data->hist_data = hist_data;
++	list_add(&var_data->list, &hist_var_list);
++
++	return 0;
++}
++
++static void remove_hist_vars(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data;
++
++	var_data = find_hist_vars(hist_data);
++	if (!var_data)
++		return;
++
++	if (WARN_ON(check_var_refs(hist_data)))
++		return;
++
++	list_del(&var_data->list);
++
++	kfree(var_data);
++}
++
++static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
++					 const char *var_name)
++{
++	struct hist_field *hist_field, *found = NULL;
++	int i;
++
++	for_each_hist_field(i, hist_data) {
++		hist_field = hist_data->fields[i];
++		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
++		    strcmp(hist_field->var.name, var_name) == 0) {
++			found = hist_field;
++			break;
++		}
++	}
++
++	return found;
++}
++
++static struct hist_field *find_var(struct trace_event_file *file,
++				   const char *var_name)
++{
++	struct hist_trigger_data *hist_data;
++	struct event_trigger_data *test;
++	struct hist_field *hist_field;
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			hist_data = test->private_data;
++			hist_field = find_var_field(hist_data, var_name);
++			if (hist_field)
++				return hist_field;
++		}
++	}
++
++	return NULL;
++}
++
++static struct trace_event_file *find_var_file(const char *system,
++					      const char *event_name,
++					      const char *var_name)
++{
++	struct hist_trigger_data *var_hist_data;
++	struct hist_var_data *var_data;
++	struct trace_event_call *call;
++	struct trace_event_file *file;
++	const char *name;
++
++	list_for_each_entry(var_data, &hist_var_list, list) {
++		var_hist_data = var_data->hist_data;
++		file = var_hist_data->event_file;
++		call = file->event_call;
++		name = trace_event_name(call);
++
++		if (!system || !event_name) {
++			if (find_var(file, var_name))
++				return file;
++			continue;
++		}
++
++		if (strcmp(event_name, name) != 0)
++			continue;
++		if (strcmp(system, call->class->system) != 0)
++			continue;
++
++		return file;
++	}
++
++	return NULL;
++}
++
++static struct hist_field *find_file_var(struct trace_event_file *file,
++					const char *var_name)
++{
++	struct hist_trigger_data *test_data;
++	struct event_trigger_data *test;
++	struct hist_field *hist_field;
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			test_data = test->private_data;
++			hist_field = find_var_field(test_data, var_name);
++			if (hist_field)
++				return hist_field;
++		}
++	}
++
++	return NULL;
++}
++
++static struct hist_field *find_event_var(const char *system,
++					 const char *event_name,
++					 const char *var_name)
++{
++	struct hist_field *hist_field = NULL;
++	struct trace_event_file *file;
++
++	file = find_var_file(system, event_name, var_name);
++	if (!file)
++		return NULL;
++
++	hist_field = find_file_var(file, var_name);
++
++	return hist_field;
++}
++
++struct hist_elt_data {
++	char *comm;
++	u64 *var_ref_vals;
++};
++
++static u64 hist_field_var_ref(struct hist_field *hist_field,
++			      struct tracing_map_elt *elt,
++			      struct ring_buffer_event *rbe,
++			      void *event)
++{
++	struct hist_elt_data *elt_data;
++	u64 var_val = 0;
++
++	elt_data = elt->private_data;
++	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
++
++	return var_val;
++}
++
++static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
++			     u64 *var_ref_vals, bool self)
++{
++	struct hist_trigger_data *var_data;
++	struct tracing_map_elt *var_elt;
++	struct hist_field *hist_field;
++	unsigned int i, var_idx;
++	bool resolved = true;
++	u64 var_val = 0;
++
++	for (i = 0; i < hist_data->n_var_refs; i++) {
++		hist_field = hist_data->var_refs[i];
++		var_idx = hist_field->var.idx;
++		var_data = hist_field->var.hist_data;
++
++		if (var_data == NULL) {
++			resolved = false;
++			break;
++		}
++
++		if ((self && var_data != hist_data) ||
++		    (!self && var_data == hist_data))
++			continue;
++
++		var_elt = tracing_map_lookup(var_data->map, key);
++		if (!var_elt) {
++			resolved = false;
++			break;
++		}
++
++		if (!tracing_map_var_set(var_elt, var_idx)) {
++			resolved = false;
++			break;
++		}
++
++		if (self || !hist_field->read_once)
++			var_val = tracing_map_read_var(var_elt, var_idx);
++		else
++			var_val = tracing_map_read_var_once(var_elt, var_idx);
++
++		var_ref_vals[i] = var_val;
++	}
++
++	return resolved;
++}
++
+ static const char *hist_field_name(struct hist_field *field,
+ 				   unsigned int level)
+ {
+@@ -255,7 +602,8 @@ static const char *hist_field_name(struc
+ 		field_name = hist_field_name(field->operands[0], ++level);
+ 	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		field_name = "$common_timestamp";
+-	else if (field->flags & HIST_FIELD_FL_EXPR)
++	else if (field->flags & HIST_FIELD_FL_EXPR ||
++		 field->flags & HIST_FIELD_FL_VAR_REF)
+ 		field_name = field->name;
+ 
+ 	if (field_name == NULL)
+@@ -439,26 +787,36 @@ static inline void save_comm(char *comm,
+ 	memcpy(comm, task->comm, TASK_COMM_LEN);
+ }
+ 
+-static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
++static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
+ {
+-	kfree((char *)elt->private_data);
++	struct hist_elt_data *private_data = elt->private_data;
++
++	kfree(private_data->comm);
++	kfree(private_data);
+ }
+ 
+-static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
++static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
+ {
+ 	struct hist_trigger_data *hist_data = elt->map->private_data;
++	unsigned int size = TASK_COMM_LEN + 1;
++	struct hist_elt_data *elt_data;
+ 	struct hist_field *key_field;
+ 	unsigned int i;
+ 
++	elt->private_data = elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
++	if (!elt_data)
++		return -ENOMEM;
++
+ 	for_each_hist_key_field(i, hist_data) {
+ 		key_field = hist_data->fields[i];
+ 
+ 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+-			unsigned int size = TASK_COMM_LEN + 1;
+-
+-			elt->private_data = kzalloc(size, GFP_KERNEL);
+-			if (!elt->private_data)
++			elt_data->comm = kzalloc(size, GFP_KERNEL);
++			if (!elt_data->comm) {
++				kfree(elt_data);
++				elt->private_data = NULL;
+ 				return -ENOMEM;
++			}
+ 			break;
+ 		}
+ 	}
+@@ -466,29 +824,31 @@ static int hist_trigger_elt_comm_alloc(s
+ 	return 0;
+ }
+ 
+-static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
++static void hist_trigger_elt_data_copy(struct tracing_map_elt *to,
+ 				       struct tracing_map_elt *from)
+ {
+-	char *comm_from = from->private_data;
+-	char *comm_to = to->private_data;
++	struct hist_elt_data *from_data = from->private_data;
++	struct hist_elt_data *to_data = to->private_data;
++
++	memcpy(to_data, from_data, sizeof(*to));
+ 
+-	if (comm_from)
+-		memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
++	if (from_data->comm)
++		memcpy(to_data->comm, from_data->comm, TASK_COMM_LEN + 1);
+ }
+ 
+-static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
++static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
+ {
+-	char *comm = elt->private_data;
++	struct hist_elt_data *private_data = elt->private_data;
+ 
+-	if (comm)
+-		save_comm(comm, current);
++	if (private_data->comm)
++		save_comm(private_data->comm, current);
+ }
+ 
+-static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
+-	.elt_alloc	= hist_trigger_elt_comm_alloc,
+-	.elt_copy	= hist_trigger_elt_comm_copy,
+-	.elt_free	= hist_trigger_elt_comm_free,
+-	.elt_init	= hist_trigger_elt_comm_init,
++static const struct tracing_map_ops hist_trigger_elt_data_ops = {
++	.elt_alloc	= hist_trigger_elt_data_alloc,
++	.elt_copy	= hist_trigger_elt_data_copy,
++	.elt_free	= hist_trigger_elt_data_free,
++	.elt_init	= hist_trigger_elt_data_init,
+ };
+ 
+ static char *expr_str(struct hist_field *field, unsigned int level)
+@@ -513,6 +873,8 @@ static char *expr_str(struct hist_field
+ 		return expr;
+ 	}
+ 
++	if (field->operands[0]->flags & HIST_FIELD_FL_VAR_REF)
++		strcat(expr, "$");
+ 	strcat(expr, hist_field_name(field->operands[0], 0));
+ 
+ 	switch (field->operator) {
+@@ -527,6 +889,8 @@ static char *expr_str(struct hist_field
+ 		return NULL;
+ 	}
+ 
++	if (field->operands[1]->flags & HIST_FIELD_FL_VAR_REF)
++		strcat(expr, "$");
+ 	strcat(expr, hist_field_name(field->operands[1], 0));
+ 
+ 	return expr;
+@@ -597,6 +961,11 @@ static struct hist_field *create_hist_fi
+ 	if (flags & HIST_FIELD_FL_EXPR)
+ 		goto out; /* caller will populate */
+ 
++	if (flags & HIST_FIELD_FL_VAR_REF) {
++		hist_field->fn = hist_field_var_ref;
++		goto out;
++	}
++
+ 	if (flags & HIST_FIELD_FL_HITCOUNT) {
+ 		hist_field->fn = hist_field_counter;
+ 		goto out;
+@@ -669,6 +1038,44 @@ static void destroy_hist_fields(struct h
+ 	}
+ }
+ 
++static struct hist_field *create_var_ref(struct hist_field *var_field)
++{
++	unsigned long flags = HIST_FIELD_FL_VAR_REF;
++	struct hist_field *ref_field;
++
++	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
++	if (ref_field) {
++		ref_field->var.idx = var_field->var.idx;
++		ref_field->var.hist_data = var_field->hist_data;
++		ref_field->size = var_field->size;
++		ref_field->is_signed = var_field->is_signed;
++		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
++		if (!ref_field->name) {
++			destroy_hist_field(ref_field, 0);
++			return NULL;
++		}
++	}
++
++	return ref_field;
++}
++
++static struct hist_field *parse_var_ref(char *system, char *event_name,
++					char *var_name)
++{
++	struct hist_field *var_field = NULL, *ref_field = NULL;
++
++	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
++		return NULL;
++
++	var_name++;
++
++	var_field = find_event_var(system, event_name, var_name);
++	if (var_field)
++		ref_field = create_var_ref(var_field);
++
++	return ref_field;
++}
++
+ static struct ftrace_event_field *
+ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
+ 	    char *field_str, unsigned long *flags)
+@@ -715,10 +1122,28 @@ struct hist_field *parse_atom(struct his
+ 			      struct trace_event_file *file, char *str,
+ 			      unsigned long *flags, char *var_name)
+ {
++	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
+ 	struct ftrace_event_field *field = NULL;
+ 	struct hist_field *hist_field = NULL;
+ 	int ret = 0;
+ 
++	s = strchr(str, '.');
++	if (s) {
++		s = strchr(++s, '.');
++		if (s) {
++			ref_system = strsep(&str, ".");
++			ref_event = strsep(&str, ".");
++			ref_var = str;
++		}
++	}
++
++	hist_field = parse_var_ref(ref_system, ref_event, ref_var);
++	if (hist_field) {
++		hist_data->var_refs[hist_data->n_var_refs] = hist_field;
++		hist_field->var_ref_idx = hist_data->n_var_refs++;
++		return hist_field;
++	}
++
+ 	field = parse_field(hist_data, file, str, flags);
+ 	if (IS_ERR(field)) {
+ 		ret = PTR_ERR(field);
+@@ -885,6 +1310,9 @@ static struct hist_field *parse_expr(str
+ 		goto free;
+ 	}
+ 
++	operand1->read_once = true;
++	operand2->read_once = true;
++
+ 	expr->operands[0] = operand1;
+ 	expr->operands[1] = operand2;
+ 	expr->operator = field_op;
+@@ -926,43 +1354,6 @@ static int create_hitcount_val(struct hi
+ 	return 0;
+ }
+ 
+-static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
+-					 const char *var_name)
+-{
+-	struct hist_field *hist_field, *found = NULL;
+-	int i;
+-
+-	for_each_hist_field(i, hist_data) {
+-		hist_field = hist_data->fields[i];
+-		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
+-		    strcmp(hist_field->var.name, var_name) == 0) {
+-			found = hist_field;
+-			break;
+-		}
+-	}
+-
+-	return found;
+-}
+-
+-static struct hist_field *find_var(struct trace_event_file *file,
+-				   const char *var_name)
+-{
+-	struct hist_trigger_data *hist_data;
+-	struct event_trigger_data *test;
+-	struct hist_field *hist_field;
+-
+-	list_for_each_entry_rcu(test, &file->triggers, list) {
+-		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+-			hist_data = test->private_data;
+-			hist_field = find_var_field(hist_data, var_name);
+-			if (hist_field)
+-				return hist_field;
+-		}
+-	}
+-
+-	return NULL;
+-}
+-
+ static int create_val_field(struct hist_trigger_data *hist_data,
+ 			    unsigned int val_idx,
+ 			    struct trace_event_file *file,
+@@ -1119,6 +1510,12 @@ static int create_key_field(struct hist_
+ 			}
+ 		}
+ 
++		if (hist_field->flags & HIST_FIELD_FL_VAR_REF) {
++			destroy_hist_field(hist_field, 0);
++			ret = -EINVAL;
++			goto out;
++		}
++
+ 		key_size = hist_field->size;
+ 	}
+ 
+@@ -1378,21 +1775,6 @@ static int create_tracing_map_fields(str
+ 	return 0;
+ }
+ 
+-static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
+-{
+-	struct hist_field *key_field;
+-	unsigned int i;
+-
+-	for_each_hist_key_field(i, hist_data) {
+-		key_field = hist_data->fields[i];
+-
+-		if (key_field->flags & HIST_FIELD_FL_EXECNAME)
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ static struct hist_trigger_data *
+ create_hist_data(unsigned int map_bits,
+ 		 struct hist_trigger_attrs *attrs,
+@@ -1418,8 +1800,7 @@ create_hist_data(unsigned int map_bits,
+ 	if (ret)
+ 		goto free;
+ 
+-	if (need_tracing_map_ops(hist_data))
+-		map_ops = &hist_trigger_elt_comm_ops;
++	map_ops = &hist_trigger_elt_data_ops;
+ 
+ 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
+ 					    map_ops, hist_data);
+@@ -1433,10 +1814,6 @@ create_hist_data(unsigned int map_bits,
+ 	if (ret)
+ 		goto free;
+ 
+-	ret = tracing_map_init(hist_data->map);
+-	if (ret)
+-		goto free;
+-
+ 	hist_data->event_file = file;
+  out:
+ 	return hist_data;
+@@ -1452,15 +1829,20 @@ create_hist_data(unsigned int map_bits,
+ 
+ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
+ 				    struct tracing_map_elt *elt, void *rec,
+-				    struct ring_buffer_event *rbe)
++				    struct ring_buffer_event *rbe,
++				    u64 *var_ref_vals)
+ {
++	struct hist_elt_data *elt_data;
+ 	struct hist_field *hist_field;
+ 	unsigned int i, var_idx;
+ 	u64 hist_val;
+ 
++	elt_data = elt->private_data;
++	elt_data->var_ref_vals = var_ref_vals;
++
+ 	for_each_hist_val_field(i, hist_data) {
+ 		hist_field = hist_data->fields[i];
+-		hist_val = hist_field->fn(hist_field, rbe, rec);
++		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
+ 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
+ 			var_idx = hist_field->var.idx;
+ 			tracing_map_set_var(elt, var_idx, hist_val);
+@@ -1473,7 +1855,7 @@ static void hist_trigger_elt_update(stru
+ 	for_each_hist_key_field(i, hist_data) {
+ 		hist_field = hist_data->fields[i];
+ 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
+-			hist_val = hist_field->fn(hist_field, rbe, rec);
++			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
+ 			var_idx = hist_field->var.idx;
+ 			tracing_map_set_var(elt, var_idx, hist_val);
+ 		}
+@@ -1510,10 +1892,11 @@ static void event_hist_trigger(struct ev
+ 	struct hist_trigger_data *hist_data = data->private_data;
+ 	bool use_compound_key = (hist_data->n_keys > 1);
+ 	unsigned long entries[HIST_STACKTRACE_DEPTH];
++	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
+ 	char compound_key[HIST_KEY_SIZE_MAX];
++	struct tracing_map_elt *elt = NULL;
+ 	struct stack_trace stacktrace;
+ 	struct hist_field *key_field;
+-	struct tracing_map_elt *elt;
+ 	u64 field_contents;
+ 	void *key = NULL;
+ 	unsigned int i;
+@@ -1534,7 +1917,7 @@ static void event_hist_trigger(struct ev
+ 
+ 			key = entries;
+ 		} else {
+-			field_contents = key_field->fn(key_field, rec, rbe);
++			field_contents = key_field->fn(key_field, elt, rbe, rec);
+ 			if (key_field->flags & HIST_FIELD_FL_STRING) {
+ 				key = (void *)(unsigned long)field_contents;
+ 				use_compound_key = true;
+@@ -1549,9 +1932,15 @@ static void event_hist_trigger(struct ev
+ 	if (use_compound_key)
+ 		key = compound_key;
+ 
++	if (hist_data->n_var_refs &&
++	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
++		return;
++
+ 	elt = tracing_map_insert(hist_data->map, key);
+-	if (elt)
+-		hist_trigger_elt_update(hist_data, elt, rec, rbe);
++	if (!elt)
++		return;
++
++	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
+ }
+ 
+ static void hist_trigger_stacktrace_print(struct seq_file *m,
+@@ -1608,7 +1997,8 @@ hist_trigger_entry_print(struct seq_file
+ 			seq_printf(m, "%s: [%llx] %-55s", field_name,
+ 				   uval, str);
+ 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+-			char *comm = elt->private_data;
++			struct hist_elt_data *elt_data = elt->private_data;
++			char *comm = elt_data->comm;
+ 
+ 			uval = *(u64 *)(key + key_field->offset);
+ 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
+@@ -1653,7 +2043,8 @@ hist_trigger_entry_print(struct seq_file
+ 		field_name = hist_field_name(hist_data->fields[i], 0);
+ 
+ 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
+-		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
++		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR ||
++		    hist_data->fields[i]->flags & HIST_FIELD_FL_VAR_REF)
+ 			continue;
+ 
+ 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
+@@ -1925,7 +2316,11 @@ static void event_hist_trigger_free(stru
+ 	if (!data->ref) {
+ 		if (data->name)
+ 			del_named_trigger(data);
++
+ 		trigger_data_free(data);
++
++		remove_hist_vars(hist_data);
++
+ 		destroy_hist_data(hist_data);
+ 	}
+ }
+@@ -2139,23 +2534,55 @@ static int hist_register_trigger(char *g
+ 			goto out;
+ 	}
+ 
+-	list_add_rcu(&data->list, &file->triggers);
+ 	ret++;
+ 
+-	update_cond_flag(file);
+-
+ 	if (hist_data->enable_timestamps)
+ 		tracing_set_time_stamp_abs(file->tr, true);
++ out:
++	return ret;
++}
++
++static int hist_trigger_enable(struct event_trigger_data *data,
++			       struct trace_event_file *file)
++{
++	int ret = 0;
++
++	list_add_rcu(&data->list, &file->triggers);
++
++	update_cond_flag(file);
+ 
+ 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ 		list_del_rcu(&data->list);
+ 		update_cond_flag(file);
+ 		ret--;
+ 	}
+- out:
++
+ 	return ret;
+ }
+ 
++static bool hist_trigger_check_refs(struct event_trigger_data *data,
++				    struct trace_event_file *file)
++{
++	struct hist_trigger_data *hist_data = data->private_data;
++	struct event_trigger_data *test, *named_data = NULL;
++
++	if (hist_data->attrs->name)
++		named_data = find_named_trigger(hist_data->attrs->name);
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			if (!hist_trigger_match(data, test, named_data, false))
++				continue;
++			hist_data = test->private_data;
++			if (check_var_refs(hist_data))
++				return true;
++			break;
++		}
++	}
++
++	return false;
++}
++
+ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ 				    struct event_trigger_data *data,
+ 				    struct trace_event_file *file)
+@@ -2186,10 +2613,32 @@ static void hist_unregister_trigger(char
+ 		tracing_set_time_stamp_abs(file->tr, false);
+ }
+ 
++static bool hist_file_check_refs(struct trace_event_file *file)
++{
++	struct hist_trigger_data *hist_data;
++	struct event_trigger_data *test;
++
++	printk("func: %s\n", __func__);
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			hist_data = test->private_data;
++			if (check_var_refs(hist_data))
++				return true;
++			break;
++		}
++	}
++
++	return false;
++}
++
+ static void hist_unreg_all(struct trace_event_file *file)
+ {
+ 	struct event_trigger_data *test, *n;
+ 
++	if (hist_file_check_refs(file))
++	    return;
++
+ 	list_for_each_entry_safe(test, n, &file->triggers, list) {
+ 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ 			list_del_rcu(&test->list);
+@@ -2262,6 +2711,11 @@ static int event_hist_trigger_func(struc
+ 	}
+ 
+ 	if (remove) {
++		if (hist_trigger_check_refs(trigger_data, file)) {
++			ret = -EBUSY;
++			goto out_free;
++		}
++
+ 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ 		ret = 0;
+ 		goto out_free;
+@@ -2279,14 +2733,33 @@ static int event_hist_trigger_func(struc
+ 		goto out_free;
+ 	} else if (ret < 0)
+ 		goto out_free;
++
++	if (get_named_trigger_data(trigger_data))
++		goto enable;
++
++	if (has_hist_vars(hist_data))
++		save_hist_vars(hist_data);
++
++	ret = tracing_map_init(hist_data->map);
++	if (ret)
++		goto out_unreg;
++enable:
++	ret = hist_trigger_enable(trigger_data, file);
++	if (ret)
++		goto out_unreg;
++
+ 	/* Just return zero, not the number of registered triggers */
+ 	ret = 0;
+  out:
+ 	return ret;
++ out_unreg:
++	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+  out_free:
+ 	if (cmd_ops->set_filter)
+ 		cmd_ops->set_filter(NULL, trigger_data, NULL);
+ 
++	remove_hist_vars(hist_data);
++
+ 	kfree(trigger_data);
+ 
+ 	destroy_hist_data(hist_data);
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -919,6 +919,12 @@ void set_named_trigger_data(struct event
+ 	data->named_data = named_data;
+ }
+ 
++struct event_trigger_data *
++get_named_trigger_data(struct event_trigger_data *data)
++{
++	return data->named_data;
++}
++
+ static void
+ traceon_trigger(struct event_trigger_data *data, void *rec,
+ 		struct ring_buffer_event *event)
diff --git a/debian/patches/features/all/rt/0020-tracing-Add-support-for-dynamic-tracepoints.patch b/debian/patches/features/all/rt/0020-tracing-Add-support-for-dynamic-tracepoints.patch
new file mode 100644
index 0000000..953acd1
--- /dev/null
+++ b/debian/patches/features/all/rt/0020-tracing-Add-support-for-dynamic-tracepoints.patch
@@ -0,0 +1,196 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:21 -0500
+Subject: [PATCH 20/32] tracing: Add support for dynamic tracepoints
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+The tracepoint infrastructure assumes statically-defined tracepoints
+and uses static_keys for tracepoint enablement.  In order to define
+tracepoints on the fly, we need to have a dynamic counterpart.
+
+Add a dynamic_tracepoint_probe_register() and a dynamic param onto
+tracepoint_probe_unregister() for this purpose.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/tracepoint.h  |   11 +++++++----
+ kernel/trace/trace_events.c |    4 ++--
+ kernel/tracepoint.c         |   42 ++++++++++++++++++++++++++++++------------
+ 3 files changed, 39 insertions(+), 18 deletions(-)
+
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -37,9 +37,12 @@ extern int
+ tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
+ extern int
+ tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
+-			       int prio);
++			       int prio, bool dynamic);
++extern int dynamic_tracepoint_probe_register(struct tracepoint *tp,
++					     void *probe, void *data);
+ extern int
+-tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
++tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data,
++			    bool dynamic);
+ extern void
+ for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+ 		void *priv);
+@@ -206,13 +209,13 @@ extern void syscall_unregfunc(void);
+ 				   int prio)				\
+ 	{								\
+ 		return tracepoint_probe_register_prio(&__tracepoint_##name, \
+-					      (void *)probe, data, prio); \
++				      (void *)probe, data, prio, false); \
+ 	}								\
+ 	static inline int						\
+ 	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
+ 	{								\
+ 		return tracepoint_probe_unregister(&__tracepoint_##name,\
+-						(void *)probe, data);	\
++					   (void *)probe, data, false); \
+ 	}								\
+ 	static inline void						\
+ 	check_trace_callback_type_##name(void (*cb)(data_proto))	\
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -297,7 +297,7 @@ int trace_event_reg(struct trace_event_c
+ 	case TRACE_REG_UNREGISTER:
+ 		tracepoint_probe_unregister(call->tp,
+ 					    call->class->probe,
+-					    file);
++					    file, false);
+ 		return 0;
+ 
+ #ifdef CONFIG_PERF_EVENTS
+@@ -308,7 +308,7 @@ int trace_event_reg(struct trace_event_c
+ 	case TRACE_REG_PERF_UNREGISTER:
+ 		tracepoint_probe_unregister(call->tp,
+ 					    call->class->perf_probe,
+-					    call);
++					    call, false);
+ 		return 0;
+ 	case TRACE_REG_PERF_OPEN:
+ 	case TRACE_REG_PERF_CLOSE:
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -192,12 +192,15 @@ static void *func_remove(struct tracepoi
+  * Add the probe function to a tracepoint.
+  */
+ static int tracepoint_add_func(struct tracepoint *tp,
+-			       struct tracepoint_func *func, int prio)
++			       struct tracepoint_func *func, int prio,
++			       bool dynamic)
+ {
+ 	struct tracepoint_func *old, *tp_funcs;
+ 	int ret;
+ 
+-	if (tp->regfunc && !static_key_enabled(&tp->key)) {
++	if (tp->regfunc &&
++	    ((dynamic && !(atomic_read(&tp->key.enabled) > 0)) ||
++	     !static_key_enabled(&tp->key))) {
+ 		ret = tp->regfunc();
+ 		if (ret < 0)
+ 			return ret;
+@@ -219,7 +222,9 @@ static int tracepoint_add_func(struct tr
+ 	 * is used.
+ 	 */
+ 	rcu_assign_pointer(tp->funcs, tp_funcs);
+-	if (!static_key_enabled(&tp->key))
++	if (dynamic && !(atomic_read(&tp->key.enabled) > 0))
++		atomic_inc(&tp->key.enabled);
++	else if (!dynamic && !static_key_enabled(&tp->key))
+ 		static_key_slow_inc(&tp->key);
+ 	release_probes(old);
+ 	return 0;
+@@ -232,7 +237,7 @@ static int tracepoint_add_func(struct tr
+  * by preempt_disable around the call site.
+  */
+ static int tracepoint_remove_func(struct tracepoint *tp,
+-		struct tracepoint_func *func)
++				  struct tracepoint_func *func, bool dynamic)
+ {
+ 	struct tracepoint_func *old, *tp_funcs;
+ 
+@@ -246,10 +251,14 @@ static int tracepoint_remove_func(struct
+ 
+ 	if (!tp_funcs) {
+ 		/* Removed last function */
+-		if (tp->unregfunc && static_key_enabled(&tp->key))
++		if (tp->unregfunc &&
++		    ((dynamic && (atomic_read(&tp->key.enabled) > 0)) ||
++		     static_key_enabled(&tp->key)))
+ 			tp->unregfunc();
+ 
+-		if (static_key_enabled(&tp->key))
++		if (dynamic && (atomic_read(&tp->key.enabled) > 0))
++			atomic_dec(&tp->key.enabled);
++		else if (!dynamic && static_key_enabled(&tp->key))
+ 			static_key_slow_dec(&tp->key);
+ 	}
+ 	rcu_assign_pointer(tp->funcs, tp_funcs);
+@@ -258,7 +267,7 @@ static int tracepoint_remove_func(struct
+ }
+ 
+ /**
+- * tracepoint_probe_register -  Connect a probe to a tracepoint
++ * tracepoint_probe_register_prio -  Connect a probe to a tracepoint
+  * @tp: tracepoint
+  * @probe: probe handler
+  * @data: tracepoint data
+@@ -271,7 +280,7 @@ static int tracepoint_remove_func(struct
+  * within module exit functions.
+  */
+ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
+-				   void *data, int prio)
++				   void *data, int prio, bool dynamic)
+ {
+ 	struct tracepoint_func tp_func;
+ 	int ret;
+@@ -280,7 +289,7 @@ int tracepoint_probe_register_prio(struc
+ 	tp_func.func = probe;
+ 	tp_func.data = data;
+ 	tp_func.prio = prio;
+-	ret = tracepoint_add_func(tp, &tp_func, prio);
++	ret = tracepoint_add_func(tp, &tp_func, prio, dynamic);
+ 	mutex_unlock(&tracepoints_mutex);
+ 	return ret;
+ }
+@@ -301,10 +310,18 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_regis
+  */
+ int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
+ {
+-	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
++	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO, false);
+ }
+ EXPORT_SYMBOL_GPL(tracepoint_probe_register);
+ 
++int dynamic_tracepoint_probe_register(struct tracepoint *tp, void *probe,
++				      void *data)
++{
++	return tracepoint_probe_register_prio(tp, probe, data,
++					      TRACEPOINT_DEFAULT_PRIO, true);
++}
++EXPORT_SYMBOL_GPL(dynamic_tracepoint_probe_register);
++
+ /**
+  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
+  * @tp: tracepoint
+@@ -313,7 +330,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_regis
+  *
+  * Returns 0 if ok, error value on error.
+  */
+-int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
++int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data,
++				bool dynamic)
+ {
+ 	struct tracepoint_func tp_func;
+ 	int ret;
+@@ -321,7 +339,7 @@ int tracepoint_probe_unregister(struct t
+ 	mutex_lock(&tracepoints_mutex);
+ 	tp_func.func = probe;
+ 	tp_func.data = data;
+-	ret = tracepoint_remove_func(tp, &tp_func);
++	ret = tracepoint_remove_func(tp, &tp_func, dynamic);
+ 	mutex_unlock(&tracepoints_mutex);
+ 	return ret;
+ }
diff --git a/debian/patches/features/all/rt/0021-tracing-Add-hist-trigger-action-hook.patch b/debian/patches/features/all/rt/0021-tracing-Add-hist-trigger-action-hook.patch
new file mode 100644
index 0000000..7c87bca
--- /dev/null
+++ b/debian/patches/features/all/rt/0021-tracing-Add-hist-trigger-action-hook.patch
@@ -0,0 +1,228 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:22 -0500
+Subject: [PATCH 21/32] tracing: Add hist trigger action hook
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add a hook for executing extra actions whenever a histogram entry is
+added or updated.
+
+The default 'action' when a hist entry is added to a histogram is to
+update the set of values associated with it.  Some applications may
+want to perform additional actions at that point, such as generate
+another event, or compare and save a maximum.
+
+Add a simple framework for doing that; specific actions will be
+implemented on top of it in later patches.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  114 +++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 111 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -33,6 +33,7 @@ typedef u64 (*hist_field_fn_t) (struct h
+ 
+ #define HIST_FIELD_OPERANDS_MAX	2
+ #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
++#define HIST_ACTIONS_MAX	8
+ 
+ enum field_op_id {
+ 	FIELD_OP_NONE,
+@@ -233,6 +234,9 @@ struct hist_trigger_attrs {
+ 
+ 	char		*assignment_str[TRACING_MAP_VARS_MAX];
+ 	unsigned int	n_assignments;
++
++	char		*action_str[HIST_ACTIONS_MAX];
++	unsigned int	n_actions;
+ };
+ 
+ struct hist_trigger_data {
+@@ -252,6 +256,21 @@ struct hist_trigger_data {
+ 	bool				remove;
+ 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
+ 	unsigned int			n_var_refs;
++
++	struct action_data		*actions[HIST_ACTIONS_MAX];
++	unsigned int			n_actions;
++};
++
++struct action_data;
++
++typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
++			     struct tracing_map_elt *elt, void *rec,
++			     struct ring_buffer_event *rbe,
++			     struct action_data *data, u64 *var_ref_vals);
++
++struct action_data {
++	action_fn_t		fn;
++	unsigned int		var_ref_idx;
+ };
+ 
+ static u64 hist_field_timestamp(struct hist_field *hist_field,
+@@ -681,6 +700,9 @@ static void destroy_hist_trigger_attrs(s
+ 	for (i = 0; i < attrs->n_assignments; i++)
+ 		kfree(attrs->assignment_str[i]);
+ 
++	for (i = 0; i < attrs->n_actions; i++)
++		kfree(attrs->action_str[i]);
++
+ 	kfree(attrs->name);
+ 	kfree(attrs->sort_key_str);
+ 	kfree(attrs->keys_str);
+@@ -688,6 +710,16 @@ static void destroy_hist_trigger_attrs(s
+ 	kfree(attrs);
+ }
+ 
++static int parse_action(char *str, struct hist_trigger_attrs *attrs)
++{
++	int ret = 0;
++
++	if (attrs->n_actions >= HIST_ACTIONS_MAX)
++		return ret;
++
++	return ret;
++}
++
+ static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
+ {
+ 	int ret = 0;
+@@ -755,8 +787,9 @@ static struct hist_trigger_attrs *parse_
+ 		else if (strcmp(str, "clear") == 0)
+ 			attrs->clear = true;
+ 		else {
+-			ret = -EINVAL;
+-			goto free;
++			ret = parse_action(str, attrs);
++			if (ret)
++				goto free;
+ 		}
+ 	}
+ 
+@@ -1722,11 +1755,63 @@ static int create_sort_keys(struct hist_
+ 	return ret;
+ }
+ 
++static void destroy_actions(struct hist_trigger_data *hist_data)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_actions; i++) {
++		struct action_data *data = hist_data->actions[i];
++
++		kfree(data);
++	}
++}
++
++static int create_actions(struct hist_trigger_data *hist_data,
++			  struct trace_event_file *file)
++{
++	unsigned int i;
++	int ret = 0;
++	char *str;
++
++	for (i = 0; i < hist_data->attrs->n_actions; i++) {
++		str = hist_data->attrs->action_str[i];
++	}
++
++	return ret;
++}
++
++static void print_actions(struct seq_file *m,
++			  struct hist_trigger_data *hist_data,
++			  struct tracing_map_elt *elt)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_actions; i++) {
++		struct action_data *data = hist_data->actions[i];
++	}
++}
++
++static void print_actions_spec(struct seq_file *m,
++			       struct hist_trigger_data *hist_data)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_actions; i++) {
++		struct action_data *data = hist_data->actions[i];
++	}
++}
++
+ static void destroy_hist_data(struct hist_trigger_data *hist_data)
+ {
++	if (!hist_data)
++		return;
++
+ 	destroy_hist_trigger_attrs(hist_data->attrs);
+ 	destroy_hist_fields(hist_data);
+ 	tracing_map_destroy(hist_data->map);
++
++	destroy_actions(hist_data);
++
+ 	kfree(hist_data);
+ }
+ 
+@@ -1886,6 +1971,20 @@ static inline void add_to_key(char *comp
+ 	memcpy(compound_key + key_field->offset, key, size);
+ }
+ 
++static void
++hist_trigger_actions(struct hist_trigger_data *hist_data,
++		     struct tracing_map_elt *elt, void *rec,
++		     struct ring_buffer_event *rbe, u64 *var_ref_vals)
++{
++	struct action_data *data;
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_actions; i++) {
++		data = hist_data->actions[i];
++		data->fn(hist_data, elt, rec, rbe, data, var_ref_vals);
++	}
++}
++
+ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
+ 			       struct ring_buffer_event *rbe)
+ {
+@@ -1941,6 +2040,9 @@ static void event_hist_trigger(struct ev
+ 		return;
+ 
+ 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
++
++	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
++		hist_trigger_actions(hist_data, elt, rec, rbe, var_ref_vals);
+ }
+ 
+ static void hist_trigger_stacktrace_print(struct seq_file *m,
+@@ -2278,6 +2380,8 @@ static int event_hist_trigger_print(stru
+ 	}
+ 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
+ 
++	print_actions_spec(m, hist_data);
++
+ 	if (data->filter_str)
+ 		seq_printf(m, " if %s", data->filter_str);
+ 
+@@ -2740,6 +2844,10 @@ static int event_hist_trigger_func(struc
+ 	if (has_hist_vars(hist_data))
+ 		save_hist_vars(hist_data);
+ 
++	ret = create_actions(hist_data, file);
++	if (ret)
++		goto out_unreg;
++
+ 	ret = tracing_map_init(hist_data->map);
+ 	if (ret)
+ 		goto out_unreg;
+@@ -2761,8 +2869,8 @@ static int event_hist_trigger_func(struc
+ 	remove_hist_vars(hist_data);
+ 
+ 	kfree(trigger_data);
+-
+ 	destroy_hist_data(hist_data);
++
+ 	goto out;
+ }
+ 
diff --git a/debian/patches/features/all/rt/0022-tracing-Add-support-for-synthetic-events.patch b/debian/patches/features/all/rt/0022-tracing-Add-support-for-synthetic-events.patch
new file mode 100644
index 0000000..e9a1796
--- /dev/null
+++ b/debian/patches/features/all/rt/0022-tracing-Add-support-for-synthetic-events.patch
@@ -0,0 +1,822 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:23 -0500
+Subject: [PATCH 22/32] tracing: Add support for 'synthetic' events
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Synthetic events are user-defined events generated from hist trigger
+variables saved from one or more other events.
+
+To define a synthetic event, the user writes a simple specification
+consisting of the name of the new event along with one or more
+variables and their type(s), to the tracing/synthetic_events file.
+
+For instance, the following creates a new event named 'wakeup_latency'
+with 3 fields: lat, pid, and prio:
+
+    # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
+      /sys/kernel/debug/tracing/synthetic_events
+
+Reading the tracing/synthetic_events file lists all the
+currently-defined synthetic events, in this case the event we defined
+above:
+
+    # cat /sys/kernel/debug/tracing/synthetic_events
+    wakeup_latency u64 lat; pid_t pid; int prio
+
+At this point, the synthetic event is ready to use, and a histogram
+can be defined using it:
+
+    # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
+    /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
+
+The new event is created under the tracing/events/synthetic/ directory
+and looks and behaves just like any other event:
+
+    # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
+      enable  filter  format  hist  id  trigger
+
+Although a histogram can be defined for it, nothing will happen until
+an action traces that event via the trace_synth() function.
+The trace_synth() function is very similar to all the other trace_*
+invocations spread throughout the kernel, except in this case the
+trace_ function and its corresponding tracepoint isn't statically
+generated but defined by the user at run-time.
+
+How this can be automatically hooked up via a hist trigger 'action' is
+discussed in a subsequent patch.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  738 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 738 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -20,10 +20,14 @@
+ #include <linux/slab.h>
+ #include <linux/stacktrace.h>
+ #include <linux/rculist.h>
++#include <linux/tracefs.h>
+ 
+ #include "tracing_map.h"
+ #include "trace.h"
+ 
++#define SYNTH_SYSTEM		"synthetic"
++#define SYNTH_FIELDS_MAX	16
++
+ struct hist_field;
+ 
+ typedef u64 (*hist_field_fn_t) (struct hist_field *field,
+@@ -261,6 +265,23 @@ struct hist_trigger_data {
+ 	unsigned int			n_actions;
+ };
+ 
++struct synth_field {
++	char *type;
++	char *name;
++	unsigned int size;
++	bool is_signed;
++};
++
++struct synth_event {
++	struct list_head			list;
++	char					*name;
++	struct synth_field			**fields;
++	unsigned int				n_fields;
++	struct trace_event_class		class;
++	struct trace_event_call			call;
++	struct tracepoint			*tp;
++};
++
+ struct action_data;
+ 
+ typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
+@@ -273,6 +294,688 @@ struct action_data {
+ 	unsigned int		var_ref_idx;
+ };
+ 
++static LIST_HEAD(synth_event_list);
++static DEFINE_MUTEX(synth_event_mutex);
++
++struct synth_trace_event {
++	struct trace_entry	ent;
++	int			n_fields;
++	u64			fields[];
++};
++
++static int synth_event_define_fields(struct trace_event_call *call)
++{
++	struct synth_trace_event trace;
++	int offset = offsetof(typeof(trace), fields);
++	struct synth_event *event = call->data;
++	unsigned int i, size;
++	char *name, *type;
++	bool is_signed;
++	int ret = 0;
++
++	for (i = 0; i < event->n_fields; i++) {
++		size = event->fields[i]->size;
++		is_signed = event->fields[i]->is_signed;
++		type = event->fields[i]->type;
++		name = event->fields[i]->name;
++		ret = trace_define_field(call, type, name, offset, size,
++					 is_signed, FILTER_OTHER);
++		offset += sizeof(u64);
++	}
++
++	return ret;
++}
++
++static enum print_line_t print_synth_event(struct trace_iterator *iter,
++					   int flags,
++					   struct trace_event *event)
++{
++	struct trace_array *tr = iter->tr;
++	struct trace_seq *s = &iter->seq;
++	struct synth_trace_event *entry;
++	struct synth_event *se;
++	unsigned int i;
++
++	entry = (struct synth_trace_event *)iter->ent;
++	se = container_of(event, struct synth_event, call.event);
++
++	trace_seq_printf(s, "%s: ", se->name);
++
++	for (i = 0; i < entry->n_fields; i++) {
++		if (trace_seq_has_overflowed(s))
++			goto end;
++
++		/* parameter types */
++		if (tr->trace_flags & TRACE_ITER_VERBOSE)
++			trace_seq_printf(s, "%s ", "u64");
++
++		/* parameter values */
++		trace_seq_printf(s, "%s=%llu%s", se->fields[i]->name,
++				 entry->fields[i],
++				 i == entry->n_fields - 1 ? "" : ", ");
++	}
++end:
++	trace_seq_putc(s, '\n');
++
++	return trace_handle_return(s);
++}
++
++static struct trace_event_functions synth_event_funcs = {
++	.trace		= print_synth_event
++};
++
++static notrace void trace_event_raw_event_synth(void *__data,
++						u64 *var_ref_vals,
++						unsigned int var_ref_idx)
++{
++	struct trace_event_file *trace_file = __data;
++	struct synth_trace_event *entry;
++	struct trace_event_buffer fbuffer;
++	int fields_size;
++	unsigned int i;
++
++	struct synth_event *event;
++
++	event = trace_file->event_call->data;
++
++	if (trace_trigger_soft_disabled(trace_file))
++		return;
++
++	fields_size = event->n_fields * sizeof(u64);
++
++	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
++					   sizeof(*entry) + fields_size);
++	if (!entry)
++		return;
++
++	entry->n_fields = event->n_fields;
++
++	for (i = 0; i < event->n_fields; i++)
++		entry->fields[i] = var_ref_vals[var_ref_idx + i];
++
++	trace_event_buffer_commit(&fbuffer);
++}
++
++static void free_synth_event_print_fmt(struct trace_event_call *call)
++{
++	if (call)
++		kfree(call->print_fmt);
++}
++
++static int __set_synth_event_print_fmt(struct synth_event *event,
++				       char *buf, int len)
++{
++	int pos = 0;
++	int i;
++
++	/* When len=0, we just calculate the needed length */
++#define LEN_OR_ZERO (len ? len - pos : 0)
++
++	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
++	for (i = 0; i < event->n_fields; i++) {
++		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
++				event->fields[i]->name, sizeof(u64),
++				i == event->n_fields - 1 ? "" : ", ");
++	}
++	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
++
++	for (i = 0; i < event->n_fields; i++) {
++		pos += snprintf(buf + pos, LEN_OR_ZERO,
++				", ((u64)(REC->%s))", event->fields[i]->name);
++	}
++
++#undef LEN_OR_ZERO
++
++	/* return the length of print_fmt */
++	return pos;
++}
++
++static int set_synth_event_print_fmt(struct trace_event_call *call)
++{
++	struct synth_event *event = call->data;
++	char *print_fmt;
++	int len;
++
++	/* First: called with 0 length to calculate the needed length */
++	len = __set_synth_event_print_fmt(event, NULL, 0);
++
++	print_fmt = kmalloc(len + 1, GFP_KERNEL);
++	if (!print_fmt)
++		return -ENOMEM;
++
++	/* Second: actually write the @print_fmt */
++	__set_synth_event_print_fmt(event, print_fmt, len + 1);
++	call->print_fmt = print_fmt;
++
++	return 0;
++}
++
++int dynamic_trace_event_reg(struct trace_event_call *call,
++			    enum trace_reg type, void *data)
++{
++	struct trace_event_file *file = data;
++
++	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
++	switch (type) {
++	case TRACE_REG_REGISTER:
++		return dynamic_tracepoint_probe_register(call->tp,
++							 call->class->probe,
++							 file);
++	case TRACE_REG_UNREGISTER:
++		tracepoint_probe_unregister(call->tp,
++					    call->class->probe,
++					    file, true);
++		return 0;
++
++#ifdef CONFIG_PERF_EVENTS
++	case TRACE_REG_PERF_REGISTER:
++		return dynamic_tracepoint_probe_register(call->tp,
++							 call->class->perf_probe,
++							 call);
++	case TRACE_REG_PERF_UNREGISTER:
++		tracepoint_probe_unregister(call->tp,
++					    call->class->perf_probe,
++					    call, true);
++		return 0;
++	case TRACE_REG_PERF_OPEN:
++	case TRACE_REG_PERF_CLOSE:
++	case TRACE_REG_PERF_ADD:
++	case TRACE_REG_PERF_DEL:
++		return 0;
++#endif
++	}
++	return 0;
++}
++
++static void free_synth_field(struct synth_field *field)
++{
++	kfree(field->type);
++	kfree(field->name);
++	kfree(field);
++}
++
++static bool synth_field_signed(char *type)
++{
++	if (strncmp(type, "u", 1) == 0)
++		return false;
++
++	return true;
++}
++
++static unsigned int synth_field_size(char *type)
++{
++	unsigned int size = 0;
++
++	if (strcmp(type, "s64") == 0)
++		size = sizeof(s64);
++	else if (strcmp(type, "u64") == 0)
++		size = sizeof(u64);
++	else if (strcmp(type, "s32") == 0)
++		size = sizeof(s32);
++	else if (strcmp(type, "u32") == 0)
++		size = sizeof(u32);
++	else if (strcmp(type, "s16") == 0)
++		size = sizeof(s16);
++	else if (strcmp(type, "u16") == 0)
++		size = sizeof(u16);
++	else if (strcmp(type, "s8") == 0)
++		size = sizeof(s8);
++	else if (strcmp(type, "u8") == 0)
++		size = sizeof(u8);
++	else if (strcmp(type, "char") == 0)
++		size = sizeof(char);
++	else if (strcmp(type, "unsigned char") == 0)
++		size = sizeof(unsigned char);
++	else if (strcmp(type, "int") == 0)
++		size = sizeof(int);
++	else if (strcmp(type, "unsigned int") == 0)
++		size = sizeof(unsigned int);
++	else if (strcmp(type, "long") == 0)
++		size = sizeof(long);
++	else if (strcmp(type, "unsigned long") == 0)
++		size = sizeof(unsigned long);
++	else if (strcmp(type, "pid_t") == 0)
++		size = sizeof(pid_t);
++	else if (strstr(type, "[") == 0)
++		size = sizeof(u64);
++
++	return size;
++}
++
++static struct synth_field *parse_synth_field(char *field_type,
++					     char *field_name)
++{
++	struct synth_field *field;
++	int len, ret = 0;
++	char *array;
++
++	if (field_type[0] == ';')
++		field_type++;
++
++	len = strlen(field_name);
++	if (field_name[len - 1] == ';')
++		field_name[len - 1] = '\0';
++
++	field = kzalloc(sizeof(*field), GFP_KERNEL);
++	if (!field)
++		return ERR_PTR(-ENOMEM);
++
++	len = strlen(field_type) + 1;
++	array = strchr(field_name, '[');
++	if (array)
++		len += strlen(array);
++	field->type = kzalloc(len, GFP_KERNEL);
++	if (!field->type) {
++		ret = -ENOMEM;
++		goto free;
++	}
++	strcat(field->type, field_type);
++	if (array)
++		strcat(field->type, array);
++
++	field->size = synth_field_size(field->type);
++	if (!field->size) {
++		ret = -EINVAL;
++		goto free;
++	}
++
++	field->is_signed = synth_field_signed(field->type);
++
++	field->name = kstrdup(field_name, GFP_KERNEL);
++	if (!field->name) {
++		ret = -ENOMEM;
++		goto free;
++	}
++ out:
++	return field;
++ free:
++	free_synth_field(field);
++	field = ERR_PTR(ret);
++	goto out;
++}
++
++static void free_synth_tracepoint(struct tracepoint *tp)
++{
++	if (!tp)
++		return;
++
++	kfree(tp->name);
++	kfree(tp);
++}
++
++static struct tracepoint *alloc_synth_tracepoint(char *name)
++{
++	struct tracepoint *tp;
++	int ret = 0;
++
++	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
++	if (!tp) {
++		ret = -ENOMEM;
++		goto free;
++	}
++
++	tp->name = kstrdup(name, GFP_KERNEL);
++	if (!tp->name) {
++		ret = -ENOMEM;
++		goto free;
++	}
++
++	return tp;
++ free:
++	free_synth_tracepoint(tp);
++
++	return ERR_PTR(ret);
++}
++
++static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
++			       unsigned int var_ref_idx)
++{
++	struct tracepoint *tp = event->tp;
++
++	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
++		struct tracepoint_func *it_func_ptr;
++		void *it_func;
++		void *__data;
++
++		if (!(cpu_online(raw_smp_processor_id())))
++			return;
++
++		it_func_ptr = rcu_dereference_sched((tp)->funcs);
++		if (it_func_ptr) {
++			do {
++				it_func = (it_func_ptr)->func;
++				__data = (it_func_ptr)->data;
++				((void(*)(void *__data, u64 *var_ref_vals, unsigned int var_ref_idx))(it_func))(__data, var_ref_vals, var_ref_idx);
++			} while ((++it_func_ptr)->func);
++		}
++	}
++}
++
++static struct synth_event *find_synth_event(const char *name)
++{
++	struct synth_event *event;
++
++	list_for_each_entry(event, &synth_event_list, list) {
++		if (strcmp(event->name, name) == 0)
++			return event;
++	}
++
++	return NULL;
++}
++
++static int register_synth_event(struct synth_event *event)
++{
++	struct trace_event_call *call = &event->call;
++	int ret = 0;
++
++	event->call.class = &event->class;
++	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
++	if (!event->class.system) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	event->tp = alloc_synth_tracepoint(event->name);
++	if (IS_ERR(event->tp)) {
++		ret = PTR_ERR(event->tp);
++		event->tp = NULL;
++		goto out;
++	}
++
++	INIT_LIST_HEAD(&call->class->fields);
++	call->event.funcs = &synth_event_funcs;
++	call->class->define_fields = synth_event_define_fields;
++
++	ret = register_trace_event(&call->event);
++	if (!ret) {
++		ret = -ENODEV;
++		goto out;
++	}
++	call->flags = TRACE_EVENT_FL_TRACEPOINT;
++	call->class->reg = dynamic_trace_event_reg;
++	call->class->probe = trace_event_raw_event_synth;
++	call->data = event;
++	call->tp = event->tp;
++	ret = trace_add_event_call(call);
++	if (ret) {
++		pr_warn("Failed to register synthetic event: %s\n",
++			trace_event_name(call));
++		goto err;
++	}
++
++	ret = set_synth_event_print_fmt(call);
++	if (ret < 0) {
++		trace_remove_event_call(call);
++		goto err;
++	}
++ out:
++	return ret;
++ err:
++	unregister_trace_event(&call->event);
++	goto out;
++}
++
++static int unregister_synth_event(struct synth_event *event)
++{
++	struct trace_event_call *call = &event->call;
++	int ret;
++
++	ret = trace_remove_event_call(call);
++	if (ret) {
++		pr_warn("Failed to remove synthetic event: %s\n",
++			trace_event_name(call));
++		free_synth_event_print_fmt(call);
++		unregister_trace_event(&call->event);
++	}
++
++	return ret;
++}
++
++static void remove_synth_event(struct synth_event *event)
++{
++	unregister_synth_event(event);
++	list_del(&event->list);
++}
++
++static int add_synth_event(struct synth_event *event)
++{
++	int ret;
++
++	ret = register_synth_event(event);
++	if (ret)
++		return ret;
++
++	list_add(&event->list, &synth_event_list);
++
++	return 0;
++}
++
++static void free_synth_event(struct synth_event *event)
++{
++	unsigned int i;
++
++	if (!event)
++		return;
++
++	for (i = 0; i < event->n_fields; i++)
++		free_synth_field(event->fields[i]);
++
++	kfree(event->fields);
++	kfree(event->name);
++	kfree(event->class.system);
++	free_synth_tracepoint(event->tp);
++	free_synth_event_print_fmt(&event->call);
++	kfree(event);
++}
++
++static struct synth_event *alloc_synth_event(char *event_name, int n_fields,
++					     struct synth_field **fields)
++{
++	struct synth_event *event;
++	unsigned int i;
++
++	event = kzalloc(sizeof(*event), GFP_KERNEL);
++	if (!event) {
++		event = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	event->name = kstrdup(event_name, GFP_KERNEL);
++	if (!event->name) {
++		kfree(event);
++		event = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	event->fields = kcalloc(n_fields, sizeof(event->fields), GFP_KERNEL);
++	if (!event->fields) {
++		free_synth_event(event);
++		event = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	for (i = 0; i < n_fields; i++)
++		event->fields[i] = fields[i];
++
++	event->n_fields = n_fields;
++ out:
++	return event;
++}
++
++static int create_synth_event(int argc, char **argv)
++{
++	struct synth_field *fields[SYNTH_FIELDS_MAX];
++	struct synth_event *event = NULL;
++	bool delete_event = false;
++	int i, n_fields = 0, ret = 0;
++	char *name;
++
++	mutex_lock(&synth_event_mutex);
++
++	/*
++	 * Argument syntax:
++	 *  - Add synthetic event: <event_name> field[;field] ...
++	 *  - Remove synthetic event: !<event_name> field[;field] ...
++	 *      where 'field' = type field_name
++	 */
++	if (argc < 1) {
++		ret = -EINVAL;
++		goto err;
++	}
++
++	name = argv[0];
++	if (name[0] == '!') {
++		delete_event = true;
++		name++;
++	}
++
++	event = find_synth_event(name);
++	if (event) {
++		if (delete_event) {
++			remove_synth_event(event);
++			goto err;
++		} else
++			ret = -EEXIST;
++		goto out;
++	} else if (delete_event) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (argc < 2) {
++		ret = -EINVAL;
++		goto err;
++	}
++
++	for (i = 1; i < argc - 1; i++) {
++		if (strcmp(argv[i], ";") == 0)
++			continue;
++		if (n_fields == SYNTH_FIELDS_MAX) {
++			ret = -EINVAL;
++			goto out;
++		}
++		fields[n_fields] = parse_synth_field(argv[i], argv[i + 1]);
++		if (!fields[n_fields])
++			goto err;
++		i++; n_fields++;
++	}
++	if (i < argc) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	event = alloc_synth_event(name, n_fields, fields);
++	if (IS_ERR(event)) {
++		ret = PTR_ERR(event);
++		event = NULL;
++		goto err;
++	}
++
++	add_synth_event(event);
++ out:
++	mutex_unlock(&synth_event_mutex);
++
++	return ret;
++ err:
++	for (i = 0; i < n_fields; i++)
++		free_synth_field(fields[i]);
++	free_synth_event(event);
++
++	goto out;
++}
++
++static int release_all_synth_events(void)
++{
++	struct synth_event *event, *e;
++	int ret = 0;
++
++	mutex_lock(&synth_event_mutex);
++
++	list_for_each_entry_safe(event, e, &synth_event_list, list) {
++		remove_synth_event(event);
++		free_synth_event(event);
++	}
++
++	mutex_unlock(&synth_event_mutex);
++
++	return ret;
++}
++
++
++static void *synth_events_seq_start(struct seq_file *m, loff_t *pos)
++{
++	mutex_lock(&synth_event_mutex);
++
++	return seq_list_start(&synth_event_list, *pos);
++}
++
++static void *synth_events_seq_next(struct seq_file *m, void *v, loff_t *pos)
++{
++	return seq_list_next(v, &synth_event_list, pos);
++}
++
++static void synth_events_seq_stop(struct seq_file *m, void *v)
++{
++	mutex_unlock(&synth_event_mutex);
++}
++
++static int synth_events_seq_show(struct seq_file *m, void *v)
++{
++	struct synth_field *field;
++	struct synth_event *event = v;
++	unsigned int i;
++
++	seq_printf(m, "%s\t", event->name);
++
++	for (i = 0; i < event->n_fields; i++) {
++		field = event->fields[i];
++
++		/* parameter values */
++		seq_printf(m, "%s %s%s", field->type, field->name,
++			   i == event->n_fields - 1 ? "" : "; ");
++	}
++
++	seq_putc(m, '\n');
++
++	return 0;
++}
++
++static const struct seq_operations synth_events_seq_op = {
++	.start  = synth_events_seq_start,
++	.next   = synth_events_seq_next,
++	.stop   = synth_events_seq_stop,
++	.show   = synth_events_seq_show
++};
++
++static int synth_events_open(struct inode *inode, struct file *file)
++{
++	int ret;
++
++	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
++		ret = release_all_synth_events();
++		if (ret < 0)
++			return ret;
++	}
++
++	return seq_open(file, &synth_events_seq_op);
++}
++
++static ssize_t synth_events_write(struct file *file,
++				  const char __user *buffer,
++				  size_t count, loff_t *ppos)
++{
++	return trace_parse_run_command(file, buffer, count, ppos,
++				       create_synth_event);
++}
++
++static const struct file_operations synth_events_fops = {
++	.open           = synth_events_open,
++	.write		= synth_events_write,
++	.read           = seq_read,
++	.llseek         = seq_lseek,
++	.release        = seq_release,
++};
++
+ static u64 hist_field_timestamp(struct hist_field *hist_field,
+ 				struct tracing_map_elt *elt,
+ 				struct ring_buffer_event *rbe,
+@@ -3028,3 +3731,38 @@ static __init void unregister_trigger_hi
+ 
+ 	return ret;
+ }
++
++static __init int trace_events_hist_init(void)
++{
++	struct dentry *entry = NULL;
++	struct trace_array *tr;
++	struct dentry *d_tracer;
++	int err = 0;
++
++	tr = top_trace_array();
++	if (!tr) {
++		err = -ENODEV;
++		goto err;
++	}
++
++	d_tracer = tracing_init_dentry();
++	if (IS_ERR(d_tracer)) {
++		err = PTR_ERR(d_tracer);
++		goto err;
++	}
++
++	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
++				    tr, &synth_events_fops);
++	if (!entry) {
++		err = -ENODEV;
++		goto err;
++	}
++
++	return err;
++ err:
++	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
++
++	return err;
++}
++
++fs_initcall(trace_events_hist_init);
diff --git a/debian/patches/features/all/rt/0023-tracing-Add-onmatch-hist-trigger-action-support.patch b/debian/patches/features/all/rt/0023-tracing-Add-onmatch-hist-trigger-action-support.patch
new file mode 100644
index 0000000..17603aa
--- /dev/null
+++ b/debian/patches/features/all/rt/0023-tracing-Add-onmatch-hist-trigger-action-support.patch
@@ -0,0 +1,1269 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:24 -0500
+Subject: [PATCH 23/32] tracing: Add 'onmatch' hist trigger action support
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add an 'onmatch(matching.event).<synthetic_event_name>(param list)'
+hist trigger action which is invoked with the set of variables or
+event fields named in the 'param list'.  The result is the generation
+of a synthetic event that consists of the values contained in those
+variables and/or fields at the time the invoking event was hit.
+
+As an example the below defines a simple synthetic event using a
+variable defined on the sched_wakeup_new event, and shows the event
+definition with unresolved fields, since the sched_wakeup_new event
+with the testpid variable hasn't been defined yet:
+
+    # echo 'wakeup_new_test pid_t pid; int prio' >> \
+      /sys/kernel/debug/tracing/synthetic_events
+
+    # cat /sys/kernel/debug/tracing/synthetic_events
+      wakeup_new_test pid_t pid; int prio
+
+The following hist trigger both defines a testpid variable and
+specifies an onmatch() trace action that uses that variable along with
+a non-variable field to generate a wakeup_new_test synthetic event
+whenever a sched_wakeup_new event occurs, which because of the 'if
+comm == "cyclictest"' filter only happens when the executable is
+cyclictest:
+
+    # echo 'hist:keys=testpid=pid:\
+      onmatch(sched.sched_wakeup_new).wakeup_new_test($testpid, prio) \
+        if comm=="cyclictest"' >> \
+      /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+
+Creating and displaying a histogram based on those events is now just
+a matter of using the fields and new synthetic event in the
+tracing/events/synthetic directory, as usual:
+
+    # echo 'hist:keys=pid,prio:sort=pid,prio' >> \
+      /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  955 ++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 940 insertions(+), 15 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -59,6 +59,7 @@ struct hist_field {
+ 	unsigned int			size;
+ 	unsigned int			offset;
+ 	unsigned int                    is_signed;
++	const char			*type;
+ 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
+ 	struct hist_trigger_data	*hist_data;
+ 	struct hist_var			var;
+@@ -243,6 +244,16 @@ struct hist_trigger_attrs {
+ 	unsigned int	n_actions;
+ };
+ 
++struct field_var {
++	struct hist_field	*var;
++	struct hist_field	*val;
++};
++
++struct field_var_hist {
++	struct hist_trigger_data	*hist_data;
++	char				*cmd;
++};
++
+ struct hist_trigger_data {
+ 	struct hist_field               *fields[HIST_FIELDS_MAX];
+ 	unsigned int			n_vals;
+@@ -263,6 +274,14 @@ struct hist_trigger_data {
+ 
+ 	struct action_data		*actions[HIST_ACTIONS_MAX];
+ 	unsigned int			n_actions;
++
++	struct hist_field		*synth_var_refs[SYNTH_FIELDS_MAX];
++	unsigned int			n_synth_var_refs;
++	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
++	unsigned int			n_field_vars;
++	unsigned int			n_field_var_str;
++	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
++	unsigned int			n_field_var_hists;
+ };
+ 
+ struct synth_field {
+@@ -291,7 +310,14 @@ typedef void (*action_fn_t) (struct hist
+ 
+ struct action_data {
+ 	action_fn_t		fn;
++	unsigned int		n_params;
++	char			*params[SYNTH_FIELDS_MAX];
++
+ 	unsigned int		var_ref_idx;
++	char			*match_event;
++	char			*match_event_system;
++	char			*synth_event_name;
++	struct synth_event	*synth_event;
+ };
+ 
+ static LIST_HEAD(synth_event_list);
+@@ -802,6 +828,50 @@ static struct synth_event *alloc_synth_e
+ 	return event;
+ }
+ 
++static void action_trace(struct hist_trigger_data *hist_data,
++			 struct tracing_map_elt *elt, void *rec,
++			 struct ring_buffer_event *rbe,
++			 struct action_data *data, u64 *var_ref_vals)
++{
++	struct synth_event *event = data->synth_event;
++
++	trace_synth(event, var_ref_vals, data->var_ref_idx);
++}
++
++static bool check_hist_action_refs(struct hist_trigger_data *hist_data,
++				   struct synth_event *event)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_actions; i++) {
++		struct action_data *data = hist_data->actions[i];
++
++		if (data->fn == action_trace && data->synth_event == event)
++			return true;
++	}
++
++	return false;
++}
++
++static LIST_HEAD(hist_action_list);
++static LIST_HEAD(hist_var_list);
++
++struct hist_var_data {
++	struct list_head list;
++	struct hist_trigger_data *hist_data;
++};
++
++static bool check_synth_action_refs(struct synth_event *event)
++{
++	struct hist_var_data *var_data;
++
++	list_for_each_entry(var_data, &hist_action_list, list)
++		if (check_hist_action_refs(var_data->hist_data, event))
++			return true;
++
++	return false;
++}
++
+ static int create_synth_event(int argc, char **argv)
+ {
+ 	struct synth_field *fields[SYNTH_FIELDS_MAX];
+@@ -832,15 +902,17 @@ static int create_synth_event(int argc,
+ 	event = find_synth_event(name);
+ 	if (event) {
+ 		if (delete_event) {
++			if (check_synth_action_refs(event)) {
++				ret = -EBUSY;
++				goto out;
++			}
+ 			remove_synth_event(event);
+ 			goto err;
+ 		} else
+ 			ret = -EEXIST;
+ 		goto out;
+-	} else if (delete_event) {
+-		ret = -EINVAL;
++	} else if (delete_event)
+ 		goto out;
+-	}
+ 
+ 	if (argc < 2) {
+ 		ret = -EINVAL;
+@@ -891,11 +963,18 @@ static int release_all_synth_events(void
+ 
+ 	mutex_lock(&synth_event_mutex);
+ 
++	list_for_each_entry(event, &synth_event_list, list) {
++		if (check_synth_action_refs(event)) {
++			ret = -EBUSY;
++			goto out;
++		}
++	}
++
+ 	list_for_each_entry_safe(event, e, &synth_event_list, list) {
+ 		remove_synth_event(event);
+ 		free_synth_event(event);
+ 	}
+-
++ out:
+ 	mutex_unlock(&synth_event_mutex);
+ 
+ 	return ret;
+@@ -992,13 +1071,6 @@ static u64 hist_field_timestamp(struct h
+ 	return ts;
+ }
+ 
+-static LIST_HEAD(hist_var_list);
+-
+-struct hist_var_data {
+-	struct list_head list;
+-	struct hist_trigger_data *hist_data;
+-};
+-
+ static struct hist_field *check_var_ref(struct hist_field *hist_field,
+ 					struct hist_trigger_data *var_data,
+ 					unsigned int var_idx)
+@@ -1248,6 +1320,7 @@ static struct hist_field *find_event_var
+ struct hist_elt_data {
+ 	char *comm;
+ 	u64 *var_ref_vals;
++	char *field_var_str[SYNTH_FIELDS_MAX];
+ };
+ 
+ static u64 hist_field_var_ref(struct hist_field *hist_field,
+@@ -1415,11 +1488,21 @@ static void destroy_hist_trigger_attrs(s
+ 
+ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
+ {
+-	int ret = 0;
++	int ret = -EINVAL;
+ 
+ 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
+ 		return ret;
+ 
++	if ((strncmp(str, "onmatch(", strlen("onmatch(")) == 0)) {
++		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
++		if (!attrs->action_str[attrs->n_actions]) {
++			ret = -ENOMEM;
++			return ret;
++		}
++		attrs->n_actions++;
++		ret = 0;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -1525,7 +1608,14 @@ static inline void save_comm(char *comm,
+ 
+ static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
+ {
++	struct hist_trigger_data *hist_data = elt->map->private_data;
+ 	struct hist_elt_data *private_data = elt->private_data;
++	unsigned int i, n_str;
++
++	n_str = hist_data->n_field_var_str;
++
++	for (i = 0; i < n_str; i++)
++		kfree(private_data->field_var_str[i]);
+ 
+ 	kfree(private_data->comm);
+ 	kfree(private_data);
+@@ -1537,7 +1627,7 @@ static int hist_trigger_elt_data_alloc(s
+ 	unsigned int size = TASK_COMM_LEN + 1;
+ 	struct hist_elt_data *elt_data;
+ 	struct hist_field *key_field;
+-	unsigned int i;
++	unsigned int i, n_str;
+ 
+ 	elt->private_data = elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
+ 	if (!elt_data)
+@@ -1557,6 +1647,16 @@ static int hist_trigger_elt_data_alloc(s
+ 		}
+ 	}
+ 
++	n_str = hist_data->n_field_var_str;
++
++	for (i = 0; i < n_str; i++) {
++		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
++		if (!elt_data->field_var_str[i]) {
++			hist_trigger_elt_data_free(elt);
++			return -ENOMEM;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1674,6 +1774,7 @@ static void destroy_hist_field(struct hi
+ 
+ 	kfree(hist_field->var.name);
+ 	kfree(hist_field->name);
++	kfree(hist_field->type);
+ 
+ 	kfree(hist_field);
+ }
+@@ -1704,6 +1805,10 @@ static struct hist_field *create_hist_fi
+ 
+ 	if (flags & HIST_FIELD_FL_HITCOUNT) {
+ 		hist_field->fn = hist_field_counter;
++		hist_field->size = sizeof(u64);
++		hist_field->type = kstrdup("u64", GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
+ 		goto out;
+ 	}
+ 
+@@ -1717,12 +1822,18 @@ static struct hist_field *create_hist_fi
+ 		hist_field->fn = hist_field_log2;
+ 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+ 		hist_field->size = hist_field->operands[0]->size;
++		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
+ 		goto out;
+ 	}
+ 
+ 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
+ 		hist_field->fn = hist_field_timestamp;
+ 		hist_field->size = sizeof(u64);
++		hist_field->type = kstrdup("u64", GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
+ 		goto out;
+ 	}
+ 
+@@ -1731,6 +1842,10 @@ static struct hist_field *create_hist_fi
+ 
+ 	if (is_string_field(field)) {
+ 		flags |= HIST_FIELD_FL_STRING;
++		hist_field->size = MAX_FILTER_STR_VAL;
++		hist_field->type = kstrdup(field->type, GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
+ 
+ 		if (field->filter_type == FILTER_STATIC_STRING)
+ 			hist_field->fn = hist_field_string;
+@@ -1739,6 +1854,12 @@ static struct hist_field *create_hist_fi
+ 		else
+ 			hist_field->fn = hist_field_pstring;
+ 	} else {
++		hist_field->size = field->size;
++		hist_field->is_signed = field->is_signed;
++		hist_field->type = kstrdup(field->type, GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
++
+ 		hist_field->fn = select_value_fn(field->size,
+ 						 field->is_signed);
+ 		if (!hist_field->fn) {
+@@ -1786,7 +1907,10 @@ static struct hist_field *create_var_ref
+ 		ref_field->size = var_field->size;
+ 		ref_field->is_signed = var_field->is_signed;
+ 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
+-		if (!ref_field->name) {
++		ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
++		if (!ref_field->name || !ref_field->type) {
++			kfree(ref_field->name);
++			kfree(ref_field->type);
+ 			destroy_hist_field(ref_field, 0);
+ 			return NULL;
+ 		}
+@@ -1970,6 +2094,11 @@ static struct hist_field *parse_unary(st
+ 	expr->operands[0] = operand1;
+ 	expr->operator = FIELD_OP_UNARY_MINUS;
+ 	expr->name = expr_str(expr, 0);
++	expr->type = kstrdup(operand1->type, GFP_KERNEL);
++	if (!expr->type) {
++		ret = -ENOMEM;
++		goto free;
++	}
+ 
+ 	return expr;
+  free:
+@@ -2053,6 +2182,11 @@ static struct hist_field *parse_expr(str
+ 	expr->operands[1] = operand2;
+ 	expr->operator = field_op;
+ 	expr->name = expr_str(expr, 0);
++	expr->type = kstrdup(operand1->type, GFP_KERNEL);
++	if (!expr->type) {
++		ret = -ENOMEM;
++		goto free;
++	}
+ 
+ 	switch (field_op) {
+ 	case FIELD_OP_MINUS:
+@@ -2074,6 +2208,718 @@ static struct hist_field *parse_expr(str
+ 	return ERR_PTR(ret);
+ }
+ 
++static struct hist_var_data *find_actions(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data, *found = NULL;
++
++	list_for_each_entry(var_data, &hist_action_list, list) {
++		if (var_data->hist_data == hist_data) {
++			found = var_data;
++			break;
++		}
++	}
++
++	return found;
++}
++
++static int save_hist_actions(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data;
++
++	var_data = find_actions(hist_data);
++	if (var_data)
++		return 0;
++
++	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
++	if (!var_data)
++		return -ENOMEM;
++
++	var_data->hist_data = hist_data;
++	list_add(&var_data->list, &hist_action_list);
++
++	return 0;
++}
++
++static void remove_hist_actions(struct hist_trigger_data *hist_data)
++{
++	struct hist_var_data *var_data;
++
++	var_data = find_actions(hist_data);
++	if (!var_data)
++		return;
++
++	list_del(&var_data->list);
++
++	kfree(var_data);
++}
++
++static char *find_trigger_filter(struct hist_trigger_data *hist_data,
++				 struct trace_event_file *file)
++{
++	struct event_trigger_data *test;
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			if (test->private_data == hist_data)
++				return test->filter_str;
++		}
++	}
++
++	return NULL;
++}
++
++static struct event_command trigger_hist_cmd;
++static int event_hist_trigger_func(struct event_command *cmd_ops,
++				   struct trace_event_file *file,
++				   char *glob, char *cmd, char *param);
++
++static bool compatible_keys(struct hist_trigger_data *target_hist_data,
++			    struct hist_trigger_data *hist_data,
++			    unsigned int n_keys)
++{
++	struct hist_field *target_hist_field, *hist_field;
++	unsigned int n, i, j;
++
++	if (hist_data->n_fields - hist_data->n_vals != n_keys)
++		return false;
++
++	i = hist_data->n_vals;
++	j = target_hist_data->n_vals;
++
++	for (n = 0; n < n_keys; n++) {
++		hist_field = hist_data->fields[i + n];
++		target_hist_field = hist_data->fields[j + n];
++
++		if (strcmp(hist_field->type, target_hist_field->type) != 0)
++			return false;
++		if (hist_field->size != target_hist_field->size)
++			return false;
++		if (hist_field->is_signed != target_hist_field->is_signed)
++			return false;
++	}
++
++	return true;
++}
++
++static struct hist_trigger_data *
++find_compatible_hist(struct hist_trigger_data *target_hist_data,
++		     struct trace_event_file *file)
++{
++	struct hist_trigger_data *hist_data;
++	struct event_trigger_data *test;
++	unsigned int n_keys;
++
++	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
++
++	list_for_each_entry_rcu(test, &file->triggers, list) {
++		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			hist_data = test->private_data;
++
++			if (compatible_keys(target_hist_data, hist_data, n_keys))
++				return hist_data;
++		}
++	}
++
++	return NULL;
++}
++
++static struct trace_event_file *event_file(char *system, char *event_name)
++{
++	struct trace_event_file *file;
++	struct trace_array *tr;
++
++	tr = top_trace_array();
++	if (!tr)
++		return ERR_PTR(-ENODEV);
++
++	file = find_event_file(tr, system, event_name);
++	if (!file)
++		return ERR_PTR(-EINVAL);
++
++	return file;
++}
++
++static struct hist_field *
++create_field_var_hist(struct hist_trigger_data *target_hist_data,
++		      char *system, char *event_name, char *field_name)
++{
++	struct hist_field *event_var = ERR_PTR(-EINVAL);
++	struct hist_trigger_data *hist_data;
++	unsigned int i, n, first = true;
++	struct field_var_hist *var_hist;
++	struct trace_event_file *file;
++	struct hist_field *key_field;
++	struct trace_array *tr;
++	char *saved_filter;
++	char *cmd;
++	int ret;
++
++	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX)
++		return ERR_PTR(-EINVAL);
++
++	tr = top_trace_array();
++	if (!tr)
++		return ERR_PTR(-ENODEV);
++
++	file = event_file(system, event_name);
++	if (IS_ERR(file)) {
++		ret = PTR_ERR(file);
++		return ERR_PTR(ret);
++	}
++
++	hist_data = find_compatible_hist(target_hist_data, file);
++	if (!hist_data)
++		return ERR_PTR(-EINVAL);
++
++	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
++	if (!var_hist)
++		return ERR_PTR(-ENOMEM);
++
++	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
++	if (!cmd) {
++		kfree(var_hist);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	strcat(cmd, "keys=");
++
++	for_each_hist_key_field(i, hist_data) {
++		key_field = hist_data->fields[i];
++		if (!first)
++			strcat(cmd, ",");
++		strcat(cmd, key_field->field->name);
++		first = false;
++	}
++
++	strcat(cmd, ":synthetic_");
++	strcat(cmd, field_name);
++	strcat(cmd, "=");
++	strcat(cmd, field_name);
++
++	saved_filter = find_trigger_filter(hist_data, file);
++	if (saved_filter) {
++		strcat(cmd, " if ");
++		strcat(cmd, saved_filter);
++	}
++
++	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
++	if (!var_hist->cmd) {
++		kfree(cmd);
++		kfree(var_hist);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	var_hist->hist_data = hist_data;
++
++	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
++				      "", "hist", cmd);
++	if (ret) {
++		kfree(cmd);
++		kfree(var_hist->cmd);
++		kfree(var_hist);
++		return ERR_PTR(ret);
++	}
++
++	strcpy(cmd, "synthetic_");
++	strcat(cmd, field_name);
++
++	event_var = find_event_var(system, event_name, cmd);
++	if (!event_var) {
++		kfree(cmd);
++		kfree(var_hist->cmd);
++		kfree(var_hist);
++		return ERR_PTR(-EINVAL);
++	}
++
++	n = target_hist_data->n_field_var_hists;
++	target_hist_data->field_var_hists[n] = var_hist;
++	target_hist_data->n_field_var_hists++;
++
++	return event_var;
++}
++
++static struct hist_field *
++find_target_event_var(struct hist_trigger_data *hist_data,
++		      char *system, char *event_name, char *var_name)
++{
++	struct trace_event_file *file = hist_data->event_file;
++	struct hist_field *hist_field = NULL;
++
++	if (system) {
++		struct trace_event_call *call;
++
++		if (!event_name)
++			return NULL;
++
++		call = file->event_call;
++
++		if (strcmp(system, call->class->system) != 0)
++			return NULL;
++
++		if (strcmp(event_name, trace_event_name(call)) != 0)
++			return NULL;
++	}
++
++	hist_field = find_var_field(hist_data, var_name);
++
++	return hist_field;
++}
++
++static inline void __update_field_vars(struct tracing_map_elt *elt,
++				       struct ring_buffer_event *rbe,
++				       void *rec,
++				       struct field_var **field_vars,
++				       unsigned int n_field_vars,
++				       unsigned int field_var_str_start)
++{
++	struct hist_elt_data *elt_data = elt->private_data;
++	unsigned int i, j, var_idx;
++	u64 var_val;
++
++	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
++		struct field_var *field_var = field_vars[i];
++		struct hist_field *var = field_var->var;
++		struct hist_field *val = field_var->val;
++
++		var_val = val->fn(val, elt, rbe, rec);
++		var_idx = var->var.idx;
++
++		if (val->flags & HIST_FIELD_FL_STRING) {
++			char *str = elt_data->field_var_str[j++];
++
++			memcpy(str, (char *)(uintptr_t)var_val,
++			       TASK_COMM_LEN + 1);
++			var_val = (u64)(uintptr_t)str;
++		}
++		tracing_map_set_var(elt, var_idx, var_val);
++	}
++}
++
++static void update_field_vars(struct hist_trigger_data *hist_data,
++			      struct tracing_map_elt *elt,
++			      struct ring_buffer_event *rbe,
++			      void *rec)
++{
++	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
++			    hist_data->n_field_vars, 0);
++}
++
++static struct hist_field *create_var(struct hist_trigger_data *hist_data,
++				     struct trace_event_file *file,
++				     char *name, int size, const char *type)
++{
++	struct hist_field *var;
++	int idx;
++
++	if (find_var(file, name) && !hist_data->remove) {
++		var = ERR_PTR(-EINVAL);
++		goto out;
++	}
++
++	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
++	if (!var) {
++		var = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	idx = tracing_map_add_var(hist_data->map);
++	if (idx < 0) {
++		kfree(var);
++		var = ERR_PTR(-EINVAL);
++		goto out;
++	}
++
++	var->flags = HIST_FIELD_FL_VAR;
++	var->var.idx = idx;
++	var->var.hist_data = var->hist_data = hist_data;
++	var->size = size;
++	var->var.name = kstrdup(name, GFP_KERNEL);
++	var->type = kstrdup(type, GFP_KERNEL);
++	if (!var->var.name || !var->type) {
++		kfree(var->var.name);
++		kfree(var->type);
++		kfree(var);
++		var = ERR_PTR(-ENOMEM);
++	}
++ out:
++	return var;
++}
++
++static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
++					  struct trace_event_file *file,
++					  char *field_name)
++{
++	struct hist_field *val = NULL, *var = NULL;
++	unsigned long flags = HIST_FIELD_FL_VAR;
++	struct field_var *field_var;
++	int ret = 0;
++
++	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
++		ret = -EINVAL;
++		goto err;
++	}
++
++	val = parse_atom(hist_data, file, field_name, &flags, NULL);
++	if (IS_ERR(val)) {
++		ret = PTR_ERR(val);
++		goto err;
++	}
++
++	var = create_var(hist_data, file, field_name, val->size, val->type);
++	if (IS_ERR(var)) {
++		kfree(val);
++		ret = PTR_ERR(var);
++		goto err;
++	}
++
++	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
++	if (!field_var) {
++		kfree(val);
++		kfree(var);
++		ret =  -ENOMEM;
++		goto err;
++	}
++
++	field_var->var = var;
++	field_var->val = val;
++ out:
++	return field_var;
++ err:
++	field_var = ERR_PTR(ret);
++	goto out;
++}
++
++static struct field_var *
++create_target_field_var(struct hist_trigger_data *hist_data,
++			char *system, char *event_name, char *var_name)
++{
++	struct trace_event_file *file = hist_data->event_file;
++
++	if (system) {
++		struct trace_event_call *call;
++
++		if (!event_name)
++			return NULL;
++
++		call = file->event_call;
++
++		if (strcmp(system, call->class->system) != 0)
++			return NULL;
++
++		if (strcmp(event_name, trace_event_name(call)) != 0)
++			return NULL;
++	}
++
++	return create_field_var(hist_data, file, var_name);
++}
++
++static void onmatch_destroy(struct action_data *data)
++{
++	unsigned int i;
++
++	kfree(data->match_event);
++	kfree(data->match_event_system);
++	kfree(data->synth_event_name);
++
++	for (i = 0; i < data->n_params; i++)
++		kfree(data->params[i]);
++
++	kfree(data);
++}
++
++static void destroy_field_var(struct field_var *field_var)
++{
++	if (!field_var)
++		return;
++
++	destroy_hist_field(field_var->var, 0);
++	destroy_hist_field(field_var->val, 0);
++
++	kfree(field_var);
++}
++
++static void destroy_field_vars(struct hist_trigger_data *hist_data)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_field_vars; i++)
++		destroy_field_var(hist_data->field_vars[i]);
++}
++
++static void save_field_var(struct hist_trigger_data *hist_data,
++			   struct field_var *field_var)
++{
++	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
++
++	if (field_var->val->flags & HIST_FIELD_FL_STRING)
++		hist_data->n_field_var_str++;
++}
++
++static void destroy_synth_var_refs(struct hist_trigger_data *hist_data)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_synth_var_refs; i++)
++		destroy_hist_field(hist_data->synth_var_refs[i], 0);
++}
++
++static void save_synth_var_ref(struct hist_trigger_data *hist_data,
++			 struct hist_field *var_ref)
++{
++	hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;
++
++	hist_data->var_refs[hist_data->n_var_refs] = var_ref;
++	var_ref->var_ref_idx = hist_data->n_var_refs++;
++}
++
++static int check_synth_field(struct synth_event *event,
++			     struct hist_field *hist_field,
++			     unsigned int field_pos)
++{
++	struct synth_field *field;
++
++	if (field_pos >= event->n_fields)
++		return -EINVAL;
++
++	field = event->fields[field_pos];
++
++	if (strcmp(field->type, hist_field->type) != 0)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int parse_action_params(char *params, struct action_data *data)
++{
++	char *param, *saved_param;
++	int ret = 0;
++
++	while (params) {
++		if (data->n_params >= SYNTH_FIELDS_MAX)
++			goto out;
++
++		param = strsep(&params, ",");
++		if (!param)
++			goto out;
++
++		param = strstrip(param);
++		if (strlen(param) < 2) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		saved_param = kstrdup(param, GFP_KERNEL);
++		if (!saved_param) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		data->params[data->n_params++] = saved_param;
++	}
++ out:
++	return ret;
++}
++
++static struct hist_field *
++onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
++		 char *system, char *event, char *var)
++{
++	struct hist_field *hist_field;
++
++	var++; /* skip '$' */
++
++	hist_field = find_target_event_var(hist_data, system, event, var);
++	if (!hist_field) {
++		if (!system) {
++			system = data->match_event_system;
++			event = data->match_event;
++		}
++
++		hist_field = find_event_var(system, event, var);
++	}
++
++	return hist_field;
++}
++
++static struct hist_field *
++onmatch_create_field_var(struct hist_trigger_data *hist_data,
++			 struct action_data *data, char *system,
++			 char *event, char *var)
++{
++	struct hist_field *hist_field = NULL;
++	struct field_var *field_var;
++
++	field_var = create_target_field_var(hist_data, system, event, var);
++	if (IS_ERR(field_var))
++		goto out;
++
++	if (field_var) {
++		save_field_var(hist_data, field_var);
++		hist_field = field_var->var;
++	} else {
++		if (!system) {
++			system = data->match_event_system;
++			event = data->match_event;
++		}
++
++		hist_field = create_field_var_hist(hist_data, system, event, var);
++		if (IS_ERR(hist_field))
++			goto free;
++	}
++ out:
++	return hist_field;
++ free:
++	destroy_field_var(field_var);
++	hist_field = NULL;
++	goto out;
++}
++
++static int onmatch_create(struct hist_trigger_data *hist_data,
++			  struct trace_event_file *file,
++			  struct action_data *data)
++{
++	char *event_name, *param, *system = NULL;
++	struct hist_field *hist_field, *var_ref;
++	unsigned int i, var_ref_idx;
++	unsigned int field_pos = 0;
++	struct synth_event *event;
++	int ret = 0;
++
++	mutex_lock(&synth_event_mutex);
++
++	event = find_synth_event(data->synth_event_name);
++	if (!event) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	var_ref_idx = hist_data->n_var_refs;
++
++	for (i = 0; i < data->n_params; i++) {
++		char *p;
++
++		p = param = kstrdup(data->params[i], GFP_KERNEL);
++		if (!param)
++			goto out;
++
++		system = strsep(&param, ".");
++		if (!param) {
++			param = (char *)system;
++			system = event_name = NULL;
++		} else {
++			event_name = strsep(&param, ".");
++			if (!param) {
++				kfree(p);
++				ret = -EINVAL;
++				goto out;
++			}
++		}
++
++		if (param[0] == '$')
++			hist_field = onmatch_find_var(hist_data, data, system,
++						      event_name, param);
++		else
++			hist_field = onmatch_create_field_var(hist_data, data,
++							      system,
++							      event_name,
++							      param);
++
++		if (!hist_field) {
++			kfree(p);
++			ret = -EINVAL;
++			goto out;
++		}
++
++		if (check_synth_field(event, hist_field, field_pos) == 0) {
++			var_ref = create_var_ref(hist_field);
++			if (!var_ref) {
++				kfree(p);
++				ret = -ENOMEM;
++				goto out;
++			}
++
++			save_synth_var_ref(hist_data, var_ref);
++			field_pos++;
++			kfree(p);
++			continue;
++		}
++
++		kfree(p);
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (field_pos != event->n_fields) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	data->fn = action_trace;
++	data->synth_event = event;
++	data->var_ref_idx = var_ref_idx;
++	hist_data->actions[hist_data->n_actions++] = data;
++	save_hist_actions(hist_data);
++ out:
++	mutex_unlock(&synth_event_mutex);
++
++	return ret;
++}
++
++static struct action_data *onmatch_parse(char *str)
++{
++	char *match_event, *match_event_system;
++	char *synth_event_name, *params;
++	struct action_data *data;
++	int ret = -EINVAL;
++
++	data = kzalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return ERR_PTR(-ENOMEM);
++
++	match_event = strsep(&str, ")");
++	if (!match_event || !str)
++		goto free;
++
++	match_event_system = strsep(&match_event, ".");
++	if (!match_event)
++		goto free;
++
++	if (IS_ERR(event_file(match_event_system, match_event)))
++		goto free;
++
++	data->match_event = kstrdup(match_event, GFP_KERNEL);
++	data->match_event_system = kstrdup(match_event_system, GFP_KERNEL);
++
++	strsep(&str, ".");
++	if (!str)
++		goto free;
++
++	synth_event_name = strsep(&str, "(");
++	if (!synth_event_name || !str)
++		goto free;
++	data->synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
++
++	params = strsep(&str, ")");
++	if (!params || !str || (str && strlen(str)))
++		goto free;
++
++	ret = parse_action_params(params, data);
++	if (ret)
++		goto free;
++
++	if (!data->match_event_system || !data->match_event ||
++	    !data->synth_event_name) {
++		ret = -ENOMEM;
++		goto free;
++	}
++ out:
++	return data;
++ free:
++	onmatch_destroy(data);
++	data = ERR_PTR(ret);
++	goto out;
++}
++
+ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ {
+ 	hist_data->fields[HITCOUNT_IDX] =
+@@ -2465,19 +3311,37 @@ static void destroy_actions(struct hist_
+ 	for (i = 0; i < hist_data->n_actions; i++) {
+ 		struct action_data *data = hist_data->actions[i];
+ 
+-		kfree(data);
++		if (data->fn == action_trace)
++			onmatch_destroy(data);
++		else
++			kfree(data);
+ 	}
+ }
+ 
+ static int create_actions(struct hist_trigger_data *hist_data,
+ 			  struct trace_event_file *file)
+ {
++	struct action_data *data;
+ 	unsigned int i;
+ 	int ret = 0;
+ 	char *str;
+ 
+ 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
+ 		str = hist_data->attrs->action_str[i];
++
++		if (strncmp(str, "onmatch(", strlen("onmatch(")) == 0) {
++			char *action_str = str + strlen("onmatch(");
++
++			data = onmatch_parse(action_str);
++			if (IS_ERR(data))
++				return PTR_ERR(data);
++
++			ret = onmatch_create(hist_data, file, data);
++			if (ret) {
++				onmatch_destroy(data);
++				return ret;
++			}
++		}
+ 	}
+ 
+ 	return ret;
+@@ -2494,6 +3358,26 @@ static void print_actions(struct seq_fil
+ 	}
+ }
+ 
++static void print_onmatch_spec(struct seq_file *m,
++			       struct hist_trigger_data *hist_data,
++			       struct action_data *data)
++{
++	unsigned int i;
++
++	seq_printf(m, ":onmatch(%s.%s).", data->match_event_system,
++		   data->match_event);
++
++	seq_printf(m, "%s(", data->synth_event->name);
++
++	for (i = 0; i < data->n_params; i++) {
++		if (i)
++			seq_puts(m, ",");
++		seq_printf(m, "%s", data->params[i]);
++	}
++
++	seq_puts(m, ")");
++}
++
+ static void print_actions_spec(struct seq_file *m,
+ 			       struct hist_trigger_data *hist_data)
+ {
+@@ -2501,6 +3385,19 @@ static void print_actions_spec(struct se
+ 
+ 	for (i = 0; i < hist_data->n_actions; i++) {
+ 		struct action_data *data = hist_data->actions[i];
++
++		if (data->fn == action_trace)
++			print_onmatch_spec(m, hist_data, data);
++	}
++}
++
++static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
++{
++	unsigned int i;
++
++	for (i = 0; i < hist_data->n_field_var_hists; i++) {
++		kfree(hist_data->field_var_hists[i]->cmd);
++		kfree(hist_data->field_var_hists[i]);
+ 	}
+ }
+ 
+@@ -2514,6 +3411,9 @@ static void destroy_hist_data(struct his
+ 	tracing_map_destroy(hist_data->map);
+ 
+ 	destroy_actions(hist_data);
++	destroy_field_vars(hist_data);
++	destroy_field_var_hists(hist_data);
++	destroy_synth_var_refs(hist_data);
+ 
+ 	kfree(hist_data);
+ }
+@@ -2648,6 +3548,8 @@ static void hist_trigger_elt_update(stru
+ 			tracing_map_set_var(elt, var_idx, hist_val);
+ 		}
+ 	}
++
++	update_field_vars(hist_data, elt, rbe, rec);
+ }
+ 
+ static inline void add_to_key(char *compound_key, void *key,
+@@ -2861,6 +3763,8 @@ hist_trigger_entry_print(struct seq_file
+ 		}
+ 	}
+ 
++	print_actions(m, hist_data, elt);
++
+ 	seq_puts(m, "\n");
+ }
+ 
+@@ -3128,6 +4032,8 @@ static void event_hist_trigger_free(stru
+ 
+ 		remove_hist_vars(hist_data);
+ 
++		remove_hist_actions(hist_data);
++
+ 		destroy_hist_data(hist_data);
+ 	}
+ }
+@@ -3390,6 +4296,21 @@ static bool hist_trigger_check_refs(stru
+ 	return false;
+ }
+ 
++static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
++{
++	struct trace_event_file *file;
++	unsigned int i;
++	char *cmd;
++	int ret;
++
++	for (i = 0; i < hist_data->n_field_var_hists; i++) {
++		file = hist_data->field_var_hists[i]->hist_data->event_file;
++		cmd = hist_data->field_var_hists[i]->cmd;
++		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
++					      "!hist", "hist", cmd);
++	}
++}
++
+ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ 				    struct event_trigger_data *data,
+ 				    struct trace_event_file *file)
+@@ -3405,6 +4326,7 @@ static void hist_unregister_trigger(char
+ 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ 			if (!hist_trigger_match(data, test, named_data, false))
+ 				continue;
++			unregister_field_var_hists(test->private_data);
+ 			unregistered = true;
+ 			list_del_rcu(&test->list);
+ 			trace_event_trigger_enable_disable(file, 0);
+@@ -3448,6 +4370,7 @@ static void hist_unreg_all(struct trace_
+ 
+ 	list_for_each_entry_safe(test, n, &file->triggers, list) {
+ 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			unregister_field_var_hists(test->private_data);
+ 			list_del_rcu(&test->list);
+ 			trace_event_trigger_enable_disable(file, 0);
+ 			update_cond_flag(file);
+@@ -3571,6 +4494,8 @@ static int event_hist_trigger_func(struc
+ 
+ 	remove_hist_vars(hist_data);
+ 
++	remove_hist_actions(hist_data);
++
+ 	kfree(trigger_data);
+ 	destroy_hist_data(hist_data);
+ 
diff --git a/debian/patches/features/all/rt/0024-tracing-Add-onmax-hist-trigger-action-support.patch b/debian/patches/features/all/rt/0024-tracing-Add-onmax-hist-trigger-action-support.patch
new file mode 100644
index 0000000..c0eaafa
--- /dev/null
+++ b/debian/patches/features/all/rt/0024-tracing-Add-onmax-hist-trigger-action-support.patch
@@ -0,0 +1,456 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:25 -0500
+Subject: [PATCH 24/32] tracing: Add 'onmax' hist trigger action support
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add an 'onmax(var).save(field,...)' hist trigger action which is
+invoked whenever an event exceeds the current maximum.
+
+The end result is that the trace event fields or variables specified
+as the onmax.save() params will be saved if 'var' exceeds the current
+maximum for that hist trigger entry.  This allows context from the
+event that exhibited the new maximum to be saved for later reference.
+When the histogram is displayed, additional fields displaying the
+saved values will be printed.
+
+As an example the below defines a couple of hist triggers, one for
+sched_wakeup and another for sched_switch, keyed on pid.  Whenever a
+sched_wakeup occurs, the timestamp is saved in the entry corresponding
+to the current pid, and when the scheduler switches back to that pid,
+the timestamp difference is calculated.  If the resulting latency
+exceeds the current maximum latency, the specified save() values are
+saved:
+
+    # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
+        if comm=="cyclictest"' >> \
+      /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
+
+    # echo 'hist:keys=next_pid:\
+      wakeup_lat=common_timestamp.usecs-$ts0:\
+      onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
+        if next_comm=="cyclictest"' >> \
+      /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+When the histogram is displayed, the max value and the saved values
+corresponding to the max are displayed following the rest of the
+fields:
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
+      { next_pid:       2255 } hitcount:        239 \
+        common_timestamp-$ts0:          0
+        max:         27  next_comm: cyclictest \
+        prev_pid:          0  prev_prio:        120  prev_comm: swapper/1 \
+      { next_pid:       2256 } hitcount:       2355  common_timestamp-$ts0: 0 \
+      	max:         49  next_comm: cyclictest \
+        prev_pid:          0  prev_prio:        120  prev_comm: swapper/0
+
+    Totals:
+        Hits: 12970
+        Entries: 2
+        Dropped: 0
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |  310 ++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 276 insertions(+), 34 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -282,6 +282,10 @@ struct hist_trigger_data {
+ 	unsigned int			n_field_var_str;
+ 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
+ 	unsigned int			n_field_var_hists;
++
++	struct field_var		*max_vars[SYNTH_FIELDS_MAX];
++	unsigned int			n_max_vars;
++	unsigned int			n_max_var_str;
+ };
+ 
+ struct synth_field {
+@@ -318,6 +322,12 @@ struct action_data {
+ 	char			*match_event_system;
+ 	char			*synth_event_name;
+ 	struct synth_event	*synth_event;
++
++	char			*onmax_var_str;
++	char			*onmax_fn_name;
++	unsigned int		max_var_ref_idx;
++	struct hist_field	*max_var;
++	struct hist_field	*onmax_var;
+ };
+ 
+ static LIST_HEAD(synth_event_list);
+@@ -1493,7 +1503,8 @@ static int parse_action(char *str, struc
+ 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
+ 		return ret;
+ 
+-	if ((strncmp(str, "onmatch(", strlen("onmatch(")) == 0)) {
++	if ((strncmp(str, "onmatch(", strlen("onmatch(")) == 0) ||
++	    (strncmp(str, "onmax(", strlen("onmax(")) == 0)) {
+ 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
+ 		if (!attrs->action_str[attrs->n_actions]) {
+ 			ret = -ENOMEM;
+@@ -1612,7 +1623,7 @@ static void hist_trigger_elt_data_free(s
+ 	struct hist_elt_data *private_data = elt->private_data;
+ 	unsigned int i, n_str;
+ 
+-	n_str = hist_data->n_field_var_str;
++	n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
+ 
+ 	for (i = 0; i < n_str; i++)
+ 		kfree(private_data->field_var_str[i]);
+@@ -1647,7 +1658,7 @@ static int hist_trigger_elt_data_alloc(s
+ 		}
+ 	}
+ 
+-	n_str = hist_data->n_field_var_str;
++	n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
+ 
+ 	for (i = 0; i < n_str; i++) {
+ 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
+@@ -2504,6 +2515,15 @@ static void update_field_vars(struct his
+ 			    hist_data->n_field_vars, 0);
+ }
+ 
++static void update_max_vars(struct hist_trigger_data *hist_data,
++			    struct tracing_map_elt *elt,
++			    struct ring_buffer_event *rbe,
++			    void *rec)
++{
++	__update_field_vars(elt, rbe, rec, hist_data->max_vars,
++			    hist_data->n_max_vars, hist_data->n_field_var_str);
++}
++
+ static struct hist_field *create_var(struct hist_trigger_data *hist_data,
+ 				     struct trace_event_file *file,
+ 				     char *name, int size, const char *type)
+@@ -2613,6 +2633,222 @@ create_target_field_var(struct hist_trig
+ 	return create_field_var(hist_data, file, var_name);
+ }
+ 
++static void onmax_print(struct seq_file *m,
++			struct hist_trigger_data *hist_data,
++			struct tracing_map_elt *elt,
++			struct action_data *data)
++{
++	unsigned int i, save_var_idx, max_idx = data->max_var->var.idx;
++
++	seq_printf(m, "\n\tmax: %10llu", tracing_map_read_var(elt, max_idx));
++
++	for (i = 0; i < hist_data->n_max_vars; i++) {
++		struct hist_field *save_val = hist_data->max_vars[i]->val;
++		struct hist_field *save_var = hist_data->max_vars[i]->var;
++		u64 val;
++
++		save_var_idx = save_var->var.idx;
++
++		val = tracing_map_read_var(elt, save_var_idx);
++
++		if (save_val->flags & HIST_FIELD_FL_STRING) {
++			seq_printf(m, "  %s: %-50s", save_var->var.name,
++				   (char *)(uintptr_t)(val));
++		} else
++			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
++	}
++}
++
++static void onmax_save(struct hist_trigger_data *hist_data,
++		       struct tracing_map_elt *elt, void *rec,
++		       struct ring_buffer_event *rbe,
++		       struct action_data *data, u64 *var_ref_vals)
++{
++	unsigned int max_idx = data->max_var->var.idx;
++	unsigned int max_var_ref_idx = data->max_var_ref_idx;
++
++	u64 var_val, max_val;
++
++	var_val = var_ref_vals[max_var_ref_idx];
++	max_val = tracing_map_read_var(elt, max_idx);
++
++	if (var_val <= max_val)
++		return;
++
++	tracing_map_set_var(elt, max_idx, var_val);
++
++	update_max_vars(hist_data, elt, rbe, rec);
++}
++
++static void onmax_destroy(struct action_data *data)
++{
++	unsigned int i;
++
++	destroy_hist_field(data->max_var, 0);
++	destroy_hist_field(data->onmax_var, 0);
++
++	kfree(data->onmax_var_str);
++	kfree(data->onmax_fn_name);
++
++	for (i = 0; i < data->n_params; i++)
++		kfree(data->params[i]);
++
++	kfree(data);
++}
++
++static int onmax_create(struct hist_trigger_data *hist_data,
++			struct action_data *data)
++{
++	struct trace_event_call *call = hist_data->event_file->event_call;
++	struct trace_event_file *file = hist_data->event_file;
++	struct hist_field *var_field, *ref_field, *max_var;
++	unsigned int var_ref_idx = hist_data->n_var_refs;
++	struct field_var *field_var;
++	char *onmax_var_str, *param;
++	const char *event_name;
++	unsigned long flags;
++	unsigned int i;
++	int ret = 0;
++
++	onmax_var_str = data->onmax_var_str;
++	if (onmax_var_str[0] != '$')
++		return -EINVAL;
++	onmax_var_str++;
++
++	event_name = trace_event_name(call);
++	var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
++	if (!var_field)
++		return -EINVAL;
++
++	flags = HIST_FIELD_FL_VAR_REF;
++	ref_field = create_hist_field(hist_data, NULL, flags, NULL);
++	if (!ref_field)
++		return -ENOMEM;
++
++	ref_field->var.idx = var_field->var.idx;
++	ref_field->var.hist_data = hist_data;
++	ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
++	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
++	if (!ref_field->name || !ref_field->type) {
++		destroy_hist_field(ref_field, 0);
++		ret = -ENOMEM;
++		goto out;
++	}
++	hist_data->var_refs[hist_data->n_var_refs] = ref_field;
++	ref_field->var_ref_idx = hist_data->n_var_refs++;
++	data->onmax_var = ref_field;
++
++	data->fn = onmax_save;
++	data->max_var_ref_idx = var_ref_idx;
++	max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
++	if (IS_ERR(max_var)) {
++		ret = PTR_ERR(max_var);
++		goto out;
++	}
++	data->max_var = max_var;
++
++	for (i = 0; i < data->n_params; i++) {
++		param = kstrdup(data->params[i], GFP_KERNEL);
++		if (!param)
++			goto out;
++
++		field_var = create_target_field_var(hist_data, NULL, NULL, param);
++		if (IS_ERR(field_var)) {
++			ret = PTR_ERR(field_var);
++			kfree(param);
++			goto out;
++		}
++
++		hist_data->max_vars[hist_data->n_max_vars++] = field_var;
++		if (field_var->val->flags & HIST_FIELD_FL_STRING)
++			hist_data->n_max_var_str++;
++
++		kfree(param);
++	}
++
++	hist_data->actions[hist_data->n_actions++] = data;
++ out:
++	return ret;
++}
++
++static int parse_action_params(char *params, struct action_data *data)
++{
++	char *param, *saved_param;
++	int ret = 0;
++
++	while (params) {
++		if (data->n_params >= SYNTH_FIELDS_MAX)
++			goto out;
++
++		param = strsep(&params, ",");
++		if (!param)
++			goto out;
++
++		param = strstrip(param);
++		if (strlen(param) < 2) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		saved_param = kstrdup(param, GFP_KERNEL);
++		if (!saved_param) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		data->params[data->n_params++] = saved_param;
++	}
++ out:
++	return ret;
++}
++
++static struct action_data *onmax_parse(char *str)
++{
++	char *onmax_fn_name, *onmax_var_str;
++	struct action_data *data;
++	int ret = -EINVAL;
++
++	data = kzalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return ERR_PTR(-ENOMEM);
++
++	onmax_var_str = strsep(&str, ")");
++	if (!onmax_var_str || !str)
++		return ERR_PTR(-EINVAL);
++	data->onmax_var_str = kstrdup(onmax_var_str, GFP_KERNEL);
++
++	strsep(&str, ".");
++	if (!str)
++		goto free;
++
++	onmax_fn_name = strsep(&str, "(");
++	if (!onmax_fn_name || !str)
++		goto free;
++
++	if (strncmp(onmax_fn_name, "save", strlen("save")) == 0) {
++		char *params = strsep(&str, ")");
++
++		if (!params)
++			goto free;
++
++		ret = parse_action_params(params, data);
++		if (ret)
++			goto free;
++	}
++	data->onmax_fn_name = kstrdup(onmax_fn_name, GFP_KERNEL);
++
++	if (!data->onmax_var_str || !data->onmax_fn_name) {
++		ret = -ENOMEM;
++		goto free;
++	}
++ out:
++	return data;
++ free:
++	onmax_destroy(data);
++	data = ERR_PTR(ret);
++	goto out;
++}
++
+ static void onmatch_destroy(struct action_data *data)
+ {
+ 	unsigned int i;
+@@ -2689,37 +2925,6 @@ static int check_synth_field(struct synt
+ 	return 0;
+ }
+ 
+-static int parse_action_params(char *params, struct action_data *data)
+-{
+-	char *param, *saved_param;
+-	int ret = 0;
+-
+-	while (params) {
+-		if (data->n_params >= SYNTH_FIELDS_MAX)
+-			goto out;
+-
+-		param = strsep(&params, ",");
+-		if (!param)
+-			goto out;
+-
+-		param = strstrip(param);
+-		if (strlen(param) < 2) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		saved_param = kstrdup(param, GFP_KERNEL);
+-		if (!saved_param) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-
+-		data->params[data->n_params++] = saved_param;
+-	}
+- out:
+-	return ret;
+-}
+-
+ static struct hist_field *
+ onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
+ 		 char *system, char *event, char *var)
+@@ -3313,6 +3518,8 @@ static void destroy_actions(struct hist_
+ 
+ 		if (data->fn == action_trace)
+ 			onmatch_destroy(data);
++		else if (data->fn == onmax_save)
++			onmax_destroy(data);
+ 		else
+ 			kfree(data);
+ 	}
+@@ -3341,6 +3548,18 @@ static int create_actions(struct hist_tr
+ 				onmatch_destroy(data);
+ 				return ret;
+ 			}
++		} else if (strncmp(str, "onmax(", strlen("onmax(")) == 0) {
++			char *action_str = str + strlen("onmax(");
++
++			data = onmax_parse(action_str);
++			if (IS_ERR(data))
++				return PTR_ERR(data);
++
++			ret = onmax_create(hist_data, data);
++			if (ret) {
++				onmax_destroy(data);
++				return ret;
++			}
+ 		}
+ 	}
+ 
+@@ -3355,9 +3574,30 @@ static void print_actions(struct seq_fil
+ 
+ 	for (i = 0; i < hist_data->n_actions; i++) {
+ 		struct action_data *data = hist_data->actions[i];
++
++		if (data->fn == onmax_save)
++			onmax_print(m, hist_data, elt, data);
+ 	}
+ }
+ 
++static void print_onmax_spec(struct seq_file *m,
++			     struct hist_trigger_data *hist_data,
++			     struct action_data *data)
++{
++	unsigned int i;
++
++	seq_puts(m, ":onmax(");
++	seq_printf(m, "%s", data->onmax_var_str);
++	seq_printf(m, ").%s(", data->onmax_fn_name);
++
++	for (i = 0; i < hist_data->n_max_vars; i++) {
++		seq_printf(m, "%s", hist_data->max_vars[i]->var->var.name);
++		if (i < hist_data->n_max_vars - 1)
++			seq_puts(m, ",");
++	}
++	seq_puts(m, ")");
++}
++
+ static void print_onmatch_spec(struct seq_file *m,
+ 			       struct hist_trigger_data *hist_data,
+ 			       struct action_data *data)
+@@ -3388,6 +3628,8 @@ static void print_actions_spec(struct se
+ 
+ 		if (data->fn == action_trace)
+ 			print_onmatch_spec(m, hist_data, data);
++		else if (data->fn == onmax_save)
++			print_onmax_spec(m, hist_data, data);
+ 	}
+ }
+ 
diff --git a/debian/patches/features/all/rt/0025-tracing-Allow-whitespace-to-surround-hist-trigger-fi.patch b/debian/patches/features/all/rt/0025-tracing-Allow-whitespace-to-surround-hist-trigger-fi.patch
new file mode 100644
index 0000000..b0270c0
--- /dev/null
+++ b/debian/patches/features/all/rt/0025-tracing-Allow-whitespace-to-surround-hist-trigger-fi.patch
@@ -0,0 +1,58 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:26 -0500
+Subject: [PATCH 25/32] tracing: Allow whitespace to surround hist trigger
+ filter
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+The existing code only allows for one space before and after the 'if'
+specifying the filter for a hist trigger.  Add code to make that more
+permissive as far as whitespace goes.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4632,7 +4632,7 @@ static int event_hist_trigger_func(struc
+ 	struct event_trigger_ops *trigger_ops;
+ 	struct hist_trigger_data *hist_data;
+ 	bool remove = false;
+-	char *trigger;
++	char *trigger, *p;
+ 	int ret = 0;
+ 
+ 	if (!param)
+@@ -4642,9 +4642,19 @@ static int event_hist_trigger_func(struc
+ 		remove = true;
+ 
+ 	/* separate the trigger from the filter (k:v [if filter]) */
+-	trigger = strsep(&param, " \t");
+-	if (!trigger)
+-		return -EINVAL;
++	trigger = param;
++	p = strstr(param, " if");
++	if (!p)
++		p = strstr(param, "\tif");
++	if (p) {
++		if (p == trigger)
++			return -EINVAL;
++		param = p + 1;
++		param = strstrip(param);
++		*p = '\0';
++		trigger = strstrip(trigger);
++	} else
++		param = NULL;
+ 
+ 	attrs = parse_hist_trigger_attrs(trigger);
+ 	if (IS_ERR(attrs))
+@@ -4694,6 +4704,7 @@ static int event_hist_trigger_func(struc
+ 	}
+ 
+ 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
++
+ 	/*
+ 	 * The above returns on success the # of triggers registered,
+ 	 * but if it didn't register any it returns zero.  Consider no
diff --git a/debian/patches/features/all/rt/0026-tracing-Make-duplicate-count-from-tracing_map-availa.patch b/debian/patches/features/all/rt/0026-tracing-Make-duplicate-count-from-tracing_map-availa.patch
new file mode 100644
index 0000000..9682f4a
--- /dev/null
+++ b/debian/patches/features/all/rt/0026-tracing-Make-duplicate-count-from-tracing_map-availa.patch
@@ -0,0 +1,125 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:27 -0500
+Subject: [PATCH 26/32] tracing: Make duplicate count from tracing_map
+ available
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Though extremely rare, there can be duplicate entries in the tracing
+map.  This isn't normally a problem, as the sorting code makes this
+transparent by merging them during the sort.
+
+It's useful to know however, as a check on that assumption - if a
+non-zero duplicate count is seen more than rarely, it might indicate
+an unexpected change to the algorithm, or a pathological data set.
+
+Add an extra param to tracing_map_sort_entries() and use it to display
+the value in the hist trigger output.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   14 ++++++++------
+ kernel/trace/tracing_map.c       |   12 +++++++++---
+ kernel/trace/tracing_map.h       |    3 ++-
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4011,7 +4011,8 @@ hist_trigger_entry_print(struct seq_file
+ }
+ 
+ static int print_entries(struct seq_file *m,
+-			 struct hist_trigger_data *hist_data)
++			 struct hist_trigger_data *hist_data,
++			 unsigned int *n_dups)
+ {
+ 	struct tracing_map_sort_entry **sort_entries = NULL;
+ 	struct tracing_map *map = hist_data->map;
+@@ -4019,7 +4020,7 @@ static int print_entries(struct seq_file
+ 
+ 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
+ 					     hist_data->n_sort_keys,
+-					     &sort_entries);
++					     &sort_entries, n_dups);
+ 	if (n_entries < 0)
+ 		return n_entries;
+ 
+@@ -4038,6 +4039,7 @@ static void hist_trigger_show(struct seq
+ {
+ 	struct hist_trigger_data *hist_data;
+ 	int n_entries, ret = 0;
++	unsigned int n_dups;
+ 
+ 	if (n > 0)
+ 		seq_puts(m, "\n\n");
+@@ -4047,15 +4049,15 @@ static void hist_trigger_show(struct seq
+ 	seq_puts(m, "#\n\n");
+ 
+ 	hist_data = data->private_data;
+-	n_entries = print_entries(m, hist_data);
++	n_entries = print_entries(m, hist_data, &n_dups);
+ 	if (n_entries < 0) {
+ 		ret = n_entries;
+ 		n_entries = 0;
+ 	}
+ 
+-	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
+-		   (u64)atomic64_read(&hist_data->map->hits),
+-		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
++	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n    Duplicates: %u\n",
++		   (u64)atomic64_read(&hist_data->map->hits), n_entries,
++		   (u64)atomic64_read(&hist_data->map->drops), n_dups);
+ }
+ 
+ static int hist_show(struct seq_file *m, void *v)
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -1084,6 +1084,7 @@ static void sort_secondary(struct tracin
+  * @map: The tracing_map
+  * @sort_key: The sort key to use for sorting
+  * @sort_entries: outval: pointer to allocated and sorted array of entries
++ * @n_dups: outval: pointer to variable receiving a count of duplicates found
+  *
+  * tracing_map_sort_entries() sorts the current set of entries in the
+  * map and returns the list of tracing_map_sort_entries containing
+@@ -1100,13 +1101,16 @@ static void sort_secondary(struct tracin
+  * The client should not hold on to the returned array but should use
+  * it and call tracing_map_destroy_sort_entries() when done.
+  *
+- * Return: the number of sort_entries in the struct tracing_map_sort_entry
+- * array, negative on error
++ * Return: the number of sort_entries in the struct
++ * tracing_map_sort_entry array, negative on error.  If n_dups is
++ * non-NULL, it will receive the number of duplicate entries found
++ * (and merged) during the sort.
+  */
+ int tracing_map_sort_entries(struct tracing_map *map,
+ 			     struct tracing_map_sort_key *sort_keys,
+ 			     unsigned int n_sort_keys,
+-			     struct tracing_map_sort_entry ***sort_entries)
++			     struct tracing_map_sort_entry ***sort_entries,
++			     unsigned int *n_dups)
+ {
+ 	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
+ 			      const struct tracing_map_sort_entry **);
+@@ -1147,6 +1151,8 @@ int tracing_map_sort_entries(struct trac
+ 	if (ret < 0)
+ 		goto free;
+ 	n_entries -= ret;
++	if (n_dups)
++		*n_dups = ret;
+ 
+ 	if (is_key(map, sort_keys[0].field_idx))
+ 		cmp_entries_fn = cmp_entries_key;
+--- a/kernel/trace/tracing_map.h
++++ b/kernel/trace/tracing_map.h
+@@ -286,7 +286,8 @@ extern int
+ tracing_map_sort_entries(struct tracing_map *map,
+ 			 struct tracing_map_sort_key *sort_keys,
+ 			 unsigned int n_sort_keys,
+-			 struct tracing_map_sort_entry ***sort_entries);
++			 struct tracing_map_sort_entry ***sort_entries,
++			 unsigned int *n_dups);
+ 
+ extern void
+ tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
diff --git a/debian/patches/features/all/rt/0027-tracing-Add-cpu-field-for-hist-triggers.patch b/debian/patches/features/all/rt/0027-tracing-Add-cpu-field-for-hist-triggers.patch
new file mode 100644
index 0000000..9d4e4a5
--- /dev/null
+++ b/debian/patches/features/all/rt/0027-tracing-Add-cpu-field-for-hist-triggers.patch
@@ -0,0 +1,133 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:28 -0500
+Subject: [PATCH 27/32] tracing: Add cpu field for hist triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+A common key to use in a histogram is the cpuid - add a new cpu
+'synthetic' field for that purpose.  This field is named cpu rather
+than $cpu or $common_cpu because 'cpu' already exists as a special
+filter field and it makes more sense to match that rather than add
+another name for the same thing.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ Documentation/trace/events.txt   |   18 ++++++++++++++++++
+ kernel/trace/trace_events_hist.c |   30 +++++++++++++++++++++++++++---
+ 2 files changed, 45 insertions(+), 3 deletions(-)
+
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -668,6 +668,24 @@ triggers (you have to use '!' for each o
+   The examples below provide a more concrete illustration of the
+   concepts and typical usage patterns discussed above.
+ 
++  'synthetic' event fields
++  ------------------------
++
++  There are a number of 'synthetic fields' available for use as keys
++  or values in a hist trigger.  These look like and behave as if they
++  were event fields, but aren't actually part of the event's field
++  definition or format file.  They are however available for any
++  event, and can be used anywhere an actual event field could be.
++  'Synthetic' field names are always prefixed with a '$' character to
++  indicate that they're not normal fields (with the exception of
++  'cpu', for compatibility with existing filter usage):
++
++    $common_timestamp      u64 - timestamp (from ring buffer) associated
++                                 with the event, in nanoseconds.  May be
++				 modified by .usecs to have timestamps
++				 interpreted as microseconds.
++    cpu                    int - the cpu on which the event occurred.
++
+ 
+ 6.2 'hist' trigger examples
+ ---------------------------
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -224,6 +224,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_VAR_ONLY		= 8192,
+ 	HIST_FIELD_FL_EXPR		= 16384,
+ 	HIST_FIELD_FL_VAR_REF		= 32768,
++	HIST_FIELD_FL_CPU		= 65536,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -1081,6 +1082,16 @@ static u64 hist_field_timestamp(struct h
+ 	return ts;
+ }
+ 
++static u64 hist_field_cpu(struct hist_field *hist_field,
++			  struct tracing_map_elt *elt,
++			  struct ring_buffer_event *rbe,
++			  void *event)
++{
++	int cpu = raw_smp_processor_id();
++
++	return cpu;
++}
++
+ static struct hist_field *check_var_ref(struct hist_field *hist_field,
+ 					struct hist_trigger_data *var_data,
+ 					unsigned int var_idx)
+@@ -1407,6 +1418,8 @@ static const char *hist_field_name(struc
+ 		field_name = hist_field_name(field->operands[0], ++level);
+ 	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		field_name = "$common_timestamp";
++	else if (field->flags & HIST_FIELD_FL_CPU)
++		field_name = "cpu";
+ 	else if (field->flags & HIST_FIELD_FL_EXPR ||
+ 		 field->flags & HIST_FIELD_FL_VAR_REF)
+ 		field_name = field->name;
+@@ -1848,6 +1861,15 @@ static struct hist_field *create_hist_fi
+ 		goto out;
+ 	}
+ 
++	if (flags & HIST_FIELD_FL_CPU) {
++		hist_field->fn = hist_field_cpu;
++		hist_field->size = sizeof(int);
++		hist_field->type = kstrdup("int", GFP_KERNEL);
++		if (!hist_field->type)
++			goto free;
++		goto out;
++	}
++
+ 	if (WARN_ON_ONCE(!field))
+ 		goto out;
+ 
+@@ -1980,7 +2002,9 @@ parse_field(struct hist_trigger_data *hi
+ 		hist_data->enable_timestamps = true;
+ 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
+ 			hist_data->attrs->ts_in_usecs = true;
+-	} else {
++	} else if (strcmp(field_name, "cpu") == 0)
++		*flags |= HIST_FIELD_FL_CPU;
++	else {
+ 		field = trace_find_event_field(file->event_call, field_name);
+ 		if (!field)
+ 			return ERR_PTR(-EINVAL);
+@@ -3019,7 +3043,6 @@ static int onmatch_create(struct hist_tr
+ 				goto out;
+ 			}
+ 		}
+-
+ 		if (param[0] == '$')
+ 			hist_field = onmatch_find_var(hist_data, data, system,
+ 						      event_name, param);
+@@ -3034,7 +3057,6 @@ static int onmatch_create(struct hist_tr
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+-
+ 		if (check_synth_field(event, hist_field, field_pos) == 0) {
+ 			var_ref = create_var_ref(hist_field);
+ 			if (!var_ref) {
+@@ -4128,6 +4150,8 @@ static void hist_field_print(struct seq_
+ 
+ 	if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		seq_puts(m, "$common_timestamp");
++	else if (hist_field->flags & HIST_FIELD_FL_CPU)
++		seq_puts(m, "cpu");
+ 	else if (field_name)
+ 		seq_printf(m, "%s", field_name);
+ 
diff --git a/debian/patches/features/all/rt/0028-tracing-Add-hist-trigger-support-for-variable-refere.patch b/debian/patches/features/all/rt/0028-tracing-Add-hist-trigger-support-for-variable-refere.patch
new file mode 100644
index 0000000..280d40f
--- /dev/null
+++ b/debian/patches/features/all/rt/0028-tracing-Add-hist-trigger-support-for-variable-refere.patch
@@ -0,0 +1,106 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:29 -0500
+Subject: [PATCH 28/32] tracing: Add hist trigger support for variable
+ reference aliases
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add support for alias=$somevar where alias can be used as
+onmatch($alias).
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace_events_hist.c |   46 ++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 43 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -225,6 +225,7 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_EXPR		= 16384,
+ 	HIST_FIELD_FL_VAR_REF		= 32768,
+ 	HIST_FIELD_FL_CPU		= 65536,
++	HIST_FIELD_FL_ALIAS		= 131072,
+ };
+ 
+ struct hist_trigger_attrs {
+@@ -1414,7 +1415,8 @@ static const char *hist_field_name(struc
+ 
+ 	if (field->field)
+ 		field_name = field->field->name;
+-	else if (field->flags & HIST_FIELD_FL_LOG2)
++	else if (field->flags & HIST_FIELD_FL_LOG2 ||
++		 field->flags & HIST_FIELD_FL_ALIAS)
+ 		field_name = hist_field_name(field->operands[0], ++level);
+ 	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
+ 		field_name = "$common_timestamp";
+@@ -1819,7 +1821,7 @@ static struct hist_field *create_hist_fi
+ 
+ 	hist_field->hist_data = hist_data;
+ 
+-	if (flags & HIST_FIELD_FL_EXPR)
++	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
+ 		goto out; /* caller will populate */
+ 
+ 	if (flags & HIST_FIELD_FL_VAR_REF) {
+@@ -2013,6 +2015,34 @@ parse_field(struct hist_trigger_data *hi
+ 	return field;
+ }
+ 
++static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
++				       struct hist_field *var_ref,
++				       char *var_name)
++{
++	struct hist_field *alias = NULL;
++	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR |
++		HIST_FIELD_FL_VAR_ONLY;
++
++	alias = create_hist_field(hist_data, NULL, flags, var_name);
++	if (!alias)
++		return NULL;
++
++	alias->fn = var_ref->fn;
++	alias->operands[0] = var_ref;
++	alias->var.idx = var_ref->var.idx;
++	alias->var.hist_data = var_ref->hist_data;
++	alias->size = var_ref->size;
++	alias->is_signed = var_ref->is_signed;
++	alias->type = kstrdup(var_ref->type, GFP_KERNEL);
++	if (!alias->type) {
++		kfree(alias->type);
++		destroy_hist_field(alias, 0);
++		return NULL;
++	}
++
++	return alias;
++}
++
+ struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
+ 			      struct trace_event_file *file, char *str,
+ 			      unsigned long *flags, char *var_name)
+@@ -2036,6 +2066,13 @@ struct hist_field *parse_atom(struct his
+ 	if (hist_field) {
+ 		hist_data->var_refs[hist_data->n_var_refs] = hist_field;
+ 		hist_field->var_ref_idx = hist_data->n_var_refs++;
++		if (var_name) {
++			hist_field = create_alias(hist_data, hist_field, var_name);
++			if (!hist_field) {
++				ret = -ENOMEM;
++				goto out;
++			}
++		}
+ 		return hist_field;
+ 	}
+ 
+@@ -4152,8 +4189,11 @@ static void hist_field_print(struct seq_
+ 		seq_puts(m, "$common_timestamp");
+ 	else if (hist_field->flags & HIST_FIELD_FL_CPU)
+ 		seq_puts(m, "cpu");
+-	else if (field_name)
++	else if (field_name) {
++		if (hist_field->flags & HIST_FIELD_FL_ALIAS)
++			seq_putc(m, '$');
+ 		seq_printf(m, "%s", field_name);
++	}
+ 
+ 	if (hist_field->flags) {
+ 		const char *flags_str = get_hist_field_flags(hist_field);
diff --git a/debian/patches/features/all/rt/0029-tracing-Add-last-error-error-facility-for-hist-trigg.patch b/debian/patches/features/all/rt/0029-tracing-Add-last-error-error-facility-for-hist-trigg.patch
new file mode 100644
index 0000000..e83abe4
--- /dev/null
+++ b/debian/patches/features/all/rt/0029-tracing-Add-last-error-error-facility-for-hist-trigg.patch
@@ -0,0 +1,500 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:30 -0500
+Subject: [PATCH 29/32] tracing: Add 'last error' error facility for hist
+ triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+With the addition of variables and actions, it's become necessary to
+provide more detailed error information to users about syntax errors.
+
+Add a 'last error' facility accessible via the erroring event's 'hist'
+file.  Reading the hist file after an error will display more detailed
+information about what went wrong, if information is available.  This
+extended error information will be available until the next hist
+trigger command for that event.
+
+  # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
+  echo: write error: Invalid argument
+
+  # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist
+
+  ERROR: Couldn't yyy: zzz
+  Last command: xxx
+
+Also add specific error messages for variable and action errors.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ Documentation/trace/events.txt   |   19 ++++
+ kernel/trace/trace_events_hist.c |  181 ++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 188 insertions(+), 12 deletions(-)
+
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -686,6 +686,25 @@ triggers (you have to use '!' for each o
+ 				 interpreted as microseconds.
+     cpu                    int - the cpu on which the event occurred.
+ 
++  Extended error information
++  --------------------------
++
++  For some error conditions encountered when invoking a hist trigger
++  command, extended error information is available via the
++  corresponding event's 'hist' file.  Reading the hist file after an
++  error will display more detailed information about what went wrong,
++  if information is available.  This extended error information will
++  be available until the next hist trigger command for that event.
++
++  If available for a given error condition, the extended error
++  information and usage takes the following form:
++
++    # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
++    echo: write error: Invalid argument
++
++    # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist
++    ERROR: Couldn't yyy: zzz
++      Last command: xxx
+ 
+ 6.2 'hist' trigger examples
+ ---------------------------
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -288,6 +288,7 @@ struct hist_trigger_data {
+ 	struct field_var		*max_vars[SYNTH_FIELDS_MAX];
+ 	unsigned int			n_max_vars;
+ 	unsigned int			n_max_var_str;
++	char				*last_err;
+ };
+ 
+ struct synth_field {
+@@ -332,6 +333,83 @@ struct action_data {
+ 	struct hist_field	*onmax_var;
+ };
+ 
++
++static char *hist_err_str;
++static char *last_hist_cmd;
++
++static int hist_err_alloc(void)
++{
++	int ret = 0;
++
++	last_hist_cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
++	hist_err_str = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
++	if (!last_hist_cmd || !hist_err_str)
++		ret = -ENOMEM;
++
++	return ret;
++}
++
++static void last_cmd_set(char *str)
++{
++	if (!last_hist_cmd || !str)
++		return;
++
++	if (strlen(last_hist_cmd) > MAX_FILTER_STR_VAL - 1)
++		return;
++
++	strcpy(last_hist_cmd, str);
++}
++
++static void hist_err(char *str, char *var)
++{
++	int maxlen = MAX_FILTER_STR_VAL - 1;
++
++	if (strlen(hist_err_str))
++		return;
++
++	if (!hist_err_str || !str)
++		return;
++
++	if (!var)
++		var = "";
++
++	if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen)
++		return;
++
++	strcat(hist_err_str, str);
++	strcat(hist_err_str, var);
++}
++
++static void hist_err_event(char *str, char *system, char *event, char *var)
++{
++	char err[MAX_FILTER_STR_VAL];
++
++	if (system && var)
++		sprintf(err, "%s.%s.%s", system, event, var);
++	else if (system)
++		sprintf(err, "%s.%s", system, event);
++	else
++		strcpy(err, var);
++
++	hist_err(str, err);
++}
++
++static void hist_err_clear(void)
++{
++	if (!hist_err_str)
++		return;
++
++	hist_err_str[0] = '\0';
++}
++
++static bool have_hist_err(void)
++{
++	if (hist_err_str && strlen(hist_err_str))
++		return true;
++
++	return false;
++}
++
+ static LIST_HEAD(synth_event_list);
+ static DEFINE_MUTEX(synth_event_mutex);
+ 
+@@ -1954,12 +2032,21 @@ static struct hist_field *create_var_ref
+ 	return ref_field;
+ }
+ 
++static bool is_common_field(char *var_name)
++{
++	if (strncmp(var_name, "$common_timestamp", strlen("$common_timestamp")) == 0)
++		return true;
++
++	return false;
++}
++
+ static struct hist_field *parse_var_ref(char *system, char *event_name,
+ 					char *var_name)
+ {
+ 	struct hist_field *var_field = NULL, *ref_field = NULL;
+ 
+-	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
++	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$' ||
++	    is_common_field(var_name))
+ 		return NULL;
+ 
+ 	var_name++;
+@@ -1968,6 +2055,10 @@ static struct hist_field *parse_var_ref(
+ 	if (var_field)
+ 		ref_field = create_var_ref(var_field);
+ 
++	if (!ref_field)
++		hist_err_event("Couldn't find variable: $",
++			       system, event_name, var_name);
++
+ 	return ref_field;
+ }
+ 
+@@ -2426,8 +2517,11 @@ create_field_var_hist(struct hist_trigge
+ 	char *cmd;
+ 	int ret;
+ 
+-	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX)
++	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
++		hist_err_event("onmatch: Too many field variables defined: ",
++			       system, event_name, field_name);
+ 		return ERR_PTR(-EINVAL);
++	}
+ 
+ 	tr = top_trace_array();
+ 	if (!tr)
+@@ -2435,13 +2529,18 @@ create_field_var_hist(struct hist_trigge
+ 
+ 	file = event_file(system, event_name);
+ 	if (IS_ERR(file)) {
++		hist_err_event("onmatch: Event file not found: ",
++			       system, event_name, field_name);
+ 		ret = PTR_ERR(file);
+ 		return ERR_PTR(ret);
+ 	}
+ 
+ 	hist_data = find_compatible_hist(target_hist_data, file);
+-	if (!hist_data)
++	if (!hist_data) {
++		hist_err_event("onmatch: Matching event histogram not found: ",
++			       system, event_name, field_name);
+ 		return ERR_PTR(-EINVAL);
++	}
+ 
+ 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
+ 	if (!var_hist)
+@@ -2489,6 +2588,8 @@ create_field_var_hist(struct hist_trigge
+ 		kfree(cmd);
+ 		kfree(var_hist->cmd);
+ 		kfree(var_hist);
++		hist_err_event("onmatch: Couldn't create histogram for field: ",
++			       system, event_name, field_name);
+ 		return ERR_PTR(ret);
+ 	}
+ 
+@@ -2500,6 +2601,8 @@ create_field_var_hist(struct hist_trigge
+ 		kfree(cmd);
+ 		kfree(var_hist->cmd);
+ 		kfree(var_hist);
++		hist_err_event("onmatch: Couldn't find synthetic variable: ",
++			       system, event_name, field_name);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+@@ -2636,18 +2739,21 @@ static struct field_var *create_field_va
+ 	int ret = 0;
+ 
+ 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
++		hist_err("Too many field variables defined: ", field_name);
+ 		ret = -EINVAL;
+ 		goto err;
+ 	}
+ 
+ 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
+ 	if (IS_ERR(val)) {
++		hist_err("Couldn't parse field variable: ", field_name);
+ 		ret = PTR_ERR(val);
+ 		goto err;
+ 	}
+ 
+ 	var = create_var(hist_data, file, field_name, val->size, val->type);
+ 	if (IS_ERR(var)) {
++		hist_err("Couldn't create or find variable: ", field_name);
+ 		kfree(val);
+ 		ret = PTR_ERR(var);
+ 		goto err;
+@@ -2772,14 +2878,18 @@ static int onmax_create(struct hist_trig
+ 	int ret = 0;
+ 
+ 	onmax_var_str = data->onmax_var_str;
+-	if (onmax_var_str[0] != '$')
++	if (onmax_var_str[0] != '$') {
++		hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
+ 		return -EINVAL;
++	}
+ 	onmax_var_str++;
+ 
+ 	event_name = trace_event_name(call);
+ 	var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
+-	if (!var_field)
++	if (!var_field) {
++		hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
+ 		return -EINVAL;
++	}
+ 
+ 	flags = HIST_FIELD_FL_VAR_REF;
+ 	ref_field = create_hist_field(hist_data, NULL, flags, NULL);
+@@ -2803,6 +2913,7 @@ static int onmax_create(struct hist_trig
+ 	data->max_var_ref_idx = var_ref_idx;
+ 	max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
+ 	if (IS_ERR(max_var)) {
++		hist_err("onmax: Couldn't create onmax variable: ", "max");
+ 		ret = PTR_ERR(max_var);
+ 		goto out;
+ 	}
+@@ -2815,6 +2926,7 @@ static int onmax_create(struct hist_trig
+ 
+ 		field_var = create_target_field_var(hist_data, NULL, NULL, param);
+ 		if (IS_ERR(field_var)) {
++			hist_err("onmax: Couldn't create field variable: ", param);
+ 			ret = PTR_ERR(field_var);
+ 			kfree(param);
+ 			goto out;
+@@ -2847,6 +2959,7 @@ static int parse_action_params(char *par
+ 
+ 		param = strstrip(param);
+ 		if (strlen(param) < 2) {
++			hist_err("Invalid action param: ", param);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+@@ -3004,6 +3117,9 @@ onmatch_find_var(struct hist_trigger_dat
+ 		hist_field = find_event_var(system, event, var);
+ 	}
+ 
++	if (!hist_field)
++		hist_err_event("onmatch: Couldn't find onmatch param: $", system, event, var);
++
+ 	return hist_field;
+ }
+ 
+@@ -3055,6 +3171,7 @@ static int onmatch_create(struct hist_tr
+ 
+ 	event = find_synth_event(data->synth_event_name);
+ 	if (!event) {
++		hist_err("onmatch: Couldn't find synthetic event: ", data->synth_event_name);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3094,6 +3211,7 @@ static int onmatch_create(struct hist_tr
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
++
+ 		if (check_synth_field(event, hist_field, field_pos) == 0) {
+ 			var_ref = create_var_ref(hist_field);
+ 			if (!var_ref) {
+@@ -3108,12 +3226,15 @@ static int onmatch_create(struct hist_tr
+ 			continue;
+ 		}
+ 
++		hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
++			       system, event_name, param);
+ 		kfree(p);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+ 	if (field_pos != event->n_fields) {
++		hist_err("onmatch: Param count doesn't match synthetic event field count: ", event->name);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3141,31 +3262,44 @@ static struct action_data *onmatch_parse
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	match_event = strsep(&str, ")");
+-	if (!match_event || !str)
++	if (!match_event || !str) {
++		hist_err("onmatch: Missing closing paren: ", match_event);
+ 		goto free;
++	}
+ 
+ 	match_event_system = strsep(&match_event, ".");
+-	if (!match_event)
++	if (!match_event) {
++		hist_err("onmatch: Missing subsystem for match event: ", match_event_system);
+ 		goto free;
++	}
+ 
+-	if (IS_ERR(event_file(match_event_system, match_event)))
++	if (IS_ERR(event_file(match_event_system, match_event))) {
++		hist_err_event("onmatch: Invalid subsystem or event name: ",
++			       match_event_system, match_event, NULL);
+ 		goto free;
++	}
+ 
+ 	data->match_event = kstrdup(match_event, GFP_KERNEL);
+ 	data->match_event_system = kstrdup(match_event_system, GFP_KERNEL);
+ 
+ 	strsep(&str, ".");
+-	if (!str)
++	if (!str) {
++		hist_err("onmatch: Missing . after onmatch(): ", str);
+ 		goto free;
++	}
+ 
+ 	synth_event_name = strsep(&str, "(");
+-	if (!synth_event_name || !str)
++	if (!synth_event_name || !str) {
++		hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
+ 		goto free;
++	}
+ 	data->synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
+ 
+ 	params = strsep(&str, ")");
+-	if (!params || !str || (str && strlen(str)))
++	if (!params || !str || (str && strlen(str))) {
++		hist_err("onmatch: Missing closing paramlist paren: ", params);
+ 		goto free;
++	}
+ 
+ 	ret = parse_action_params(params, data);
+ 	if (ret)
+@@ -3217,6 +3351,7 @@ static int create_val_field(struct hist_
+ 	if (field_str && var_name) {
+ 		if (find_var(file, var_name) &&
+ 		    !hist_data->remove) {
++			hist_err("Variable already defined: ", var_name);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+@@ -3224,6 +3359,7 @@ static int create_val_field(struct hist_
+ 		flags |= HIST_FIELD_FL_VAR;
+ 		hist_data->n_vars++;
+ 		if (hist_data->n_vars > TRACING_MAP_VARS_MAX) {
++			hist_err("Too many variables defined: ", var_name);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+@@ -3234,6 +3370,7 @@ static int create_val_field(struct hist_
+ 		field_str = var_name;
+ 		var_name = NULL;
+ 	} else {
++		hist_err("Malformed assignment: ", var_name);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3248,6 +3385,7 @@ static int create_val_field(struct hist_
+ 		hist_field = parse_atom(hist_data, file, field_str,
+ 					&flags, var_name);
+ 		if (IS_ERR(hist_field)) {
++			hist_err("Unable to parse atom: ", field_str);
+ 			ret = PTR_ERR(hist_field);
+ 			goto out;
+ 		}
+@@ -4138,6 +4276,11 @@ static int hist_show(struct seq_file *m,
+ 			hist_trigger_show(m, data, n++);
+ 	}
+ 
++	if (have_hist_err()) {
++		seq_printf(m, "\nERROR: %s\n", hist_err_str);
++		seq_printf(m, "  Last command: %s\n", last_hist_cmd);
++	}
++
+  out_unlock:
+ 	mutex_unlock(&event_mutex);
+ 
+@@ -4509,6 +4652,7 @@ static int hist_register_trigger(char *g
+ 		if (named_data) {
+ 			if (!hist_trigger_match(data, named_data, named_data,
+ 						true)) {
++				hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data->attrs->name);
+ 				ret = -EINVAL;
+ 				goto out;
+ 			}
+@@ -4528,13 +4672,16 @@ static int hist_register_trigger(char *g
+ 				test->paused = false;
+ 			else if (hist_data->attrs->clear)
+ 				hist_clear(test);
+-			else
++			else {
++				hist_err("Hist trigger already exists", NULL);
+ 				ret = -EEXIST;
++			}
+ 			goto out;
+ 		}
+ 	}
+  new:
+ 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
++		hist_err("Can't clear or continue a nonexistent hist trigger", NULL);
+ 		ret = -ENOENT;
+ 		goto out;
+ 	}
+@@ -4701,6 +4848,11 @@ static int event_hist_trigger_func(struc
+ 	char *trigger, *p;
+ 	int ret = 0;
+ 
++	if (glob && strlen(glob)) {
++		last_cmd_set(param);
++		hist_err_clear();
++	}
++
+ 	if (!param)
+ 		return -EINVAL;
+ 
+@@ -4804,6 +4956,9 @@ static int event_hist_trigger_func(struc
+ 	/* Just return zero, not the number of registered triggers */
+ 	ret = 0;
+  out:
++	if (ret == 0)
++		hist_err_clear();
++
+ 	return ret;
+  out_unreg:
+ 	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+@@ -5002,6 +5157,8 @@ static __init int trace_events_hist_init
+ 		goto err;
+ 	}
+ 
++	hist_err_alloc();
++
+ 	return err;
+  err:
+ 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
diff --git a/debian/patches/features/all/rt/0030-tracing-Add-inter-event-hist-trigger-Documentation.patch b/debian/patches/features/all/rt/0030-tracing-Add-inter-event-hist-trigger-Documentation.patch
new file mode 100644
index 0000000..8cbb091
--- /dev/null
+++ b/debian/patches/features/all/rt/0030-tracing-Add-inter-event-hist-trigger-Documentation.patch
@@ -0,0 +1,403 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:31 -0500
+Subject: [PATCH 30/32] tracing: Add inter-event hist trigger Documentation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Add background and details on inter-event hist triggers, including
+hist variables, synthetic events, and actions.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ Documentation/trace/events.txt |  376 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 376 insertions(+)
+
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -571,6 +571,7 @@ triggers (you have to use '!' for each o
+ 	.sym-offset display an address as a symbol and offset
+ 	.syscall    display a syscall id as a system call name
+ 	.execname   display a common_pid as a program name
++	.usecs      display a $common_timestamp in microseconds
+ 
+   Note that in general the semantics of a given field aren't
+   interpreted when applying a modifier to it, but there are some
+@@ -2101,3 +2102,378 @@ triggers (you have to use '!' for each o
+         Hits: 489
+         Entries: 7
+         Dropped: 0
++
++6.3 Inter-event hist triggers
++-----------------------------
++
++Inter-event hist triggers are hist triggers that combine values from
++one or more other events and create a histogram using that data.  Data
++from an inter-event histogram can in turn become the source for
++further combined histograms, thus providing a chain of related
++histograms, which is important for some applications.
++
++The most important example of an inter-event quantity that can be used
++in this manner is latency, which is simply a difference in timestamps
++between two events (although trace events don't have an externally
++visible timestamp field, the inter-event hist trigger support adds a
++pseudo-field to all events named '$common_timestamp' which can be used
++as if it were an actual event field).  Although latency is the most
++important inter-event quantity, note that because the support is
++completely general across the trace event subsystem, any event field
++can be used in an inter-event quantity.
++
++An example of a histogram that combines data from other histograms
++into a useful chain would be a 'wakeupswitch latency' histogram that
++combines a 'wakeup latency' histogram and a 'switch latency'
++histogram.
++
++Normally, a hist trigger specification consists of a (possibly
++compound) key along with one or more numeric values, which are
++continually updated sums associated with that key.  A histogram
++specification in this case consists of individual key and value
++specifications that refer to trace event fields associated with a
++single event type.
++
++The inter-event hist trigger extension allows fields from multiple
++events to be referenced and combined into a multi-event histogram
++specification.  In support of this overall goal, a few enabling
++features have been added to the hist trigger support:
++
++  - In order to compute an inter-event quantity, a value from one
++    event needs to be saved and then referenced from another event.  This
++    requires the introduction of support for histogram 'variables'.
++
++  - The computation of inter-event quantities and their combination
++    require some minimal amount of support for applying simple
++    expressions to variables (+ and -).
++
++  - A histogram consisting of inter-event quantities isn't logically a
++    histogram on either event (so having the 'hist' file for either
++    event host the histogram output doesn't really make sense).  To
++    address the idea that the histogram is associated with a
++    combination of events, support is added allowing the creation of
++    'synthetic' events that are events derived from other events.
++    These synthetic events are full-fledged events just like any other
++    and can be used as such, as for instance to create the
++    'combination' histograms mentioned previously.
++
++  - A set of 'actions' can be associated with histogram entries -
++    these can be used to generate the previously mentioned synthetic
++    events, but can also be used for other purposes, such as for
++    example saving context when a 'max' latency has been hit.
++
++  - Trace events don't have a 'timestamp' associated with them, but
++    there is an implicit timestamp saved along with an event in the
++    underlying ftrace ring buffer.  This timestamp is now exposed as a
++    synthetic field named '$common_timestamp' which can be used in
++    histograms as if it were any other event field.  Note that it has
++    a '$' prefixed to it - this is meant to indicate that it isn't an
++    actual field in the trace format but rather is a synthesized value
++    that nonetheless can be used as if it were an actual field.  By
++    default it is in units of nanoseconds; appending '.usecs' to a
++    common_timestamp field changes the units to microseconds.
++
++These features are decribed in more detail in the following sections.
++
++6.3.1 Histogram Variables
++-------------------------
++
++Variables are simply named locations used for saving and retrieving
++values between matching events.  A 'matching' event is defined as an
++event that has a matching key - if a variable is saved for a histogram
++entry corresponding to that key, any subsequent event with a matching
++key can access that variable.
++
++A variable's value is normally available to any subsequent event until
++it is set to something else by a subsequent event.  The one exception
++to that rule is that any variable used in an expression is essentially
++'read-once' - once it's used by an expression in a subsequent event,
++it's reset to its 'unset' state, which means it can't be used again
++unless it's set again.  This ensures not only that an event doesn't
++use an uninitialized variable in a calculation, but that that variable
++is used only once and not for any unrelated subsequent match.
++
++The basic syntax for saving a variable is to simply prefix a unique
++variable name not corresponding to any keyword along with an '=' sign
++to any event field.
++
++Either keys or values can be saved and retrieved in this way.  This
++creates a variable named 'ts0' for a histogram entry with the key
++'next_pid':
++
++  # echo 'hist:keys=next_pid:vals=ts0=$common_timestamp ...' >> event/trigger
++
++The ts0 variable can be accessed by any subsequent event having the
++same pid as 'next_pid'.
++
++Variable references are formed by prepending the variable name with
++the '$' sign.  Thus for example, the ts0 variable above would be
++referenced as '$ts0' in subsequent expressions.
++
++Because 'vals=' is used, the $common_timestamp variable value above
++will also be summed as a normal histogram value would (though for a
++timestamp it makes little sense).
++
++The below shows that a key value can also be saved in the same way:
++
++  # echo 'hist:key=timer_pid=common_pid ...' >> event/trigger
++
++If a variable isn't a key variable or prefixed with 'vals=', the
++associated event field will be saved in a variable but won't be summed
++as a value:
++
++  # echo 'hist:keys=next_pid:ts1=$common_timestamp ...' >> event/trigger
++
++Multiple variables can be assigned at the same time.  The below would
++result in both ts0 and b being created as variables, with both
++common_timestamp and field1 additionally being summed as values:
++
++  # echo 'hist:keys=pid:vals=ts0=$common_timestamp,b=field1 ...' >> event/trigger
++
++Any number of variables not bound to a 'vals=' prefix can also be
++assigned by simply separating them with colons.  Below is the same
++thing but without the values being summed in the histogram:
++
++  # echo 'hist:keys=pid:ts0=$common_timestamp:b=field1 ...' >> event/trigger
++
++Variables set as above can be referenced and used in expressions on
++another event.
++
++For example, here's how a latency can be calculated:
++
++  # echo 'hist:keys=pid,prio:ts0=$common_timestamp ...' >> event1/trigger
++  # echo 'hist:keys=next_pid:wakeup_lat=$common_timestamp-$ts0 ...' >> event2/trigger
++
++In the first line above, the event's timestamp is saved into the
++variable ts0.  In the next line, ts0 is subtracted from the second
++event's timestamp to produce the latency, which is then assigned into
++yet another variable, 'wakeup_lat'.  The hist trigger below in turn
++makes use of the wakeup_lat variable to compute a combined latency
++using the same key and variable from yet another event:
++
++  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
++
++6.3.2 Synthetic Events
++----------------------
++
++Synthetic events are user-defined events generated from hist trigger
++variables or fields associated with one or more other events.  Their
++purpose is to provide a mechanism for displaying data spanning
++multiple events consistent with the existing and already familiar
++usage for normal events.
++
++To define a synthetic event, the user writes a simple specification
++consisting of the name of the new event along with one or more
++variables and their types, which can be any valid field type,
++separated by semicolons, to the tracing/synthetic_events file.
++
++For instance, the following creates a new event named 'wakeup_latency'
++with 3 fields: lat, pid, and prio.  Each of those fields is simply a
++variable reference to a variable on another event:
++
++  # echo 'wakeup_latency \
++          u64 lat; \
++          pid_t pid; \
++	  int prio' >> \
++	  /sys/kernel/debug/tracing/synthetic_events
++
++Reading the tracing/synthetic_events file lists all the currently
++defined synthetic events, in this case the event defined above:
++
++  # cat /sys/kernel/debug/tracing/synthetic_events
++    wakeup_latency u64 lat; pid_t pid; int prio
++
++An existing synthetic event definition can be removed by prepending
++the command that defined it with a '!':
++
++  # echo '!wakeup_latency u64 lat pid_t pid int prio' >> \
++    /sys/kernel/debug/tracing/synthetic_events
++
++At this point, there isn't yet an actual 'wakeup_latency' event
++instantiated in the event subsystem - for this to happen, a 'hist
++trigger action' needs to be instantiated and bound to actual fields
++and variables defined on other events (see Section 6.3.3 below).
++
++Once that is done, an event instance is created, and a histogram can
++be defined using it:
++
++  # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
++        /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
++
++The new event is created under the tracing/events/synthetic/ directory
++and looks and behaves just like any other event:
++
++  # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
++        enable  filter  format  hist  id  trigger
++
++Like any other event, once a histogram is enabled for the event, the
++output can be displayed by reading the event's 'hist' file.
++
++6.3.3 Hist trigger 'actions'
++----------------------------
++
++A hist trigger 'action' is a function that's executed whenever a
++histogram entry is added or updated.
++
++The default 'action' if no special function is explicitly specified is
++as it always has been, to simply update the set of values associated
++with an entry.  Some applications, however, may want to perform
++additional actions at that point, such as generate another event, or
++compare and save a maximum.
++
++The following additional actions are available.  To specify an action
++for a given event, simply specify the action between colons in the
++hist trigger specification.
++
++  - onmatch(matching.event).<synthetic_event_name>(param list)
++
++    The 'onmatch(matching.event).<synthetic_event_name>(params)' hist
++    trigger action is invoked whenever an event matches and the
++    histogram entry would be added or updated.  It causes the named
++    synthetic event to be generated with the values given in the
++    'param list'.  The result is the generation of a synthetic event
++    that consists of the values contained in those variables at the
++    time the invoking event was hit.
++
++    The 'param list' consists of one or more parameters which may be
++    either variables or fields defined on either the 'matching.event'
++    or the target event.  The variables or fields specified in the
++    param list may be either fully-qualified or unqualified.  If a
++    variable is specified as unqualified, it must be unique between
++    the two events.  A field name used as a param can be unqualified
++    if it refers to the target event, but must be fully qualified if
++    it refers to the matching event.  A fully-qualified name is of the
++    form 'system.event_name.$var_name' or 'system.event_name.field'.
++
++    The 'matching.event' specification is simply the fully qualified
++    event name of the event that matches the target event for the
++    onmatch() functionality, in the form 'system.event_name'.
++
++    Finally, the number and type of variables/fields in the 'param
++    list' must match the number and types of the fields in the
++    synthetic event being generated.
++
++    As an example the below defines a simple synthetic event and uses
++    a variable defined on the sched_wakeup_new event as a parameter
++    when invoking the synthetic event.  Here we define the synthetic
++    event:
++
++    # echo 'wakeup_new_test pid_t pid' >> \
++           /sys/kernel/debug/tracing/synthetic_events
++
++    # cat /sys/kernel/debug/tracing/synthetic_events
++          wakeup_new_test pid_t pid
++
++    The following hist trigger both defines the missing testpid
++    variable and specifies an onmatch() action that generates a
++    wakeup_new_test synthetic event whenever a sched_wakeup_new event
++    occurs, which because of the 'if comm == "cyclictest"' filter only
++    happens when the executable is cyclictest:
++
++    # echo 'hist:keys=testpid=pid:onmatch(sched.sched_wakeup_new).\
++            wakeup_new_test($testpid) if comm=="cyclictest"' >> \
++            /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
++
++    Creating and displaying a histogram based on those events is now
++    just a matter of using the fields and new synthetic event in the
++    tracing/events/synthetic directory, as usual:
++
++    # echo 'hist:keys=pid:sort=pid' >> \
++           /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
++
++    Running 'cyclictest' should cause wakeup_new events to generate
++    wakeup_new_test synthetic events which should result in histogram
++    output in the wakeup_new_test event's hist file:
++
++    # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
++
++    A more typical usage would be to use two events to calculate a
++    latency.  The following example uses a set of hist triggers to
++    produce a 'wakeup_latency' histogram:
++
++    First, we define a 'wakeup_latency' synthetic event:
++
++    # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
++            /sys/kernel/debug/tracing/synthetic_events
++
++    Next, we specify that whenever we see a sched_wakeup event for a
++    cyclictest thread, save the timestamp in a 'ts0' variable:
++
++    # echo 'hist:keys=saved_pid=pid:ts0=$common_timestamp.usecs \
++            if comm=="cyclictest"' >> \
++	    /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
++
++    Then, when the corresponding thread is actually scheduled onto the
++    CPU by a sched_switch event, calculate the latency and use that
++    along with another variable and an event field to generate a
++    wakeup_latency synthetic event:
++
++    # echo 'hist:keys=next_pid:wakeup_lat=$common_timestamp.usecs-$ts0:\
++            onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,\
++	            $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
++	    /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
++
++    We also need to create a histogram on the wakeup_latency synthetic
++    event in order to aggregate the generated synthetic event data:
++
++    # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
++            /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
++
++    Finally, once we've run cyclictest to actually generate some
++    events, we can see the output by looking at the wakeup_latency
++    synthetic event's hist file:
++
++    # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
++
++  - onmax(var).save(field,...)
++
++    The 'onmax(var).save(field,...)' hist trigger action is invoked
++    whenever the value of 'var' associated with a histogram entry
++    exceeds the current maximum contained in that variable.
++
++    The end result is that the trace event fields specified as the
++    onmax.save() params will be saved if 'var' exceeds the current
++    maximum for that hist trigger entry.  This allows context from the
++    event that exhibited the new maximum to be saved for later
++    reference.  When the histogram is displayed, additional fields
++    displaying the saved values will be printed.
++
++    As an example the below defines a couple of hist triggers, one for
++    sched_wakeup and another for sched_switch, keyed on pid.  Whenever
++    a sched_wakeup occurs, the timestamp is saved in the entry
++    corresponding to the current pid, and when the scheduler switches
++    back to that pid, the timestamp difference is calculated.  If the
++    resulting latency, stored in wakeup_lat, exceeds the current
++    maximum latency, the values specified in the save() fields are
++    recorded:
++
++    # echo 'hist:keys=pid:ts0=$common_timestamp.usecs \
++            if comm=="cyclictest"' >> \
++            /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
++
++    # echo 'hist:keys=next_pid:\
++            wakeup_lat=$common_timestamp.usecs-$ts0:\
++            onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
++            if next_comm=="cyclictest"' >> \
++            /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
++
++    When the histogram is displayed, the max value and the saved
++    values corresponding to the max are displayed following the rest
++    of the fields:
++
++    # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
++      { next_pid:       2255 } hitcount:        239
++        common_timestamp-ts0:          0
++        max:         27
++	next_comm: cyclictest
++        prev_pid:          0  prev_prio:        120  prev_comm: swapper/1
++
++      { next_pid:       2256 } hitcount:       2355
++        common_timestamp-ts0: 0
++        max:         49  next_comm: cyclictest
++        prev_pid:          0  prev_prio:        120  prev_comm: swapper/0
++
++      Totals:
++          Hits: 12970
++          Entries: 2
++          Dropped: 0
diff --git a/debian/patches/features/all/rt/0031-tracing-Make-tracing_set_clock-non-static.patch b/debian/patches/features/all/rt/0031-tracing-Make-tracing_set_clock-non-static.patch
new file mode 100644
index 0000000..9ab9021
--- /dev/null
+++ b/debian/patches/features/all/rt/0031-tracing-Make-tracing_set_clock-non-static.patch
@@ -0,0 +1,40 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:32 -0500
+Subject: [PATCH 31/32] tracing: Make tracing_set_clock() non-static
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Allow tracing code outside of trace.c to access tracing_set_clock().
+
+Some applications may require a particular clock in order to function
+properly, such as latency calculations.
+
+Also, add an accessor returning the current clock string.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/trace/trace.c |    2 +-
+ kernel/trace/trace.h |    1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5887,7 +5887,7 @@ static int tracing_clock_show(struct seq
+ 	return 0;
+ }
+ 
+-static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
++int tracing_set_clock(struct trace_array *tr, const char *clockstr)
+ {
+ 	int i;
+ 
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -279,6 +279,7 @@ extern int trace_array_get(struct trace_
+ extern void trace_array_put(struct trace_array *tr);
+ 
+ extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
++extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
+ 
+ extern bool trace_clock_in_ns(struct trace_array *tr);
+ 
diff --git a/debian/patches/features/all/rt/0032-tracing-Add-a-clock-attribute-for-hist-triggers.patch b/debian/patches/features/all/rt/0032-tracing-Add-a-clock-attribute-for-hist-triggers.patch
new file mode 100644
index 0000000..db23ceb
--- /dev/null
+++ b/debian/patches/features/all/rt/0032-tracing-Add-a-clock-attribute-for-hist-triggers.patch
@@ -0,0 +1,116 @@
+From: Tom Zanussi <tom.zanussi at linux.intel.com>
+Date: Mon, 26 Jun 2017 17:49:33 -0500
+Subject: [PATCH 32/32] tracing: Add a clock attribute for hist triggers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+The default clock if timestamps are used in a histogram is "global".
+If timestamps aren't used, the clock is irrelevant.
+
+Use the "clock=" param only if you want to override the default
+"global" clock for a histogram with timestamps.
+
+Signed-off-by: Tom Zanussi <tom.zanussi at linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ Documentation/trace/events.txt   |    9 +++++++++
+ kernel/trace/trace_events_hist.c |   34 +++++++++++++++++++++++++++++++---
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -2173,6 +2173,15 @@ specification.  In support of this overa
+     default it is in units of nanoseconds; appending '.usecs' to a
+     common_timestamp field changes the units to microseconds.
+ 
++A note on inter-event timestamps: If $common_timestamp is used in a
++histogram, the trace buffer is automatically switched over to using
++absolute timestamps and the "global" trace clock, in order to avoid
++bogus timestamp differences with other clocks that aren't coherent
++across CPUs.  This can be overridden by specifying one of the other
++trace clocks instead, using the "clock=XXX" hist trigger attribute,
++where XXX is any of the clocks listed in the tracing/trace_clock
++pseudo-file.
++
+ These features are decribed in more detail in the following sections.
+ 
+ 6.3.1 Histogram Variables
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -233,6 +233,7 @@ struct hist_trigger_attrs {
+ 	char		*vals_str;
+ 	char		*sort_key_str;
+ 	char		*name;
++	char		*clock;
+ 	bool		pause;
+ 	bool		cont;
+ 	bool		clear;
+@@ -1586,6 +1587,7 @@ static void destroy_hist_trigger_attrs(s
+ 	kfree(attrs->sort_key_str);
+ 	kfree(attrs->keys_str);
+ 	kfree(attrs->vals_str);
++	kfree(attrs->clock);
+ 	kfree(attrs);
+ }
+ 
+@@ -1625,7 +1627,16 @@ static int parse_assignment(char *str, s
+ 		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+ 	else if (strncmp(str, "name=", strlen("name=")) == 0)
+ 		attrs->name = kstrdup(str, GFP_KERNEL);
+-	else if (strncmp(str, "size=", strlen("size=")) == 0) {
++	else if (strncmp(str, "clock=", strlen("clock=")) == 0) {
++		strsep(&str, "=");
++		if (!str) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		str = strstrip(str);
++		attrs->clock = kstrdup(str, GFP_KERNEL);
++	} else if (strncmp(str, "size=", strlen("size=")) == 0) {
+ 		int map_bits = parse_map_size(str);
+ 
+ 		if (map_bits < 0) {
+@@ -1688,6 +1699,12 @@ static struct hist_trigger_attrs *parse_
+ 		goto free;
+ 	}
+ 
++	if (!attrs->clock) {
++		attrs->clock = kstrdup("global", GFP_KERNEL);
++		if (!attrs->clock)
++			goto free;
++	}
++
+ 	return attrs;
+  free:
+ 	destroy_hist_trigger_attrs(attrs);
+@@ -4437,6 +4454,8 @@ static int event_hist_trigger_print(stru
+ 			seq_puts(m, ".descending");
+ 	}
+ 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
++	if (hist_data->enable_timestamps)
++		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
+ 
+ 	print_actions_spec(m, hist_data);
+ 
+@@ -4702,10 +4721,19 @@ static int hist_register_trigger(char *g
+ 			goto out;
+ 	}
+ 
+-	ret++;
++	if (hist_data->enable_timestamps) {
++		char *clock = hist_data->attrs->clock;
++
++		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
++		if (ret) {
++			hist_err("Couldn't set trace_clock: ", clock);
++			goto out;
++		}
+ 
+-	if (hist_data->enable_timestamps)
+ 		tracing_set_time_stamp_abs(file->tr, true);
++	}
++
++	ret++;
+  out:
+ 	return ret;
+ }
diff --git a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index d5d7447..f73bf29 100644
--- a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
 From: "Yadi.hu" <yadi.hu at windriver.com>
 Date: Wed, 10 Dec 2014 10:32:09 +0800
 Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Probably happens on all ARM, with
 CONFIG_PREEMPT_RT_FULL
diff --git a/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch b/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
index 3b6ec06..5c807f3 100644
--- a/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
+++ b/debian/patches/features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
@@ -1,8 +1,7 @@
-From 5ffb5cace8448c787c9f44e16a7b12f8c2866848 Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 4 Apr 2017 17:43:55 +0200
 Subject: [PATCH] CPUFREQ: Loongson2: drop set_cpus_allowed_ptr()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It is pure mystery to me why we need to be on a specific CPU while
 looking up a value in an array.
diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
index d765863..771b62f 100644
--- a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 21 Mar 2013 19:01:05 +0100
 Subject: printk: Drop the logbuf_lock more often
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The lock is hold with irgs off. The latency drops 500us+ on my arm bugs
 with a "full" buffer after executing "dmesg" on the shell.
diff --git a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 4be9dfa..b03b34c 100644
--- a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:01 -0600
 Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
 the vgic and timer states to prevent the calling task from migrating to
diff --git a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 69263e9..faee188 100644
--- a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Marcelo Tosatti <mtosatti at redhat.com>
 Date: Wed, 8 Apr 2015 20:33:25 -0300
 Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since lapic timer handler only wakes up a simple waitqueue,
 it can be executed from hardirq context.
diff --git a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 59417c2..3d65b1a 100644
--- a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -5,7 +5,7 @@ Cc:     Anna Schumaker <anna.schumaker at netapp.com>,
         linux-nfs at vger.kernel.org, linux-kernel at vger.kernel.org,
         tglx at linutronix.de
 Subject: NFSv4: replace seqcount_t with a seqlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
 because it maps to preempt_disable() in -RT which I can't have at this
diff --git a/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch b/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
deleted file mode 100644
index 82115e6..0000000
--- a/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
+++ /dev/null
@@ -1,162 +0,0 @@
-From 8adeebf2a94f4625c39c25ec461d0d2ab623b3ad Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Wed, 14 Jun 2017 21:29:16 +0200
-Subject: [PATCH] Revert "random: invalidate batched entropy after crng init"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.
-
-In -RT lockdep complains with
-| -> #1 (primary_crng.lock){+.+...}:
-|        lock_acquire+0xb5/0x2b0
-|        rt_spin_lock+0x46/0x50
-|        _extract_crng+0x39/0xa0
-|        extract_crng+0x3a/0x40
-|        get_random_u64+0x17a/0x200
-|        cache_random_seq_create+0x51/0x100
-|        init_cache_random_seq+0x35/0x90
-|        __kmem_cache_create+0xd3/0x560
-|        create_boot_cache+0x8c/0xb2
-|        create_kmalloc_cache+0x54/0x9f
-|        create_kmalloc_caches+0xe3/0xfd
-|        kmem_cache_init+0x14f/0x1f0
-|        start_kernel+0x1e7/0x3b3
-|        x86_64_start_reservations+0x2a/0x2c
-|        x86_64_start_kernel+0x13d/0x14c
-|        verify_cpu+0x0/0xfc
-|
-| -> #0 (batched_entropy_reset_lock){+.+...}:
-|        __lock_acquire+0x11b4/0x1320
-|        lock_acquire+0xb5/0x2b0
-|        rt_write_lock+0x26/0x40
-|        rt_write_lock_irqsave+0x9/0x10
-|        invalidate_batched_entropy+0x28/0xb0
-|        crng_fast_load+0xb5/0xe0
-|        add_interrupt_randomness+0x16c/0x1a0
-|        irq_thread+0x15c/0x1e0
-|        kthread+0x112/0x150
-|        ret_from_fork+0x31/0x40
-
-so revert this for now and check later with upstream.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- drivers/char/random.c |   37 -------------------------------------
- 1 file changed, 37 deletions(-)
-
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -1,9 +1,6 @@
- /*
-  * random.c -- A strong random number generator
-  *
-- * Copyright (C) 2017 Jason A. Donenfeld <Jason at zx2c4.com>. All
-- * Rights Reserved.
-- *
-  * Copyright Matt Mackall <mpm at selenic.com>, 2003, 2004, 2005
-  *
-  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
-@@ -765,8 +762,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
- static struct crng_state **crng_node_pool __read_mostly;
- #endif
- 
--static void invalidate_batched_entropy(void);
--
- static void crng_initialize(struct crng_state *crng)
- {
- 	int		i;
-@@ -804,7 +799,6 @@ static int crng_fast_load(const char *cp
- 		cp++; crng_init_cnt++; len--;
- 	}
- 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
--		invalidate_batched_entropy();
- 		crng_init = 1;
- 		wake_up_interruptible(&crng_init_wait);
- 		pr_notice("random: fast init done\n");
-@@ -842,7 +836,6 @@ static void crng_reseed(struct crng_stat
- 	memzero_explicit(&buf, sizeof(buf));
- 	crng->init_time = jiffies;
- 	if (crng == &primary_crng && crng_init < 2) {
--		invalidate_batched_entropy();
- 		crng_init = 2;
- 		process_random_ready_list();
- 		wake_up_interruptible(&crng_init_wait);
-@@ -2023,7 +2016,6 @@ struct batched_entropy {
- 	};
- 	unsigned int position;
- };
--static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
- 
- /*
-  * Get a random word for internal kernel use only. The quality of the random
-@@ -2034,8 +2026,6 @@ static DEFINE_PER_CPU(struct batched_ent
- u64 get_random_u64(void)
- {
- 	u64 ret;
--	bool use_lock = crng_init < 2;
--	unsigned long flags;
- 	struct batched_entropy *batch;
- 
- #if BITS_PER_LONG == 64
-@@ -2048,15 +2038,11 @@ u64 get_random_u64(void)
- #endif
- 
- 	batch = &get_cpu_var(batched_entropy_u64);
--	if (use_lock)
--		read_lock_irqsave(&batched_entropy_reset_lock, flags);
- 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- 		extract_crng((u8 *)batch->entropy_u64);
- 		batch->position = 0;
- 	}
- 	ret = batch->entropy_u64[batch->position++];
--	if (use_lock)
--		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- 	put_cpu_var(batched_entropy_u64);
- 	return ret;
- }
-@@ -2066,45 +2052,22 @@ static DEFINE_PER_CPU(struct batched_ent
- u32 get_random_u32(void)
- {
- 	u32 ret;
--	bool use_lock = crng_init < 2;
--	unsigned long flags;
- 	struct batched_entropy *batch;
- 
- 	if (arch_get_random_int(&ret))
- 		return ret;
- 
- 	batch = &get_cpu_var(batched_entropy_u32);
--	if (use_lock)
--		read_lock_irqsave(&batched_entropy_reset_lock, flags);
- 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- 		extract_crng((u8 *)batch->entropy_u32);
- 		batch->position = 0;
- 	}
- 	ret = batch->entropy_u32[batch->position++];
--	if (use_lock)
--		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- 	put_cpu_var(batched_entropy_u32);
- 	return ret;
- }
- EXPORT_SYMBOL(get_random_u32);
- 
--/* It's important to invalidate all potential batched entropy that might
-- * be stored before the crng is initialized, which we can do lazily by
-- * simply resetting the counter to zero so that it's re-extracted on the
-- * next usage. */
--static void invalidate_batched_entropy(void)
--{
--	int cpu;
--	unsigned long flags;
--
--	write_lock_irqsave(&batched_entropy_reset_lock, flags);
--	for_each_possible_cpu (cpu) {
--		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
--		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
--	}
--	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
--}
--
- /**
-  * randomize_page - Generate a random, page aligned address
-  * @start:	The smallest acceptable address the caller will take.
diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index ea33cbf..26a1e53 100644
--- a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Wed, 13 Feb 2013 09:26:05 -0500
 Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We hit the following bug with 3.6-rt:
 
diff --git a/debian/patches/features/all/rt/add_migrate_disable.patch b/debian/patches/features/all/rt/add_migrate_disable.patch
index 47a9936..646f822 100644
--- a/debian/patches/features/all/rt/add_migrate_disable.patch
+++ b/debian/patches/features/all/rt/add_migrate_disable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Sat, 27 May 2017 19:02:06 +0200
 Subject: kernel/sched/core: add migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 ---
  include/linux/preempt.h |   23 ++++++++
diff --git a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
index 3efd85c..ce235ac 100644
--- a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
 From: Anders Roxell <anders.roxell at linaro.org>
 Date: Thu, 14 May 2015 17:52:17 +0200
 Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 arm64 is missing support for PREEMPT_RT. The main feature which is
 lacking is support for lazy preemption. The arch-specific entry code,
diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 90fe774..fd0e585 100644
--- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Sat, 6 Mar 2010 17:47:10 +0100
 Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Setup and remove the interrupt handler in clock event mode selection.
 This avoids calling the (shared) interrupt handler when the device is
diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
index 65e2951..1366588 100644
--- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 May 2010 18:29:35 +0200
 Subject: ARM: at91: tclib: Default to tclib timer for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT is not too happy about the shared timer interrupt in AT91
 devices. Default to tclib timer for RT.
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
index e7a556d..5d62658 100644
--- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
+++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
@@ -1,7 +1,7 @@
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Mon, 19 Sep 2011 14:51:14 -0700
 Subject: arm: Convert arm boot_lock to raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The arm boot_lock is used by the secondary processor startup code.  The locking
 task is the idle thread, which has idle->sched_class == &idle_sched_class.
diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
index d687e51..3299998 100644
--- a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm: Enable highmem for rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 fixup highmem for ARM.
 
diff --git a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
index 12b78c2..bd65560 100644
--- a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 21:37:27 +0100
 Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The tlb should be flushed on unmap and thus make the mapping entry
 invalid. This is only done in the non-debug case which does not look
diff --git a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
index bbfe4ad..cd212b7 100644
--- a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
+++ b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 22 Dec 2016 17:28:33 +0100
 Subject: [PATCH] arm: include definition for cpumask_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 This definition gets pulled in by other files. With the (later) split of
 RCU and spinlock.h it won't compile anymore.
diff --git a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
index 9fb6f16..c515125 100644
--- a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at linaro.org>
 Date: Thu, 10 Nov 2016 16:17:55 -0800
 Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When running kprobe on -rt kernel, the below bug is caught:
 
diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
index b12159b..be7ae75 100644
--- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: arm: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Implement the arm pieces for lazy preempt.
 
diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
index ed75644..82508cf 100644
--- a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
+++ b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 20 Sep 2013 14:31:54 +0200
 Subject: arm/unwind: use a raw_spin_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Mostly unwind is done with irqs enabled however SLUB may call it with
 irqs disabled while creating a new SLUB cache.
diff --git a/debian/patches/features/all/rt/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch b/debian/patches/features/all/rt/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
new file mode 100644
index 0000000..0a28b26
--- /dev/null
+++ b/debian/patches/features/all/rt/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
@@ -0,0 +1,170 @@
+From: Mark Rutland <mark.rutland at arm.com>
+Date: Tue, 16 May 2017 15:18:05 +0100
+Subject: [PATCH] arm64/cpufeature: don't use mutex in bringup path
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+Commit b2bb439ad99a1497daa392a527c0e52c69915ce9 upstream
+
+Currently, cpus_set_cap() calls static_branch_enable_cpuslocked(), which
+must take the jump_label mutex.
+
+We call cpus_set_cap() in the secondary bringup path, from the idle
+thread where interrupts are disabled. Taking a mutex in this path "is a
+NONO" regardless of whether it's contended, and something we must avoid.
+We didn't spot this until recently, as ___might_sleep() won't warn for
+this case until all CPUs have been brought up.
+
+This patch avoids taking the mutex in the secondary bringup path. The
+poking of static keys is deferred until enable_cpu_capabilities(), which
+runs in a suitable context on the boot CPU. To account for the static
+keys being set later, cpus_have_const_cap() is updated to use another
+static key to check whether the const cap keys have been initialised,
+falling back to the caps bitmap until this is the case.
+
+This means that users of cpus_have_const_cap() should only gain a
+single additional NOP in the fast path once the const caps are
+initialised, but should always see the current cap value.
+
+The hyp code should never dereference the caps array, since the caps are
+initialized before we run the module initcall to initialise hyp. A check
+is added to the hyp init code to document this requirement.
+
+This change will sidestep a number of issues when the upcoming hotplug
+locking rework is merged.
+
+Signed-off-by: Mark Rutland <mark.rutland at arm.com>
+Reviewed-by: Marc Zyniger <marc.zyngier at arm.com>
+Reviewed-by: Suzuki Poulose <suzuki.poulose at arm.com>
+Acked-by: Will Deacon <will.deacon at arm.com>
+Cc: Christoffer Dall <christoffer.dall at linaro.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Sebastian Sewior <bigeasy at linutronix.de>
+Cc: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Catalin Marinas <catalin.marinas at arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ arch/arm64/include/asm/cpufeature.h |   12 ++++++++++--
+ arch/arm64/include/asm/kvm_host.h   |    8 ++++++--
+ arch/arm64/kernel/cpufeature.c      |   23 +++++++++++++++++++++--
+ 3 files changed, 37 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
+ 
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
++extern struct static_key_false arm64_const_caps_ready;
+ 
+ bool this_cpu_has_cap(unsigned int cap);
+ 
+@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsi
+ }
+ 
+ /* System capability check for constant caps */
+-static inline bool cpus_have_const_cap(int num)
++static inline bool __cpus_have_const_cap(int num)
+ {
+ 	if (num >= ARM64_NCAPS)
+ 		return false;
+@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigne
+ 	return test_bit(num, cpu_hwcaps);
+ }
+ 
++static inline bool cpus_have_const_cap(int num)
++{
++	if (static_branch_likely(&arm64_const_caps_ready))
++		return __cpus_have_const_cap(num);
++	else
++		return cpus_have_cap(num);
++}
++
+ static inline void cpus_set_cap(unsigned int num)
+ {
+ 	if (num >= ARM64_NCAPS) {
+@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned
+ 			num, ARM64_NCAPS);
+ 	} else {
+ 		__set_bit(num, cpu_hwcaps);
+-		static_branch_enable(&cpu_hwcap_keys[num]);
+ 	}
+ }
+ 
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -24,6 +24,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cpufeature.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -356,9 +357,12 @@ static inline void __cpu_init_hyp_mode(p
+ 				       unsigned long vector_ptr)
+ {
+ 	/*
+-	 * Call initialization code, and switch to the full blown
+-	 * HYP code.
++	 * Call initialization code, and switch to the full blown HYP code.
++	 * If the cpucaps haven't been finalized yet, something has gone very
++	 * wrong, and hyp will crash and burn when it uses any
++	 * cpus_have_const_cap() wrapper.
+ 	 */
++	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+ }
+ 
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -975,8 +975,16 @@ void update_cpu_capabilities(const struc
+  */
+ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+-	for (; caps->matches; caps++)
+-		if (caps->enable && cpus_have_cap(caps->capability))
++	for (; caps->matches; caps++) {
++		unsigned int num = caps->capability;
++
++		if (!cpus_have_cap(num))
++			continue;
++
++		/* Ensure cpus_have_const_cap(num) works */
++		static_branch_enable(&cpu_hwcap_keys[num]);
++
++		if (caps->enable) {
+ 			/*
+ 			 * Use stop_machine() as it schedules the work allowing
+ 			 * us to modify PSTATE, instead of on_each_cpu() which
+@@ -984,6 +992,8 @@ void __init enable_cpu_capabilities(cons
+ 			 * we return.
+ 			 */
+ 			stop_machine(caps->enable, NULL, cpu_online_mask);
++		}
++	}
+ }
+ 
+ /*
+@@ -1086,6 +1096,14 @@ static void __init setup_feature_capabil
+ 	enable_cpu_capabilities(arm64_features);
+ }
+ 
++DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
++EXPORT_SYMBOL(arm64_const_caps_ready);
++
++static void __init mark_const_caps_ready(void)
++{
++	static_branch_enable(&arm64_const_caps_ready);
++}
++
+ /*
+  * Check if the current CPU has a given feature capability.
+  * Should be called from non-preemptible context.
+@@ -1112,6 +1130,7 @@ void __init setup_cpu_features(void)
+ 	/* Set the CPU feature capabilies */
+ 	setup_feature_capabilities();
+ 	enable_errata_workarounds();
++	mark_const_caps_ready();
+ 	setup_elf_hwcaps(arm64_elf_hwcaps);
+ 
+ 	if (system_supports_32bit_el0())
diff --git a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 75496fb..a662d45 100644
--- a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -1,7 +1,7 @@
 Subject: arm64/xen: Make XEN depend on !RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 12 Oct 2015 11:18:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It's not ready and probably never will be, unless xen folks have a
 look at it.
diff --git a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
index e6f8fe8..8625fb3 100644
--- a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
+++ b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 09 Mar 2016 10:51:06 +0100
 Subject: arm: at91: do not disable/enable clocks in a row
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Currently the driver will disable the clock and enable it one line later
 if it is switching from periodic mode into one shot.
diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
index d53b339..ab88e39 100644
--- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
+++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Fri, 3 Jul 2009 08:44:29 -0500
 Subject: ata: Do not disable interrupts in ide code for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the local_irq_*_nort variants.
 
diff --git a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
index 11304a6..35d504a 100644
--- a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
+++ b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 11:01:26 +0100
 Subject: block: blk-mq: Use swait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
diff --git a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
index bac2123..1e5cd58 100644
--- a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 15:10:08 +0100
 Subject: block/mq: don't complete requests via IPI
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The IPI runs in hardirq context and there are sleeping locks. This patch
 moves the completion into a workqueue.
diff --git a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
index 0491335..766bd6d 100644
--- a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 preempt_disable() and get_cpu() don't play well together with the sleeping
 locks it tries to allocate later.
diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
index caf666b..9a394c1 100644
--- a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
+++ b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 10:37:23 +0200
 Subject: block: mq: use cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 there is a might sleep splat because get_cpu() disables preemption and
 later we grab a lock. As a workaround for this we use get_cpu_light().
diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
index 7ed89e4..d645b3d 100644
--- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: block: Shorten interrupt disabled regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Jun 2011 19:47:02 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Moving the blk_sched_flush_plug() call out of the interrupt/preempt
 disabled region in the scheduler allows us to replace
diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch
index 3ff5597..90d6e68 100644
--- a/debian/patches/features/all/rt/block-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: block: Use cpu_chill() for retry loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 20 Dec 2012 18:28:26 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Steven also observed a live lock when there was a
diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
index cedc8b4..a278034 100644
--- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
+++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code.
 
diff --git a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index d3d9d79..92e0f20 100644
--- a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 21 Jun 2014 10:09:48 +0200
 Subject: memcontrol: Prevent scheduling while atomic in cgroup code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 mm, memcg: make refill_stock() use get_cpu_light()
 
diff --git a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
index e02f2c8..3f02599 100644
--- a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
+++ b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 13 Feb 2015 15:52:24 +0100
 Subject: cgroups: use simple wait in css_release()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To avoid:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
diff --git a/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch b/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
index aecfec8..18bed52 100644
--- a/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
+++ b/debian/patches/features/all/rt/char-random-don-t-print-that-the-init-is-done.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 30 May 2017 16:39:01 +0200
 Subject: char/random: don't print that the init is done
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On RT we run into circular locking with pendingb_lock (workqueue),
 port_lock_key (uart) and the primary_crng (random):
@@ -147,21 +147,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -802,7 +802,7 @@ static int crng_fast_load(const char *cp
- 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+@@ -809,7 +809,7 @@ static int crng_fast_load(const char *cp
+ 		invalidate_batched_entropy();
  		crng_init = 1;
  		wake_up_interruptible(&crng_init_wait);
 -		pr_notice("random: fast init done\n");
 +		/* pr_notice("random: fast init done\n"); */
  	}
- 	spin_unlock_irqrestore(&primary_crng.lock, flags);
  	return 1;
-@@ -840,7 +840,7 @@ static void crng_reseed(struct crng_stat
+ }
+@@ -848,7 +848,7 @@ static void crng_reseed(struct crng_stat
  		crng_init = 2;
  		process_random_ready_list();
  		wake_up_interruptible(&crng_init_wait);
 -		pr_notice("random: crng init done\n");
 +		/* pr_notice("random: crng init done\n"); */
  	}
- 	spin_unlock_irqrestore(&primary_crng.lock, flags);
  }
+ 
diff --git a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
index d47db31..bbf0a7c 100644
--- a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
+++ b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
@@ -1,7 +1,7 @@
 From: Alexandre Belloni <alexandre.belloni at free-electrons.com>
 Date: Thu, 17 Mar 2016 21:09:43 +0100
 Subject: [PATCH] clockevents/drivers/timer-atmel-pit: fix double free_irq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 clockevents_exchange_device() changes the state from detached to shutdown
 and so at that point the IRQ has not yet been requested.
diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
index 11ccd7c..2b97a8f 100644
--- a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
+++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
@@ -1,7 +1,7 @@
 From: Benedikt Spranger <b.spranger at linutronix.de>
 Date: Mon, 8 Mar 2010 18:57:04 +0100
 Subject: clocksource: TCLIB: Allow higher clock rates for clock events
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 As default the TCLIB uses the 32KiHz base clock rate for clock events.
 Add a compile time selection to allow higher clock resulution.
diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
index 5d86cb7..b4e4573 100644
--- a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
+++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
@@ -1,7 +1,7 @@
 Subject: completion: Use simple wait queues
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 11 Jan 2013 11:23:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Completions have no long lasting callbacks and therefor do not need
 the complex waitqueue variant. Use simple waitqueues which reduces the
@@ -277,7 +277,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  EXPORT_SYMBOL(completion_done);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7476,7 +7476,10 @@ void migrate_disable(void)
+@@ -7487,7 +7487,10 @@ void migrate_disable(void)
  		return;
  	}
  #ifdef CONFIG_SCHED_DEBUG
@@ -289,7 +289,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  
  	if (p->migrate_disable) {
-@@ -7509,7 +7512,10 @@ void migrate_enable(void)
+@@ -7520,7 +7523,10 @@ void migrate_enable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
index 2ea1b8d..aa57f57 100644
--- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
 Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT does not increment preempt count when a 'sleeping' spinlock is
 locked. Update PREEMPT_LOCK_OFFSET for that case.
diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
index b07c730..38edaa4 100644
--- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
+++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Take RT softirq semantics into account in cond_resched()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Jul 2011 09:56:44 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The softirq semantics work different on -RT. There is no SOFTIRQ_MASK in
 the preemption counter which leads to the BUG_ON() statement in
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1514,12 +1514,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1509,12 +1509,16 @@ extern int __cond_resched_lock(spinlock_
  	__cond_resched_lock(lock);				\
  })
  
diff --git a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
index 10b57f4..0d54999 100644
--- a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
+++ b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 16 Oct 2016 05:11:54 +0200
 Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock
  on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
 |in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep
diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index 953bcba..b55fc64 100644
--- a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 5 Dec 2013 09:16:52 -0500
 Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The patch:
 
diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 67ed26d..a33e351 100644
--- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 02 Mar 2012 10:36:57 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Tasks can block on hotplug.lock in pin_current_cpu(), but their state
 might be != RUNNING. So the mutex wakeup will set the state
diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
index 073cc5d..4463a7a 100644
--- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
+++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <srostedt at redhat.com>
 Date: Mon, 16 Jul 2012 08:07:43 +0000
 Subject: cpu/rt: Rework cpu down for PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Bringing a CPU down is a pain with the PREEMPT_RT kernel because
 tasks can be preempted in many more places than in non-RT. In
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1346,6 +1346,10 @@ extern int task_can_attach(struct task_s
+@@ -1342,6 +1342,10 @@ extern int task_can_attach(struct task_s
  #ifdef CONFIG_SMP
  extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
  extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #else
  static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
-@@ -1356,6 +1360,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -1352,6 +1356,9 @@ static inline int set_cpus_allowed_ptr(s
  		return -EINVAL;
  	return 0;
  }
@@ -439,7 +439,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	 * interrupt affinities.
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1111,6 +1111,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -1122,6 +1122,84 @@ void do_set_cpus_allowed(struct task_str
  	__do_set_cpus_allowed_tail(p, new_mask);
  }
  
diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 2858571..ffdcb81 100644
--- a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 4 Mar 2014 12:28:32 -0500
 Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We hit another bug that was caused by switching cpu_chill() from
 msleep() to hrtimer_nanosleep().
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1622,12 +1622,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1601,12 +1601,13 @@ void hrtimer_init_sleeper(struct hrtimer
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
  
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		hrtimer_start_expires(&t->timer, mode);
  
  		if (likely(t->task))
-@@ -1669,7 +1670,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1648,7 +1649,8 @@ long __sched hrtimer_nanosleep_restart(s
  				HRTIMER_MODE_ABS);
  	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
  
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	rmtp = restart->nanosleep.rmtp;
-@@ -1686,8 +1688,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1665,8 +1667,10 @@ long __sched hrtimer_nanosleep_restart(s
  	return ret;
  }
  
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct restart_block *restart;
  	struct hrtimer_sleeper t;
-@@ -1700,7 +1704,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1679,7 +1683,7 @@ long hrtimer_nanosleep(struct timespec *
  
  	hrtimer_init_on_stack(&t.timer, clockid, mode);
  	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		goto out;
  
  	/* Absolute timers do not update the rmtp value and restart: */
-@@ -1727,6 +1731,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1706,6 +1710,12 @@ long hrtimer_nanosleep(struct timespec *
  	return ret;
  }
  
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
  		struct timespec __user *, rmtp)
  {
-@@ -1753,7 +1763,8 @@ void cpu_chill(void)
+@@ -1732,7 +1742,8 @@ void cpu_chill(void)
  	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
  
  	current->flags |= PF_NOFREEZE;
diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
index 0607eac..06e3e83 100644
--- a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
+++ b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
@@ -1,7 +1,7 @@
 From:	Tiejun Chen <tiejun.chen at windriver.com>
 Subject: cpu_down: move migrate_enable() back
 Date:	Thu, 7 Nov 2013 10:06:07 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
 use migrate_enable()/migrate_disable() to replace that combination
diff --git a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index 7d6c7cd..f2e8e64 100644
--- a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 9 Apr 2015 15:23:01 +0200
 Subject: cpufreq: drop K8's driver from beeing selected
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Ralf posted a picture of a backtrace from
 
diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
index 883b3e3..90ac798 100644
--- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
+++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 14 Dec 2011 01:03:49 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There are "valid" GFP_ATOMIC allocations such as
 
diff --git a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 5a603ac..3130a97 100644
--- a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Sun, 8 Jan 2017 09:32:25 +0100
 Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The two commits below add up to a cpuset might_sleep() splat for RT:
 
diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 1778081..0712018 100644
--- a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Feb 2014 17:24:04 +0100
 Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Don Estabrook reported
 | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch
index f46b6df..28bb822 100644
--- a/debian/patches/features/all/rt/debugobjects-rt.patch
+++ b/debian/patches/features/all/rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
 Subject: debugobjects: Make RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Avoid filling the pool / allocating memory with irqs off().
 
diff --git a/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch b/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch
index 81dc987..e3fdc35 100644
--- a/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch
+++ b/debian/patches/features/all/rt/delayacct-use-raw_spinlocks.patch
@@ -1,8 +1,7 @@
-From 2c887ccff27de53f76fbdedc0afea9fa3be3ea2f Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Sat, 20 May 2017 12:32:23 +0200
 Subject: [PATCH] delayacct: use raw_spinlocks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 try_to_wake_up() might invoke delayacct_blkio_end() while holding the
 pi_lock. The lock is only held for a short amount of time so it should
diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch
index 31c7fb8..de90b80 100644
--- a/debian/patches/features/all/rt/dm-make-rt-aware.patch
+++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: dm: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 14 Nov 2011 23:06:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
 interrupts legitimately enabled here as we cant deadlock against the
diff --git a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index fbe2009..bdb626c 100644
--- a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Thu, 31 Mar 2016 04:08:28 +0200
 Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
  for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 They're nondeterministic, and lead to ___might_sleep() splats in -rt.
 OTOH, they're a lot less wasteful than an rtmutex per page.
diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
index 56be59e..0bef7c5 100644
--- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
+++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:24 -0500
 Subject: drivers/net: Use disable_irq_nosync() in 8139too
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use disable_irq_nosync() instead of disable_irq() as this might be
 called in atomic context with netpoll.
diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
index f824972..da55836 100644
--- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
+++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Fri, 3 Jul 2009 08:30:00 -0500
 Subject: drivers/net: vortex fix locking issues
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Argh, cut and paste wasn't enough...
 
diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
index 42b70c1..6c450ad 100644
--- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
+++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:30 -0500
 Subject: drivers: random: Reduce preempt disabled region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 No need to keep preemption disabled across the whole function.
 
diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
index a8b6e57..2808908 100644
--- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/omap: Make the locking RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the
 optimsation we are looking for. Redo it to make it work on -RT and
diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
index 598e031..d9a76fe 100644
--- a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
 Subject: tty/serial/pl011: Make the locking work on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The lock is a sleeping lock and local_irq_save() is not the optimsation
 we are looking for. Redo it to make it work on -RT and non-RT.
diff --git a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index f6a4d11..d6629bb 100644
--- a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Thu, 20 Oct 2016 11:15:22 +0200
 Subject: [PATCH] drivers/zram: Don't disable preemption in
  zcomp_stream_get/put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In v4.7, the driver switched to percpu compression streams, disabling
 preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index 042bbfb..55a114a 100644
--- a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 25 Apr 2013 18:12:52 +0200
 Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 This tracepoint is responsible for:
 
diff --git a/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch b/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
index cbd6da8..57b40da 100644
--- a/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
+++ b/debian/patches/features/all/rt/drm-i915-init-spinlock-properly-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 Date: Mon, 29 May 2017 15:33:52 +0200
 Subject: [PATCH] drm/i915: init spinlock properly on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 THe lockinit is opencoded so need to fix it up…
 
diff --git a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index 72c7c79..748ac50 100644
--- a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -1,7 +1,7 @@
 Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 09:01:42 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 
 [    8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /**
   * intel_pipe_update_start() - start update of a set of display registers
   * @crtc: the crtc of which the registers are going to be updated
-@@ -95,7 +98,7 @@ void intel_pipe_update_start(struct inte
+@@ -98,7 +101,7 @@ void intel_pipe_update_start(struct inte
  	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
  	max = vblank_start - 1;
  
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  	if (min <= 0 || max <= 0)
  		return;
-@@ -125,11 +128,11 @@ void intel_pipe_update_start(struct inte
+@@ -128,11 +131,11 @@ void intel_pipe_update_start(struct inte
  			break;
  		}
  
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  
  	finish_wait(wq, &wait);
-@@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_
+@@ -202,7 +205,7 @@ void intel_pipe_update_end(struct intel_
  		crtc->base.state->event = NULL;
  	}
  
diff --git a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 7ad039d..785ccb3 100644
--- a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
 Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 DRM folks identified the spots, so use them.
 
diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
index 0e4d2c6..f62451e 100644
--- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: fs/epoll: Do not disable preemption on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 16:35:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 ep_call_nested() takes a sleeping lock so we can't disable preemption.
 The light version is enough since ep_call_nested() doesn't mind beeing
diff --git a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
index 78aedb9..f60d116 100644
--- a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
+++ b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 16 Feb 2015 18:49:10 +0100
 Subject: fs/aio: simple simple work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch
index 7d32fa8..11ae869 100644
--- a/debian/patches/features/all/rt/fs-block-rt-support.patch
+++ b/debian/patches/features/all/rt/fs-block-rt-support.patch
@@ -1,7 +1,7 @@
 Subject: block: Turn off warning which is bogus on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jun 2011 17:05:09 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On -RT the context is always with IRQs enabled. Ignore this warning on -RT.
 
diff --git a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
index 849c501..9bde212 100644
--- a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
+++ b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 17:57:03 +0200
 Subject: [PATCH] fs/dcache: init in_lookup_hashtable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 in_lookup_hashtable was introduced in commit 94bdd655caba ("parallel
 lookups machinery, part 3") and never initialized but since it is in
diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index bac57a6..5ab5ad0 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
 Subject: fs: dcache: Use cpu_chill() in trylock loops
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
diff --git a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index fc9f23d..7b4cac1 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 14:35:49 +0200
 Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
 which disables preemption. As a workaround convert it to swait.
diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
index 50085f4..ded9bc2 100644
--- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
+++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 10:11:25 +0100
 Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 bit_spin_locks break under RT.
 
diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
index d1ddfa2..5b6005e 100644
--- a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
+++ b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 17 Feb 2014 17:30:03 +0100
 Subject: fs: jbd2: pull your plug when waiting for space
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Two cps in parallel managed to stall the the ext4 fs. It seems that
 journal code is either waiting for locks or sleeping waiting for
diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
index c452542..dd6146b 100644
--- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
+++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 19 Jul 2009 08:44:27 -0500
 Subject: fs: namespace preemption fix
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On RT we cannot loop with preemption disabled here as
 mnt_make_readonly() might have been preempted. We can safely enable
diff --git a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index 9571eab..08a5458 100644
--- a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 10:51:27 +0200
 Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The RW semaphore had a reader side which used the _non_owner version
 because it most likely took the reader lock in one thread and released it
diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
index ed2d188..dd51080 100644
--- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Fri, 3 Jul 2009 08:44:12 -0500
 Subject: fs: ntfs: disable interrupt only on !RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
 > * Nick Piggin <nickpiggin at yahoo.com.au> wrote:
diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
index dd3dc81..8e1acd9 100644
--- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 18 Mar 2011 09:18:52 +0100
 Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Wrap the bit_spin_lock calls into a separate inline and add the RT
 replacements with a real spinlock.
diff --git a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
index 904a438..2894b41 100644
--- a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
+++ b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 16 Oct 2016 05:08:30 +0200
 Subject: [PATCH] ftrace: Fix trace header alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Line up helper arrows to the right column.
 
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -3113,17 +3113,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3121,17 +3121,17 @@ get_total_entries(struct trace_buffer *b
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3152,11 +3152,11 @@ static void print_func_help_header_irq(s
+@@ -3160,11 +3160,11 @@ static void print_func_help_header_irq(s
  		    "#                            |/  _-----=> need-resched_lazy\n"
  		    "#                            || / _---=> hardirq/softirq\n"
  		    "#                            ||| / _--=> preempt-depth\n"
diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
index 9953f46..d22f8d6 100644
--- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:56:42 +0200
 Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #define TRACE_EVENT_TYPE_MAX						\
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1946,6 +1946,8 @@ tracing_generic_entry_update(struct trac
+@@ -1954,6 +1954,8 @@ tracing_generic_entry_update(struct trac
  		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
  		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
  
-@@ -3114,9 +3116,10 @@ static void print_lat_help_header(struct
+@@ -3122,9 +3124,10 @@ static void print_lat_help_header(struct
  		    "#                | / _----=> need-resched    \n"
  		    "#                || / _---=> hardirq/softirq \n"
  		    "#                ||| / _--=> preempt-depth   \n"
diff --git a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index 0695247..0684a06 100644
--- a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 1 Mar 2013 11:17:42 +0100
 Subject: futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In exit_pi_state_list() we have the following locking construct:
 
diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
index 7a9dddd..6663004 100644
--- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
+++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Requeue with timeout causes a bug with PREEMPT_RT_FULL.
 
diff --git a/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
index 09117d0..9a844ee 100644
--- a/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
+++ b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 22 May 2017 13:04:50 -0700
 Subject: [PATCH] futex,rt_mutex: Fix rt_mutex_cleanup_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Markus reported that the glibc/nptl/tst-robustpi8 test was failing after
 commit:
diff --git a/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
index 243ed05..3828c77 100644
--- a/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
+++ b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
@@ -1,8 +1,7 @@
-From 8a35f416ca9ff27e893cebcbe064a1f3c8e1de57 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 9 May 2017 17:11:10 +0200
 Subject: [PATCH] futex/rtmutex: Cure RT double blocking issue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT has a problem when the wait on a futex/rtmutex got interrupted by a
 timeout or a signal. task->pi_blocked_on is still set when returning from
@@ -27,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -2408,6 +2408,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2407,6 +2407,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  			       struct hrtimer_sleeper *to,
  			       struct rt_mutex_waiter *waiter)
  {
@@ -35,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	int ret;
  
  	raw_spin_lock_irq(&lock->wait_lock);
-@@ -2419,6 +2420,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2418,6 +2419,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	 * have to fix that up.
  	 */
  	fixup_rt_mutex_waiters(lock);
diff --git a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
index 701d7df..67d4a89 100644
--- a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 8 Mar 2017 14:23:35 +0100
 Subject: [PATCH] futex: workaround migrate_disable/enable in different context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 migrate_disable()/migrate_enable() takes a different path in atomic() vs
 !atomic() context. These little hacks ensure that we don't underflow / overflow
diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
index 2894bff..157f396 100644
--- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:57 -0500
 Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Creates long latencies for no value
 
diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index 138dd28..035bc88 100644
--- a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 21 Aug 2013 17:48:46 +0200
 Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Joe Korty reported, that __irq_set_affinity_locked() schedules a
 workqueue while holding a rawlock which results in a might_sleep()
diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch
index ca0b877..c1486de 100644
--- a/debian/patches/features/all/rt/genirq-force-threading.patch
+++ b/debian/patches/features/all/rt/genirq-force-threading.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Force interrupt thread on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Force threaded_irqs and optimize the code (force_irqthreads) in regard
 to this.
diff --git a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
index 9862b3c..936901b 100644
--- a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 11 Feb 2016 11:54:00 -0600
 Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On -rt kernels, the use of migrate_disable()/migrate_enable() is
 sufficient to guarantee a task isn't moved to another CPU.  Update the
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
-@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2115,7 +2115,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
   *	This call sets the internal irqchip state of an interrupt,
   *	depending on the value of @which.
   *
diff --git a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index b08acb6..af1798e 100644
--- a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 24 Mar 2015 08:14:49 +0100
 Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 do_set_cpus_allowed() is not safe vs ->sched_class change.
 
diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
index 11b8068..bba043c 100644
--- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
+++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Lightweight get online cpus
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 get_online_cpus() is a heavy weight function which involves a global
 mutex. migrate_disable() wants a simpler construct which prevents only
diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index b8b0d92..bdfed92 100644
--- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: sync_unplug: No "\n" in task name
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Sun, 16 Oct 2011 18:56:43 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Otherwise the output will look a little odd.
 
diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
index 90db14e..08a2b33 100644
--- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
+++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
@@ -1,7 +1,7 @@
 Subject: hotplug: Use migrate disable on unplug
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 19:35:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Migration needs to be disabled accross the unplug handling to make
 sure that the unplug thread is off the unplugged cpu.
diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index 4694586..cbd77d1 100644
--- a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Date: Mon, 16 Sep 2013 14:09:19 -0700
 Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When run ltp leapsec_timer test, the following call trace is caught:
 
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -696,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
+@@ -695,6 +695,29 @@ static void hrtimer_switch_to_hres(void)
  	retrigger_next_event(NULL);
  }
  
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  static void clock_was_set_work(struct work_struct *work)
  {
  	clock_was_set();
-@@ -711,6 +734,7 @@ void clock_was_set_delayed(void)
+@@ -710,6 +733,7 @@ void clock_was_set_delayed(void)
  {
  	schedule_work(&hrtimer_work);
  }
diff --git a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
index dac5550..d543ca5 100644
--- a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
+++ b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 23 Dec 2015 20:57:41 +0100
 Subject: hrtimer: enfore 64byte alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
 a list_head expired to struct hrtimer_clock_base and with it we run into
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -116,11 +116,7 @@ struct hrtimer_sleeper {
+@@ -112,11 +112,7 @@ struct hrtimer_sleeper {
  	struct task_struct *task;
  };
  
diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 3f18bce..3504071 100644
--- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:31 -0500
 Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In preempt-rt we can not call the callbacks which take sleeping locks
 from the timer interrupt context.
@@ -23,25 +23,25 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -87,6 +87,8 @@ enum hrtimer_restart {
+@@ -86,6 +86,8 @@ enum hrtimer_restart {
+  *		was armed.
   * @function:	timer expiry callback function
   * @base:	pointer to the timer base (per cpu and per clock)
-  * @state:	state information (See bit values above)
 + * @cb_entry:	list entry to defer timers from hardirq context
 + * @irqsafe:	timer can run in hardirq context
-  * @praecox:	timer expiry time if expired at the time of programming
+  * @state:	state information (See bit values above)
   * @is_rel:	Set if the timer was armed relative
   *
-@@ -98,6 +100,8 @@ struct hrtimer {
+@@ -96,6 +98,8 @@ struct hrtimer {
+ 	ktime_t				_softexpires;
  	enum hrtimer_restart		(*function)(struct hrtimer *);
  	struct hrtimer_clock_base	*base;
- 	u8				state;
 +	struct list_head		cb_entry;
 +	int				irqsafe;
- #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- 	ktime_t				praecox;
- #endif
-@@ -125,6 +129,7 @@ struct hrtimer_sleeper {
+ 	u8				state;
+ 	u8				is_rel;
+ };
+@@ -121,6 +125,7 @@ struct hrtimer_sleeper {
   *			timer to a base on another cpu.
   * @clockid:		clock id for per_cpu support
   * @active:		red black tree root node for the active timers
@@ -49,7 +49,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
   * @get_time:		function to retrieve the current time of the clock
   * @offset:		offset of this clock to the monotonic base
   */
-@@ -133,6 +138,7 @@ struct hrtimer_clock_base {
+@@ -129,6 +134,7 @@ struct hrtimer_clock_base {
  	int			index;
  	clockid_t		clockid;
  	struct timerqueue_head	active;
@@ -57,7 +57,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	ktime_t			(*get_time)(void);
  	ktime_t			offset;
  } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
-@@ -176,6 +182,7 @@ struct hrtimer_cpu_base {
+@@ -172,6 +178,7 @@ struct hrtimer_cpu_base {
  	raw_spinlock_t			lock;
  	seqcount_t			seq;
  	struct hrtimer			*running;
@@ -87,7 +87,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -720,11 +720,8 @@ static inline int hrtimer_is_hres_enable
+@@ -719,11 +719,8 @@ static inline int hrtimer_is_hres_enable
  static inline void hrtimer_switch_to_hres(void) { }
  static inline void
  hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -101,7 +101,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
  static inline void retrigger_next_event(void *arg) { }
  
-@@ -845,7 +842,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -844,7 +841,7 @@ void hrtimer_wait_for_timer(const struct
  {
  	struct hrtimer_clock_base *base = timer->base;
  
@@ -110,7 +110,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		wait_event(base->cpu_base->wait,
  				!(hrtimer_callback_running(timer)));
  }
-@@ -895,6 +892,11 @@ static void __remove_hrtimer(struct hrti
+@@ -894,6 +891,11 @@ static void __remove_hrtimer(struct hrti
  	if (!(state & HRTIMER_STATE_ENQUEUED))
  		return;
  
@@ -122,7 +122,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	if (!timerqueue_del(&base->active, &timer->node))
  		cpu_base->active_bases &= ~(1 << base->index);
  
-@@ -1144,6 +1146,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1134,6 +1136,7 @@ static void __hrtimer_init(struct hrtime
  
  	base = hrtimer_clockid_to_base(clock_id);
  	timer->base = &cpu_base->clock_base[base];
@@ -130,7 +130,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	timerqueue_init(&timer->node);
  }
  
-@@ -1178,6 +1181,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1168,6 +1171,7 @@ bool hrtimer_active(const struct hrtimer
  		seq = raw_read_seqcount_begin(&cpu_base->seq);
  
  		if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -138,7 +138,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  		    cpu_base->running == timer)
  			return true;
  
-@@ -1275,12 +1279,111 @@ static void __run_hrtimer(struct hrtimer
+@@ -1265,10 +1269,109 @@ static void __run_hrtimer(struct hrtimer
  	cpu_base->running = NULL;
  }
  
@@ -240,8 +240,6 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
 +
 +#endif
 +
- static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
- 
  static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
  {
  	struct hrtimer_clock_base *base = cpu_base->clock_base;
@@ -250,7 +248,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  
  	for (; active; base++, active >>= 1) {
  		struct timerqueue_node *node;
-@@ -1320,9 +1423,14 @@ static void __hrtimer_run_queues(struct
+@@ -1299,9 +1402,14 @@ static void __hrtimer_run_queues(struct
  			if (basenow < hrtimer_get_softexpires_tv64(timer))
  				break;
  
@@ -266,7 +264,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1464,8 +1572,6 @@ void hrtimer_run_queues(void)
+@@ -1443,8 +1551,6 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -275,7 +273,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  }
  
  /*
-@@ -1487,6 +1593,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1466,6 +1572,7 @@ static enum hrtimer_restart hrtimer_wake
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
  {
  	sl->timer.function = hrtimer_wakeup;
@@ -283,7 +281,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	sl->task = task;
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1621,6 +1728,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1600,6 +1707,7 @@ int hrtimers_prepare_cpu(unsigned int cp
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
  		cpu_base->clock_base[i].cpu_base = cpu_base;
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -291,7 +289,7 @@ Signed-off-by: Ingo Molnar <mingo at elte.hu>
  	}
  
  	cpu_base->cpu = cpu;
-@@ -1697,9 +1805,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+@@ -1676,9 +1784,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
  
  #endif /* CONFIG_HOTPLUG_CPU */
  
diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
index 4bc9529..ffe5b2f 100644
--- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
+++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: hrtimers: Prepare full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Make cancellation of a running callback in softirq context safe
 against preemption.
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
  struct hrtimer_clock_base;
  struct hrtimer_cpu_base;
-@@ -195,6 +196,9 @@ struct hrtimer_cpu_base {
+@@ -191,6 +192,9 @@ struct hrtimer_cpu_base {
  	unsigned int			nr_hangs;
  	unsigned int			max_hang_time;
  #endif
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
  } ____cacheline_aligned;
  
-@@ -404,6 +408,13 @@ static inline void hrtimer_restart(struc
+@@ -400,6 +404,13 @@ static inline void hrtimer_restart(struc
  	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
  }
  
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /* Query timers: */
  extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
  
-@@ -428,7 +439,7 @@ static inline int hrtimer_is_queued(stru
+@@ -424,7 +435,7 @@ static inline int hrtimer_is_queued(stru
   * Helper function to check, whether the timer is running the callback
   * function
   */
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -828,6 +828,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -827,6 +827,32 @@ u64 hrtimer_forward(struct hrtimer *time
  }
  EXPORT_SYMBOL_GPL(hrtimer_forward);
  
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  /*
   * enqueue_hrtimer - internal function to (re)start a timer
   *
-@@ -1042,7 +1068,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1032,7 +1058,7 @@ int hrtimer_cancel(struct hrtimer *timer
  
  		if (ret >= 0)
  			return ret;
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	}
  }
  EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1438,6 +1464,8 @@ void hrtimer_run_queues(void)
+@@ -1417,6 +1443,8 @@ void hrtimer_run_queues(void)
  	now = hrtimer_update_base(cpu_base);
  	__hrtimer_run_queues(cpu_base, now);
  	raw_spin_unlock(&cpu_base->lock);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  /*
-@@ -1597,6 +1625,9 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1576,6 +1604,9 @@ int hrtimers_prepare_cpu(unsigned int cp
  
  	cpu_base->cpu = cpu;
  	hrtimer_init_hres(cpu_base);
diff --git a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 1eb542e..0b583e3 100644
--- a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Clark Williams <williams at redhat.com>
 Date: Tue, 26 May 2015 10:43:43 -0500
 Subject: i915: bogus warning from i915 when running on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
 handler, which whines constanly on the RT kernel (since the interrupt
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12113,7 +12113,7 @@ void intel_check_page_flip(struct drm_i9
+@@ -12115,7 +12115,7 @@ void intel_check_page_flip(struct drm_i9
  	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  	struct intel_flip_work *work;
  
diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
index 0a05105..af9cbdd 100644
--- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
+++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: ide: Do not disable interrupts for PREEMPT-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the local_irq_*_nort variants.
 
diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
index 0b985a3..5732cd1 100644
--- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
@@ -1,7 +1,7 @@
 From: Sven-Thorsten Dietrich <sdietrich at novell.com>
 Date: Fri, 3 Jul 2009 08:30:35 -0500
 Subject: infiniband: Mellanox IB driver patch use _nort() primitives
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT
 Kernel.
diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
index 727c215..919be65 100644
--- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:16 -0500
 Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the _nort() primitives.
 
diff --git a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
index 5c53a3f..a47e669 100644
--- a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
@@ -1,7 +1,7 @@
 Subject: iommu/amd: Use WARN_ON_NORT in __attach_device()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:22:23 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT does not disable interrupts here, but the protection is still
 correct. Fixup the WARN_ON so it won't yell on RT.
diff --git a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
index a12eace..7ac3f05 100644
--- a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+++ b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 16:58:19 +0200
 Subject: [PATCH] iommu/iova: don't disable preempt around this_cpu_ptr()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Commit 583248e6620a ("iommu/iova: Disable preemption around use of
 this_cpu_ptr()") disables preemption while accessing a per-CPU variable.
diff --git a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
index 47c62ab..8ed6ec6 100644
--- a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+++ b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 15 Sep 2016 17:16:44 +0200
 Subject: [PATCH] iommu/vt-d: don't disable preemption while accessing
  deferred_flush()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 get_cpu() disables preemption and returns the current CPU number. The
 CPU number is later only used once while retrieving the address of the
diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index a21e31a..6d1525c 100644
--- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -1,7 +1,7 @@
 Subject: genirq: Allow disabling of softirq processing in irq thread context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 Jan 2012 13:01:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The processing of softirqs in irq thread context is a performance gain
 for the non-rt workloads of a system, but it's counterproductive for
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	return ret;
  }
  
-@@ -1340,6 +1348,9 @@ static int
+@@ -1342,6 +1350,9 @@ static int
  			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  		}
  
diff --git a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
index 322563d..f610c11 100644
--- a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: Move irq safe work to irq context
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 15 Nov 2015 18:40:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On architectures where arch_irq_work_has_interrupt() returns false, we
 end up running the irq safe work from the softirq context. That
diff --git a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
index b2afe0d..411da6e 100644
--- a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
 Subject: irqwork: push most work into softirq context
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Initially we defered all irqwork into softirq because we didn't want the
 latency spikes if perf or another user was busy and delayed the RT task.
diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch
index c864dbd..eec5638 100644
--- a/debian/patches/features/all/rt/jump-label-rt.patch
+++ b/debian/patches/features/all/rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
 Subject: jump-label: disable if stop_machine() is used
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Some architectures are using stop_machine() while switching the opcode which
 leads to latency spikes.
diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
index 58af067..1a6ab28 100644
--- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Disable config options which are not RT compatible
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Disable stuff which is known to have issues on RT
 
diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
index 1fcb73f..3675b7f 100644
--- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
+++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
@@ -1,7 +1,7 @@
 Subject: kconfig: Add PREEMPT_RT_FULL
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 14:58:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Introduce the final symbol for PREEMPT_RT_FULL.
 
diff --git a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
index d9fb768..4f125b9 100644
--- a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
+++ b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 19 Mar 2013 14:44:30 +0100
 Subject: kernel/SRCU: provide a static initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There are macros for static initializer for the three out of four
 possible notifier types, that are:
diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 1607d40..4783730 100644
--- a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 7 Jun 2013 22:37:06 +0200
 Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If kthread is pinned to CPUx and CPUx is going down then we get into
 trouble:
diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 24eef39..dadc0da 100644
--- a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 14 Jun 2013 17:16:35 +0200
 Subject: kernel/hotplug: restore original cpu mask oncpu/down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If a task which is allowed to run only on CPU X puts CPU Y down then it
 will be allowed on all CPUs but the on CPU Y after it comes back from
diff --git a/debian/patches/features/all/rt/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch b/debian/patches/features/all/rt/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
new file mode 100644
index 0000000..d63d69c
--- /dev/null
+++ b/debian/patches/features/all/rt/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
@@ -0,0 +1,142 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Thu, 22 Jun 2017 17:53:34 +0200
+Subject: [PATCH] kernel/locking: use an exclusive wait_q for sleepers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+If a task is queued as a sleeper for a wakeup and never goes to
+schedule() (because it just obtained the lock) then it will receive a
+spurious wake up which is not "bad", it is considered. Until that wake
+up happens this task cannot be enqueued for any wake ups handled by the
+WAKE_Q infrastructure (because a task can only be enqueued once). This
+wouldn't be bad if we would use the same wakeup mechanism for the wake
+up of sleepers as we do for "normal" wake ups. But we don't…
+
+So.
+   T1			T2		T3
+   spin_lock(x)				spin_unlock(x);
+   					wake_q_add_sleeper(q1, T1)
+   spin_unlock(x)
+   set_state(TASK_INTERRUPTIBLE)
+   if (!condition)
+	schedule()
+			condition = true
+			wake_q_add(q2, T1)
+	                // T1 not added, still enqueued
+			wake_up_q(q2)
+					wake_up_q_sleeper(q1)
+					// T1 not woken up, wrong task state
+
+In order to solve this race this patch adds a wake_q_node for the
+sleeper case.
+
+Reported-by: Mike Galbraith <efault at gmx.de>
+Cc: stable-rt at vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ include/linux/sched.h        |    1 +
+ include/linux/sched/wake_q.h |   16 ++++++++++++++--
+ kernel/fork.c                |    1 +
+ kernel/locking/rtmutex.c     |    2 +-
+ kernel/sched/core.c          |   21 ++++++++++++++++-----
+ 5 files changed, 33 insertions(+), 8 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -800,6 +800,7 @@ struct task_struct {
+ 	raw_spinlock_t			pi_lock;
+ 
+ 	struct wake_q_node		wake_q;
++	struct wake_q_node		wake_q_sleeper;
+ 
+ #ifdef CONFIG_RT_MUTEXES
+ 	/* PI waiters blocked on a rt_mutex held by this task: */
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -46,8 +46,20 @@ static inline void wake_q_init(struct wa
+ 	head->lastp = &head->first;
+ }
+ 
+-extern void wake_q_add(struct wake_q_head *head,
+-		       struct task_struct *task);
++extern void __wake_q_add(struct wake_q_head *head,
++			 struct task_struct *task, bool sleeper);
++static inline void wake_q_add(struct wake_q_head *head,
++			      struct task_struct *task)
++{
++	__wake_q_add(head, task, false);
++}
++
++static inline void wake_q_add_sleeper(struct wake_q_head *head,
++				      struct task_struct *task)
++{
++	__wake_q_add(head, task, true);
++}
++
+ extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+ static inline void wake_up_q(struct wake_q_head *head)
+ {
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -575,6 +575,7 @@ static struct task_struct *dup_task_stru
+ 	tsk->splice_pipe = NULL;
+ 	tsk->task_frag.page = NULL;
+ 	tsk->wake_q.next = NULL;
++	tsk->wake_q_sleeper.next = NULL;
+ 
+ 	account_kernel_stack(tsk, 1);
+ 
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1463,7 +1463,7 @@ static void mark_wakeup_next_waiter(stru
+ 	 */
+ 	preempt_disable();
+ 	if (waiter->savestate)
+-		wake_q_add(wake_sleeper_q, waiter->task);
++		wake_q_add_sleeper(wake_sleeper_q, waiter->task);
+ 	else
+ 		wake_q_add(wake_q, waiter->task);
+ 	raw_spin_unlock(&current->pi_lock);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -437,9 +437,15 @@ static bool set_nr_if_polling(struct tas
+ #endif
+ #endif
+ 
+-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
++		  bool sleeper)
+ {
+-	struct wake_q_node *node = &task->wake_q;
++	struct wake_q_node *node;
++
++	if (sleeper)
++		node = &task->wake_q_sleeper;
++	else
++		node = &task->wake_q;
+ 
+ 	/*
+ 	 * Atomically grab the task, if ->wake_q is !nil already it means
+@@ -468,12 +474,17 @@ void __wake_up_q(struct wake_q_head *hea
+ 	while (node != WAKE_Q_TAIL) {
+ 		struct task_struct *task;
+ 
+-		task = container_of(node, struct task_struct, wake_q);
++		if (sleeper)
++			task = container_of(node, struct task_struct, wake_q_sleeper);
++		else
++			task = container_of(node, struct task_struct, wake_q);
+ 		BUG_ON(!task);
+ 		/* Task can safely be re-inserted now: */
+ 		node = node->next;
+-		task->wake_q.next = NULL;
+-
++		if (sleeper)
++			task->wake_q_sleeper.next = NULL;
++		else
++			task->wake_q.next = NULL;
+ 		/*
+ 		 * wake_up_process() implies a wmb() to pair with the queueing
+ 		 * in wake_q_add() so as not to miss wakeups.
diff --git a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 4714554..ef494dd 100644
--- a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 4 Feb 2016 16:38:10 +0100
 Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Otherwise we get a WARN_ON() backtrace and some events are reported as
 "not counted".
diff --git a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index b6c3d9f..f7d50ca 100644
--- a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 19 May 2016 17:45:27 +0200
 Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On -RT we try to acquire sleeping locks which might lead to warnings
 from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on
diff --git a/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 74438bf..2f616dc 100644
--- a/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/debian/patches/features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -1,11 +1,10 @@
-From 866f2c8a7f0eec01a72cceeb73bab62eb3624694 Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 4 Apr 2017 12:50:16 +0200
 Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
 wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
diff --git a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 077bb65..c339a7f 100644
--- a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 21 Nov 2016 19:31:08 +0100
 Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
  __put_task_struct()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There is no need to free the stack before the task struct. This also
 comes handy on -RT because we can't free memory in preempt disabled
diff --git a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
index 300c0ff..4eb9bc1 100644
--- a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
+++ b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Tue, 9 Feb 2016 18:17:18 +0100
 Subject: kernel: softirq: unlock with irqs on
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We unlock the lock while the interrupts are off. This isn't a problem
 now but will get because the migrate_disable() + enable are not
diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
index c41c38a..ad8e74b 100644
--- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch
+++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
@@ -1,7 +1,7 @@
 From: Jason Wessel <jason.wessel at windriver.com>
 Date: Thu, 28 Jul 2011 12:42:23 -0500
 Subject: kgdb/serial: Short term workaround
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
 >  - KGDB (not yet disabled) is reportedly unusable on -rt right now due
diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch
deleted file mode 100644
index 667a9f6..0000000
--- a/debian/patches/features/all/rt/latency-hist.patch
+++ /dev/null
@@ -1,1819 +0,0 @@
-Subject: tracing: Add latency histograms
-From: Carsten Emde <C.Emde at osadl.org>
-Date: Tue, 19 Jul 2011 14:03:41 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-This patch provides a recording mechanism to store data of potential
-sources of system latencies. The recordings separately determine the
-latency caused by a delayed timer expiration, by a delayed wakeup of the
-related user space program and by the sum of both. The histograms can be
-enabled and reset individually. The data are accessible via the debug
-filesystem. For details please consult Documentation/trace/histograms.txt.
-
-Signed-off-by: Carsten Emde <C.Emde at osadl.org>
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-
----
- Documentation/trace/histograms.txt  |  186 +++++
- include/linux/hrtimer.h             |    4 
- include/linux/sched.h               |    7 
- include/trace/events/hist.h         |   73 ++
- include/trace/events/latency_hist.h |   29 
- kernel/time/hrtimer.c               |   21 
- kernel/trace/Kconfig                |  104 +++
- kernel/trace/Makefile               |    4 
- kernel/trace/latency_hist.c         | 1178 ++++++++++++++++++++++++++++++++++++
- kernel/trace/trace_irqsoff.c        |   11 
- 10 files changed, 1616 insertions(+), 1 deletion(-)
-
---- /dev/null
-+++ b/Documentation/trace/histograms.txt
-@@ -0,0 +1,186 @@
-+		Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_INTERRUPT_OFF_LATENCY
-+  CONFIG_PREEMPT_OFF_LATENCY
-+
-+
-+* Effective latencies
-+
-+Effective latencies are actually occuring during wakeup of a process. To
-+determine effective latencies, the kernel stores the time stamp when a
-+process is scheduled to be woken up, and determines the duration of the
-+wakeup time shortly before control is passed over to this process. Note
-+that the apparent latency in user space may be somewhat longer, since the
-+process may be interrupted after control is passed over to it but before
-+the execution in user space takes place. Simply measuring the interval
-+between enqueuing and wakeup may also not appropriate in cases when a
-+process is scheduled as a result of a timer expiration. The timer may have
-+missed its deadline, e.g. due to disabled interrupts, but this latency
-+would not be registered. Therefore, the offsets of missed timers are
-+recorded in a separate histogram. If both wakeup latency and missed timer
-+offsets are configured and enabled, a third histogram may be enabled that
-+records the overall latency as a sum of the timer latency, if any, and the
-+wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_WAKEUP_LATENCY
-+  CONFIG_MISSED_TIMER_OFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev	/sys			sysfs	defaults	0 0
-+nodev	/sys/kernel/debug	debugfs	defaults	0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+  cd $HISTDIR
-+  for i in `find . | grep /reset$`
-+  do
-+    echo 1 >$i
-+  done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid, if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first row and the number of samples in the second
-+row. To display only lines with a positive latency count, use, for
-+example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs	         samples
-+    0	      2984486876
-+    1	        49843506
-+    2	        58219047
-+    3	         5348126
-+    4	         2187960
-+    5	         3388262
-+    6	          959289
-+    7	          208294
-+    8	           40420
-+    9	            4485
-+   10	           14918
-+   11	           18340
-+   12	           25052
-+   13	           19455
-+   14	            5602
-+   15	             969
-+   16	              47
-+   17	              18
-+   18	              14
-+   19	               1
-+   20	               3
-+   21	               2
-+   22	               5
-+   23	               2
-+   25	               1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered, if this variable is set to 0.
-+
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered from the highest wakeup
-+latency that occurred in a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-+<- <PID> <Priority> <Command> <Timestamp>
-+
-+The value of <Timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <Latency>.
-+
-+When retrospectively searching for the origin of a latency and
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) was switching to the
-+late real-tlme task. In addition to the victim's data, also the
-+data of the possible culprit are therefore displayed after the
-+"<-" symbol.
-+
-+Finally, the timestamp of the time when the latency occurred
-+in <seconds>.<microseconds> after the most recent system boot
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -86,6 +86,7 @@ enum hrtimer_restart {
-  * @function:	timer expiry callback function
-  * @base:	pointer to the timer base (per cpu and per clock)
-  * @state:	state information (See bit values above)
-+ * @praecox:	timer expiry time if expired at the time of programming
-  * @is_rel:	Set if the timer was armed relative
-  *
-  * The hrtimer structure must be initialized by hrtimer_init()
-@@ -96,6 +97,9 @@ struct hrtimer {
- 	enum hrtimer_restart		(*function)(struct hrtimer *);
- 	struct hrtimer_clock_base	*base;
- 	u8				state;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	ktime_t				praecox;
-+#endif
- 	u8				is_rel;
- };
- 
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1009,7 +1009,12 @@ struct task_struct {
- 	/* Bitmask and counter of trace recursion: */
- 	unsigned long			trace_recursion;
- #endif /* CONFIG_TRACING */
--
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+	u64 preempt_timestamp_hist;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	long timer_offset;
-+#endif
-+#endif
- #ifdef CONFIG_KCOV
- 	/* Coverage collection mode enabled for this task (0 if disabled): */
- 	enum kcov_mode			kcov_mode;
---- /dev/null
-+++ b/include/trace/events/hist.h
-@@ -0,0 +1,73 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM hist
-+
-+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_HIST_H
-+
-+#include "latency_hist.h"
-+#include <linux/tracepoint.h>
-+
-+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
-+#define trace_preemptirqsoff_hist(a, b)
-+#define trace_preemptirqsoff_hist_rcuidle(a, b)
-+#else
-+TRACE_EVENT(preemptirqsoff_hist,
-+
-+	TP_PROTO(int reason, int starthist),
-+
-+	TP_ARGS(reason, starthist),
-+
-+	TP_STRUCT__entry(
-+		__field(int,	reason)
-+		__field(int,	starthist)
-+	),
-+
-+	TP_fast_assign(
-+		__entry->reason		= reason;
-+		__entry->starthist	= starthist;
-+	),
-+
-+	TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
-+		  __entry->starthist ? "start" : "stop")
-+);
-+#endif
-+
-+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+#define trace_hrtimer_interrupt(a, b, c, d)
-+#else
-+TRACE_EVENT(hrtimer_interrupt,
-+
-+	TP_PROTO(int cpu, long long offset, struct task_struct *curr,
-+		struct task_struct *task),
-+
-+	TP_ARGS(cpu, offset, curr, task),
-+
-+	TP_STRUCT__entry(
-+		__field(int,		cpu)
-+		__field(long long,	offset)
-+		__array(char,		ccomm,	TASK_COMM_LEN)
-+		__field(int,		cprio)
-+		__array(char,		tcomm,	TASK_COMM_LEN)
-+		__field(int,		tprio)
-+	),
-+
-+	TP_fast_assign(
-+		__entry->cpu	= cpu;
-+		__entry->offset	= offset;
-+		memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
-+		__entry->cprio  = curr->prio;
-+		memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
-+			task != NULL ? TASK_COMM_LEN : 7);
-+		__entry->tprio  = task != NULL ? task->prio : -1;
-+	),
-+
-+	TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
-+		__entry->cpu, __entry->offset, __entry->ccomm,
-+		__entry->cprio, __entry->tcomm, __entry->tprio)
-+);
-+#endif
-+
-+#endif /* _TRACE_HIST_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/include/trace/events/latency_hist.h
-@@ -0,0 +1,29 @@
-+#ifndef _LATENCY_HIST_H
-+#define _LATENCY_HIST_H
-+
-+enum hist_action {
-+	IRQS_ON,
-+	PREEMPT_ON,
-+	TRACE_STOP,
-+	IRQS_OFF,
-+	PREEMPT_OFF,
-+	TRACE_START,
-+};
-+
-+static char *actions[] = {
-+	"IRQS_ON",
-+	"PREEMPT_ON",
-+	"TRACE_STOP",
-+	"IRQS_OFF",
-+	"PREEMPT_OFF",
-+	"TRACE_START",
-+};
-+
-+static inline char *getaction(int action)
-+{
-+	if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0]))
-+		return actions[action];
-+	return "unknown";
-+}
-+
-+#endif /* _LATENCY_HIST_H */
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -50,6 +50,7 @@
- #include <linux/sched/nohz.h>
- #include <linux/sched/debug.h>
- #include <linux/timer.h>
-+#include <trace/events/hist.h>
- #include <linux/freezer.h>
- 
- #include <linux/uaccess.h>
-@@ -960,7 +961,16 @@ void hrtimer_start_range_ns(struct hrtim
- 
- 	/* Switch the timer base, if necessary: */
- 	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	{
-+		ktime_t now = new_base->get_time();
- 
-+		if (ktime_to_ns(tim) < ktime_to_ns(now))
-+			timer->praecox = now;
-+		else
-+			timer->praecox = ktime_set(0, 0);
-+	}
-+#endif
- 	leftmost = enqueue_hrtimer(timer, new_base);
- 	if (!leftmost)
- 		goto unlock;
-@@ -1239,6 +1249,8 @@ static void __run_hrtimer(struct hrtimer
- 	cpu_base->running = NULL;
- }
- 
-+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-+
- static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
- 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1258,6 +1270,15 @@ static void __hrtimer_run_queues(struct
- 
- 			timer = container_of(node, struct hrtimer, node);
- 
-+			trace_hrtimer_interrupt(raw_smp_processor_id(),
-+			    ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
-+				timer->praecox : hrtimer_get_expires(timer),
-+				basenow)),
-+			    current,
-+			    timer->function == hrtimer_wakeup ?
-+			    container_of(timer, struct hrtimer_sleeper,
-+				timer)->task : NULL);
-+
- 			/*
- 			 * The immediate goal for using the softexpires is
- 			 * minimizing wakeups, not running timers at the
---- a/kernel/trace/Kconfig
-+++ b/kernel/trace/Kconfig
-@@ -184,6 +184,24 @@ config IRQSOFF_TRACER
- 	  enabled. This option and the preempt-off timing option can be
- 	  used together or separately.)
- 
-+config INTERRUPT_OFF_HIST
-+	bool "Interrupts-off Latency Histogram"
-+	depends on IRQSOFF_TRACER
-+	help
-+	  This option generates continuously updated histograms (one per cpu)
-+	  of the duration of time periods with interrupts disabled. The
-+	  histograms are disabled by default. To enable them, write a non-zero
-+	  number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-+
-+	  If PREEMPT_OFF_HIST is also selected, additional histograms (one
-+	  per cpu) are generated that accumulate the duration of time periods
-+	  when both interrupts and preemption are disabled. The histogram data
-+	  will be located in the debug file system at
-+
-+	      /sys/kernel/debug/tracing/latency_hist/irqsoff
-+
- config PREEMPT_TRACER
- 	bool "Preemption-off Latency Tracer"
- 	default n
-@@ -208,6 +226,24 @@ config PREEMPT_TRACER
- 	  enabled. This option and the irqs-off timing option can be
- 	  used together or separately.)
- 
-+config PREEMPT_OFF_HIST
-+	bool "Preemption-off Latency Histogram"
-+	depends on PREEMPT_TRACER
-+	help
-+	  This option generates continuously updated histograms (one per cpu)
-+	  of the duration of time periods with preemption disabled. The
-+	  histograms are disabled by default. To enable them, write a non-zero
-+	  number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-+
-+	  If INTERRUPT_OFF_HIST is also selected, additional histograms (one
-+	  per cpu) are generated that accumulate the duration of time periods
-+	  when both interrupts and preemption are disabled. The histogram data
-+	  will be located in the debug file system at
-+
-+	      /sys/kernel/debug/tracing/latency_hist/preemptoff
-+
- config SCHED_TRACER
- 	bool "Scheduling Latency Tracer"
- 	select GENERIC_TRACER
-@@ -253,6 +289,74 @@ config HWLAT_TRACER
- 	 file. Every time a latency is greater than tracing_thresh, it will
- 	 be recorded into the ring buffer.
- 
-+config WAKEUP_LATENCY_HIST
-+	bool "Scheduling Latency Histogram"
-+	depends on SCHED_TRACER
-+	help
-+	  This option generates continuously updated histograms (one per cpu)
-+	  of the scheduling latency of the highest priority task.
-+	  The histograms are disabled by default. To enable them, write a
-+	  non-zero number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/wakeup
-+
-+	  Two different algorithms are used, one to determine the latency of
-+	  processes that exclusively use the highest priority of the system and
-+	  another one to determine the latency of processes that share the
-+	  highest system priority with other processes. The former is used to
-+	  improve hardware and system software, the latter to optimize the
-+	  priority design of a given system. The histogram data will be
-+	  located in the debug file system at
-+
-+	      /sys/kernel/debug/tracing/latency_hist/wakeup
-+
-+	  and
-+
-+	      /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
-+
-+	  If both Scheduling Latency Histogram and Missed Timer Offsets
-+	  Histogram are selected, additional histogram data will be collected
-+	  that contain, in addition to the wakeup latency, the timer latency, in
-+	  case the wakeup was triggered by an expired timer. These histograms
-+	  are available in the
-+
-+	      /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-+
-+	  directory. They reflect the apparent interrupt and scheduling latency
-+	  and are best suitable to determine the worst-case latency of a given
-+	  system. To enable these histograms, write a non-zero number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-+
-+config MISSED_TIMER_OFFSETS_HIST
-+	depends on HIGH_RES_TIMERS
-+	select GENERIC_TRACER
-+	bool "Missed Timer Offsets Histogram"
-+	help
-+	  Generate a histogram of missed timer offsets in microseconds. The
-+	  histograms are disabled by default. To enable them, write a non-zero
-+	  number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
-+
-+	  The histogram data will be located in the debug file system at
-+
-+	      /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
-+
-+	  If both Scheduling Latency Histogram and Missed Timer Offsets
-+	  Histogram are selected, additional histogram data will be collected
-+	  that contain, in addition to the wakeup latency, the timer latency, in
-+	  case the wakeup was triggered by an expired timer. These histograms
-+	  are available in the
-+
-+	      /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-+
-+	  directory. They reflect the apparent interrupt and scheduling latency
-+	  and are best suitable to determine the worst-case latency of a given
-+	  system. To enable these histograms, write a non-zero number to
-+
-+	      /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-+
- config ENABLE_DEFAULT_TRACERS
- 	bool "Trace process context switches and events"
- 	depends on !GENERIC_TRACER
---- a/kernel/trace/Makefile
-+++ b/kernel/trace/Makefile
-@@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_ir
- obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
- obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
-+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
-+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
- obj-$(CONFIG_NOP_TRACER) += trace_nop.o
- obj-$(CONFIG_STACK_TRACER) += trace_stack.o
- obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
---- /dev/null
-+++ b/kernel/trace/latency_hist.c
-@@ -0,0 +1,1178 @@
-+/*
-+ * kernel/trace/latency_hist.c
-+ *
-+ * Add support for histograms of preemption-off latency and
-+ * interrupt-off latency and wakeup latency, it depends on
-+ * Real-Time Preemption Support.
-+ *
-+ *  Copyright (C) 2005 MontaVista Software, Inc.
-+ *  Yi Yang <yyang at ch.mvista.com>
-+ *
-+ *  Converted to work with the new latency tracer.
-+ *  Copyright (C) 2008 Red Hat, Inc.
-+ *    Steven Rostedt <srostedt at redhat.com>
-+ *
-+ */
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/percpu.h>
-+#include <linux/kallsyms.h>
-+#include <linux/uaccess.h>
-+#include <linux/sched.h>
-+#include <linux/sched/rt.h>
-+#include <linux/slab.h>
-+#include <linux/atomic.h>
-+#include <asm/div64.h>
-+
-+#include "trace.h"
-+#include <trace/events/sched.h>
-+
-+#define NSECS_PER_USECS 1000L
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/hist.h>
-+
-+enum {
-+	IRQSOFF_LATENCY = 0,
-+	PREEMPTOFF_LATENCY,
-+	PREEMPTIRQSOFF_LATENCY,
-+	WAKEUP_LATENCY,
-+	WAKEUP_LATENCY_SHAREDPRIO,
-+	MISSED_TIMER_OFFSETS,
-+	TIMERANDWAKEUP_LATENCY,
-+	MAX_LATENCY_TYPE,
-+};
-+
-+#define MAX_ENTRY_NUM 10240
-+
-+struct hist_data {
-+	atomic_t hist_mode; /* 0 log, 1 don't log */
-+	long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
-+	long min_lat;
-+	long max_lat;
-+	unsigned long long below_hist_bound_samples;
-+	unsigned long long above_hist_bound_samples;
-+	long long accumulate_lat;
-+	unsigned long long total_samples;
-+	unsigned long long hist_array[MAX_ENTRY_NUM];
-+};
-+
-+struct enable_data {
-+	int latency_type;
-+	int enabled;
-+};
-+
-+static char *latency_hist_dir_root = "latency_hist";
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
-+static char *irqsoff_hist_dir = "irqsoff";
-+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
-+static DEFINE_PER_CPU(int, hist_irqsoff_counting);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
-+static char *preemptoff_hist_dir = "preemptoff";
-+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
-+static DEFINE_PER_CPU(int, hist_preemptoff_counting);
-+#endif
-+
-+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
-+static char *preemptirqsoff_hist_dir = "preemptirqsoff";
-+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
-+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
-+#endif
-+
-+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
-+static struct enable_data preemptirqsoff_enabled_data = {
-+	.latency_type = PREEMPTIRQSOFF_LATENCY,
-+	.enabled = 0,
-+};
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+struct maxlatproc_data {
-+	char comm[FIELD_SIZEOF(struct task_struct, comm)];
-+	char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
-+	int pid;
-+	int current_pid;
-+	int prio;
-+	int current_prio;
-+	long latency;
-+	long timeroffset;
-+	u64 timestamp;
-+};
-+#endif
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
-+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
-+static char *wakeup_latency_hist_dir = "wakeup";
-+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
-+static notrace void probe_wakeup_latency_hist_start(void *v,
-+	struct task_struct *p, int success);
-+static notrace void probe_wakeup_latency_hist_stop(void *v,
-+	struct task_struct *prev, struct task_struct *next);
-+static notrace void probe_sched_migrate_task(void *,
-+	struct task_struct *task, int cpu);
-+static struct enable_data wakeup_latency_enabled_data = {
-+	.latency_type = WAKEUP_LATENCY,
-+	.enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
-+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
-+static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
-+static DEFINE_PER_CPU(int, wakeup_sharedprio);
-+static unsigned long wakeup_pid;
-+#endif
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
-+static char *missed_timer_offsets_dir = "missed_timer_offsets";
-+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
-+	long long offset, struct task_struct *curr, struct task_struct *task);
-+static struct enable_data missed_timer_offsets_enabled_data = {
-+	.latency_type = MISSED_TIMER_OFFSETS,
-+	.enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
-+static unsigned long missed_timer_offsets_pid;
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
-+static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
-+static struct enable_data timerandwakeup_enabled_data = {
-+	.latency_type = TIMERANDWAKEUP_LATENCY,
-+	.enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
-+#endif
-+
-+void notrace latency_hist(int latency_type, int cpu, long latency,
-+			  long timeroffset, u64 stop,
-+			  struct task_struct *p)
-+{
-+	struct hist_data *my_hist;
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+	struct maxlatproc_data *mp = NULL;
-+#endif
-+
-+	if (!cpu_possible(cpu) || latency_type < 0 ||
-+	    latency_type >= MAX_LATENCY_TYPE)
-+		return;
-+
-+	switch (latency_type) {
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+	case IRQSOFF_LATENCY:
-+		my_hist = &per_cpu(irqsoff_hist, cpu);
-+		break;
-+#endif
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+	case PREEMPTOFF_LATENCY:
-+		my_hist = &per_cpu(preemptoff_hist, cpu);
-+		break;
-+#endif
-+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-+	case PREEMPTIRQSOFF_LATENCY:
-+		my_hist = &per_cpu(preemptirqsoff_hist, cpu);
-+		break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+	case WAKEUP_LATENCY:
-+		my_hist = &per_cpu(wakeup_latency_hist, cpu);
-+		mp = &per_cpu(wakeup_maxlatproc, cpu);
-+		break;
-+	case WAKEUP_LATENCY_SHAREDPRIO:
-+		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
-+		mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
-+		break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	case MISSED_TIMER_OFFSETS:
-+		my_hist = &per_cpu(missed_timer_offsets, cpu);
-+		mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
-+		break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+	case TIMERANDWAKEUP_LATENCY:
-+		my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
-+		mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
-+		break;
-+#endif
-+
-+	default:
-+		return;
-+	}
-+
-+	latency += my_hist->offset;
-+
-+	if (atomic_read(&my_hist->hist_mode) == 0)
-+		return;
-+
-+	if (latency < 0 || latency >= MAX_ENTRY_NUM) {
-+		if (latency < 0)
-+			my_hist->below_hist_bound_samples++;
-+		else
-+			my_hist->above_hist_bound_samples++;
-+	} else
-+		my_hist->hist_array[latency]++;
-+
-+	if (unlikely(latency > my_hist->max_lat ||
-+	    my_hist->min_lat == LONG_MAX)) {
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+		if (latency_type == WAKEUP_LATENCY ||
-+		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
-+		    latency_type == MISSED_TIMER_OFFSETS ||
-+		    latency_type == TIMERANDWAKEUP_LATENCY) {
-+			strncpy(mp->comm, p->comm, sizeof(mp->comm));
-+			strncpy(mp->current_comm, current->comm,
-+			    sizeof(mp->current_comm));
-+			mp->pid = task_pid_nr(p);
-+			mp->current_pid = task_pid_nr(current);
-+			mp->prio = p->prio;
-+			mp->current_prio = current->prio;
-+			mp->latency = latency;
-+			mp->timeroffset = timeroffset;
-+			mp->timestamp = stop;
-+		}
-+#endif
-+		my_hist->max_lat = latency;
-+	}
-+	if (unlikely(latency < my_hist->min_lat))
-+		my_hist->min_lat = latency;
-+	my_hist->total_samples++;
-+	my_hist->accumulate_lat += latency;
-+}
-+
-+static void *l_start(struct seq_file *m, loff_t *pos)
-+{
-+	loff_t *index_ptr = NULL;
-+	loff_t index = *pos;
-+	struct hist_data *my_hist = m->private;
-+
-+	if (index == 0) {
-+		char minstr[32], avgstr[32], maxstr[32];
-+
-+		atomic_dec(&my_hist->hist_mode);
-+
-+		if (likely(my_hist->total_samples)) {
-+			long avg = (long) div64_s64(my_hist->accumulate_lat,
-+			    my_hist->total_samples);
-+			snprintf(minstr, sizeof(minstr), "%ld",
-+			    my_hist->min_lat - my_hist->offset);
-+			snprintf(avgstr, sizeof(avgstr), "%ld",
-+			    avg - my_hist->offset);
-+			snprintf(maxstr, sizeof(maxstr), "%ld",
-+			    my_hist->max_lat - my_hist->offset);
-+		} else {
-+			strcpy(minstr, "<undef>");
-+			strcpy(avgstr, minstr);
-+			strcpy(maxstr, minstr);
-+		}
-+
-+		seq_printf(m, "#Minimum latency: %s microseconds\n"
-+			   "#Average latency: %s microseconds\n"
-+			   "#Maximum latency: %s microseconds\n"
-+			   "#Total samples: %llu\n"
-+			   "#There are %llu samples lower than %ld"
-+			   " microseconds.\n"
-+			   "#There are %llu samples greater or equal"
-+			   " than %ld microseconds.\n"
-+			   "#usecs\t%16s\n",
-+			   minstr, avgstr, maxstr,
-+			   my_hist->total_samples,
-+			   my_hist->below_hist_bound_samples,
-+			   -my_hist->offset,
-+			   my_hist->above_hist_bound_samples,
-+			   MAX_ENTRY_NUM - my_hist->offset,
-+			   "samples");
-+	}
-+	if (index < MAX_ENTRY_NUM) {
-+		index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
-+		if (index_ptr)
-+			*index_ptr = index;
-+	}
-+
-+	return index_ptr;
-+}
-+
-+static void *l_next(struct seq_file *m, void *p, loff_t *pos)
-+{
-+	loff_t *index_ptr = p;
-+	struct hist_data *my_hist = m->private;
-+
-+	if (++*pos >= MAX_ENTRY_NUM) {
-+		atomic_inc(&my_hist->hist_mode);
-+		return NULL;
-+	}
-+	*index_ptr = *pos;
-+	return index_ptr;
-+}
-+
-+static void l_stop(struct seq_file *m, void *p)
-+{
-+	kfree(p);
-+}
-+
-+static int l_show(struct seq_file *m, void *p)
-+{
-+	int index = *(loff_t *) p;
-+	struct hist_data *my_hist = m->private;
-+
-+	seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
-+	    my_hist->hist_array[index]);
-+	return 0;
-+}
-+
-+static const struct seq_operations latency_hist_seq_op = {
-+	.start = l_start,
-+	.next  = l_next,
-+	.stop  = l_stop,
-+	.show  = l_show
-+};
-+
-+static int latency_hist_open(struct inode *inode, struct file *file)
-+{
-+	int ret;
-+
-+	ret = seq_open(file, &latency_hist_seq_op);
-+	if (!ret) {
-+		struct seq_file *seq = file->private_data;
-+		seq->private = inode->i_private;
-+	}
-+	return ret;
-+}
-+
-+static const struct file_operations latency_hist_fops = {
-+	.open = latency_hist_open,
-+	.read = seq_read,
-+	.llseek = seq_lseek,
-+	.release = seq_release,
-+};
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static void clear_maxlatprocdata(struct maxlatproc_data *mp)
-+{
-+	mp->comm[0] = mp->current_comm[0] = '\0';
-+	mp->prio = mp->current_prio = mp->pid = mp->current_pid =
-+	    mp->latency = mp->timeroffset = -1;
-+	mp->timestamp = 0;
-+}
-+#endif
-+
-+static void hist_reset(struct hist_data *hist)
-+{
-+	atomic_dec(&hist->hist_mode);
-+
-+	memset(hist->hist_array, 0, sizeof(hist->hist_array));
-+	hist->below_hist_bound_samples = 0ULL;
-+	hist->above_hist_bound_samples = 0ULL;
-+	hist->min_lat = LONG_MAX;
-+	hist->max_lat = LONG_MIN;
-+	hist->total_samples = 0ULL;
-+	hist->accumulate_lat = 0LL;
-+
-+	atomic_inc(&hist->hist_mode);
-+}
-+
-+static ssize_t
-+latency_hist_reset(struct file *file, const char __user *a,
-+		   size_t size, loff_t *off)
-+{
-+	int cpu;
-+	struct hist_data *hist = NULL;
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+	struct maxlatproc_data *mp = NULL;
-+#endif
-+	off_t latency_type = (off_t) file->private_data;
-+
-+	for_each_online_cpu(cpu) {
-+
-+		switch (latency_type) {
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+		case PREEMPTOFF_LATENCY:
-+			hist = &per_cpu(preemptoff_hist, cpu);
-+			break;
-+#endif
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+		case IRQSOFF_LATENCY:
-+			hist = &per_cpu(irqsoff_hist, cpu);
-+			break;
-+#endif
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+		case PREEMPTIRQSOFF_LATENCY:
-+			hist = &per_cpu(preemptirqsoff_hist, cpu);
-+			break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+		case WAKEUP_LATENCY:
-+			hist = &per_cpu(wakeup_latency_hist, cpu);
-+			mp = &per_cpu(wakeup_maxlatproc, cpu);
-+			break;
-+		case WAKEUP_LATENCY_SHAREDPRIO:
-+			hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
-+			mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
-+			break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+		case MISSED_TIMER_OFFSETS:
-+			hist = &per_cpu(missed_timer_offsets, cpu);
-+			mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
-+			break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+		case TIMERANDWAKEUP_LATENCY:
-+			hist = &per_cpu(timerandwakeup_latency_hist, cpu);
-+			mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
-+			break;
-+#endif
-+		}
-+
-+		hist_reset(hist);
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+		if (latency_type == WAKEUP_LATENCY ||
-+		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
-+		    latency_type == MISSED_TIMER_OFFSETS ||
-+		    latency_type == TIMERANDWAKEUP_LATENCY)
-+			clear_maxlatprocdata(mp);
-+#endif
-+	}
-+
-+	return size;
-+}
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static ssize_t
-+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+	char buf[64];
-+	int r;
-+	unsigned long *this_pid = file->private_data;
-+
-+	r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
-+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+}
-+
-+static ssize_t do_pid(struct file *file, const char __user *ubuf,
-+		      size_t cnt, loff_t *ppos)
-+{
-+	char buf[64];
-+	unsigned long pid;
-+	unsigned long *this_pid = file->private_data;
-+
-+	if (cnt >= sizeof(buf))
-+		return -EINVAL;
-+
-+	if (copy_from_user(&buf, ubuf, cnt))
-+		return -EFAULT;
-+
-+	buf[cnt] = '\0';
-+
-+	if (kstrtoul(buf, 10, &pid))
-+		return -EINVAL;
-+
-+	*this_pid = pid;
-+
-+	return cnt;
-+}
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static ssize_t
-+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+	int r;
-+	struct maxlatproc_data *mp = file->private_data;
-+	int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
-+	unsigned long long t;
-+	unsigned long usecs, secs;
-+	char *buf;
-+
-+	if (mp->pid == -1 || mp->current_pid == -1) {
-+		buf = "(none)\n";
-+		return simple_read_from_buffer(ubuf, cnt, ppos, buf,
-+		    strlen(buf));
-+	}
-+
-+	buf = kmalloc(strmaxlen, GFP_KERNEL);
-+	if (buf == NULL)
-+		return -ENOMEM;
-+
-+	t = ns2usecs(mp->timestamp);
-+	usecs = do_div(t, USEC_PER_SEC);
-+	secs = (unsigned long) t;
-+	r = snprintf(buf, strmaxlen,
-+	    "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
-+	    MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
-+	    mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
-+	    secs, usecs);
-+	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+	kfree(buf);
-+	return r;
-+}
-+#endif
-+
-+static ssize_t
-+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+	char buf[64];
-+	struct enable_data *ed = file->private_data;
-+	int r;
-+
-+	r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
-+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+}
-+
-+static ssize_t
-+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+	char buf[64];
-+	long enable;
-+	struct enable_data *ed = file->private_data;
-+
-+	if (cnt >= sizeof(buf))
-+		return -EINVAL;
-+
-+	if (copy_from_user(&buf, ubuf, cnt))
-+		return -EFAULT;
-+
-+	buf[cnt] = 0;
-+
-+	if (kstrtoul(buf, 10, &enable))
-+		return -EINVAL;
-+
-+	if ((enable && ed->enabled) || (!enable && !ed->enabled))
-+		return cnt;
-+
-+	if (enable) {
-+		int ret;
-+
-+		switch (ed->latency_type) {
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+		case PREEMPTIRQSOFF_LATENCY:
-+			ret = register_trace_preemptirqsoff_hist(
-+			    probe_preemptirqsoff_hist, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_preemptirqsoff_hist "
-+				    "to trace_preemptirqsoff_hist\n");
-+				return ret;
-+			}
-+			break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+		case WAKEUP_LATENCY:
-+			ret = register_trace_sched_wakeup(
-+			    probe_wakeup_latency_hist_start, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_wakeup_latency_hist_start "
-+				    "to trace_sched_wakeup\n");
-+				return ret;
-+			}
-+			ret = register_trace_sched_wakeup_new(
-+			    probe_wakeup_latency_hist_start, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_wakeup_latency_hist_start "
-+				    "to trace_sched_wakeup_new\n");
-+				unregister_trace_sched_wakeup(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				return ret;
-+			}
-+			ret = register_trace_sched_switch(
-+			    probe_wakeup_latency_hist_stop, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_wakeup_latency_hist_stop "
-+				    "to trace_sched_switch\n");
-+				unregister_trace_sched_wakeup(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				unregister_trace_sched_wakeup_new(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				return ret;
-+			}
-+			ret = register_trace_sched_migrate_task(
-+			    probe_sched_migrate_task, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_sched_migrate_task "
-+				    "to trace_sched_migrate_task\n");
-+				unregister_trace_sched_wakeup(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				unregister_trace_sched_wakeup_new(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				unregister_trace_sched_switch(
-+				    probe_wakeup_latency_hist_stop, NULL);
-+				return ret;
-+			}
-+			break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+		case MISSED_TIMER_OFFSETS:
-+			ret = register_trace_hrtimer_interrupt(
-+			    probe_hrtimer_interrupt, NULL);
-+			if (ret) {
-+				pr_info("wakeup trace: Couldn't assign "
-+				    "probe_hrtimer_interrupt "
-+				    "to trace_hrtimer_interrupt\n");
-+				return ret;
-+			}
-+			break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+		case TIMERANDWAKEUP_LATENCY:
-+			if (!wakeup_latency_enabled_data.enabled ||
-+			    !missed_timer_offsets_enabled_data.enabled)
-+				return -EINVAL;
-+			break;
-+#endif
-+		default:
-+			break;
-+		}
-+	} else {
-+		switch (ed->latency_type) {
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+		case PREEMPTIRQSOFF_LATENCY:
-+			{
-+				int cpu;
-+
-+				unregister_trace_preemptirqsoff_hist(
-+				    probe_preemptirqsoff_hist, NULL);
-+				for_each_online_cpu(cpu) {
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+					per_cpu(hist_irqsoff_counting,
-+					    cpu) = 0;
-+#endif
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+					per_cpu(hist_preemptoff_counting,
-+					    cpu) = 0;
-+#endif
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+					per_cpu(hist_preemptirqsoff_counting,
-+					    cpu) = 0;
-+#endif
-+				}
-+			}
-+			break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+		case WAKEUP_LATENCY:
-+			{
-+				int cpu;
-+
-+				unregister_trace_sched_wakeup(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				unregister_trace_sched_wakeup_new(
-+				    probe_wakeup_latency_hist_start, NULL);
-+				unregister_trace_sched_switch(
-+				    probe_wakeup_latency_hist_stop, NULL);
-+				unregister_trace_sched_migrate_task(
-+				    probe_sched_migrate_task, NULL);
-+
-+				for_each_online_cpu(cpu) {
-+					per_cpu(wakeup_task, cpu) = NULL;
-+					per_cpu(wakeup_sharedprio, cpu) = 0;
-+				}
-+			}
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+			timerandwakeup_enabled_data.enabled = 0;
-+#endif
-+			break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+		case MISSED_TIMER_OFFSETS:
-+			unregister_trace_hrtimer_interrupt(
-+			    probe_hrtimer_interrupt, NULL);
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+			timerandwakeup_enabled_data.enabled = 0;
-+#endif
-+			break;
-+#endif
-+		default:
-+			break;
-+		}
-+	}
-+	ed->enabled = enable;
-+	return cnt;
-+}
-+
-+static const struct file_operations latency_hist_reset_fops = {
-+	.open = tracing_open_generic,
-+	.write = latency_hist_reset,
-+};
-+
-+static const struct file_operations enable_fops = {
-+	.open = tracing_open_generic,
-+	.read = show_enable,
-+	.write = do_enable,
-+};
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static const struct file_operations pid_fops = {
-+	.open = tracing_open_generic,
-+	.read = show_pid,
-+	.write = do_pid,
-+};
-+
-+static const struct file_operations maxlatproc_fops = {
-+	.open = tracing_open_generic,
-+	.read = show_maxlatproc,
-+};
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(void *v, int reason,
-+	int starthist)
-+{
-+	int cpu = raw_smp_processor_id();
-+	int time_set = 0;
-+
-+	if (starthist) {
-+		u64 uninitialized_var(start);
-+
-+		if (!preempt_count() && !irqs_disabled())
-+			return;
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+		if ((reason == IRQS_OFF || reason == TRACE_START) &&
-+		    !per_cpu(hist_irqsoff_counting, cpu)) {
-+			per_cpu(hist_irqsoff_counting, cpu) = 1;
-+			start = ftrace_now(cpu);
-+			time_set++;
-+			per_cpu(hist_irqsoff_start, cpu) = start;
-+		}
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+		if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
-+		    !per_cpu(hist_preemptoff_counting, cpu)) {
-+			per_cpu(hist_preemptoff_counting, cpu) = 1;
-+			if (!(time_set++))
-+				start = ftrace_now(cpu);
-+			per_cpu(hist_preemptoff_start, cpu) = start;
-+		}
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+		if (per_cpu(hist_irqsoff_counting, cpu) &&
-+		    per_cpu(hist_preemptoff_counting, cpu) &&
-+		    !per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+			per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
-+			if (!time_set)
-+				start = ftrace_now(cpu);
-+			per_cpu(hist_preemptirqsoff_start, cpu) = start;
-+		}
-+#endif
-+	} else {
-+		u64 uninitialized_var(stop);
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+		if ((reason == IRQS_ON || reason == TRACE_STOP) &&
-+		    per_cpu(hist_irqsoff_counting, cpu)) {
-+			u64 start = per_cpu(hist_irqsoff_start, cpu);
-+
-+			stop = ftrace_now(cpu);
-+			time_set++;
-+			if (start) {
-+				long latency = ((long) (stop - start)) /
-+				    NSECS_PER_USECS;
-+
-+				latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
-+				    stop, NULL);
-+			}
-+			per_cpu(hist_irqsoff_counting, cpu) = 0;
-+		}
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+		if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
-+		    per_cpu(hist_preemptoff_counting, cpu)) {
-+			u64 start = per_cpu(hist_preemptoff_start, cpu);
-+
-+			if (!(time_set++))
-+				stop = ftrace_now(cpu);
-+			if (start) {
-+				long latency = ((long) (stop - start)) /
-+				    NSECS_PER_USECS;
-+
-+				latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
-+				    0, stop, NULL);
-+			}
-+			per_cpu(hist_preemptoff_counting, cpu) = 0;
-+		}
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+		if ((!per_cpu(hist_irqsoff_counting, cpu) ||
-+		     !per_cpu(hist_preemptoff_counting, cpu)) &&
-+		   per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+			u64 start = per_cpu(hist_preemptirqsoff_start, cpu);
-+
-+			if (!time_set)
-+				stop = ftrace_now(cpu);
-+			if (start) {
-+				long latency = ((long) (stop - start)) /
-+				    NSECS_PER_USECS;
-+
-+				latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
-+				    latency, 0, stop, NULL);
-+			}
-+			per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
-+		}
-+#endif
-+	}
-+}
-+#endif
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+static DEFINE_RAW_SPINLOCK(wakeup_lock);
-+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
-+	int cpu)
-+{
-+	int old_cpu = task_cpu(task);
-+
-+	if (cpu != old_cpu) {
-+		unsigned long flags;
-+		struct task_struct *cpu_wakeup_task;
-+
-+		raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+		cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
-+		if (task == cpu_wakeup_task) {
-+			put_task_struct(cpu_wakeup_task);
-+			per_cpu(wakeup_task, old_cpu) = NULL;
-+			cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
-+			get_task_struct(cpu_wakeup_task);
-+		}
-+
-+		raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+	}
-+}
-+
-+static notrace void probe_wakeup_latency_hist_start(void *v,
-+	struct task_struct *p, int success)
-+{
-+	unsigned long flags;
-+	struct task_struct *curr = current;
-+	int cpu = task_cpu(p);
-+	struct task_struct *cpu_wakeup_task;
-+
-+	raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+	cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-+
-+	if (wakeup_pid) {
-+		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
-+		    p->prio == curr->prio)
-+			per_cpu(wakeup_sharedprio, cpu) = 1;
-+		if (likely(wakeup_pid != task_pid_nr(p)))
-+			goto out;
-+	} else {
-+		if (likely(!rt_task(p)) ||
-+		    (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
-+		    p->prio > curr->prio)
-+			goto out;
-+		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
-+		    p->prio == curr->prio)
-+			per_cpu(wakeup_sharedprio, cpu) = 1;
-+	}
-+
-+	if (cpu_wakeup_task)
-+		put_task_struct(cpu_wakeup_task);
-+	cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
-+	get_task_struct(cpu_wakeup_task);
-+	cpu_wakeup_task->preempt_timestamp_hist =
-+		ftrace_now(raw_smp_processor_id());
-+out:
-+	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+}
-+
-+static notrace void probe_wakeup_latency_hist_stop(void *v,
-+	struct task_struct *prev, struct task_struct *next)
-+{
-+	unsigned long flags;
-+	int cpu = task_cpu(next);
-+	long latency;
-+	u64 stop;
-+	struct task_struct *cpu_wakeup_task;
-+
-+	raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+	cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-+
-+	if (cpu_wakeup_task == NULL)
-+		goto out;
-+
-+	/* Already running? */
-+	if (unlikely(current == cpu_wakeup_task))
-+		goto out_reset;
-+
-+	if (next != cpu_wakeup_task) {
-+		if (next->prio < cpu_wakeup_task->prio)
-+			goto out_reset;
-+
-+		if (next->prio == cpu_wakeup_task->prio)
-+			per_cpu(wakeup_sharedprio, cpu) = 1;
-+
-+		goto out;
-+	}
-+
-+	if (current->prio == cpu_wakeup_task->prio)
-+		per_cpu(wakeup_sharedprio, cpu) = 1;
-+
-+	/*
-+	 * The task we are waiting for is about to be switched to.
-+	 * Calculate latency and store it in histogram.
-+	 */
-+	stop = ftrace_now(raw_smp_processor_id());
-+
-+	latency = ((long) (stop - next->preempt_timestamp_hist)) /
-+	    NSECS_PER_USECS;
-+
-+	if (per_cpu(wakeup_sharedprio, cpu)) {
-+		latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
-+		    next);
-+		per_cpu(wakeup_sharedprio, cpu) = 0;
-+	} else {
-+		latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+		if (timerandwakeup_enabled_data.enabled) {
-+			latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
-+			    next->timer_offset + latency, next->timer_offset,
-+			    stop, next);
-+		}
-+#endif
-+	}
-+
-+out_reset:
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	next->timer_offset = 0;
-+#endif
-+	put_task_struct(cpu_wakeup_task);
-+	per_cpu(wakeup_task, cpu) = NULL;
-+out:
-+	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+}
-+#endif
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
-+	long long latency_ns, struct task_struct *curr,
-+	struct task_struct *task)
-+{
-+	if (latency_ns <= 0 && task != NULL && rt_task(task) &&
-+	    (task->prio < curr->prio ||
-+	    (task->prio == curr->prio &&
-+	    !cpumask_test_cpu(cpu, task->cpus_ptr)))) {
-+		long latency;
-+		u64 now;
-+
-+		if (missed_timer_offsets_pid) {
-+			if (likely(missed_timer_offsets_pid !=
-+			    task_pid_nr(task)))
-+				return;
-+		}
-+
-+		now = ftrace_now(cpu);
-+		latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
-+		latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
-+		    task);
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+		task->timer_offset = latency;
-+#endif
-+	}
-+}
-+#endif
-+
-+static __init int latency_hist_init(void)
-+{
-+	struct dentry *latency_hist_root = NULL;
-+	struct dentry *dentry;
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+	struct dentry *dentry_sharedprio;
-+#endif
-+	struct dentry *entry;
-+	struct dentry *enable_root;
-+	int i = 0;
-+	struct hist_data *my_hist;
-+	char name[64];
-+	char *cpufmt = "CPU%d";
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+	char *cpufmt_maxlatproc = "max_latency-CPU%d";
-+	struct maxlatproc_data *mp = NULL;
-+#endif
-+
-+	dentry = tracing_init_dentry();
-+	latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
-+	enable_root = debugfs_create_dir("enable", latency_hist_root);
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+	dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(irqsoff_hist, i), &latency_hist_fops);
-+		my_hist = &per_cpu(irqsoff_hist, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+	}
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+	dentry = debugfs_create_dir(preemptoff_hist_dir,
-+	    latency_hist_root);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(preemptoff_hist, i), &latency_hist_fops);
-+		my_hist = &per_cpu(preemptoff_hist, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+	}
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+	dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
-+	    latency_hist_root);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
-+		my_hist = &per_cpu(preemptirqsoff_hist, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+	}
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+	entry = debugfs_create_file("preemptirqsoff", 0644,
-+	    enable_root, (void *)&preemptirqsoff_enabled_data,
-+	    &enable_fops);
-+#endif
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+	dentry = debugfs_create_dir(wakeup_latency_hist_dir,
-+	    latency_hist_root);
-+	dentry_sharedprio = debugfs_create_dir(
-+	    wakeup_latency_hist_dir_sharedprio, dentry);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(wakeup_latency_hist, i),
-+		    &latency_hist_fops);
-+		my_hist = &per_cpu(wakeup_latency_hist, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+
-+		entry = debugfs_create_file(name, 0444, dentry_sharedprio,
-+		    &per_cpu(wakeup_latency_hist_sharedprio, i),
-+		    &latency_hist_fops);
-+		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+
-+		sprintf(name, cpufmt_maxlatproc, i);
-+
-+		mp = &per_cpu(wakeup_maxlatproc, i);
-+		entry = debugfs_create_file(name, 0444, dentry, mp,
-+		    &maxlatproc_fops);
-+		clear_maxlatprocdata(mp);
-+
-+		mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
-+		entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
-+		    &maxlatproc_fops);
-+		clear_maxlatprocdata(mp);
-+	}
-+	entry = debugfs_create_file("pid", 0644, dentry,
-+	    (void *)&wakeup_pid, &pid_fops);
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
-+	entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
-+	    (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
-+	entry = debugfs_create_file("wakeup", 0644,
-+	    enable_root, (void *)&wakeup_latency_enabled_data,
-+	    &enable_fops);
-+#endif
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+	dentry = debugfs_create_dir(missed_timer_offsets_dir,
-+	    latency_hist_root);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
-+		my_hist = &per_cpu(missed_timer_offsets, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+
-+		sprintf(name, cpufmt_maxlatproc, i);
-+		mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
-+		entry = debugfs_create_file(name, 0444, dentry, mp,
-+		    &maxlatproc_fops);
-+		clear_maxlatprocdata(mp);
-+	}
-+	entry = debugfs_create_file("pid", 0644, dentry,
-+	    (void *)&missed_timer_offsets_pid, &pid_fops);
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
-+	entry = debugfs_create_file("missed_timer_offsets", 0644,
-+	    enable_root, (void *)&missed_timer_offsets_enabled_data,
-+	    &enable_fops);
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+	dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
-+	    latency_hist_root);
-+	for_each_possible_cpu(i) {
-+		sprintf(name, cpufmt, i);
-+		entry = debugfs_create_file(name, 0444, dentry,
-+		    &per_cpu(timerandwakeup_latency_hist, i),
-+		    &latency_hist_fops);
-+		my_hist = &per_cpu(timerandwakeup_latency_hist, i);
-+		atomic_set(&my_hist->hist_mode, 1);
-+		my_hist->min_lat = LONG_MAX;
-+
-+		sprintf(name, cpufmt_maxlatproc, i);
-+		mp = &per_cpu(timerandwakeup_maxlatproc, i);
-+		entry = debugfs_create_file(name, 0444, dentry, mp,
-+		    &maxlatproc_fops);
-+		clear_maxlatprocdata(mp);
-+	}
-+	entry = debugfs_create_file("reset", 0644, dentry,
-+	    (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
-+	entry = debugfs_create_file("timerandwakeup", 0644,
-+	    enable_root, (void *)&timerandwakeup_enabled_data,
-+	    &enable_fops);
-+#endif
-+	return 0;
-+}
-+
-+device_initcall(latency_hist_init);
---- a/kernel/trace/trace_irqsoff.c
-+++ b/kernel/trace/trace_irqsoff.c
-@@ -13,6 +13,7 @@
- #include <linux/uaccess.h>
- #include <linux/module.h>
- #include <linux/ftrace.h>
-+#include <trace/events/hist.h>
- 
- #include "trace.h"
- 
-@@ -436,11 +437,13 @@ void start_critical_timings(void)
- {
- 	if (preempt_trace() || irq_trace())
- 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+	trace_preemptirqsoff_hist(TRACE_START, 1);
- }
- EXPORT_SYMBOL_GPL(start_critical_timings);
- 
- void stop_critical_timings(void)
- {
-+	trace_preemptirqsoff_hist(TRACE_STOP, 0);
- 	if (preempt_trace() || irq_trace())
- 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -450,6 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
-+	trace_preemptirqsoff_hist(IRQS_ON, 0);
- 	if (!preempt_trace() && irq_trace())
- 		stop_critical_timing(a0, a1);
- }
-@@ -458,6 +462,7 @@ void time_hardirqs_off(unsigned long a0,
- {
- 	if (!preempt_trace() && irq_trace())
- 		start_critical_timing(a0, a1);
-+	trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- 
- #else /* !CONFIG_PROVE_LOCKING */
-@@ -483,6 +488,7 @@ inline void print_irqtrace_events(struct
-  */
- void trace_hardirqs_on(void)
- {
-+	trace_preemptirqsoff_hist(IRQS_ON, 0);
- 	if (!preempt_trace() && irq_trace())
- 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -492,11 +498,13 @@ void trace_hardirqs_off(void)
- {
- 	if (!preempt_trace() && irq_trace())
- 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+	trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off);
- 
- __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
- {
-+	trace_preemptirqsoff_hist(IRQS_ON, 0);
- 	if (!preempt_trace() && irq_trace())
- 		stop_critical_timing(CALLER_ADDR0, caller_addr);
- }
-@@ -506,6 +514,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
- {
- 	if (!preempt_trace() && irq_trace())
- 		start_critical_timing(CALLER_ADDR0, caller_addr);
-+	trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off_caller);
- 
-@@ -515,12 +524,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
- #ifdef CONFIG_PREEMPT_TRACER
- void trace_preempt_on(unsigned long a0, unsigned long a1)
- {
-+	trace_preemptirqsoff_hist(PREEMPT_ON, 0);
- 	if (preempt_trace() && !irq_trace())
- 		stop_critical_timing(a0, a1);
- }
- 
- void trace_preempt_off(unsigned long a0, unsigned long a1)
- {
-+	trace_preemptirqsoff_hist(PREEMPT_ON, 1);
- 	if (preempt_trace() && !irq_trace())
- 		start_critical_timing(a0, a1);
- }
diff --git a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
deleted file mode 100644
index c875ec8..0000000
--- a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-Subject: latency_hist: Update sched_wakeup probe
-From: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
-Date: Sun, 25 Oct 2015 18:06:05 -0400
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-"sched: Introduce the 'trace_sched_waking' tracepoint" introduces a
-prototype change for the sched_wakeup probe: the "success" argument is
-removed. Update the latency_hist probe following this change.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
-Cc: Peter Zijlstra (Intel) <peterz at infradead.org>
-Cc: Julien Desfossez <jdesfossez at efficios.com>
-Cc: Francis Giraldeau <francis.giraldeau at gmail.com>
-Cc: Mike Galbraith <efault at gmx.de>
-Cc: Steven Rostedt <rostedt at goodmis.org>
-Link: http://lkml.kernel.org/r/1445810765-18732-1-git-send-email-mathieu.desnoyers@efficios.com
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
----
- kernel/trace/latency_hist.c |    4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/trace/latency_hist.c
-+++ b/kernel/trace/latency_hist.c
-@@ -115,7 +115,7 @@ static DEFINE_PER_CPU(struct hist_data,
- static char *wakeup_latency_hist_dir = "wakeup";
- static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
- static notrace void probe_wakeup_latency_hist_start(void *v,
--	struct task_struct *p, int success);
-+	struct task_struct *p);
- static notrace void probe_wakeup_latency_hist_stop(void *v,
- 	struct task_struct *prev, struct task_struct *next);
- static notrace void probe_sched_migrate_task(void *,
-@@ -869,7 +869,7 @@ static notrace void probe_sched_migrate_
- }
- 
- static notrace void probe_wakeup_latency_hist_start(void *v,
--	struct task_struct *p, int success)
-+	struct task_struct *p)
- {
- 	unsigned long flags;
- 	struct task_struct *curr = current;
diff --git a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
deleted file mode 100644
index aa9e446..0000000
--- a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-Date: Thu, 4 Feb 2016 14:08:06 +0100
-Subject: latencyhist: disable jump-labels
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-Atleast on X86 we die a recursive death
-
-|CPU: 3 PID: 585 Comm: bash Not tainted 4.4.1-rt4+ #198
-|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Debian-1.8.2-1 04/01/2014
-|task: ffff88007ab4cd00 ti: ffff88007ab94000 task.ti: ffff88007ab94000
-|RIP: 0010:[<ffffffff81684870>]  [<ffffffff81684870>] int3+0x0/0x10
-|RSP: 0018:ffff88013c107fd8  EFLAGS: 00010082
-|RAX: ffff88007ab4cd00 RBX: ffffffff8100ceab RCX: 0000000080202001
-|RDX: 0000000000000000 RSI: ffffffff8100ceab RDI: ffffffff810c78b2
-|RBP: ffff88007ab97c10 R08: ffffffffff57b000 R09: 0000000000000000
-|R10: ffff88013bb64790 R11: ffff88007ab4cd68 R12: ffffffff8100ceab
-|R13: ffffffff810c78b2 R14: ffffffff810f8158 R15: ffffffff810f9120
-|FS:  0000000000000000(0000) GS:ffff88013c100000(0063) knlGS:00000000f74e3940
-|CS:  0010 DS: 002b ES: 002b CR0: 000000008005003b
-|CR2: 0000000008cf6008 CR3: 000000013b169000 CR4: 00000000000006e0
-|Call Trace:
-| <#DB>
-| [<ffffffff810f8158>] ? trace_preempt_off+0x18/0x170
-| <<EOE>>
-| [<ffffffff81077745>] preempt_count_add+0xa5/0xc0
-| [<ffffffff810c78b2>] on_each_cpu+0x22/0x90
-| [<ffffffff8100ceab>] text_poke_bp+0x5b/0xc0
-| [<ffffffff8100a29c>] arch_jump_label_transform+0x8c/0xf0
-| [<ffffffff8111c77c>] __jump_label_update+0x6c/0x80
-| [<ffffffff8111c83a>] jump_label_update+0xaa/0xc0
-| [<ffffffff8111ca54>] static_key_slow_inc+0x94/0xa0
-| [<ffffffff810e0d8d>] tracepoint_probe_register_prio+0x26d/0x2c0
-| [<ffffffff810e0df3>] tracepoint_probe_register+0x13/0x20
-| [<ffffffff810fca78>] trace_event_reg+0x98/0xd0
-| [<ffffffff810fcc8b>] __ftrace_event_enable_disable+0x6b/0x180
-| [<ffffffff810fd5b8>] event_enable_write+0x78/0xc0
-| [<ffffffff8117a768>] __vfs_write+0x28/0xe0
-| [<ffffffff8117b025>] vfs_write+0xa5/0x180
-| [<ffffffff8117bb76>] SyS_write+0x46/0xa0
-| [<ffffffff81002c91>] do_fast_syscall_32+0xa1/0x1d0
-| [<ffffffff81684d57>] sysenter_flags_fixed+0xd/0x17
-
-during
- echo 1 > /sys/kernel/debug/tracing/events/hist/preemptirqsoff_hist/enable
-
-Reported-By: Christoph Mathys <eraserix at gmail.com>
-Cc: stable-rt at vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- arch/Kconfig |    1 +
- 1 file changed, 1 insertion(+)
-
---- a/arch/Kconfig
-+++ b/arch/Kconfig
-@@ -55,6 +55,7 @@ config KPROBES
- config JUMP_LABEL
-        bool "Optimize very unlikely/likely branches"
-        depends on HAVE_ARCH_JUMP_LABEL
-+       depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
-        help
-          This option enables a transparent branch optimization that
- 	 makes certain almost-always-true or almost-always-false branch
diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 8186779..cf5fe4c 100644
--- a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 23 Jan 2014 14:45:59 +0100
 Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 as it triggers:
 |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
diff --git a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
index 9f504ac..33c53dd 100644
--- a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <joshc at ni.com>
 Date: Thu, 31 Mar 2016 00:04:25 -0500
 Subject: [PATCH] list_bl: fixup bogus lockdep warning
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 At first glance, the use of 'static inline' seems appropriate for
 INIT_HLIST_BL_HEAD().
diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
index a34d8ef..cf841ee 100644
--- a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Fri, 21 Jun 2013 15:07:25 -0400
 Subject: list_bl: Make list head locking RT safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 As per changes in include/linux/jbd_common.h for avoiding the
 bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
index bf3c2fb..83c8cff 100644
--- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
+++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 22:34:14 +0200
 Subject: rt: local_irq_* variants depending on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Add local_irq_*_(no)rt variant which are mainly used to break
 interrupt disabled sections on PREEMPT_RT or to explicitely disable
diff --git a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
index 9c74d69..66740f6 100644
--- a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
+++ b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 27 May 2016 15:11:51 +0200
 Subject: [PATCH] locallock: add local_lock_on()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 ---
diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch
index 0d8f833..465753f 100644
--- a/debian/patches/features/all/rt/localversion.patch
+++ b/debian/patches/features/all/rt/localversion.patch
@@ -1,7 +1,7 @@
 Subject: Add localversion for -RT release
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 ---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt1
++-rt5
diff --git a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
index ef43082..ede7d61 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
@@ -2,7 +2,7 @@ From: Dan Murphy <dmurphy at ti.com>
 Date: Fri, 24 Feb 2017 08:41:49 -0600
 Subject: [PATCH] lockdep: Fix compilation error for !CONFIG_MODULES and
  !CONFIG_SMP
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When CONFIG_MODULES is not set then it fails to compile in lockdep:
 
diff --git a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
index 8060866..97828bb 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
@@ -1,8 +1,7 @@
-From 8ce371f9846ef1e8b3cc8f6865766cb5c1f17e40 Mon Sep 17 00:00:00 2001
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 20 Mar 2017 12:26:55 +0100
 Subject: [PATCH] lockdep: Fix per-cpu static objects
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since commit 383776fa7527 ("locking/lockdep: Handle statically initialized
 PER_CPU locks properly") we try to collapse per-cpu locks into a single
diff --git a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
index 8f54dcd..0c3bbf4 100644
--- a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
+++ b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Feb 2017 19:44:39 +0100
 Subject: [PATCH] lockdep: Handle statically initialized PER_CPU locks proper
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If a PER_CPU struct which contains a spin_lock is statically initialized
 via:
diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
index f6bea8f..d9fac26 100644
--- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: Make it RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 teach lockdep that we don't really do softirqs on -RT.
 
diff --git a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index e50b5b5..0bc20e9 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
 From: Josh Cartwright <josh.cartwright at ni.com>
 Date: Wed, 28 Jan 2015 13:08:45 -0600
 Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 "lockdep: Selftest: Only do hardirq context test for raw spinlock"
 disabled the execution of certain tests with PREEMPT_RT_FULL, but did
diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index a834fb5..2643398 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
 Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 From: Yong Zhang <yong.zhang at windriver.com>
 
diff --git a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index 6621eca..4cca941 100644
--- a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -1,7 +1,7 @@
 From: "Wolfgang M. Reimer" <linuxball at gmail.com>
 Date: Tue, 21 Jul 2015 16:20:07 +0200
 Subject: locking: locktorture: Do NOT include rwlock.h directly
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Including rwlock.h directly will cause kernel builds to fail
 if CONFIG_PREEMPT_RT_FULL is defined. The correct header file
diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch
index e5655bb..c942352 100644
--- a/debian/patches/features/all/rt/md-disable-bcache.patch
+++ b/debian/patches/features/all/rt/md-disable-bcache.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 11:48:57 +0200
 Subject: md: disable bcache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It uses anon semaphores
 |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’:
diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
index 7a7ffe1..6225427 100644
--- a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 6 Apr 2010 16:51:31 +0200
 Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 __raid_run_ops() disables preemption with get_cpu() around the access
 to the raid5_percpu variables. That causes scheduling while atomic
diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
index b627bc8..2008a24 100644
--- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: mips: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
diff --git a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
index bf1c079..2381b11 100644
--- a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
+++ b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
@@ -1,7 +1,7 @@
 Subject: mm: rt: Fix generic kmap_atomic for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 19 Sep 2015 10:15:00 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The update to 4.1 brought in the mainline variant of the pagefault
 disable distangling from preempt count. That introduced a
diff --git a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
index 04c34bb..0b15676 100644
--- a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+++ b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 5 Feb 2016 12:17:14 +0100
 Subject: mm: backing-dev: don't disable IRQs in wb_congested_put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 it triggers:
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
index 22bfbab..7a4b9f5 100644
--- a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
+++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
@@ -1,7 +1,7 @@
 Subject: mm: bounce: Use local_irq_save_nort
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 10:33:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 kmap_atomic() is preemptible on RT.
 
diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
index ec199c3..1b98e66 100644
--- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
+++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:51 -0500
 Subject: mm/swap: Convert to percpu locked
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Replace global locks (get_cpu + local_irq_save) with "local_locks()".
 Currently there is one of for "rotate" and one for "swap".
diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
index 78841fa..46898f6 100644
--- a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
+++ b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:44:03 -0500
 Subject: mm: Allow only slub on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs.
 
diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch
index fff07e2..37522c4 100644
--- a/debian/patches/features/all/rt/mm-enable-slub.patch
+++ b/debian/patches/features/all/rt/mm-enable-slub.patch
@@ -1,7 +1,7 @@
 Subject: mm: Enable SLUB for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 25 Oct 2012 10:32:35 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Make SLUB RT aware by converting locks to raw and using free lists to
 move the freeing out of the lock held region.
diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
index 7f0afec..f20f373 100644
--- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:13 -0500
 Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Disable preemption on -RT for the vmstat code. On vanila the code runs in
 IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 63eeea1..8e884f4 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at windriver.com>
 Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
 Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The following trace is triggered when running ltp oom test cases:
 
diff --git a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
index 0421dd5..da76293 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: mm/memcontrol: Replace local_irq_disable with local locks
 Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There are a few local_irq_disable() which then take sleeping locks. This
 patch converts them local locks.
diff --git a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 6bc89ea..b0ac08f 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 5 Jun 2016 08:11:13 +0200
 Subject: [PATCH] mm/memcontrol: mem_cgroup_migrate() - replace another
  local_irq_disable() w. local_lock_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
 Convert it to use the existing local lock (event_lock) like the others.
diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 17b13ce..8928203 100644
--- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -1,7 +1,7 @@
 Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 27 Sep 2012 11:11:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The plain spinlock while sufficient does not update the local_lock
 internals. Use a proper local_lock function instead to ease debugging.
diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
index 004aa97..bc709df 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri Jul 3 08:44:37 2009 -0500
 Subject: mm: page_alloc: Reduce lock sections further
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Split out the pages which are to be freed into a separate list and
 call free_pages_bulk() outside of the percpu page allocator locks.
diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 3fa3dfa..92f8180 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:37 -0500
 Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
 method into a preemptible, explicit-per-cpu-locks method.
diff --git a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
index d315e91..08cb291 100644
--- a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
+++ b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
@@ -1,7 +1,7 @@
 From: Luiz Capitulino <lcapitulino at redhat.com>
 Date: Fri, 27 May 2016 15:03:28 +0200
 Subject: [PATCH] mm: perform lru_add_drain_all() remotely
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
 on all CPUs that have non-empty LRU pagevecs and then waiting for
diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
index e784a0e..97e603a 100644
--- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
+++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Tue, 15 May 2012 13:53:56 +0800
 Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 User preempt_*_rt instead of local_irq_*_rt or otherwise there will be
 warning on ARM like below:
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/fs/exec.c
 +++ b/fs/exec.c
-@@ -1022,12 +1022,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1042,12 +1042,14 @@ static int exec_mmap(struct mm_struct *m
  		}
  	}
  	task_lock(tsk);
diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
index 36cb66f..ce100fa 100644
--- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
 Subject: mm, rt: kmap_atomic scheduling
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In fact, with migrate_disable() existing one could play games with
 kmap_atomic. You could save/restore the kmap_atomic slots on context
@@ -230,7 +230,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
  
  /* task_struct member predeclarations (sorted alphabetically): */
  struct audit_context;
-@@ -1062,6 +1063,12 @@ struct task_struct {
+@@ -1058,6 +1059,12 @@ struct task_struct {
  	int				softirq_nestcnt;
  	unsigned int			softirqs_raised;
  #endif
diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index bfcc77f..883770e 100644
--- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:34 -0500
 Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 For -RT it is enough to keep pagefault disabled (which is currently handled by
 kmap_atomic()).
diff --git a/debian/patches/features/all/rt/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch b/debian/patches/features/all/rt/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
new file mode 100644
index 0000000..1064db1
--- /dev/null
+++ b/debian/patches/features/all/rt/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
@@ -0,0 +1,46 @@
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Fri, 23 Jun 2017 11:43:30 +0200
+Subject: [PATCH] mm, swap: don't disable preemption while taking the per-CPU
+ cache
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+get_cpu_var() disables preemption and returns the per-CPU version of the
+variable. Disabling preemption is useful to ensure atomic access to the
+variable within the critical section.
+In this case however, after the per-CPU version of the variable is
+obtained the ->free_lock is acquired. For that reason it seems the raw
+accessor could be used. It only seems that ->slots_ret should be
+retested (because with disabled preemption this variable can not be set
+to NULL otherwise).
+This popped up during PREEMPT-RT testing because it tries to take
+spinlocks in a preempt disabled section.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ mm/swap_slots.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/mm/swap_slots.c
++++ b/mm/swap_slots.c
+@@ -267,11 +267,11 @@ int free_swap_slot(swp_entry_t entry)
+ {
+ 	struct swap_slots_cache *cache;
+ 
+-	cache = &get_cpu_var(swp_slots);
++	cache = raw_cpu_ptr(&swp_slots);
+ 	if (use_swap_slot_cache && cache->slots_ret) {
+ 		spin_lock_irq(&cache->free_lock);
+ 		/* Swap slots cache may be deactivated before acquiring lock */
+-		if (!use_swap_slot_cache) {
++		if (!use_swap_slot_cache || !cache->slots_ret) {
+ 			spin_unlock_irq(&cache->free_lock);
+ 			goto direct_free;
+ 		}
+@@ -291,7 +291,6 @@ int free_swap_slot(swp_entry_t entry)
+ direct_free:
+ 		swapcache_free_entries(&entry, 1);
+ 	}
+-	put_cpu_var(swp_slots);
+ 
+ 	return 0;
+ }
diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
index e97e344..2ed7d7b 100644
--- a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: mm/vmalloc: Another preempt disable region which sucks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Avoid the preempt disable version of get_cpu_var(). The inner-lock should
 provide enough serialisation.
diff --git a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index a9d6e7c..689dda7 100644
--- a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Jan 2015 17:19:44 +0100
 Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 workingset_shadow_nodes is protected by local_irq_disable(). Some users
 use spin_lock_irq().
diff --git a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 1b52a30..26146ca 100644
--- a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Tue, 22 Mar 2016 11:16:09 +0100
 Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 get_cpu_var() disables preemption and triggers a might_sleep() splat later.
 This is replaced with get_locked_var().
diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
index aa64b10..5309435 100644
--- a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
+++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
@@ -1,7 +1,7 @@
 Subject: mmci: Remove bogus local_irq_save()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:11:12 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On !RT interrupt runs with interrupts disabled. On RT it's in a
 thread, so no need to disable interrupts at all.
diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
index e82f8d8..6b5f93e 100644
--- a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
+++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
@@ -1,7 +1,7 @@
 Date: Wed, 26 Jun 2013 15:28:11 -0400
 From: Steven Rostedt <rostedt at goodmis.org>
 Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The ntp code for notify_cmos_timer() is called from a hard interrupt
 context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks
diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
index ec5472d..e10f623 100644
--- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
+++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:51:45 +0200
 Subject: locking: Disable spin on owner for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Drop spin on owner for mutex / rwsem. We are most likely not using it
 but…
diff --git a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 1832649..6fc3160 100644
--- a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -2,7 +2,7 @@ From: Steven Rostedt <rostedt at goodmis.org>
 Date: Tue, 6 Dec 2016 17:50:30 -0500
 Subject: [PATCH] net: Have __napi_schedule_irqoff() disable interrupts on
  RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 A customer hit a crash where the napi sd->poll_list became corrupted.
 The customer had the bnx2x driver, which does a
diff --git a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index ba17295..a97abbb 100644
--- a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 17:36:35 +0200
 Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The seqcount disables preemption on -RT while it is held which can't
 remove. Also we don't want the reader to spin for ages if the writer is
diff --git a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
index 11fd911..ba2e8e2 100644
--- a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
+++ b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 31 Aug 2016 17:54:09 +0200
 Subject: [PATCH] net: add a lock around icmp_sk()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It looks like the this_cpu_ptr() access in icmp_sk() is protected with
 local_bh_disable(). To avoid missing serialization in -RT I am adding
@@ -54,9 +54,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	local_bh_disable();
 +	local_lock(icmp_sk_lock);
  
- 	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
- 	if (!icmpv4_global_allow(net, type, code))
-@@ -757,6 +763,7 @@ void icmp_send(struct sk_buff *skb_in, i
+ 	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+ 	 * incoming dev is loopback.  If outgoing dev change to not be
+@@ -761,6 +767,7 @@ ende:
  out_unlock:
  	icmp_xmit_unlock(sk);
  out_bh_enable:
diff --git a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
index d305c9c..11e6181 100644
--- a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] net: add back the missing serialization in
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Some time ago Sami Pietikäinen reported a crash on -RT in
 ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire
diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
index 3042f9d..4248b3a 100644
--- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 26 Sep 2012 16:21:08 +0200
 Subject: net: Another local_irq_disable/kmalloc headache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Replace it by a local lock. Though that's pretty inefficient :(
 
diff --git a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 53ee705..deeb801 100644
--- a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -1,7 +1,7 @@
 Subject: net/core/cpuhotplug: Drain input_pkt_queue lockless
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 9 Oct 2015 09:25:49 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 I can constantly see below error report with 4.1 RT-kernel on TI ARM dra7-evm 
 if I'm trying to unplug cpu1:
diff --git a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 3dfdc79..e3b2edf 100644
--- a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 15 Jan 2016 16:33:34 +0100
 Subject: net/core: protect users of napi_alloc_cache against
  reentrance
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On -RT the code running in BH can not be moved to another CPU so CPU
 local variable remain local. However the code can be preempted
diff --git a/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch b/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
index cb83163..4fe637a 100644
--- a/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
+++ b/debian/patches/features/all/rt/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 22 May 2017 21:08:08 +0200
 Subject: net/core: remove explicit do_softirq() from busy_poll_stop()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since commit 217f69743681 ("net: busy-poll: allow preemption in
 sk_busy_loop()") there is an explicit do_softirq() invocation after
diff --git a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 931a9f2..8d122d3 100644
--- a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 30 Mar 2016 13:36:29 +0200
 Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The root-lock is dropped before dev_hard_start_xmit() is invoked and after
 setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index a9bfc17..9fc65d2 100644
--- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -1,7 +1,7 @@
 Subject: net: netfilter: Serialize xt_write_recseq sections on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 11:18:08 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The netfilter code relies only on the implicit semantics of
 local_bh_disable() for serializing wt_write_recseq sections. RT breaks
diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
index 3adf2a0..f3c1e12 100644
--- a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
+++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Mar 2013 18:06:20 +0100
 Subject: net: Add a mutex around devnet_rename_seq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On RT write_seqcount_begin() disables preemption and device_rename()
 allocates memory with GFP_KERNEL and grabs later the sysfs_mutex
diff --git a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 31916b1..afaf928 100644
--- a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 13 Jan 2016 15:55:02 +0100
 Subject: net: move xmit_recursion to per-task variable on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 A softirq on -RT can be preempted. That means one task is in
 __dev_queue_xmit(), gets preempted and another task may enter
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1065,6 +1065,9 @@ struct task_struct {
+@@ -1061,6 +1061,9 @@ struct task_struct {
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long			task_state_change;
  #endif
diff --git a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
index 302fb74..1933b77 100644
--- a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
+++ b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
@@ -1,7 +1,7 @@
 Subject: net-flip-lock-dep-thingy.patch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 28 Jun 2011 10:59:58 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 =======================================================
 [ INFO: possible circular locking dependency detected ]
diff --git a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 4fea66c..c3c4e31 100644
--- a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 15:39:05 +0100
 Subject: net: provide a way to delegate processing a softirq to
  ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If the NET_RX uses up all of his budget it moves the following NAPI
 invocations into the `ksoftirqd`. On -RT it does not do so. Instead it
diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index de82228..cc9d8a8 100644
--- a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -1,7 +1,7 @@
 From: Marc Kleine-Budde <mkl at pengutronix.de>
 Date: Wed, 5 Mar 2014 00:49:47 +0100
 Subject: net: sched: Use msleep() instead of yield()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50
 (by default). If a high priority userspace process tries to shut down a busy
diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch
index 364d74e..d65b977 100644
--- a/debian/patches/features/all/rt/net-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: net: Use cpu_chill() instead of cpu_relax()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 21:10:04 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Use cpu_chill() instead of cpu_relax() to let the system
diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
index f8fe162..f52ddb0 100644
--- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch
+++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
@@ -1,7 +1,7 @@
 Subject: net/wireless: Use WARN_ON_NORT()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 21 Jul 2011 21:05:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The softirq counter is meaningless on RT, so the check triggers a
 false positive.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/net/mac80211/rx.c
 +++ b/net/mac80211/rx.c
-@@ -4224,7 +4224,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4229,7 +4229,7 @@ void ieee80211_rx_napi(struct ieee80211_
  	struct ieee80211_supported_band *sband;
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  
diff --git a/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch b/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
index 72cc350..dd00fc5 100644
--- a/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
+++ b/debian/patches/features/all/rt/net_disable_NET_RX_BUSY_POLL.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Sat, 27 May 2017 19:02:06 +0200
 Subject: net/core: disable NET_RX_BUSY_POLL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 sk_busy_loop() does preempt_disable() followed by a few operations which can
 take sleeping locks and may get long.
diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
index 2686c8c..a4803de 100644
--- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
+++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
 From: Oleg Nesterov <oleg at redhat.com>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On x86_64 we must disable preemption before we enable interrupts
 for stack faults, int3 and debugging, because the current task is using
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	unsigned int			sas_ss_flags;
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1227,8 +1227,8 @@ int do_send_sig_info(int sig, struct sig
+@@ -1235,8 +1235,8 @@ int do_send_sig_info(int sig, struct sig
   * We don't want to have recursive SIGSEGV's etc, for example,
   * that is why we also clear SIGNAL_UNKILLABLE.
   */
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  {
  	unsigned long int flags;
  	int ret, blocked, ignored;
-@@ -1253,6 +1253,39 @@ force_sig_info(int sig, struct siginfo *
+@@ -1261,6 +1261,39 @@ force_sig_info(int sig, struct siginfo *
  	return ret;
  }
  
diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
index 945f34f..03acfd8 100644
--- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
+++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Disable on -RT. If this is invoked from irq-context we will have problems
 to acquire the sleeping lock.
diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index abe7d33..7c71572 100644
--- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Make ksoftirqd do RCU quiescent states
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Wed, 5 Oct 2011 11:45:18 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
 to network-based denial-of-service attacks.  This patch therefore
diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
index 5f18de4..09cfd6c 100644
--- a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
+++ b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 9 Apr 2014 11:58:17 +0200
 Subject: percpu_ida: Use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 the local_irq_save() + spin_lock() does not work that well on -RT
 
diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
index cdc417b..3c495c7 100644
--- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang at windriver.com>
 Date: Wed, 11 Jul 2012 22:05:21 +0000
 Subject: perf: Make swevent hrtimer run in irq instead of softirq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Otherwise we get a deadlock like below:
 
diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
index 22a47bb..0ddc5d8 100644
--- a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
+++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Frob softirq test
 From: Peter Zijlstra <a.p.zijlstra at chello.nl>
 Date: Sat Aug 13 00:23:17 CEST 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 With RT_FULL we get the below wreckage:
 
diff --git a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
index e60899b..9877ffa 100644
--- a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
+++ b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
@@ -1,7 +1,7 @@
 Subject: locking/percpu-rwsem: Remove preempt_disable variants
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed Nov 23 16:29:32 CET 2016
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Effective revert commit:
 
diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
index 6cfd795..8172a81 100644
--- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
+++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
@@ -1,7 +1,7 @@
 Subject: crypto: Convert crypto notifier chain to SRCU
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 05 Oct 2012 09:03:24 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The crypto notifier deadlocks on RT. Though this can be a real deadlock
 on mainline as well due to fifo fair rwsems.
diff --git a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
index b1b03d2..622bb05 100644
--- a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Grygorii Strashko <Grygorii.Strashko at linaro.org>
 Date: Tue, 21 Jul 2015 19:43:56 +0300
 Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 This patch fixes build error:
   CC      kernel/pid_namespace.o
diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch
index 779897c..aa124ac 100644
--- a/debian/patches/features/all/rt/ping-sysrq.patch
+++ b/debian/patches/features/all/rt/ping-sysrq.patch
@@ -1,7 +1,7 @@
 Subject: net: sysrq via icmp
 From: Carsten Emde <C.Emde at osadl.org>
 Date: Tue, 19 Jul 2011 13:51:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There are (probably rare) situations when a system crashed and the system
 console becomes unresponsive but the network icmp layer still is alive.
diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
index e3a50ec..48d194b 100644
--- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
+++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:29:20 -0500
 Subject: posix-timers: Prevent broadcast signals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Posix timers should not send broadcast signals and kernel only
 signals. Prevent it.
diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
index a26e8ee..a766609 100644
--- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -1,7 +1,7 @@
 From: John Stultz <johnstul at us.ibm.com>
 Date: Fri, 3 Jul 2009 08:29:58 -0500
 Subject: posix-timers: Thread posix-cpu-timers on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 posix-cpu-timer code takes non -rt safe locks in hard irq
 context. Move it to a thread.
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  # define INIT_PERF_EVENTS(tsk)
  #endif
  
-+#ifdef CONFIG_PREEMPT_RT_BASE
++#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE)
 +# define INIT_TIMER_LIST		.posix_timer_list = NULL,
 +#else
 +# define INIT_TIMER_LIST
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
index 830f9c8..f1dfae2 100644
--- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: powerpc: Disable highmem on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The current highmem handling on -RT is not compatible and needs fixups.
 
diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
index 7a0c28e..90a1309 100644
--- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 14 Jul 2015 14:26:34 +0200
 Subject: powerpc: Use generic rwsem on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use generic code which uses rtmutex
 
diff --git a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index d418fb5..51d61a1 100644
--- a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
 From: Bogdan Purcareata <bogdan.purcareata at freescale.com>
 Date: Fri, 24 Apr 2015 15:53:13 +0000
 Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 While converting the openpic emulation code to use a raw_spinlock_t enables
 guests to run on RT, there's still a performance issue. For interrupts sent in
diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
index cde875e..9afdf5f 100644
--- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 1 Nov 2012 10:14:11 +0100
 Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Implement the powerpc pieces for lazy preempt.
 
diff --git a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
index 4d244eb..cba6047 100644
--- a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
+++ b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sun, 31 May 2015 14:44:42 -0400
 Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To fix:
 
diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch
index 726de61..02519c7 100644
--- a/debian/patches/features/all/rt/preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It has become an obsession to mitigate the determinism vs. throughput
 loss of RT. Looking at the mainline semantics of preemption points
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1513,6 +1513,44 @@ static inline int test_tsk_need_resched(
+@@ -1509,6 +1509,44 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -234,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	default PREEMPT_NONE
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -517,6 +517,48 @@ void resched_curr(struct rq *rq)
+@@ -528,6 +528,48 @@ void resched_curr(struct rq *rq)
  		trace_sched_wake_idle_without_ipi(cpu);
  }
  
@@ -283,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -2525,6 +2567,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2536,6 +2578,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3516,6 +3561,7 @@ static void __sched notrace __schedule(b
+@@ -3527,6 +3572,7 @@ static void __sched notrace __schedule(b
  
  	next = pick_next_task(rq, prev, &rf);
  	clear_tsk_need_resched(prev);
@@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	clear_preempt_need_resched();
  
  	if (likely(prev != next)) {
-@@ -3667,6 +3713,30 @@ static void __sched notrace preempt_sche
+@@ -3678,6 +3724,30 @@ static void __sched notrace preempt_sche
  	} while (need_resched());
  }
  
@@ -332,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #ifdef CONFIG_PREEMPT
  /*
   * this is the entry point to schedule() from in-kernel preemption
-@@ -3681,7 +3751,8 @@ asmlinkage __visible void __sched notrac
+@@ -3692,7 +3762,8 @@ asmlinkage __visible void __sched notrac
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -342,7 +342,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3708,6 +3779,9 @@ asmlinkage __visible void __sched notrac
+@@ -3719,6 +3790,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
  
@@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -5537,7 +5611,9 @@ void init_idle(struct task_struct *idle,
+@@ -5548,7 +5622,9 @@ void init_idle(struct task_struct *idle,
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -363,7 +363,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/*
  	 * The idle tasks have their own, simple scheduling class:
  	 */
-@@ -7512,6 +7588,7 @@ void migrate_disable(void)
+@@ -7523,6 +7599,7 @@ void migrate_disable(void)
  	/* get_online_cpus(); */
  
  	preempt_disable();
@@ -371,7 +371,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	pin_current_cpu();
  	p->migrate_disable = 1;
  
-@@ -7581,6 +7658,7 @@ void migrate_enable(void)
+@@ -7592,6 +7669,7 @@ void migrate_enable(void)
  			arg.dest_cpu = dest_cpu;
  
  			unpin_current_cpu();
@@ -379,7 +379,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			preempt_enable();
  			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
  			tlb_migrate_finish(p->mm);
-@@ -7591,6 +7669,7 @@ void migrate_enable(void)
+@@ -7602,6 +7680,7 @@ void migrate_enable(void)
  	}
  	unpin_current_cpu();
  	/* put_online_cpus(); */
@@ -493,7 +493,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -1934,6 +1934,7 @@ tracing_generic_entry_update(struct trac
+@@ -1942,6 +1942,7 @@ tracing_generic_entry_update(struct trac
  	struct task_struct *tsk = current;
  
  	entry->preempt_count		= pc & 0xff;
@@ -501,7 +501,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	entry->pid			= (tsk) ? tsk->pid : 0;
  	entry->flags =
  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1944,7 +1945,8 @@ tracing_generic_entry_update(struct trac
+@@ -1952,7 +1953,8 @@ tracing_generic_entry_update(struct trac
  		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
  		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
  		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -511,7 +511,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
  
  	entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3111,15 +3113,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3119,15 +3121,17 @@ get_total_entries(struct trace_buffer *b
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -538,7 +538,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3145,11 +3149,14 @@ static void print_func_help_header_irq(s
+@@ -3153,11 +3157,14 @@ static void print_func_help_header_irq(s
  	print_event_info(buf, m);
  	seq_puts(m, "#                              _-----=> irqs-off\n"
  		    "#                             / _----=> need-resched\n"
diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
index 530f6d8..108e4a9 100644
--- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
+++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 24 Jul 2009 12:38:56 +0200
 Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT needs a few preempt_disable/enable points which are not necessary
 otherwise. Implement variants to avoid #ifdeffery.
diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index ac8af46..784ee35 100644
--- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add "force_early_printk" boot param to help with debugging
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 02 Sep 2011 14:41:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Gives me an option to screw printk and actually see what the machine
 says.
diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch
index b26e564..bd043bc 100644
--- a/debian/patches/features/all/rt/printk-kill.patch
+++ b/debian/patches/features/all/rt/printk-kill.patch
@@ -1,7 +1,7 @@
 Subject: printk: Add a printk kill switch
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 22 Jul 2011 17:58:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that
 it does not dead-lock with the early printk code.
diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch
index c2282bd..ec40013 100644
--- a/debian/patches/features/all/rt/printk-rt-aware.patch
+++ b/debian/patches/features/all/rt/printk-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: printk: Make rt aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 19 Sep 2012 14:50:37 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Drop the lock before calling the console driver and do not disable
 interrupts while printing to a serial console.
diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 2308e2c..c06dcc1 100644
--- a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Thu, 29 Aug 2013 18:21:04 +0200
 Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 As explained by Alexander Fyodorov <halcy at yandex.ru>:
 
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
  					 (task->flags & PF_FROZEN) == 0 && \
  					 (task->state & TASK_NOLOAD) == 0)
-@@ -1500,6 +1496,51 @@ static inline int test_tsk_need_resched(
+@@ -1496,6 +1492,51 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	spin_unlock_irq(&task->sighand->siglock);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1363,6 +1363,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1374,6 +1374,18 @@ int migrate_swap(struct task_struct *cur
  	return ret;
  }
  
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -1407,7 +1419,7 @@ unsigned long wait_task_inactive(struct
+@@ -1418,7 +1430,7 @@ unsigned long wait_task_inactive(struct
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  				return 0;
  			cpu_relax();
  		}
-@@ -1422,7 +1434,8 @@ unsigned long wait_task_inactive(struct
+@@ -1433,7 +1445,8 @@ unsigned long wait_task_inactive(struct
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
diff --git a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
index b9cd1ed..5a7523c 100644
--- a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
+++ b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 25 Jan 2017 16:34:27 +0100
 Subject: [PATCH] radix-tree: use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The preload functionality uses per-CPU variables and preempt-disable to
 ensure that it does not switch CPUs during its usage. This patch adds
diff --git a/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
index 4c3e005..eb20315 100644
--- a/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
+++ b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
@@ -1,8 +1,7 @@
-From 81e7296af883a58c3e5609842e129de01442198d Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 12 May 2017 15:46:17 +0200
 Subject: [PATCH] random: avoid preempt_disable()ed section
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 extract_crng() will use sleeping locks while in a preempt_disable()
 section due to get_cpu_var().
@@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -262,6 +262,7 @@
+@@ -265,6 +265,7 @@
  #include <linux/syscalls.h>
  #include <linux/completion.h>
  #include <linux/uuid.h>
@@ -24,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #include <crypto/chacha20.h>
  
  #include <asm/processor.h>
-@@ -2022,6 +2023,7 @@ struct batched_entropy {
+@@ -2030,6 +2031,7 @@ static rwlock_t batched_entropy_reset_lo
   * goal of being quite fast and not depleting entropy.
   */
  static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
@@ -32,17 +31,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  u64 get_random_u64(void)
  {
  	u64 ret;
-@@ -2036,18 +2038,19 @@ u64 get_random_u64(void)
+@@ -2046,7 +2048,7 @@ u64 get_random_u64(void)
  	    return ret;
  #endif
  
 -	batch = &get_cpu_var(batched_entropy_u64);
 +	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
+ 	if (use_lock)
+ 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
  	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- 		extract_crng((u8 *)batch->entropy_u64);
- 		batch->position = 0;
- 	}
+@@ -2056,12 +2058,13 @@ u64 get_random_u64(void)
  	ret = batch->entropy_u64[batch->position++];
+ 	if (use_lock)
+ 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 -	put_cpu_var(batched_entropy_u64);
 +	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
  	return ret;
@@ -54,17 +55,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  u32 get_random_u32(void)
  {
  	u32 ret;
-@@ -2056,13 +2059,13 @@ u32 get_random_u32(void)
+@@ -2072,7 +2075,7 @@ u32 get_random_u32(void)
  	if (arch_get_random_int(&ret))
  		return ret;
  
 -	batch = &get_cpu_var(batched_entropy_u32);
 +	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
+ 	if (use_lock)
+ 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
  	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- 		extract_crng((u8 *)batch->entropy_u32);
- 		batch->position = 0;
- 	}
+@@ -2082,7 +2085,7 @@ u32 get_random_u32(void)
  	ret = batch->entropy_u32[batch->position++];
+ 	if (use_lock)
+ 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 -	put_cpu_var(batched_entropy_u32);
 +	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
  	return ret;
diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
index 0cd711f..2c140c7 100644
--- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
+++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: random: Make it work on rt
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Delegate the random insertion to the forced threaded interrupt
 handler. Store the return IP of the hard interrupt handler in the irq
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
-@@ -1102,28 +1102,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1109,28 +1109,27 @@ static __u32 get_reg(struct fast_pool *f
  	return *(ptr + f->reg_idx++);
  }
  
diff --git a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
index 2b6dde9..0c5038a 100644
--- a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
+++ b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 14 Sep 2016 11:52:17 +0200
 Subject: rbtree: include rcu.h because we use it
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since commit c1adf20052d8 ("Introduce rb_replace_node_rcu()")
 rbtree_augmented.h uses RCU related data structures but does not include
diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
index f26eac8..8c7d4fb 100644
--- a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -1,7 +1,7 @@
 From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
 Date: Mon, 4 Nov 2013 13:21:10 -0800
 Subject: rcu: Eliminate softirq processing from rcutree
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Running RCU out of softirq is a problem for some workloads that would
 like to manage RCU core processing independently of other softirq work,
diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
index 321f0a8..161de89 100644
--- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Disable RCU_FAST_NO_HZ on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 28 Oct 2012 13:26:09 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 This uses a timer_list timer from the irq disabled guts of the idle
 code. Disable it for now to prevent wreckage.
diff --git a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 782d60f..ed95484 100644
--- a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -1,7 +1,7 @@
 From: Julia Cartwright <julia at ni.com>
 Date: Wed, 12 Oct 2016 11:21:14 -0500
 Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The forcing of an expedited grace period is an expensive and very
 RT-application unfriendly operation, as it forcibly preempts all running
diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
index 8affdad..c9cf13d 100644
--- a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Mar 2014 20:19:05 +0100
 Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since it is no longer invoked from the softirq people run into OOM more
 often if the priority of the RCU thread is too low. Making boosting
diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index ac2a0af..9a98bf6 100644
--- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: rcu: Merge RCU-bh into RCU-preempt
 Date: Wed, 5 Oct 2011 11:59:38 -0700
 From: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The Linux kernel has long RCU-bh read-side critical sections that
 intolerably increase scheduling latency under mainline's RCU-bh rules,
diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 7deda81..4a43a87 100644
--- a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -1,7 +1,7 @@
 From: Tiejun Chen <tiejun.chen at windriver.com>
 Date: Wed, 18 Dec 2013 17:51:49 +0800
 Subject: rcutree/rcu_bh_qs: Disable irq while calling rcu_preempt_qs()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Any callers to the function rcu_preempt_qs() must disable irqs in
 order to protect the assignment to ->rcu_read_unlock_special. In
diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 577c2ae..7827058 100644
--- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -1,7 +1,7 @@
 From: Yong Zhang <yong.zhang0 at gmail.com>
 Date: Thu, 28 Jul 2011 11:16:00 +0800
 Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When retry happens, it's likely that the task has been migrated to
 another cpu (except unplug failed), but it still derefernces the
diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 56f2845..c4d3431 100644
--- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -1,7 +1,7 @@
 Subject: ARM: Initialize split page table locks for vector page
 From: Frank Rowand <frank.rowand at am.sony.com>
 Date: Sat, 1 Oct 2011 18:58:13 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if
 PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
diff --git a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index 3f076ff..7fbd502 100644
--- a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die()
 From: Grygorii Strashko <grygorii.strashko at ti.com>
 Date: Fri, 11 Sep 2015 21:21:23 +0300
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When running with the RT-kernel (4.1.5-rt5) on TI OMAP dra7-evm and trying
 to do Suspend to RAM, the following backtrace occurs:
diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch
index 3f41faa..4349271 100644
--- a/debian/patches/features/all/rt/rt-add-rt-locks.patch
+++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 19:39:56 +0200
 Subject: rt: Add the preempt-rt lock replacement APIs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
 based locking functions for preempt-rt.
diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
index f7e3603..fc3d556 100644
--- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
 Subject: rt: Introduce cpu_chill()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Retry loops on RT might loop forever when the modifying side was
 preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  #endif /* defined(_LINUX_DELAY_H) */
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1741,6 +1741,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1720,6 +1720,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
  	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
  }
  
diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch
index 2d759bc..a85eb0b 100644
--- a/debian/patches/features/all/rt/rt-local-irq-lock.patch
+++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
 Subject: rt: Add local irq locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Introduce locallock. For !RT this maps to preempt_disable()/
 local_irq_disable() so there is not much that changes. For RT this will
diff --git a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
index 3fd027f..8c81abe 100644
--- a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 8 Feb 2016 16:15:28 +0100
 Subject: rt/locking: Reenable migration accross schedule
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We currently disable migration across lock acquisition. That includes the part
 where we block on the lock and schedule out. We cannot disable migration after
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -981,14 +981,19 @@ static int __try_to_take_rt_mutex(struct
+@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
   * preemptible spin_lock functions:
   */
  static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  }
  
  static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1046,7 +1051,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
   * We store the current state under p->pi_lock in p->saved_state and
   * the try_to_wake_up() code handles this accordingly.
   */
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  {
  	struct task_struct *lock_owner, *self = current;
  	struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1090,8 +1096,13 @@ static void  noinline __sched rt_spin_lo
+@@ -1089,8 +1095,13 @@ static void  noinline __sched rt_spin_lo
  
  		debug_rt_mutex_print_deadlock(&waiter);
  
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  		raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
-@@ -1149,38 +1160,35 @@ static void  noinline __sched rt_spin_lo
+@@ -1148,38 +1159,35 @@ static void  noinline __sched rt_spin_lo
  
  void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
  {
diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch
index 57fe177..79d3648 100644
--- a/debian/patches/features/all/rt/rt-preempt-base-config.patch
+++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch
@@ -1,7 +1,7 @@
 Subject: rt: Provide PREEMPT_RT_BASE config switch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 17 Jun 2011 12:39:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Introduce PREEMPT_RT_BASE which enables parts of
 PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
index aa06d64..f88520c 100644
--- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch
+++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
@@ -1,7 +1,7 @@
 Subject: rt: Improve the serial console PASS_LIMIT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Wed Dec 14 13:05:54 CET 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Beyond the warning:
 
diff --git a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 5f93e2f..68c6915 100644
--- a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle non enqueued waiters gracefully
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 06 Nov 2015 18:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Yimin debugged that in case of a PI wakeup in progress when
 rt_mutex_start_proxy_lock() calls task_blocks_on_rt_mutex() the latter
diff --git a/debian/patches/features/all/rt/rtmutex-Fix-lock-stealing-logic.patch b/debian/patches/features/all/rt/rtmutex-Fix-lock-stealing-logic.patch
new file mode 100644
index 0000000..4a39cd3
--- /dev/null
+++ b/debian/patches/features/all/rt/rtmutex-Fix-lock-stealing-logic.patch
@@ -0,0 +1,162 @@
+From: Mike Galbraith <efault at gmx.de>
+Date: Fri, 23 Jun 2017 09:37:14 +0200
+Subject: rtmutex: Fix lock stealing logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
+
+1. When trying to acquire an rtmutex, we first try to grab it without
+queueing the waiter, and explicitly check for that initial attempt
+in the !waiter path of __try_to_take_rt_mutex().  Checking whether
+the lock taker is top waiter before allowing a steal attempt in that
+path is a thinko: the lock taker has not yet blocked.
+
+2. It seems wrong to change the definition of rt_mutex_waiter_less()
+to mean less or perhaps equal when we have an rt_mutex_waiter_equal().
+
+Remove the thinko, restore rt_mutex_waiter_less(), implement and use
+rt_mutex_steal() based upon rt_mutex_waiter_less/equal(), moving all
+qualification criteria into the function itself.
+
+Reviewed-by: Steven Rostedt (VMware) <rostedt at goodmis.org>
+Signed-off-by: Mike Galbraith <efault at gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/locking/rtmutex.c |   75 +++++++++++++++++++++++------------------------
+ 1 file changed, 37 insertions(+), 38 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -235,26 +235,19 @@ static inline bool unlock_rt_mutex_safe(
+ }
+ #endif
+ 
+-#define STEAL_NORMAL  0
+-#define STEAL_LATERAL 1
+-
+ /*
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+-#define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++#define task_to_waiter(p) &(struct rt_mutex_waiter) \
++	{ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+ 
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+-		     struct rt_mutex_waiter *right, int mode)
++		     struct rt_mutex_waiter *right)
+ {
+-	if (mode == STEAL_NORMAL) {
+-		if (left->prio < right->prio)
+-			return 1;
+-	} else {
+-		if (left->prio <= right->prio)
+-			return 1;
+-	}
++	if (left->prio < right->prio)
++		return 1;
++
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -286,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
+ 	return 1;
+ }
+ 
++#define STEAL_NORMAL  0
++#define STEAL_LATERAL 1
++
++static inline int
++rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
++{
++	struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
++
++	if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
++		return 1;
++
++	/*
++	 * Note that RT tasks are excluded from lateral-steals
++	 * to prevent the introduction of an unbounded latency.
++	 */
++	if (mode == STEAL_NORMAL || rt_task(waiter->task))
++		return 0;
++
++	return rt_mutex_waiter_equal(waiter, top_waiter);
++}
++
+ static void
+ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+ {
+@@ -297,7 +311,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
+ 	while (*link) {
+ 		parent = *link;
+ 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
++		if (rt_mutex_waiter_less(waiter, entry)) {
+ 			link = &parent->rb_left;
+ 		} else {
+ 			link = &parent->rb_right;
+@@ -336,7 +350,7 @@ rt_mutex_enqueue_pi(struct task_struct *
+ 	while (*link) {
+ 		parent = *link;
+ 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
++		if (rt_mutex_waiter_less(waiter, entry)) {
+ 			link = &parent->rb_left;
+ 		} else {
+ 			link = &parent->rb_right;
+@@ -846,6 +860,7 @@ static int rt_mutex_adjust_prio_chain(st
+  * @task:   The task which wants to acquire the lock
+  * @waiter: The waiter that is queued to the lock's wait tree if the
+  *	    callsite called task_blocked_on_lock(), otherwise NULL
++ * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
+  */
+ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
+ 				  struct task_struct *task,
+@@ -885,14 +900,11 @@ static int __try_to_take_rt_mutex(struct
+ 	 */
+ 	if (waiter) {
+ 		/*
+-		 * If waiter is not the highest priority waiter of
+-		 * @lock, give up.
++		 * If waiter is not the highest priority waiter of @lock,
++		 * or its peer when lateral steal is allowed, give up.
+ 		 */
+-		if (waiter != rt_mutex_top_waiter(lock)) {
+-			/* XXX rt_mutex_waiter_less() ? */
++		if (!rt_mutex_steal(lock, waiter, mode))
+ 			return 0;
+-		}
+-
+ 		/*
+ 		 * We can acquire the lock. Remove the waiter from the
+ 		 * lock waiters tree.
+@@ -909,25 +921,12 @@ static int __try_to_take_rt_mutex(struct
+ 		 * not need to be dequeued.
+ 		 */
+ 		if (rt_mutex_has_waiters(lock)) {
+-			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+-
+-			if (task != pown)
+-				return 0;
+-
+-			/*
+-			 * Note that RT tasks are excluded from lateral-steals
+-			 * to prevent the introduction of an unbounded latency.
+-			 */
+-			if (rt_task(task))
+-				mode = STEAL_NORMAL;
+ 			/*
+-			 * If @task->prio is greater than or equal to
+-			 * the top waiter priority (kernel view),
+-			 * @task lost.
++			 * If @task->prio is greater than the top waiter
++			 * priority (kernel view), or equal to it when a
++			 * lateral steal is forbidden, @task lost.
+ 			 */
+-			if (!rt_mutex_waiter_less(task_to_waiter(task),
+-						  rt_mutex_top_waiter(lock),
+-						  mode))
++			if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
+ 				return 0;
+ 			/*
+ 			 * The current top waiter stays enqueued. We
diff --git a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
index 792990a..9c0de93 100644
--- a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
+++ b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:50:59 +0200
 Subject: [PATCH] rtmutex: Make lock_killable work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Locking an rt mutex killable does not work because signal handling is
 restricted to TASK_INTERRUPTIBLE.
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
index 92e84bf..8dcf091 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:01 +0200
 Subject: [PATCH] rtmutex: Provide locked slowpath
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The new rt rwsem implementation needs rtmutex::wait_lock to protect struct
 rw_semaphore. Dropping the lock and reaquiring it for locking the rtmutex
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -1752,30 +1752,13 @@ static void ww_mutex_account_lock(struct
+@@ -1751,30 +1751,13 @@ static void ww_mutex_account_lock(struct
  }
  #endif
  
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  
  #ifdef CONFIG_PREEMPT_RT_FULL
  	if (ww_ctx) {
-@@ -1791,7 +1774,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1790,7 +1773,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (try_to_take_rt_mutex(lock, current, NULL)) {
  		if (ww_ctx)
  			ww_mutex_account_lock(lock, ww_ctx);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		return 0;
  	}
  
-@@ -1801,13 +1783,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1800,13 +1782,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (unlikely(timeout))
  		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
  
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  		/* ww_mutex received EDEADLK, let it become EALREADY */
  		ret = __mutex_lock_check_stamp(lock, ww_ctx);
  		BUG_ON(!ret);
-@@ -1816,10 +1798,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1815,10 +1797,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	if (unlikely(ret)) {
  		__set_current_state(TASK_RUNNING);
  		if (rt_mutex_has_waiters(lock))
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  	} else if (ww_ctx) {
  		ww_mutex_account_lock(lock, ww_ctx);
  	}
-@@ -1829,6 +1811,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1828,6 +1810,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	 * unconditionally. We might have to fix that up.
  	 */
  	fixup_rt_mutex_waiters(lock);
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
index adb408c..562ba87 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:00 +0200
 Subject: [PATCH] rtmutex: Provide rt_mutex_lock_state()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Allow rtmutex to be locked with arbitrary states. Preparatory patch for the
 rt rwsem rework.
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -2020,21 +2020,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -2019,21 +2019,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  }
  
  /**
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
   * @lock:		the rt_mutex to be locked
   *
   * Returns:
-@@ -2043,20 +2054,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -2042,20 +2053,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
   */
  int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
  {
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
  /**
   * rt_mutex_lock_killable - lock a rt_mutex killable
   *
-@@ -2066,16 +2067,21 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -2065,16 +2066,21 @@ int __sched rt_mutex_futex_trylock(struc
   * Returns:
   *  0          on success
   * -EINTR      when interrupted by a signal
diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
index c947dec..5a98f65 100644
--- a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
 Date: Mon, 28 Oct 2013 09:36:37 +0100
 Subject: rtmutex: Add RT aware ww locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 lockdep says:
 | --------------------------------------------------------------------------
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  
  #include "rtmutex_common.h"
  
-@@ -1286,8 +1287,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
+@@ -1285,8 +1286,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
  }
  EXPORT_SYMBOL(atomic_dec_and_spin_lock);
  
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	/*
-@@ -1301,6 +1302,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1300,6 +1301,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
  
  #endif /* PREEMPT_RT_FULL */
  
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  static inline int
  try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  		     struct rt_mutex_waiter *waiter)
-@@ -1581,7 +1616,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1580,7 +1615,8 @@ void rt_mutex_init_waiter(struct rt_mute
  static int __sched
  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
  		    struct hrtimer_sleeper *timeout,
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	int ret = 0;
  
-@@ -1599,6 +1635,12 @@ static int __sched
+@@ -1598,6 +1634,12 @@ static int __sched
  			break;
  		}
  
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irq(&lock->wait_lock);
  
  		debug_rt_mutex_print_deadlock(waiter);
-@@ -1633,13 +1675,91 @@ static void rt_mutex_handle_deadlock(int
+@@ -1632,13 +1674,91 @@ static void rt_mutex_handle_deadlock(int
  	}
  }
  
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  {
  	struct rt_mutex_waiter waiter;
  	unsigned long flags;
-@@ -1657,8 +1777,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1656,8 +1776,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  	 */
  	raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
@@ -225,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  		return 0;
  	}
-@@ -1673,13 +1805,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1672,13 +1804,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
  
  	if (likely(!ret))
  		/* sleep on the mutex */
@@ -251,7 +251,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  	}
  
  	/*
-@@ -1809,29 +1951,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1808,29 +1950,33 @@ static bool __sched rt_mutex_slowunlock(
   */
  static inline int
  rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -289,7 +289,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  
  static inline int
-@@ -1882,7 +2028,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1881,7 +2027,7 @@ void __sched rt_mutex_lock(struct rt_mut
  {
  	might_sleep();
  
@@ -298,7 +298,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock);
  
-@@ -1899,7 +2045,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1898,7 +2044,7 @@ int __sched rt_mutex_lock_interruptible(
  {
  	might_sleep();
  
@@ -307,7 +307,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
  
-@@ -1926,7 +2072,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1925,7 +2071,7 @@ int __sched rt_mutex_lock_killable(struc
  {
  	might_sleep();
  
@@ -316,7 +316,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
  
-@@ -1950,6 +2096,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1949,6 +2095,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
  
  	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
  				       RT_MUTEX_MIN_CHAINWALK,
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  				       rt_mutex_slowlock);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2248,7 +2395,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2247,7 +2394,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
  	raw_spin_lock_irq(&lock->wait_lock);
  	/* sleep on the mutex */
  	set_current_state(TASK_INTERRUPTIBLE);
@@ -333,7 +333,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
  	/*
  	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
  	 * have to fix that up.
-@@ -2315,24 +2462,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2314,24 +2461,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
  	return cleanup;
  }
  
diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
index 03d5da5..2276ac6 100644
--- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
+++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Avoid include hell
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Include only the required raw types. This avoids pulling in the
 complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
index 41b5dba..dac014f 100644
--- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
+++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Handle the various new futex race conditions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT opens a few new interesting race conditions in the rtmutex/futex
 combo due to futex hash bucket lock being a 'sleeping' spinlock and
diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
index 521d325..2fc7113 100644
--- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch
+++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
@@ -1,7 +1,7 @@
 Subject: rtmutex: Add rtmutex_lock_killable()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 09 Jun 2011 11:43:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Add "killable" type to rtmutex. We need this since rtmutex are used as
 "normal" mutexes which do use this type.
diff --git a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
index f703085..3ea9ce3 100644
--- a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
+++ b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed 02 Dec 2015 11:34:07 +0100
 Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
 -RT we don't run softirqs in IRQ context but in thread context so it is
diff --git a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
index e967d5c..1008ee8 100644
--- a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
+++ b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
@@ -1,6 +1,6 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The RCU header pulls in spinlock.h and fails due not yet defined types:
 
diff --git a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
index f721cf9..b0ce08c 100644
--- a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
+++ b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 1 Apr 2017 12:51:02 +0200
 Subject: [PATCH] rwsem/rt: Lift single reader restriction
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The RT specific R/W semaphore implementation restricts the number of readers
 to one because a writer cannot block on multiple readers and inherit its
diff --git a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
index f5355a5..54dae11 100644
--- a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
+++ b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 21 Oct 2016 10:54:50 +0200
 Subject: [PATCH] rxrpc: remove unused static variables
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The rxrpc_security_methods and rxrpc_security_sem user has been removed
 in 648af7fca159 ("rxrpc: Absorb the rxkad security module"). This was
diff --git a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
index d931c72..2afef97 100644
--- a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
+++ b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -1,7 +1,7 @@
 From: Paul Gortmaker <paul.gortmaker at windriver.com>
 Date: Sat, 14 Feb 2015 11:01:16 -0500
 Subject: sas-ata/isci: dont't disable interrupts in qc_issue handler
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On 3.14-rt we see the following trace on Canoe Pass for
 SCSI_ISCI "Intel(R) C600 Series Chipset SAS Controller"
diff --git a/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
index 2a21f77..da66eb2 100644
--- a/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+++ b/debian/patches/features/all/rt/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 6 Jun 2017 14:20:37 +0200
 Subject: sched: Prevent task state corruption by spurious lock wakeup
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Mathias and others reported GDB failures on RT.
 
diff --git a/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch b/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch
index bae2e4f..947518e 100644
--- a/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch
+++ b/debian/patches/features/all/rt/sched-Remove-TASK_ALL.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Wed, 7 Jun 2017 10:12:45 +0200
 Subject: [PATCH] sched: Remove TASK_ALL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 It's unused:
 
diff --git a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 60f6122..fd116f8 100644
--- a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -1,7 +1,7 @@
 From: Juri Lelli <juri.lelli at gmail.com>
 Date: Tue, 13 May 2014 15:30:20 +0200
 Subject: sched/deadline: dl_task_timer has to be irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 As for rt_period_timer, dl_task_timer has to be irqsafe.
 
diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch
index c9ce15b..9ae83f7 100644
--- a/debian/patches/features/all/rt/sched-delay-put-task.patch
+++ b/debian/patches/features/all/rt/sched-delay-put-task.patch
@@ -1,7 +1,7 @@
 Subject: sched: Move task_struct cleanup to RCU
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 31 May 2011 16:59:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 __put_task_struct() does quite some expensive work. We don't want to
 burden random tasks with that.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1052,6 +1052,9 @@ struct task_struct {
+@@ -1047,6 +1047,9 @@ struct task_struct {
  	unsigned int			sequential_io;
  	unsigned int			sequential_io_avg;
  #endif
diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
index 52d5c61..6fb2055 100644
--- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Carsten reported problems when running:
 
diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
index ccee435..e20fa58 100644
--- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
 Subject: sched: Disable TTWU_QUEUE on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The queued remote wakeup mechanism can introduce rather large
 latencies if the number of migrated tasks is high. Disable it for RT.
diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
index d8dfc59..b9f37ab 100644
--- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
+++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
 Subject: sched: Limit the number of task migrations per batch
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Put an upper limit on the number of tasks which are migrated per batch
 to avoid large latencies.
diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
index ef0bfde..e7a76a3 100644
--- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
 Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT changes the rcu_preempt_depth semantics, so we cannot check for it
 in might_sleep().
diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
index b88f909..12933f1 100644
--- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
+++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
 Subject: sched: Move mmdrop to RCU on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Takes sleeping locks and calls into the memory allocator, so nothing
 we want to do in task switch and oder atomic contexts.
diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
index 2b91fa6..3815727 100644
--- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
+++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
@@ -1,7 +1,7 @@
 Subject: sched: Add saved_state for tasks blocked on sleeping locks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 25 Jun 2011 09:21:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Spinlocks are state preserving in !RT. RT changes the state when a
 task gets blocked on a lock. So we need to remember the state before
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	void				*stack;
  	atomic_t			usage;
  	/* Per task flags (PF_*), defined further below: */
-@@ -1415,6 +1417,7 @@ extern struct task_struct *find_task_by_
+@@ -1410,6 +1412,7 @@ extern struct task_struct *find_task_by_
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
index 84886c1..9a88972 100644
--- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
@@ -1,7 +1,7 @@
 Subject: sched: ttwu: Return success when only changing the saved_state value
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 13 Dec 2011 21:42:19 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When a task blocks on a rt lock, it saves the current state in
 p->saved_state, so a lock related wake up will not destroy the
diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 0f16e03..972fefb 100644
--- a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Mon, 18 Mar 2013 15:12:49 -0400
 Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 In -rt, most spin_locks() turn into mutexes. One of these spin_lock
 conversions is performed on the workqueue gcwq->lock. When the idle
diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
index 2adcf21..261d7c9 100644
--- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
@@ -1,7 +1,7 @@
 Subject: scsi/fcoe: Make RT aware.
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Do not disable preemption while taking sleeping locks. All user look safe
 for migrate_diable() only.
diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
index 7280dd1..4b6505d 100644
--- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
 Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll
 From: John Kacur <jkacur at redhat.com>
 Date: Fri, 27 Apr 2012 12:48:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 RT triggers the following:
 
diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
index 11398d3..6abf56c 100644
--- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
+++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
@@ -1,7 +1,7 @@
 Subject: seqlock: Prevent rt starvation
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 22 Feb 2012 12:03:30 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If a low prio writer gets preempted while holding the seqlock write
 locked, a high prio reader spins forever on RT.
diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
index 88730ae..3e3ce2c 100644
--- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
+++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
@@ -1,7 +1,7 @@
 Subject: signal: Make __lock_task_sighand() RT aware
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 22 Jul 2011 08:07:08 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 local_irq_save() + spin_lock(&sighand->siglock) does not work on
 -RT. Use the nort variants.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1287,12 +1287,12 @@ struct sighand_struct *__lock_task_sigha
+@@ -1295,12 +1295,12 @@ struct sighand_struct *__lock_task_sigha
  		 * Disable interrupts early to avoid deadlocks.
  		 * See rcu_read_unlock() comment header for details.
  		 */
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  			break;
  		}
  		/*
-@@ -1313,7 +1313,7 @@ struct sighand_struct *__lock_task_sigha
+@@ -1321,7 +1321,7 @@ struct sighand_struct *__lock_task_sigha
  		}
  		spin_unlock(&sighand->siglock);
  		rcu_read_unlock();
diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
index 8507534..dabc17f 100644
--- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
 Subject: signal: Revert ptrace preempt magic
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
 than a bandaid around the ptrace design trainwreck. It's not a
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
-@@ -1857,15 +1857,7 @@ static void ptrace_stop(int exit_code, i
+@@ -1865,15 +1865,7 @@ static void ptrace_stop(int exit_code, i
  		if (gstop_done && ptrace_reparented(current))
  			do_notify_parent_cldstop(current, false, why);
  
diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index d59de91..40a7bbe 100644
--- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 3 Jul 2009 08:44:56 -0500
 Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 To avoid allocation allow rt tasks to cache one sigqueue struct in
 task struct.
@@ -170,17 +170,17 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
   * Flush all pending signals for this kthread.
   */
  void flush_signals(struct task_struct *t)
-@@ -532,7 +590,7 @@ static void collect_signal(int sig, stru
- still_pending:
- 		list_del_init(&first->list);
- 		copy_siginfo(info, &first->info);
+@@ -539,7 +597,7 @@ static void collect_signal(int sig, stru
+ 			(info->si_code == SI_TIMER) &&
+ 			(info->si_sys_private);
+ 
 -		__sigqueue_free(first);
 +		sigqueue_free_current(first);
  	} else {
  		/*
  		 * Ok, it wasn't in the queue.  This must be
-@@ -567,6 +625,8 @@ int dequeue_signal(struct task_struct *t
- {
+@@ -575,6 +633,8 @@ int dequeue_signal(struct task_struct *t
+ 	bool resched_timer = false;
  	int signr;
  
 +	WARN_ON_ONCE(tsk != current);
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	/* We only dequeue private signals from ourselves, we don't let
  	 * signalfd steal them
  	 */
-@@ -1496,7 +1556,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1504,7 +1564,8 @@ EXPORT_SYMBOL(kill_pid);
   */
  struct sigqueue *sigqueue_alloc(void)
  {
diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
index 7923cd8..ae67390 100644
--- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch
+++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 12 Jul 2011 15:38:34 +0200
 Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the rps lock as rawlock so we can keep irq-off regions. It looks low
 latency. However we can't kfree() from this context therefore we defer this
diff --git a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
index 21ccce6..4fee922 100644
--- a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 15 Apr 2015 19:00:47 +0200
 Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
index efdb64b..b48ce8d 100644
--- a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
 Subject: slub: Enable irqs for __GFP_WAIT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
 with GFP_WAIT can happen before that. So use this as an indicator.
diff --git a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
index 6118a8e..f02b12a 100644
--- a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
+++ b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 15:09:23 +0100
 Subject: snd/pcm: fix snd_pcm_stream_lock*() irqs_disabled() splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Locking functions previously using read_lock_irq()/read_lock_irqsave() were
 changed to local_irq_disable/save(), leading to gripes.  Use nort variants.
diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
index c90b53c..8546fe9 100644
--- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Disable softirq stacks for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Disable extra stacks for softirqs. We want to preempt softirqs and
 having them on special IRQ-stack does not make this easier.
diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
index 19cf8fd..dbea99e 100644
--- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
 Subject: softirq: Check preemption after reenabling interrupts
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 raise_softirq_irqoff() disables interrupts and wakes the softirq
 daemon, but after reenabling interrupts there is no preemption check,
diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch
index 56df96c..375ce3d 100644
--- a/debian/patches/features/all/rt/softirq-split-locks.patch
+++ b/debian/patches/features/all/rt/softirq-split-locks.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 04 Oct 2012 14:20:47 +0100
 Subject: softirq: Split softirq locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The 3.x RT series removed the split softirq implementation in favour
 of pushing softirq processing into the context of the thread which
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1055,6 +1055,8 @@ struct task_struct {
+@@ -1050,6 +1050,8 @@ struct task_struct {
  #endif
  #ifdef CONFIG_PREEMPT_RT_BASE
  	struct rcu_head			put_rcu;
@@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  #endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long			task_state_change;
-@@ -1227,6 +1229,7 @@ extern struct pid *cad_pid;
+@@ -1222,6 +1224,7 @@ extern struct pid *cad_pid;
  /*
   * Per process flags
   */
diff --git a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index fceefae..784823d 100644
--- a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 20 Jan 2016 16:34:17 +0100
 Subject: softirq: split timer softirqs out of ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
 timer wakeup which can not happen in hardirq context. The prio has been
diff --git a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
index 87ebf81..2ef60ff 100644
--- a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
+++ b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <efault at gmx.de>
 Date: Fri, 20 Jan 2017 18:10:20 +0100
 Subject: [PATCH] softirq: wake the timer softirq if needed
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The irq-exit path only checks the "normal"-softirq thread if it is
 running and ignores the state of the "timer"-softirq thread. It is possible
diff --git a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 5eead40..8df74c1 100644
--- a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Allen Pais <allen.pais at oracle.com>
 Date: Fri, 13 Dec 2013 09:44:41 +0530
 Subject: sparc64: use generic rwsem spinlocks rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Signed-off-by: Allen Pais <allen.pais at oracle.com>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
index e8c4bd9..3dd000a 100644
--- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
+++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
@@ -1,7 +1,7 @@
 Subject: spinlock: Split the lock types header
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 19:34:01 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Split raw_spinlock into its own file and the remaining spinlock_t into
 its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches/features/all/rt/stop-machine-raw-lock.patch b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
index f9327fc..0ea3b8b 100644
--- a/debian/patches/features/all/rt/stop-machine-raw-lock.patch
+++ b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
@@ -1,7 +1,7 @@
 Subject: stop_machine: Use raw spinlocks
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 29 Jun 2011 11:01:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use raw-locks in stomp_machine() to allow locking in irq-off regions.
 
diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index 196e529..f49b6af 100644
--- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:30:27 -0500
 Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Instead of playing with non-preemption, introduce explicit
 startup serialization. This is more robust and cleaner as
diff --git a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 5734feb..bad9072 100644
--- a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Wed, 18 Feb 2015 16:05:28 +0100
 Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
index 09972d8..0139126 100644
--- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
+++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 15 Jul 2010 10:29:00 +0200
 Subject: suspend: Prevent might sleep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 timekeeping suspend/resume calls read_persistant_clock() which takes
 rtc_lock. That results in might sleep warnings because at that point
diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
index 26b6bc8..b8c3b22 100644
--- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch
+++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
 Subject: sysfs: Add /sys/kernel/realtime entry
 From: Clark Williams <williams at redhat.com>
 Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Add a /sys/kernel entry to indicate that the kernel is a
 realtime kernel.
diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 52d7bff..afc0e7e 100644
--- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -1,7 +1,7 @@
 Subject: tasklet: Prevent tasklets from going into infinite spin in RT
 From: Ingo Molnar <mingo at elte.hu>
 Date: Tue Nov 29 20:18:22 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
 and spinlocks turn are mutexes. But this can cause issues with
diff --git a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
index 9959f81..020db18 100644
--- a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <wagi at monom.org>
 Date: Tue, 17 Feb 2015 09:37:44 +0100
 Subject: thermal: Defer thermal wakups to threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will
 call schedule while we run in irq context.
diff --git a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
index 28e6c58..edde985 100644
--- a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
 Subject: tick/broadcast: Make broadcast hrtimer irqsafe
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sat, 27 Feb 2016 10:47:10 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Otherwise we end up with the following:
 
diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
index 152d18b..fd0893d 100644
--- a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
+++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
@@ -1,7 +1,7 @@
 Subject: timekeeping: Split jiffies seqlock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 14 Feb 2013 22:36:59 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
 it can be taken in atomic context on RT.
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
  	if (rcu_needs_cpu(basemono, &next_rcu) ||
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
-@@ -2302,8 +2302,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2323,8 +2323,10 @@ EXPORT_SYMBOL(hardpps);
   */
  void xtime_update(unsigned long ticks)
  {
diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index c2a63f8..60d26cc 100644
--- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -1,7 +1,7 @@
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Fri, 21 Aug 2009 11:56:45 +0200
 Subject: timer: delay waking softirqs from the jiffy tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 People were complaining about broken balancing with the recent -rt
 series.
diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
index d482e0e..4e43950 100644
--- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
+++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
@@ -1,7 +1,7 @@
 Subject: timer-fd: Prevent live lock
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 25 Jan 2012 11:08:40 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 If hrtimer_try_to_cancel() requires a retry, then depending on the
 priority setting te retry loop might prevent timer callback completion
diff --git a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
index 55cf686..3797437 100644
--- a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
+++ b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 1 Mar 2017 16:30:49 +0100
 Subject: [PATCH] timer/hrtimer: check properly for a running timer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 hrtimer_callback_running() checks only whether a timmer is running on a
 CPU in hardirq-context. This is okay for !RT. For RT environment we move
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
-@@ -444,7 +444,13 @@ static inline int hrtimer_is_queued(stru
+@@ -440,7 +440,13 @@ static inline int hrtimer_is_queued(stru
   */
  static inline int hrtimer_callback_running(const struct hrtimer *timer)
  {
diff --git a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
index 18090d4..eb92282 100644
--- a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
+++ b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Wed, 13 Jul 2016 18:22:23 +0200
 Subject: [PATCH] timer: make the base lock raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The part where the base lock is held got more predictable / shorter after the
 timer rework. One reason is the lack of re-cascading.
diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
index 280901a..e132b22 100644
--- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
+++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:34 -0500
 Subject: timers: Prepare for full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When softirqs can be preempted we need to make sure that cancelling
 the timer from the active thread can not deadlock vs. a running timer
diff --git a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
deleted file mode 100644
index 5fbc0ef..0000000
--- a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: Carsten Emde <C.Emde at osadl.org>
-Date: Tue, 5 Jan 2016 10:21:59 +0100
-Subject: trace/latency-hist: Consider new argument when probing the
- sched_switch tracer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-The sched_switch tracer has got a new argument. Fix the latency tracer
-accordingly.
-
-Recently: c73464b1c843 ("sched/core: Fix trace_sched_switch()") since
-v4.4-rc1.
-
-Signed-off-by: Carsten Emde <C.Emde at osadl.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
----
- kernel/trace/latency_hist.c |    4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/trace/latency_hist.c
-+++ b/kernel/trace/latency_hist.c
-@@ -117,7 +117,7 @@ static char *wakeup_latency_hist_dir_sha
- static notrace void probe_wakeup_latency_hist_start(void *v,
- 	struct task_struct *p);
- static notrace void probe_wakeup_latency_hist_stop(void *v,
--	struct task_struct *prev, struct task_struct *next);
-+	bool preempt, struct task_struct *prev, struct task_struct *next);
- static notrace void probe_sched_migrate_task(void *,
- 	struct task_struct *task, int cpu);
- static struct enable_data wakeup_latency_enabled_data = {
-@@ -907,7 +907,7 @@ static notrace void probe_wakeup_latency
- }
- 
- static notrace void probe_wakeup_latency_hist_stop(void *v,
--	struct task_struct *prev, struct task_struct *next)
-+	bool preempt, struct task_struct *prev, struct task_struct *next)
- {
- 	unsigned long flags;
- 	int cpu = task_cpu(next);
diff --git a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
deleted file mode 100644
index 1e17736..0000000
--- a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-Subject: trace: Use rcuidle version for preemptoff_hist trace point
-From: Yang Shi <yang.shi at windriver.com>
-Date: Tue, 23 Feb 2016 13:23:23 -0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
-
-When running -rt kernel with both PREEMPT_OFF_HIST and LOCKDEP enabled,
-the below error is reported:
-
- [ INFO: suspicious RCU usage. ]
- 4.4.1-rt6 #1 Not tainted
- include/trace/events/hist.h:31 suspicious rcu_dereference_check() usage!
-
- other info that might help us debug this:
-
- RCU used illegally from idle CPU!
- rcu_scheduler_active = 1, debug_locks = 0
- RCU used illegally from extended quiescent state!
- no locks held by swapper/0/0.
-
- stack backtrace:
- CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.1-rt6-WR8.0.0.0_standard #1
- Stack : 0000000000000006 0000000000000000 ffffffff81ca8c38 ffffffff81c8fc80
-    ffffffff811bdd68 ffffffff81cb0000 0000000000000000 ffffffff81cb0000
-    0000000000000000 0000000000000000 0000000000000004 0000000000000000
-    0000000000000004 ffffffff811bdf50 0000000000000000 ffffffff82b60000
-    0000000000000000 ffffffff812897ac ffffffff819f0000 000000000000000b
-    ffffffff811be460 ffffffff81b7c588 ffffffff81c8fc80 0000000000000000
-    0000000000000000 ffffffff81ec7f88 ffffffff81d70000 ffffffff81b70000
-    ffffffff81c90000 ffffffff81c3fb00 ffffffff81c3fc28 ffffffff815e6f98
-    0000000000000000 ffffffff81c8fa87 ffffffff81b70958 ffffffff811bf2c4
-    0707fe32e8d60ca5 ffffffff81126d60 0000000000000000 0000000000000000
-    ...
- Call Trace:
- [<ffffffff81126d60>] show_stack+0xe8/0x108
- [<ffffffff815e6f98>] dump_stack+0x88/0xb0
- [<ffffffff8124b88c>] time_hardirqs_off+0x204/0x300
- [<ffffffff811aa5dc>] trace_hardirqs_off_caller+0x24/0xe8
- [<ffffffff811a4ec4>] cpu_startup_entry+0x39c/0x508
- [<ffffffff81d7dc68>] start_kernel+0x584/0x5a0
-
-Replace regular trace_preemptoff_hist to rcuidle version to avoid the error.
-
-Signed-off-by: Yang Shi <yang.shi at windriver.com>
-Cc: bigeasy at linutronix.de
-Cc: rostedt at goodmis.org
-Cc: linux-rt-users at vger.kernel.org
-Link: http://lkml.kernel.org/r/1456262603-10075-1-git-send-email-yang.shi@windriver.com
-Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
----
-I recall the rcuidle version is used by 4.1-rt, but not sure why it is dropped
-in 4.4-rt. It looks such fix is still needed. 
-
- kernel/trace/trace_irqsoff.c |    8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
---- a/kernel/trace/trace_irqsoff.c
-+++ b/kernel/trace/trace_irqsoff.c
-@@ -437,13 +437,13 @@ void start_critical_timings(void)
- {
- 	if (preempt_trace() || irq_trace())
- 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
--	trace_preemptirqsoff_hist(TRACE_START, 1);
-+	trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
- }
- EXPORT_SYMBOL_GPL(start_critical_timings);
- 
- void stop_critical_timings(void)
- {
--	trace_preemptirqsoff_hist(TRACE_STOP, 0);
-+	trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
- 	if (preempt_trace() || irq_trace())
- 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -453,7 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
--	trace_preemptirqsoff_hist(IRQS_ON, 0);
-+	trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
- 	if (!preempt_trace() && irq_trace())
- 		stop_critical_timing(a0, a1);
- }
-@@ -462,7 +462,7 @@ void time_hardirqs_off(unsigned long a0,
- {
- 	if (!preempt_trace() && irq_trace())
- 		start_critical_timing(a0, a1);
--	trace_preemptirqsoff_hist(IRQS_OFF, 1);
-+	trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
- }
- 
- #else /* !CONFIG_PROVE_LOCKING */
diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
index bd30cef..e1306c0 100644
--- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -1,7 +1,7 @@
 From: Steven Rostedt <rostedt at goodmis.org>
 Date: Thu, 29 Sep 2011 12:24:30 -0500
 Subject: tracing: Account for preempt off in preempt_schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 The preempt_schedule() uses the preempt_disable_notrace() version
 because it can cause infinite recursion by the function tracer as
diff --git a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 4039bad..eaba0c9 100644
--- a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Apr 2016 16:55:02 +0200
 Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 An oops with irqs off (panic() from irqsafe hrtimer like the watchdog
 timer) will lead to a lockdep warning on each invocation and as such
diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 2d416c7..4c6afec 100644
--- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
 Subject: net: Remove preemption disabling in netif_rx()
 From: Priyanka Jain <Priyanka.Jain at freescale.com>
 Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 1)enqueue_to_backlog() (called from netif_rx) should be
   bind to a particluar CPU. This can be achieved by
diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
index d32de09..4ec3b65 100644
--- a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
+++ b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Fri, 8 Nov 2013 17:34:54 +0100
 Subject: usb: Use _nort in giveback function
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
 context") I see
diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
index 33afe1e..4c5b0a4 100644
--- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Tue, 21 Jul 2009 23:06:05 +0200
 Subject: core: Do not disable interrupts on RT in kernel/users.c
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use the local_irq_*_nort variants to reduce latencies in RT. The code
 is serialized by the locks. No need to disable interrupts.
diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
index 75d7e19..d3d6d5f 100644
--- a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 28 Oct 2013 12:19:57 +0100
 Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 |  CC      init/main.o
 |In file included from include/linux/mmzone.h:9:0,
diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
index 4ddead6..9edc8d7 100644
--- a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 01 Jul 2013 11:02:42 +0200
 Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 An Intel i7 system regularly detected rcu_preempt stalls after the kernel
 was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no
diff --git a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
index 3e757aa..10ee51a 100644
--- a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
+++ b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
@@ -1,7 +1,7 @@
 From: Daniel Wagner <daniel.wagner at bmw-carit.de>
 Date: Fri, 11 Jul 2014 15:26:11 +0200
 Subject: work-simple: Simple work queue implemenation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Provides a framework for enqueuing callbacks from irq context
 PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
index 20ffd76..0911bda 100644
--- a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
+++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
@@ -22,7 +22,7 @@ Cc: Jens Axboe <axboe at kernel.dk>
 Cc: Linus Torvalds <torvalds at linux-foundation.org>
 Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
 Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 ---
  kernel/sched/core.c         |   86 +++++++-------------------------------------
@@ -32,7 +32,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1690,10 +1690,6 @@ static inline void ttwu_activate(struct
+@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
  {
  	activate_task(rq, p, en_flags);
  	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -43,7 +43,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
  }
  
  /*
-@@ -2146,58 +2142,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2157,58 +2153,6 @@ try_to_wake_up(struct task_struct *p, un
  }
  
  /**
@@ -102,7 +102,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -3485,21 +3429,6 @@ static void __sched notrace __schedule(b
+@@ -3496,21 +3440,6 @@ static void __sched notrace __schedule(b
  				atomic_inc(&rq->nr_iowait);
  				delayacct_blkio_start();
  			}
@@ -124,7 +124,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -3564,6 +3493,14 @@ static inline void sched_submit_work(str
+@@ -3575,6 +3504,14 @@ static inline void sched_submit_work(str
  {
  	if (!tsk->state || tsk_is_pi_blocked(tsk))
  		return;
@@ -139,7 +139,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -3572,6 +3509,12 @@ static inline void sched_submit_work(str
+@@ -3583,6 +3520,12 @@ static inline void sched_submit_work(str
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -152,7 +152,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4
  asmlinkage __visible void __sched schedule(void)
  {
  	struct task_struct *tsk = current;
-@@ -3582,6 +3525,7 @@ asmlinkage __visible void __sched schedu
+@@ -3593,6 +3536,7 @@ asmlinkage __visible void __sched schedu
  		__schedule(false);
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
diff --git a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
index 44df623..84d1846 100644
--- a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
+++ b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Prevent deadlock/stall on RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Austin reported a XFS deadlock/stall on RT where scheduled work gets
 never exececuted and tasks are waiting for each other for ever.
@@ -44,7 +44,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3615,9 +3615,8 @@ void __noreturn do_task_dead(void)
+@@ -3626,9 +3626,8 @@ void __noreturn do_task_dead(void)
  
  static inline void sched_submit_work(struct task_struct *tsk)
  {
@@ -55,7 +55,7 @@ Cc: Steven Rostedt <rostedt at goodmis.org>
  	/*
  	 * If a worker went to sleep, notify and ask workqueue whether
  	 * it wants to wake up a task to maintain concurrency.
-@@ -3625,6 +3624,10 @@ static inline void sched_submit_work(str
+@@ -3636,6 +3635,10 @@ static inline void sched_submit_work(str
  	if (tsk->flags & PF_WQ_WORKER)
  		wq_worker_sleeping(tsk);
  
diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch
index 8efb3e0..23e6b51 100644
--- a/debian/patches/features/all/rt/workqueue-use-locallock.patch
+++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Use local irq lock instead of irq disable regions
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 17 Jul 2011 21:42:26 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Use a local_irq_lock as a replacement for irq off regions. We keep the
 semantic of irq-off in regard to the pool->lock and remain preemptible.
diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch
index fc340b7..20f893b 100644
--- a/debian/patches/features/all/rt/workqueue-use-rcu.patch
+++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch
@@ -1,7 +1,7 @@
 Subject: workqueue: Use normal rcu
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Wed, 24 Jul 2013 15:26:54 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 There is no need for sched_rcu. The undocumented reason why sched_rcu
 is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
diff --git a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
index d834094..f513eb0 100644
--- a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
+++ b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
@@ -1,7 +1,7 @@
 From: Mike Galbraith <umgwanakikbuti at gmail.com>
 Date: Sun, 2 Nov 2014 08:31:37 +0100
 Subject: x86: UV: raw_spinlock conversion
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Shrug.  Lots of hobbyists have a beast in their basement, right?
 
diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
index 9d23b42..8044492 100644
--- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
 Subject: x86: crypto: Reduce preempt disabled regions
 From: Peter Zijlstra <peterz at infradead.org>
 Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Restrict the preempt disabled regions to the actual floating point
 operations and enable preemption for the administrative actions.
diff --git a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
index 9c59d28..26a0059 100644
--- a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
 Date: Mon, 11 Mar 2013 17:09:55 +0100
 Subject: x86/highmem: Add a "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 This is a copy from kmap_atomic_prot().
 
diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
index dd5c3b7..291f036 100644
--- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
+++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo at elte.hu>
 Date: Fri, 3 Jul 2009 08:29:27 -0500
 Subject: x86/ioapic: Do not unmask io_apic when interrupt is in progress
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 With threaded interrupts we might see an interrupt in progress on
 migration. Do not unmask it when this is the case.
diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
index 5509b53..cae263b 100644
--- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
 Subject: x86: kvm Require const tsc for RT
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Non constant TSC is a nightmare on bare metal already, but with
 virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
 
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -6105,6 +6105,13 @@ int kvm_arch_init(void *opaque)
+@@ -6107,6 +6107,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
index 566c969..19c8608 100644
--- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
+++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Mon, 13 Dec 2010 16:33:39 +0100
 Subject: x86: Convert mce timer to hrtimer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 mce_timer is started in atomic contexts of cpu bringup. This results
 in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
diff --git a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 88f7072..d6bc8cf 100644
--- a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -1,7 +1,7 @@
 Subject: x86/mce: use swait queue for mce wakeups
 From: Steven Rostedt <rostedt at goodmis.org>
 Date:	Fri, 27 Feb 2015 15:20:37 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 We had a customer report a lockup on a 3.0-rt kernel that had the
 following backtrace:
diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch
index 0d76b6b..5af5852 100644
--- a/debian/patches/features/all/rt/x86-preempt-lazy.patch
+++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
 Subject: x86: Support for lazy preemption
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Implement the x86 pieces for lazy preempt.
 
diff --git a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
index 8768e14..a77ac41 100644
--- a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
 From: Yang Shi <yang.shi at linaro.org>
 Date: Thu, 10 Dec 2015 10:58:51 -0800
 Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 When running some ptrace single step tests on x86-32 machine, the below problem
 is triggered:
diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
index 3580bfb..c864e2d 100644
--- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Thu, 16 Dec 2010 14:25:18 +0100
 Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 CPU bringup calls into the random pool to initialize the stack
 canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
index 039513c..995f935 100644
--- a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx at linutronix.de>
 Date: Sun, 26 Jul 2009 02:21:32 +0200
 Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.8-rt5.tar.xz
 
 Simplifies the separation of anon_rw_semaphores and rw_semaphores for
 -rt.
diff --git a/debian/patches/series-rt b/debian/patches/series-rt
index 78cf92e..0921095 100644
--- a/debian/patches/series-rt
+++ b/debian/patches/series-rt
@@ -46,6 +46,8 @@ features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
 features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
 features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
 
+features/all/rt/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
+
 ###
 # get_online_cpus() rework.
 # cpus_allowed queue from sched/core
@@ -135,6 +137,7 @@ features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
 features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
 features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
 features/all/rt/rxrpc-remove-unused-static-variables.patch
+features/all/rt/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
 
 # Wants a different fix for upstream
 features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -180,6 +183,40 @@ features/all/rt/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
 features/all/rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
 features/all/rt/add_migrate_disable.patch
 
+# tracing: Inter-event (e.g. latency) support | 2017-06-27
+features/all/rt/0001-tracing-Add-hist_field_name-accessor.patch
+features/all/rt/0002-tracing-Reimplement-log2.patch
+features/all/rt/0003-ring-buffer-Add-interface-for-setting-absolute-time-.patch
+features/all/rt/0004-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
+features/all/rt/0005-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
+features/all/rt/0006-tracing-Add-ring-buffer-event-param-to-hist-field-fu.patch
+features/all/rt/0007-tracing-Increase-tracing-map-KEYS_MAX-size.patch
+features/all/rt/0008-tracing-Break-out-hist-trigger-assignment-parsing.patch
+features/all/rt/0009-tracing-Make-traceprobe-parsing-code-reusable.patch
+features/all/rt/0010-tracing-Add-NO_DISCARD-event-file-flag.patch
+features/all/rt/0011-tracing-Add-post-trigger-flag-to-hist-trigger-comman.patch
+features/all/rt/0012-tracing-Add-hist-trigger-timestamp-support.patch
+features/all/rt/0013-tracing-Add-per-element-variable-support-to-tracing_.patch
+features/all/rt/0014-tracing-Add-hist_data-member-to-hist_field.patch
+features/all/rt/0015-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
+features/all/rt/0016-tracing-Add-variable-support-to-hist-triggers.patch
+features/all/rt/0017-tracing-Account-for-variables-in-named-trigger-compa.patch
+features/all/rt/0018-tracing-Add-simple-expression-support-to-hist-trigge.patch
+features/all/rt/0019-tracing-Add-variable-reference-handling-to-hist-trig.patch
+features/all/rt/0020-tracing-Add-support-for-dynamic-tracepoints.patch
+features/all/rt/0021-tracing-Add-hist-trigger-action-hook.patch
+features/all/rt/0022-tracing-Add-support-for-synthetic-events.patch
+features/all/rt/0023-tracing-Add-onmatch-hist-trigger-action-support.patch
+features/all/rt/0024-tracing-Add-onmax-hist-trigger-action-support.patch
+features/all/rt/0025-tracing-Allow-whitespace-to-surround-hist-trigger-fi.patch
+features/all/rt/0026-tracing-Make-duplicate-count-from-tracing_map-availa.patch
+features/all/rt/0027-tracing-Add-cpu-field-for-hist-triggers.patch
+features/all/rt/0028-tracing-Add-hist-trigger-support-for-variable-refere.patch
+features/all/rt/0029-tracing-Add-last-error-error-facility-for-hist-trigg.patch
+features/all/rt/0030-tracing-Add-inter-event-hist-trigger-Documentation.patch
+features/all/rt/0031-tracing-Make-tracing_set_clock-non-static.patch
+features/all/rt/0032-tracing-Add-a-clock-attribute-for-hist-triggers.patch
+
 # SCHED BLOCK/WQ
 features/all/rt/block-shorten-interrupt-disabled-regions.patch
 
@@ -245,13 +282,6 @@ features/all/rt/x86-io-apic-migra-no-unmask.patch
 
 # ANON RW SEMAPHORES
 
-# TRACING
-features/all/rt/latencyhist-disable-jump-labels.patch
-features/all/rt/latency-hist.patch
-features/all/rt/latency_hist-update-sched_wakeup-probe.patch
-features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
-features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
-
 ##################################################
 # REAL RT STUFF starts here
 ##################################################
@@ -431,6 +461,8 @@ features/all/rt/spinlock-types-separate-raw.patch
 features/all/rt/rtmutex-avoid-include-hell.patch
 features/all/rt/rtmutex_dont_include_rcu.patch
 features/all/rt/rt-add-rt-locks.patch
+features/all/rt/rtmutex-Fix-lock-stealing-logic.patch
+features/all/rt/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
 features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
 features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
 features/all/rt/rtmutex-Provide-locked-slowpath.patch
@@ -596,7 +628,6 @@ features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
 features/all/rt/cpumask-disable-offstack-on-rt.patch
 
 # RANDOM
-features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-.patch
 features/all/rt/random-make-it-work-on-rt.patch
 features/all/rt/random-avoid-preempt_disable-ed-section.patch
 features/all/rt/char-random-don-t-print-that-the-init-is-done.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git



More information about the Kernel-svn-changes mailing list