[linux] 11/11: [rt] Update to 4.9.30-rt20
debian-kernel at lists.debian.org
Sun Jun 4 02:04:54 UTC 2017
This is an automated email from the git hooks/post-receive script.
benh pushed a commit to branch sid
in repository linux.
commit 894e593fa33788ae72a101c6c8c89fb93a8bd9a9
Author: Ben Hutchings <ben at decadent.org.uk>
Date: Sun Jun 4 03:00:42 2017 +0100
[rt] Update to 4.9.30-rt20
---
debian/changelog | 16 +
.../0001-futex-Avoid-freeing-an-active-timer.patch | 53 +++
...eanup-variable-names-for-futex_top_waiter.patch | 2 +-
...x-Deboost-before-waking-up-the-top-waiter.patch | 180 ++++++++++
...mall-and-harmless-looking-inconsistencies.patch | 57 +++
...-Use-smp_store_release-in-mark_wake_futex.patch | 2 +-
...ex-deadline-Fix-a-PI-crash-for-deadline-t.patch | 169 +++++++++
...rify-mark_wake_futex-memory-barrier-usage.patch | 38 ++
...3-futex-Remove-rt_mutex_deadlock_account_.patch | 2 +-
...ine-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch | 54 +++
.../rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch | 50 +++
...mutex-Provide-futex-specific-rt_mutex-API.patch | 2 +-
.../features/all/rt/0004-rtmutex-Clean-up.patch | 147 ++++++++
.../all/rt/0005-futex-Change-locking-rules.patch | 2 +-
...5-sched-rtmutex-Refactor-rt_mutex_setprio.patch | 393 +++++++++++++++++++++
.../all/rt/0006-futex-Cleanup-refcounting.patch | 2 +-
...hed-tracing-Update-trace_sched_pi_setprio.patch | 109 ++++++
...ework-inconsistent-rt_mutex-futex_q-state.patch | 2 +-
...0007-rtmutex-Fix-PI-chain-order-integrity.patch | 122 +++++++
...rt_mutex_futex_unlock-out-from-under-hb-l.patch | 2 +-
.../0008-rtmutex-Fix-more-prio-comparisons.patch | 102 ++++++
...x-rt_mutex-Introduce-rt_mutex_init_waiter.patch | 2 +-
...g-preempt-count-leak-in-rt_mutex_futex_un.patch | 43 +++
...tex-Restructure-rt_mutex_finish_proxy_loc.patch | 2 +-
...k-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch | 2 +-
.../0012-futex-Futex_unlock_pi-determinism.patch | 2 +-
...-hb-lock-before-enqueueing-on-the-rtmutex.patch | 2 +-
...irq-in-translation-section-permission-fau.patch | 2 +-
...CK-printk-drop-the-logbuf_lock-more-often.patch | 2 +-
...64-downgrade-preempt_disable-d-region-to-.patch | 2 +-
...lapic-mark-LAPIC-timer-handler-as-irqsafe.patch | 2 +-
...NFSv4-replace-seqcount_t-with-a-seqlock_t.patch | 6 +-
...mers-Don-t-wake-ktimersoftd-on-every-tick.patch | 218 ++++++++++++
...vert-acpi_gbl_hardware-lock-back-to-a-raw.patch | 2 +-
.../rt/arch-arm64-Add-lazy-preempt-support.patch | 2 +-
...t-remove-irq-handler-when-clock-is-unused.patch | 2 +-
...-at91-tclib-default-to-tclib-timer-for-rt.patch | 2 +-
.../all/rt/arm-convert-boot-lock-to-raw.patch | 2 +-
.../all/rt/arm-enable-highmem-for-rt.patch | 2 +-
.../all/rt/arm-highmem-flush-tlb-on-unmap.patch | 2 +-
.../rt/arm-include-definition-for-cpumask_t.patch | 2 +-
...arm-kprobe-replace-patch_lock-to-raw-lock.patch | 2 +-
.../features/all/rt/arm-preempt-lazy-support.patch | 2 +-
.../features/all/rt/arm-unwind-use_raw_lock.patch | 2 +-
.../rt/arm64-xen--Make-XEN-depend-on-non-rt.patch | 2 +-
.../all/rt/at91_dont_enable_disable_clock.patch | 2 +-
.../all/rt/ata-disable-interrupts-if-non-rt.patch | 2 +-
.../features/all/rt/block-blk-mq-use-swait.patch | 2 +-
.../block-mq-don-t-complete-requests-via-IPI.patch | 2 +-
.../all/rt/block-mq-drop-preempt-disable.patch | 2 +-
.../features/all/rt/block-mq-use-cpu_light.patch | 2 +-
.../block-shorten-interrupt-disabled-regions.patch | 12 +-
.../features/all/rt/block-use-cpu-chill.patch | 2 +-
.../all/rt/bug-rt-dependend-variants.patch | 2 +-
...ps-scheduling-while-atomic-in-cgroup-code.patch | 2 +-
.../cgroups-use-simple-wait-in-css_release.patch | 8 +-
...-drivers-timer-atmel-pit-fix-double-free_.patch | 2 +-
...clocksource-tclib-allow-higher-clockrates.patch | 2 +-
.../all/rt/completion-use-simple-wait-queues.patch | 2 +-
.../all/rt/cond-resched-lock-rt-tweak.patch | 2 +-
.../features/all/rt/cond-resched-softirq-rt.patch | 8 +-
...n_proc-Protect-send_msg-with-a-local-lock.patch | 2 +-
...g-Document-why-PREEMPT_RT-uses-a-spinlock.patch | 2 +-
...ke-hotplug-lock-a-sleeping-spinlock-on-rt.patch | 2 +-
.../features/all/rt/cpu-rt-rework-cpu-down.patch | 6 +-
...l-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 2 +-
.../all/rt/cpu_down_move_migrate_enable_back.patch | 2 +-
...req-drop-K8-s-driver-from-beeing-selected.patch | 2 +-
.../all/rt/cpumask-disable-offstack-on-rt.patch | 2 +-
...t-Convert-callback_lock-to-raw_spinlock_t.patch | 2 +-
...educe-preempt-disabled-regions-more-algos.patch | 2 +-
.../patches/features/all/rt/debugobjects-rt.patch | 2 +-
.../patches/features/all/rt/dm-make-rt-aware.patch | 2 +-
...ck-zram-Replace-bit-spinlocks-with-rtmute.patch | 3 +-
.../rt/drivers-net-8139-disable-irq-nosync.patch | 2 +-
.../rt/drivers-net-vortex-fix-locking-issues.patch | 2 +-
...ers-random-reduce-preempt-disabled-region.patch | 2 +-
.../all/rt/drivers-tty-fix-omap-lock-crap.patch | 2 +-
.../rt/drivers-tty-pl011-irq-disable-madness.patch | 2 +-
...m-Don-t-disable-preemption-in-zcomp_strea.patch | 3 +-
...15-drop-trace_i915_gem_ring_dispatch-onrt.patch | 4 +-
...ock_irq()_in_intel_pipe_update_startend().patch | 2 +-
...empt_disableenable_rt()_where_recommended.patch | 2 +-
.../features/all/rt/epoll-use-get-cpu-light.patch | 2 +-
.../all/rt/fs-aio-simple-simple-work.patch | 2 +-
.../features/all/rt/fs-block-rt-support.patch | 2 +-
.../features/all/rt/fs-dcache-include-wait.h.patch | 2 +-
.../rt/fs-dcache-init-in_lookup_hashtable.patch | 2 +-
.../fs-dcache-use-cpu-chill-in-trylock-loops.patch | 2 +-
...ache-use-swait_queue-instead-of-waitqueue.patch | 2 +-
.../all/rt/fs-jbd-replace-bh_state-lock.patch | 2 +-
...bd2-pull-your-plug-when-waiting-for-space.patch | 2 +-
.../all/rt/fs-namespace-preemption-fix.patch | 2 +-
.../fs-nfs-turn-rmdir_sem-into-a-semaphore.patch | 2 +-
.../all/rt/fs-ntfs-disable-interrupt-non-rt.patch | 2 +-
.../rt/fs-replace-bh_uptodate_lock-for-rt.patch | 2 +-
.../all/rt/ftrace-Fix-trace-header-alignment.patch | 2 +-
.../all/rt/ftrace-migrate-disable-tracing.patch | 2 +-
...e-lock-unlock-symetry-versus-pi_lock-and-.patch | 2 +-
.../features/all/rt/futex-requeue-pi-fix.patch | 6 +-
...-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch | 126 +++++++
...tex-rtmutex-Cure-RT-double-blocking-issue.patch | 62 ++++
...round-migrate_disable-enable-in-different.patch | 6 +-
.../all/rt/genirq-disable-irqpoll-on-rt.patch | 2 +-
...ot-invoke-the-affinity-callback-via-a-wor.patch | 2 +-
.../features/all/rt/genirq-force-threading.patch | 2 +-
...pdate-irq_set_irqchip_state-documentation.patch | 2 +-
.../rt/gpu_don_t_check_for_the_lock_owner.patch | 2 +-
...-set_cpus_allowed_ptr-in-sync_unplug_thre.patch | 2 +-
.../all/rt/hotplug-light-get-online-cpus.patch | 2 +-
...lug-sync_unplug-no-27-5cn-27-in-task-name.patch | 2 +-
.../all/rt/hotplug-use-migrate-disable.patch | 2 +-
...-Move-schedule_work-call-to-helper-thread.patch | 2 +-
.../all/rt/hrtimer-enfore-64byte-alignment.patch | 2 +-
...up-hrtimer-callback-changes-for-preempt-r.patch | 2 +-
.../all/rt/hrtimers-prepare-full-preemption.patch | 2 +-
...warning-from-i915-when-running-on-PREEMPT.patch | 4 +-
.../all/rt/ide-use-nort-local-irq-variants.patch | 2 +-
.../all/rt/idr-use-local-lock-for-protection.patch | 2 +-
.../rt/infiniband-mellanox-ib-use-nort-irq.patch | 2 +-
.../all/rt/inpt-gameport-use-local-irq-nort.patch | 2 +-
.../rt/introduce_migrate_disable_cpu_light.patch | 6 +-
.../all/rt/iommu-amd--Use-WARN_ON_NORT.patch | 2 +-
...don-t-disable-preempt-around-this_cpu_ptr.patch | 2 +-
...don-t-disable-preemption-while-accessing-.patch | 6 +-
.../all/rt/ipc-sem-rework-semaphore-wakeups.patch | 2 +-
...-softirq-processing-in-irq-thread-context.patch | 2 +-
...irqwork-Move-irq-safe-work-to-irq-context.patch | 6 +-
...qwork-push_most_work_into_softirq_context.patch | 6 +-
debian/patches/features/all/rt/jump-label-rt.patch | 2 +-
.../all/rt/kconfig-disable-a-few-options-rt.patch | 2 +-
.../features/all/rt/kconfig-preempt-rt-full.patch | 2 +-
.../kernel-SRCU-provide-a-static-initializer.patch | 2 +-
...fix-cpu-down-problem-if-kthread-s-cpu-is-.patch | 2 +-
...plug-restore-original-cpu-mask-oncpu-down.patch | 2 +-
...ate_disable-do-fastpath-in-atomic-irqs-of.patch | 2 +-
...-mark-perf_cpu_context-s-timer-as-irqsafe.patch | 2 +-
...tk-Don-t-try-to-print-from-IRQ-NMI-region.patch | 2 +-
...d-move-stack-kprobe-clean-up-to-__put_tas.patch | 2 +-
.../rt/kernel-softirq-unlock-with-irqs-on.patch | 2 +-
.../features/all/rt/kgb-serial-hackaround.patch | 2 +-
debian/patches/features/all/rt/latency-hist.patch | 4 +-
.../latency_hist-update-sched_wakeup-probe.patch | 2 +-
.../all/rt/latencyhist-disable-jump-labels.patch | 2 +-
.../leds-trigger-disable-CPU-trigger-on-RT.patch | 2 +-
.../rt/list_bl-fixup-bogus-lockdep-warning.patch | 2 +-
.../list_bl.h-make-list-head-locking-RT-safe.patch | 2 +-
.../all/rt/local-irq-rt-depending-variants.patch | 2 +-
.../all/rt/locallock-add-local_lock_on.patch | 2 +-
debian/patches/features/all/rt/localversion.patch | 4 +-
...-compilation-error-for-CONFIG_MODULES-and.patch | 2 +-
.../rt/lockdep-Fix-per-cpu-static-objects.patch | 4 +-
...dle-statically-initialized-PER_CPU-locks-.patch | 2 +-
.../rt/lockdep-no-softirq-accounting-on-rt.patch | 2 +-
...ftest-fix-warnings-due-to-missing-PREEMPT.patch | 2 +-
...-do-hardirq-context-test-for-raw-spinlock.patch | 2 +-
...ktorture-Do-NOT-include-rwlock.h-directly.patch | 2 +-
...cpu-rwsem-use-swait-for-the-wating-writer.patch | 2 +-
.../features/all/rt/md-disable-bcache.patch | 2 +-
.../all/rt/md-raid5-percpu-handling-rt-aware.patch | 6 +-
.../all/rt/mips-disable-highmem-on-rt.patch | 4 +-
.../mm--rt--Fix-generic-kmap_atomic-for-RT.patch | 2 +-
...dev-don-t-disable-IRQs-in-wb_congested_pu.patch | 2 +-
.../all/rt/mm-bounce-local-irq-save-nort.patch | 2 +-
.../all/rt/mm-convert-swap-to-percpu-locked.patch | 4 +-
.../features/all/rt/mm-disable-sloub-rt.patch | 2 +-
.../patches/features/all/rt/mm-enable-slub.patch | 2 +-
.../features/all/rt/mm-make-vmstat-rt-aware.patch | 2 +-
...ol-Don-t-call-schedule_work_on-in-preempt.patch | 2 +-
.../all/rt/mm-memcontrol-do_not_disable_irq.patch | 12 +-
...ol-mem_cgroup_migrate-replace-another-loc.patch | 4 +-
...m-page-alloc-use-local-lock-on-target-cpu.patch | 2 +-
...m-page_alloc-reduce-lock-sections-further.patch | 2 +-
.../mm-page_alloc-rt-friendly-per-cpu-pages.patch | 8 +-
.../rt/mm-perform-lru_add_drain_all-remotely.patch | 2 +-
.../all/rt/mm-protect-activate-switch-mm.patch | 2 +-
.../all/rt/mm-rt-kmap-atomic-scheduling.patch | 4 +-
.../mm-scatterlist-dont-disable-irqs-on-RT.patch | 2 +-
.../all/rt/mm-vmalloc-use-get-cpu-light.patch | 2 +-
...et-do-not-protect-workingset_shadow_nodes.patch | 4 +-
...smalloc_copy_with_get_cpu_var_and_locking.patch | 2 +-
.../all/rt/mmci-remove-bogus-irq-save.patch | 2 +-
.../all/rt/move_sched_delayed_work_to_helper.patch | 2 +-
.../features/all/rt/mutex-no-spin-on-rt.patch | 2 +-
...napi_schedule_irqoff-disable-interrupts-o.patch | 2 +-
.../net-Qdisc-use-a-seqlock-instead-seqcount.patch | 2 +-
.../all/rt/net-add-a-lock-around-icmp_sk.patch | 2 +-
...k-the-missing-serialization-in-ip_send_un.patch | 2 +-
...r-local-irq-disable-alloc-atomic-headache.patch | 2 +-
...cpuhotplug-drain-input_pkt_queue-lockless.patch | 2 +-
...otect-users-of-napi_alloc_cache-against-r.patch | 2 +-
...ays-take-qdisc-s-busylock-in-__dev_xmit_s.patch | 2 +-
...-iptable-xt-write-recseq-begin-rt-fallout.patch | 2 +-
.../rt/net-make-devnet_rename_seq-a-mutex.patch | 2 +-
...xmit_recursion-to-per-task-variable-on-RT.patch | 4 +-
.../all/rt/net-prevent-abba-deadlock.patch | 2 +-
...-a-way-to-delegate-processing-a-softirq-t.patch | 2 +-
...ev_deactivate_many-use-msleep-1-instead-o.patch | 2 +-
.../features/all/rt/net-use-cpu-chill.patch | 2 +-
.../features/all/rt/net-wireless-warn-nort.patch | 4 +-
.../features/all/rt/oleg-signal-rt-fix.patch | 4 +-
.../all/rt/panic-disable-random-on-rt.patch | 2 +-
...troduce-rcu-bh-qs-where-safe-from-softirq.patch | 2 +-
.../rt/pci-access-use-__wake_up_all_locked.patch | 2 +-
.../features/all/rt/percpu_ida-use-locklocks.patch | 2 +-
.../all/rt/perf-make-swevent-hrtimer-irqsafe.patch | 2 +-
.../features/all/rt/peter_zijlstra-frob-rcu.patch | 2 +-
.../features/all/rt/peterz-percpu-rwsem-rt.patch | 2 +-
.../features/all/rt/peterz-srcu-crypto-chain.patch | 2 +-
.../features/all/rt/pid.h-include-atomic.h.patch | 2 +-
.../pinctrl-qcom-Use-raw-spinlock-variants.patch | 2 +-
debian/patches/features/all/rt/ping-sysrq.patch | 2 +-
.../all/rt/posix-timers-no-broadcast.patch | 2 +-
...osix-timers-thread-posix-cpu-timers-on-rt.patch | 8 +-
.../all/rt/power-disable-highmem-on-rt.patch | 2 +-
.../all/rt/power-use-generic-rwsem-on-rt.patch | 2 +-
...-Disable-in-kernel-MPIC-emulation-for-PRE.patch | 2 +-
.../all/rt/powerpc-preempt-lazy-support.patch | 2 +-
...-device-init.c-adapt-to-completions-using.patch | 2 +-
.../features/all/rt/preempt-lazy-support.patch | 6 +-
.../features/all/rt/preempt-nort-rt-variants.patch | 2 +-
...intk-27-boot-param-to-help-with-debugging.patch | 2 +-
debian/patches/features/all/rt/printk-kill.patch | 2 +-
.../patches/features/all/rt/printk-rt-aware.patch | 2 +-
.../ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 4 +-
.../all/rt/radix-tree-use-local-locks.patch | 2 +-
.../random-avoid-preempt_disable-ed-section.patch | 75 ++++
.../all/rt/random-make-it-work-on-rt.patch | 2 +-
.../rbtree-include-rcu.h-because-we-use-it.patch | 2 +-
...Eliminate-softirq-processing-from-rcutree.patch | 2 +-
.../all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch | 2 +-
...e-rcu_normal_after_boot-by-default-for-RT.patch | 2 +-
.../all/rt/rcu-make-RCU_BOOST-default-on-RT.patch | 2 +-
.../rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch | 2 +-
...rcu-update-make-RCU_EXPEDITE_BOOT-default.patch | 2 +-
..._bh_qs-disable-irq-while-calling-rcu_pree.patch | 2 +-
...-migrate_disable-race-with-cpu-hotplug-3f.patch | 2 +-
...t_full-arm-coredump-fails-for-cpu-3e-3d-4.patch | 2 +-
...ping-function-called-from-invalid-context.patch | 2 +-
.../patches/features/all/rt/rt-add-rt-locks.patch | 303 +++++++++-------
.../rt/rt-drop_mutex_disable_on_not_debug.patch | 2 +-
.../features/all/rt/rt-introduce-cpu-chill.patch | 2 +-
.../features/all/rt/rt-local-irq-lock.patch | 2 +-
...cking-Reenable-migration-accross-schedule.patch | 10 +-
.../features/all/rt/rt-preempt-base-config.patch | 2 +-
.../features/all/rt/rt-serial-warn-fix.patch | 2 +-
...x--Handle-non-enqueued-waiters-gracefully.patch | 4 +-
.../all/rt/rtmutex-Make-lock_killable-work.patch | 4 +-
.../all/rt/rtmutex-Provide-locked-slowpath.patch | 8 +-
.../rt/rtmutex-Provide-rt_mutex_lock_state.patch | 8 +-
.../rt/rtmutex-add-a-first-shot-of-ww_mutex.patch | 28 +-
.../all/rt/rtmutex-avoid-include-hell.patch | 2 +-
.../features/all/rt/rtmutex-futex-prepare-rt.patch | 51 +--
.../features/all/rt/rtmutex-lock-killable.patch | 4 +-
.../all/rt/rtmutex-trylock-is-okay-on-RT.patch | 4 +-
.../features/all/rt/rtmutex_dont_include_rcu.patch | 2 +-
.../rwsem-rt-Lift-single-reader-restriction.patch | 2 +-
.../rt/rxrpc-remove-unused-static-variables.patch | 2 +-
...i-dont-t-disable-interrupts-in-qc_issue-h.patch | 2 +-
...-deadline-dl_task_timer-has-to-be-irqsafe.patch | 2 +-
.../features/all/rt/sched-delay-put-task.patch | 8 +-
.../rt/sched-disable-rt-group-sched-on-rt.patch | 2 +-
.../features/all/rt/sched-disable-ttwu-queue.patch | 2 +-
.../features/all/rt/sched-limit-nr-migrate.patch | 2 +-
...ched-might-sleep-do-not-account-rcu-depth.patch | 4 +-
.../features/all/rt/sched-mmdrop-delayed.patch | 10 +-
.../features/all/rt/sched-rt-mutex-wakeup.patch | 4 +-
...hed-ttwu-ensure-success-return-is-correct.patch | 2 +-
...ueue-Only-wake-up-idle-workers-if-not-blo.patch | 2 +-
.../features/all/rt/scsi-fcoe-rt-aware.patch | 2 +-
...ping-function-called-from-invalid-context.patch | 2 +-
.../all/rt/seqlock-prevent-rt-starvation.patch | 2 +-
.../all/rt/signal-fix-up-rcu-wreckage.patch | 2 +-
.../rt/signal-revert-ptrace-preempt-magic.patch | 2 +-
...low-rt-tasks-to-cache-one-sigqueue-struct.patch | 6 +-
.../features/all/rt/skbufhead-raw-lock.patch | 2 +-
.../all/rt/slub-disable-SLUB_CPU_PARTIAL.patch | 2 +-
.../all/rt/slub-enable-irqs-for-no-wait.patch | 2 +-
...-snd_pcm_stream_lock-irqs_disabled-splats.patch | 2 +-
.../rt/softirq-disable-softirq-stacks-for-rt.patch | 2 +-
.../features/all/rt/softirq-preempt-fix-3-re.patch | 2 +-
.../features/all/rt/softirq-split-locks.patch | 6 +-
...irq-split-timer-softirqs-out-of-ksoftirqd.patch | 2 +-
.../softirq-wake-the-timer-softirq-if-needed.patch | 2 +-
.../sparc64-use-generic-rwsem-spinlocks-rt.patch | 2 +-
.../all/rt/spinlock-types-separate-raw.patch | 2 +-
.../features/all/rt/stop-machine-raw-lock.patch | 2 +-
...ne-convert-stop_machine_run-to-PREEMPT_RT.patch | 2 +-
...ake-svc_xprt_do_enqueue-use-get_cpu_light.patch | 2 +-
.../rt/suspend-prevernt-might-sleep-splats.patch | 2 +-
.../features/all/rt/sysfs-realtime-entry.patch | 2 +-
...klets-from-going-into-infinite-spin-in-rt.patch | 2 +-
.../thermal-Defer-thermal-wakups-to-threads.patch | 2 +-
.../rt/tick-broadcast--Make-hrtimer-irqsafe.patch | 2 +-
.../all/rt/timekeeping-split-jiffies-lock.patch | 2 +-
...delay-waking-softirqs-from-the-jiffy-tick.patch | 2 +-
.../features/all/rt/timer-fd-avoid-live-lock.patch | 4 +-
...rtimer-check-properly-for-a-running-timer.patch | 2 +-
.../all/rt/timer-make-the-base-lock-raw.patch | 2 +-
...mers-Don-t-wake-ktimersoftd-on-every-tick.patch | 2 +-
.../rt/timers-prepare-for-full-preemption.patch | 2 +-
...cy-hist-Consider-new-argument-when-probin.patch | 2 +-
...e_version_for_preemptoff_hist_trace_point.patch | 2 +-
...count-for-preempt-off-in-preempt_schedule.patch | 2 +-
...l-8250-don-t-take-the-trylock-during-oops.patch | 2 +-
...t-remove-preemption-disabling-in-netif_rx.patch | 2 +-
.../all/rt/usb-use-_nort-in-giveback.patch | 4 +-
.../features/all/rt/user-use-local-irq-nort.patch | 2 +-
.../features/all/rt/wait.h-include-atomic.h.patch | 2 +-
...ue-work-around-irqsafe-timer-optimization.patch | 2 +-
...rk-simple-Simple-work-queue-implemenation.patch | 2 +-
.../all/rt/workqueue-distangle-from-rq-lock.patch | 2 +-
.../all/rt/workqueue-prevent-deadlock-stall.patch | 2 +-
.../features/all/rt/workqueue-use-locallock.patch | 2 +-
.../features/all/rt/workqueue-use-rcu.patch | 2 +-
.../all/rt/x86-UV-raw_spinlock-conversion.patch | 2 +-
...t-rid-of-warning-acpi_ioapic_lock-defined.patch | 2 +-
...86-crypto-reduce-preempt-disabled-regions.patch | 2 +-
.../x86-highmem-add-a-already-used-pte-check.patch | 2 +-
.../all/rt/x86-io-apic-migra-no-unmask.patch | 2 +-
.../all/rt/x86-kvm-require-const-tsc-for-rt.patch | 4 +-
.../features/all/rt/x86-mce-timer-hrtimer.patch | 24 +-
.../x86-mce-use-swait-queue-for-mce-wakeups.patch | 8 +-
.../rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch | 2 +-
.../patches/features/all/rt/x86-preempt-lazy.patch | 2 +-
...x86-signal-delay-calling-signals-on-32bit.patch | 2 +-
.../all/rt/x86-stackprot-no-random-on-rt.patch | 2 +-
.../all/rt/x86-use-gen-rwsem-spinlocks-rt.patch | 2 +-
debian/patches/series-rt | 17 +
329 files changed, 2659 insertions(+), 570 deletions(-)
diff --git a/debian/changelog b/debian/changelog
index 1d84072..db4cb5e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -419,6 +419,22 @@ linux (4.9.30-1) UNRELEASED; urgency=medium
work again (Closes: #862723)
* [arm64] serial: pl011: add console matching function (Closes: #861898)
* [rt] Add new GPG subkeys for Sebastian Andrzej Siewior
+ * [rt] Update to 4.9.30-rt20:
+ - rtmutex: Deboost before waking up the top waiter
+ - sched/rtmutex/deadline: Fix a PI crash for deadline tasks
+ - sched/deadline/rtmutex: Dont miss the dl_runtime/dl_period update
+ - rtmutex: Clean up
+ - sched/rtmutex: Refactor rt_mutex_setprio()
+ - sched,tracing: Update trace_sched_pi_setprio()
+ - rtmutex: Fix PI chain order integrity
+ - rtmutex: Fix more prio comparisons
+ - rtmutex: Plug preempt count leak in rt_mutex_futex_unlock()
+ - futex: Avoid freeing an active timer
+ - futex: Fix small (and harmless looking) inconsistencies
+ - futex,rt_mutex: Fix rt_mutex_cleanup_proxy_lock()
+ - Revert "timers: Don't wake ktimersoftd on every tick"
+ - futex/rtmutex: Cure RT double blocking issue
+ - random: avoid preempt_disable()ed section
[ Salvatore Bonaccorso ]
* tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
diff --git a/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
new file mode 100644
index 0000000..98a92ef
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
@@ -0,0 +1,53 @@
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 10 Apr 2017 18:03:36 +0200
+Subject: [PATCH] futex: Avoid freeing an active timer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 97181f9bd57405b879403763284537e27d46963d
+
+Alexander reported a hrtimer debug_object splat:
+
+ ODEBUG: free active (active state 0) object type: hrtimer hint: hrtimer_wakeup (kernel/time/hrtimer.c:1423)
+
+ debug_object_free (lib/debugobjects.c:603)
+ destroy_hrtimer_on_stack (kernel/time/hrtimer.c:427)
+ futex_lock_pi (kernel/futex.c:2740)
+ do_futex (kernel/futex.c:3399)
+ SyS_futex (kernel/futex.c:3447 kernel/futex.c:3415)
+ do_syscall_64 (arch/x86/entry/common.c:284)
+ entry_SYSCALL64_slow_path (arch/x86/entry/entry_64.S:249)
+
+Which was caused by commit:
+
+ cfafcd117da0 ("futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()")
+
+... losing the hrtimer_cancel() in the shuffle. Where previously the
+hrtimer_cancel() was done by rt_mutex_slowlock() we now need to do it
+manually.
+
+Reported-by: Alexander Levin <alexander.levin at verizon.com>
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Fixes: cfafcd117da0 ("futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()")
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704101802370.2906@nanos
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/futex.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2734,8 +2734,10 @@ static int futex_lock_pi(u32 __user *uad
+ out_put_key:
+ put_futex_key(&q.key);
+ out:
+- if (to)
++ if (to) {
++ hrtimer_cancel(&to->timer);
+ destroy_hrtimer_on_stack(&to->timer);
++ }
+ return ret != -EINTR ? ret : -ERESTARTNOINTR;
+
+ uaddr_faulted:
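For context, a minimal kernel-context sketch (editorial illustration, not part of the commit) of the teardown ordering the patch above restores on the futex_lock_pi() exit path; the helper name is made up, while hrtimer_cancel() and destroy_hrtimer_on_stack() are the APIs used in the hunk:

/*
 * Sketch only: an on-stack hrtimer that may still be armed has to be
 * cancelled before its debug object is destroyed, otherwise debugobjects
 * reports the "free active" splat quoted in the patch description.
 */
static void example_timeout_teardown(struct hrtimer_sleeper *to)
{
        if (to) {
                hrtimer_cancel(&to->timer);             /* stop the timer if still queued */
                destroy_hrtimer_on_stack(&to->timer);   /* only then tear down the debug object */
        }
}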
diff --git a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
index 438cc52..487de6e 100644
--- a/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
+++ b/debian/patches/features/all/rt/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:48 +0100
Subject: [PATCH] futex: Cleanup variable names for futex_top_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 499f5aca2cdd5e958b27e2655e7e7f82524f46b1
diff --git a/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
new file mode 100644
index 0000000..2c2eb87
--- /dev/null
+++ b/debian/patches/features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
@@ -0,0 +1,180 @@
+From: Xunlei Pang <xlpang at redhat.com>
+Date: Thu, 23 Mar 2017 15:56:07 +0100
+Subject: [PATCH] rtmutex: Deboost before waking up the top waiter
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 2a1c6029940675abb2217b590512dbf691867ec4
+
+We should deboost before waking the high-priority task, such that we
+don't run two tasks with the same "state" (priority, deadline,
+sched_class, etc).
+
+In order to make sure the boosting task doesn't start running between
+unlock and deboost (due to 'spurious' wakeup), we move the deboost
+under the wait_lock, that way its serialized against the wait loop in
+__rt_mutex_slowlock().
+
+Doing the deboost early can however lead to priority-inversion if
+current would get preempted after the deboost but before waking our
+high-prio task, hence we disable preemption before doing deboost, and
+enabling it after the wake up is over.
+
+This gets us the right semantic order, but most importantly however;
+this change ensures pointer stability for the next patch, where we
+have rt_mutex_setprio() cache a pointer to the top-most waiter task.
+If we, as before this change, do the wakeup first and then deboost,
+this pointer might point into thin air.
+
+[peterz: Changelog + patch munging]
+Suggested-by: Peter Zijlstra <peterz at infradead.org>
+Signed-off-by: Xunlei Pang <xlpang at redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.110065320@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/futex.c | 5 ---
+ kernel/locking/rtmutex.c | 59 +++++++++++++++++++++-------------------
+ kernel/locking/rtmutex_common.h | 2 -
+ 3 files changed, 34 insertions(+), 32 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1458,10 +1458,7 @@ static int wake_futex_pi(u32 __user *uad
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+- if (deboost) {
+- wake_up_q(&wake_q);
+- rt_mutex_adjust_prio(current);
+- }
++ rt_mutex_postunlock(&wake_q, deboost);
+
+ return ret;
+ }
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -371,24 +371,6 @@ static void __rt_mutex_adjust_prio(struc
+ }
+
+ /*
+- * Adjust task priority (undo boosting). Called from the exit path of
+- * rt_mutex_slowunlock() and rt_mutex_slowlock().
+- *
+- * (Note: We do this outside of the protection of lock->wait_lock to
+- * allow the lock to be taken while or before we readjust the priority
+- * of task. We do not use the spin_xx_mutex() variants here as we are
+- * outside of the debug path.)
+- */
+-void rt_mutex_adjust_prio(struct task_struct *task)
+-{
+- unsigned long flags;
+-
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
+- __rt_mutex_adjust_prio(task);
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+-}
+-
+-/*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+@@ -1049,6 +1031,7 @@ static void mark_wakeup_next_waiter(stru
+ * lock->wait_lock.
+ */
+ rt_mutex_dequeue_pi(current, waiter);
++ __rt_mutex_adjust_prio(current);
+
+ /*
+ * As we are waking up the top waiter, and the waiter stays
+@@ -1391,6 +1374,16 @@ static bool __sched rt_mutex_slowunlock(
+ */
+ mark_wakeup_next_waiter(wake_q, lock);
+
++ /*
++ * We should deboost before waking the top waiter task such that
++ * we don't run two tasks with the 'same' priority. This however
++ * can lead to prio-inversion if we would get preempted after
++ * the deboost but before waking our high-prio task, hence the
++ * preempt_disable before unlock. Pairs with preempt_enable() in
++ * rt_mutex_postunlock();
++ */
++ preempt_disable();
++
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ /* check PI boosting */
+@@ -1440,6 +1433,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+ return slowfn(lock);
+ }
+
++/*
++ * Undo pi boosting (if necessary) and wake top waiter.
++ */
++void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
++{
++ wake_up_q(wake_q);
++
++ /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
++ if (deboost)
++ preempt_enable();
++}
++
+ static inline void
+ rt_mutex_fastunlock(struct rt_mutex *lock,
+ bool (*slowfn)(struct rt_mutex *lock,
+@@ -1453,11 +1458,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+
+ deboost = slowfn(lock, &wake_q);
+
+- wake_up_q(&wake_q);
+-
+- /* Undo pi boosting if necessary: */
+- if (deboost)
+- rt_mutex_adjust_prio(current);
++ rt_mutex_postunlock(&wake_q, deboost);
+ }
+
+ /**
+@@ -1570,6 +1571,13 @@ bool __sched __rt_mutex_futex_unlock(str
+ }
+
+ mark_wakeup_next_waiter(wake_q, lock);
++ /*
++ * We've already deboosted, retain preempt_disabled when dropping
++ * the wait_lock to avoid inversion until the wakeup. Matched
++ * by rt_mutex_postunlock();
++ */
++ preempt_disable();
++
+ return true; /* deboost and wakeups */
+ }
+
+@@ -1582,10 +1590,7 @@ void __sched rt_mutex_futex_unlock(struc
+ deboost = __rt_mutex_futex_unlock(lock, &wake_q);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- if (deboost) {
+- wake_up_q(&wake_q);
+- rt_mutex_adjust_prio(current);
+- }
++ rt_mutex_postunlock(&wake_q, deboost);
+ }
+
+ /**
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct
+ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ struct wake_q_head *wqh);
+
+-extern void rt_mutex_adjust_prio(struct task_struct *task);
++extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
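As a reading aid, here is the unlock ordering the patch above establishes, condensed from the rt_mutex_fastunlock() hunk into one annotated kernel-context sketch (not part of the commit):

/*
 * Condensed from the hunks above: the slow path dequeues the top waiter,
 * deboosts and calls preempt_disable() while still holding
 * lock->wait_lock; the actual wakeup and preempt_enable() are deferred
 * to rt_mutex_postunlock() after the lock has been dropped.
 */
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    bool (*slowfn)(struct rt_mutex *lock,
                                   struct wake_q_head *wqh))
{
        WAKE_Q(wake_q);
        bool deboost;

        if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
                return;

        deboost = slowfn(lock, &wake_q);        /* may deboost + preempt_disable() */
        rt_mutex_postunlock(&wake_q, deboost);  /* wake_up_q(), then preempt_enable() if deboosted */
}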
diff --git a/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
new file mode 100644
index 0000000..6bc8aba
--- /dev/null
+++ b/debian/patches/features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
@@ -0,0 +1,57 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Fri, 7 Apr 2017 09:04:07 +0200
+Subject: [PATCH] futex: Fix small (and harmless looking) inconsistencies
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 94ffac5d847cfd790bb37b7cef1cad803743985e
+
+During (post-commit) review Darren spotted a few minor things. One
+(harmless AFAICT) type inconsistency and a comment that wasn't as
+clear as hoped.
+
+Reported-by: Darren Hart (VMWare) <dvhart at infradead.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Darren Hart (VMware) <dvhart at infradead.org>
+Cc: Linus Torvalds <torvalds at linux-foundation.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Thomas Gleixner <tglx at linutronix.de>
+Cc: linux-kernel at vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+ kernel/futex.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1023,7 +1023,8 @@ static int attach_to_pi_state(u32 __user
+ struct futex_pi_state **ps)
+ {
+ pid_t pid = uval & FUTEX_TID_MASK;
+- int ret, uval2;
++ u32 uval2;
++ int ret;
+
+ /*
+ * Userspace might have messed up non-PI and PI futexes [3]
+@@ -1439,6 +1440,11 @@ static int wake_futex_pi(u32 __user *uad
+ if (ret)
+ goto out_unlock;
+
++ /*
++ * This is a point of no return; once we modify the uval there is no
++ * going back and subsequent operations must not fail.
++ */
++
+ raw_spin_lock(&pi_state->owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+@@ -1450,9 +1456,6 @@ static int wake_futex_pi(u32 __user *uad
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+
+- /*
+- * We've updated the uservalue, this unlock cannot fail.
+- */
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+
+ out_unlock:
diff --git a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
index da68128..b11b94c 100644
--- a/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
+++ b/debian/patches/features/all/rt/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:49 +0100
Subject: [PATCH] futex: Use smp_store_release() in mark_wake_futex()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 1b367ece0d7e696cab1c8501bab282cc6a538b3f
diff --git a/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
new file mode 100644
index 0000000..b751d5b
--- /dev/null
+++ b/debian/patches/features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
@@ -0,0 +1,169 @@
+From: Xunlei Pang <xlpang at redhat.com>
+Date: Thu, 23 Mar 2017 15:56:08 +0100
+Subject: [PATCH] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22
+
+A crash happened while I was playing with deadline PI rtmutex.
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000018
+ IP: [<ffffffff810eeb8f>] rt_mutex_get_top_task+0x1f/0x30
+ PGD 232a75067 PUD 230947067 PMD 0
+ Oops: 0000 [#1] SMP
+ CPU: 1 PID: 10994 Comm: a.out Not tainted
+
+ Call Trace:
+ [<ffffffff810b658c>] enqueue_task+0x2c/0x80
+ [<ffffffff810ba763>] activate_task+0x23/0x30
+ [<ffffffff810d0ab5>] pull_dl_task+0x1d5/0x260
+ [<ffffffff810d0be6>] pre_schedule_dl+0x16/0x20
+ [<ffffffff8164e783>] __schedule+0xd3/0x900
+ [<ffffffff8164efd9>] schedule+0x29/0x70
+ [<ffffffff8165035b>] __rt_mutex_slowlock+0x4b/0xc0
+ [<ffffffff81650501>] rt_mutex_slowlock+0xd1/0x190
+ [<ffffffff810eeb33>] rt_mutex_timed_lock+0x53/0x60
+ [<ffffffff810ecbfc>] futex_lock_pi.isra.18+0x28c/0x390
+ [<ffffffff810ed8b0>] do_futex+0x190/0x5b0
+ [<ffffffff810edd50>] SyS_futex+0x80/0x180
+
+This is because rt_mutex_enqueue_pi() and rt_mutex_dequeue_pi()
+are only protected by pi_lock when operating pi waiters, while
+rt_mutex_get_top_task(), will access them with rq lock held but
+not holding pi_lock.
+
+In order to tackle it, we introduce new "pi_top_task" pointer
+cached in task_struct, and add new rt_mutex_update_top_task()
+to update its value, it can be called by rt_mutex_setprio()
+which held both owner's pi_lock and rq lock. Thus "pi_top_task"
+can be safely accessed by enqueue_task_dl() under rq lock.
+
+Originally-From: Peter Zijlstra <peterz at infradead.org>
+Signed-off-by: Xunlei Pang <xlpang at redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Steven Rostedt <rostedt at goodmis.org>
+Reviewed-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.157682758@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/init_task.h | 1 +
+ include/linux/sched.h | 2 ++
+ include/linux/sched/rt.h | 1 +
+ kernel/fork.c | 1 +
+ kernel/locking/rtmutex.c | 29 +++++++++++++++++++++--------
+ kernel/sched/core.c | 2 ++
+ 6 files changed, 28 insertions(+), 8 deletions(-)
+
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -164,6 +164,7 @@ extern struct task_group root_task_group
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
++ .pi_top_task = NULL, \
+ .pi_waiters_leftmost = NULL,
+ #else
+ # define INIT_RT_MUTEXES(tsk)
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1723,6 +1723,8 @@ struct task_struct {
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
++ /* Updated under owner's pi_lock and rq lock */
++ struct task_struct *pi_top_task;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+ #endif
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -19,6 +19,7 @@ static inline int rt_task(struct task_st
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+ extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
++extern void rt_mutex_update_top_task(struct task_struct *p);
+ extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+ static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1417,6 +1417,7 @@ static void rt_mutex_init_task(struct ta
+ #ifdef CONFIG_RT_MUTEXES
+ p->pi_waiters = RB_ROOT;
+ p->pi_waiters_leftmost = NULL;
++ p->pi_top_task = NULL;
+ p->pi_blocked_on = NULL;
+ #endif
+ }
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -321,6 +321,19 @@ rt_mutex_dequeue_pi(struct task_struct *
+ }
+
+ /*
++ * Must hold both p->pi_lock and task_rq(p)->lock.
++ */
++void rt_mutex_update_top_task(struct task_struct *p)
++{
++ if (!task_has_pi_waiters(p)) {
++ p->pi_top_task = NULL;
++ return;
++ }
++
++ p->pi_top_task = task_top_pi_waiter(p)->task;
++}
++
++/*
+ * Calculate task priority from the waiter tree priority
+ *
+ * Return task->normal_prio when the waiter tree is empty or when
+@@ -335,12 +348,12 @@ int rt_mutex_getprio(struct task_struct
+ task->normal_prio);
+ }
+
++/*
++ * Must hold either p->pi_lock or task_rq(p)->lock.
++ */
+ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+ {
+- if (likely(!task_has_pi_waiters(task)))
+- return NULL;
+-
+- return task_top_pi_waiter(task)->task;
++ return task->pi_top_task;
+ }
+
+ /*
+@@ -349,12 +362,12 @@ struct task_struct *rt_mutex_get_top_tas
+ */
+ int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
+ {
+- if (!task_has_pi_waiters(task))
++ struct task_struct *top_task = rt_mutex_get_top_task(task);
++
++ if (!top_task)
+ return newprio;
+
+- if (task_top_pi_waiter(task)->task->prio <= newprio)
+- return task_top_pi_waiter(task)->task->prio;
+- return newprio;
++ return min(top_task->prio, newprio);
+ }
+
+ /*
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3669,6 +3669,8 @@ void rt_mutex_setprio(struct task_struct
+ goto out_unlock;
+ }
+
++ rt_mutex_update_top_task(p);
++
+ trace_sched_pi_setprio(p, prio);
+ oldprio = p->prio;
+
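For reference, the locking contract for the new pi_top_task cache, condensed from the hunks above into a kernel-context sketch (not part of the commit):

/*
 * Writer side: rt_mutex_setprio() calls this with both p->pi_lock and
 * task_rq(p)->lock held, so the cached pointer changes only while both
 * locks are held.
 */
void rt_mutex_update_top_task(struct task_struct *p)
{
        if (!task_has_pi_waiters(p)) {
                p->pi_top_task = NULL;
                return;
        }
        p->pi_top_task = task_top_pi_waiter(p)->task;
}

/*
 * Reader side: holding either p->pi_lock or task_rq(p)->lock suffices,
 * which is what makes enqueue_task_dl() in the quoted oops safe: it runs
 * under the rq lock without taking pi_lock.
 */
struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        return task->pi_top_task;
}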
diff --git a/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
new file mode 100644
index 0000000..d6b8d4b
--- /dev/null
+++ b/debian/patches/features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
@@ -0,0 +1,38 @@
+From: "Darren Hart (VMware)" <dvhart at infradead.org>
+Date: Fri, 14 Apr 2017 15:31:38 -0700
+Subject: [PATCH] futex: Clarify mark_wake_futex memory barrier usage
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd
+
+Clarify the scenario described in mark_wake_futex requiring the
+smp_store_release(). Update the comment to explicitly refer to the
+plist_del now under __unqueue_futex() (previously plist_del was in the
+same function as the comment).
+
+Signed-off-by: Darren Hart (VMware) <dvhart at infradead.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/20170414223138.GA4222@fury
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/futex.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1378,10 +1378,11 @@ static void mark_wake_futex(struct wake_
+ wake_q_add(wake_q, p);
+ __unqueue_futex(q);
+ /*
+- * The waiting task can free the futex_q as soon as
+- * q->lock_ptr = NULL is written, without taking any locks. A
+- * memory barrier is required here to prevent the following
+- * store to lock_ptr from getting ahead of the plist_del.
++ * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
++ * is written, without taking any locks. This is possible in the event
++ * of a spurious wakeup, for example. A memory barrier is required here
++ * to prevent the following store to lock_ptr from getting ahead of the
++ * plist_del in __unqueue_futex().
+ */
+ smp_store_release(&q->lock_ptr, NULL);
+ }
diff --git a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
index f54e56e..b957a33 100644
--- a/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
+++ b/debian/patches/features/all/rt/0003-futex-Remove-rt_mutex_deadlock_account_.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:50 +0100
Subject: [PATCH] futex: Remove rt_mutex_deadlock_account_*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit fffa954fb528963c2fb7b0c0084eb77e2be7ab52
diff --git a/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
new file mode 100644
index 0000000..641205f
--- /dev/null
+++ b/debian/patches/features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
@@ -0,0 +1,54 @@
+From: Xunlei Pang <xlpang at redhat.com>
+Date: Thu, 23 Mar 2017 15:56:09 +0100
+Subject: [PATCH] sched/deadline/rtmutex: Dont miss the
+ dl_runtime/dl_period update
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 85e2d4f992868ad78dc8bb2c077b652fcfb3661a
+
+Currently dl tasks will actually return at the very beginning
+of rt_mutex_adjust_prio_chain() in !detect_deadlock cases:
+
+ if (waiter->prio == task->prio) {
+ if (!detect_deadlock)
+ goto out_unlock_pi; // out here
+ else
+ requeue = false;
+ }
+
+As the deadline value of blocked deadline tasks(waiters) without
+changing their sched_class(thus prio doesn't change) never changes,
+this seems reasonable, but it actually misses the chance of updating
+rt_mutex_waiter's "dl_runtime(period)_copy" if a waiter updates its
+deadline parameters(dl_runtime, dl_period) or boosted waiter changes
+to !deadline class.
+
+Thus, force deadline task not out by adding the !dl_prio() condition.
+
+Signed-off-by: Xunlei Pang <xlpang at redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Acked-by: Steven Rostedt <rostedt at goodmis.org>
+Reviewed-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/1460633827-345-7-git-send-email-xlpang@redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.206577901@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -603,7 +603,7 @@ static int rt_mutex_adjust_prio_chain(st
+ * enabled we continue, but stop the requeueing in the chain
+ * walk.
+ */
+- if (waiter->prio == task->prio) {
++ if (waiter->prio == task->prio && !dl_task(task)) {
+ if (!detect_deadlock)
+ goto out_unlock_pi;
+ else
diff --git a/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
new file mode 100644
index 0000000..28e8f86
--- /dev/null
+++ b/debian/patches/features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
@@ -0,0 +1,50 @@
+From: "Darren Hart (VMware)" <dvhart at infradead.org>
+Date: Fri, 14 Apr 2017 15:46:08 -0700
+Subject: [PATCH] MAINTAINERS: Add FUTEX SUBSYSTEM
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 59cd42c29618c45cd3c56da43402b14f611888dd
+
+Add a MAINTAINERS block for the FUTEX SUBSYSTEM which includes the core
+kernel code, include headers, testing code, and Documentation. Excludes
+arch files, and higher level test code.
+
+I added tglx and mingo as M as they have made the tip commits and peterz
+and myself as R.
+
+Signed-off-by: Darren Hart (VMware) <dvhart at infradead.org>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Shuah Khan <shuah at kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme at kernel.org>
+Link: http://lkml.kernel.org/r/20170414224608.GA5180@fury
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ MAINTAINERS | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5196,6 +5196,23 @@ F: fs/fuse/
+ F: include/uapi/linux/fuse.h
+ F: Documentation/filesystems/fuse.txt
+
++FUTEX SUBSYSTEM
++M: Thomas Gleixner <tglx at linutronix.de>
++M: Ingo Molnar <mingo at redhat.com>
++R: Peter Zijlstra <peterz at infradead.org>
++R: Darren Hart <dvhart at infradead.org>
++L: linux-kernel at vger.kernel.org
++T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
++S: Maintained
++F: kernel/futex.c
++F: kernel/futex_compat.c
++F: include/asm-generic/futex.h
++F: include/linux/futex.h
++F: include/uapi/linux/futex.h
++F: tools/testing/selftests/futex/
++F: tools/perf/bench/futex*
++F: Documentation/*futex*
++
+ FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
+ M: Rik Faith <faith at cs.unc.edu>
+ L: linux-scsi at vger.kernel.org
diff --git a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
index 994c21e..d827d78 100644
--- a/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
+++ b/debian/patches/features/all/rt/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:51 +0100
Subject: [PATCH] futex,rt_mutex: Provide futex specific rt_mutex API
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 5293c2efda37775346885c7e924d4ef7018ea60b
diff --git a/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
new file mode 100644
index 0000000..df75a04
--- /dev/null
+++ b/debian/patches/features/all/rt/0004-rtmutex-Clean-up.patch
@@ -0,0 +1,147 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 23 Mar 2017 15:56:10 +0100
+Subject: [PATCH] rtmutex: Clean up
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit aa2bfe55366552cb7e93e8709d66e698d79ccc47
+
+Previous patches changed the meaning of the return value of
+rt_mutex_slowunlock(); update comments and code to reflect this.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: xlpang at redhat.com
+Cc: rostedt at goodmis.org
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.255058238@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/futex.c | 7 ++++---
+ kernel/locking/rtmutex.c | 28 +++++++++++++---------------
+ kernel/locking/rtmutex_common.h | 2 +-
+ 3 files changed, 18 insertions(+), 19 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1392,7 +1392,7 @@ static int wake_futex_pi(u32 __user *uad
+ {
+ u32 uninitialized_var(curval), newval;
+ struct task_struct *new_owner;
+- bool deboost = false;
++ bool postunlock = false;
+ WAKE_Q(wake_q);
+ int ret = 0;
+
+@@ -1453,12 +1453,13 @@ static int wake_futex_pi(u32 __user *uad
+ /*
+ * We've updated the uservalue, this unlock cannot fail.
+ */
+- deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+- rt_mutex_postunlock(&wake_q, deboost);
++ if (postunlock)
++ rt_mutex_postunlock(&wake_q);
+
+ return ret;
+ }
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1328,7 +1328,8 @@ static inline int rt_mutex_slowtrylock(s
+
+ /*
+ * Slow path to release a rt-mutex.
+- * Return whether the current task needs to undo a potential priority boosting.
++ *
++ * Return whether the current task needs to call rt_mutex_postunlock().
+ */
+ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ struct wake_q_head *wake_q)
+@@ -1399,8 +1400,7 @@ static bool __sched rt_mutex_slowunlock(
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+- /* check PI boosting */
+- return true;
++ return true; /* call rt_mutex_postunlock() */
+ }
+
+ /*
+@@ -1447,15 +1447,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+ }
+
+ /*
+- * Undo pi boosting (if necessary) and wake top waiter.
++ * Performs the wakeup of the the top-waiter and re-enables preemption.
+ */
+-void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
++void rt_mutex_postunlock(struct wake_q_head *wake_q)
+ {
+ wake_up_q(wake_q);
+
+ /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+- if (deboost)
+- preempt_enable();
++ preempt_enable();
+ }
+
+ static inline void
+@@ -1464,14 +1463,12 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+ struct wake_q_head *wqh))
+ {
+ WAKE_Q(wake_q);
+- bool deboost;
+
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+ return;
+
+- deboost = slowfn(lock, &wake_q);
+-
+- rt_mutex_postunlock(&wake_q, deboost);
++ if (slowfn(lock, &wake_q))
++ rt_mutex_postunlock(&wake_q);
+ }
+
+ /**
+@@ -1591,19 +1588,20 @@ bool __sched __rt_mutex_futex_unlock(str
+ */
+ preempt_disable();
+
+- return true; /* deboost and wakeups */
++ return true; /* call postunlock() */
+ }
+
+ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
+ {
+ WAKE_Q(wake_q);
+- bool deboost;
++ bool postunlock;
+
+ raw_spin_lock_irq(&lock->wait_lock);
+- deboost = __rt_mutex_futex_unlock(lock, &wake_q);
++ postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- rt_mutex_postunlock(&wake_q, deboost);
++ if (postunlock)
++ rt_mutex_postunlock(&wake_q);
+ }
+
+ /**
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct
+ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ struct wake_q_head *wqh);
+
+-extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
++extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
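And the resulting caller pattern after this cleanup, mirrored from the rt_mutex_futex_unlock() hunk above as a kernel-context sketch (not part of the commit):

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
        WAKE_Q(wake_q);
        bool postunlock;

        raw_spin_lock_irq(&lock->wait_lock);
        postunlock = __rt_mutex_futex_unlock(lock, &wake_q);   /* may preempt_disable() */
        raw_spin_unlock_irq(&lock->wait_lock);

        if (postunlock)
                rt_mutex_postunlock(&wake_q);   /* wake_up_q() + unconditional preempt_enable() */
}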
diff --git a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
index 30e4d6b..c750000 100644
--- a/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
+++ b/debian/patches/features/all/rt/0005-futex-Change-locking-rules.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:52 +0100
Subject: [PATCH] futex: Change locking rules
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 734009e96d1983ad739e5b656e03430b3660c913
diff --git a/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
new file mode 100644
index 0000000..c83a42b
--- /dev/null
+++ b/debian/patches/features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
@@ -0,0 +1,393 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 23 Mar 2017 15:56:11 +0100
+Subject: [PATCH] sched/rtmutex: Refactor rt_mutex_setprio()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit acd58620e415aee4a43a808d7d2fd87259ee0001
+
+With the introduction of SCHED_DEADLINE the whole notion that priority
+is a single number is gone, therefore the @prio argument to
+rt_mutex_setprio() doesn't make sense anymore.
+
+So rework the code to pass a pi_task instead.
+
+Note this also fixes a problem with pi_top_task caching; previously we
+would not set the pointer (call rt_mutex_update_top_task) if the
+priority didn't change, this could lead to a stale pointer.
+
+As for the XXX, I think its fine to use pi_task->prio, because if it
+differs from waiter->prio, a PI chain update is immenent.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: xlpang at redhat.com
+Cc: rostedt at goodmis.org
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.303827095@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/sched/rt.h | 24 +++-------
+ kernel/locking/rtmutex.c | 112 ++++++++++++-----------------------------------
+ kernel/sched/core.c | 66 ++++++++++++++++++++++-----
+ 3 files changed, 91 insertions(+), 111 deletions(-)
+
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -16,28 +16,20 @@ static inline int rt_task(struct task_st
+ }
+
+ #ifdef CONFIG_RT_MUTEXES
+-extern int rt_mutex_getprio(struct task_struct *p);
+-extern void rt_mutex_setprio(struct task_struct *p, int prio);
+-extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
+-extern void rt_mutex_update_top_task(struct task_struct *p);
+-extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
++/*
++ * Must hold either p->pi_lock or task_rq(p)->lock.
++ */
++static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
++{
++ return p->pi_top_task;
++}
++extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+ static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+ {
+ return tsk->pi_blocked_on != NULL;
+ }
+ #else
+-static inline int rt_mutex_getprio(struct task_struct *p)
+-{
+- return p->normal_prio;
+-}
+-
+-static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+- int newprio)
+-{
+- return newprio;
+-}
+-
+ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+ {
+ return NULL;
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -320,67 +320,16 @@ rt_mutex_dequeue_pi(struct task_struct *
+ RB_CLEAR_NODE(&waiter->pi_tree_entry);
+ }
+
+-/*
+- * Must hold both p->pi_lock and task_rq(p)->lock.
+- */
+-void rt_mutex_update_top_task(struct task_struct *p)
+-{
+- if (!task_has_pi_waiters(p)) {
+- p->pi_top_task = NULL;
+- return;
+- }
+-
+- p->pi_top_task = task_top_pi_waiter(p)->task;
+-}
+-
+-/*
+- * Calculate task priority from the waiter tree priority
+- *
+- * Return task->normal_prio when the waiter tree is empty or when
+- * the waiter is not allowed to do priority boosting
+- */
+-int rt_mutex_getprio(struct task_struct *task)
+-{
+- if (likely(!task_has_pi_waiters(task)))
+- return task->normal_prio;
+-
+- return min(task_top_pi_waiter(task)->prio,
+- task->normal_prio);
+-}
+-
+-/*
+- * Must hold either p->pi_lock or task_rq(p)->lock.
+- */
+-struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+-{
+- return task->pi_top_task;
+-}
+-
+-/*
+- * Called by sched_setscheduler() to get the priority which will be
+- * effective after the change.
+- */
+-int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
++static void rt_mutex_adjust_prio(struct task_struct *p)
+ {
+- struct task_struct *top_task = rt_mutex_get_top_task(task);
++ struct task_struct *pi_task = NULL;
+
+- if (!top_task)
+- return newprio;
++ lockdep_assert_held(&p->pi_lock);
+
+- return min(top_task->prio, newprio);
+-}
++ if (task_has_pi_waiters(p))
++ pi_task = task_top_pi_waiter(p)->task;
+
+-/*
+- * Adjust the priority of a task, after its pi_waiters got modified.
+- *
+- * This can be both boosting and unboosting. task->pi_lock must be held.
+- */
+-static void __rt_mutex_adjust_prio(struct task_struct *task)
+-{
+- int prio = rt_mutex_getprio(task);
+-
+- if (task->prio != prio || dl_prio(prio))
+- rt_mutex_setprio(task, prio);
++ rt_mutex_setprio(p, pi_task);
+ }
+
+ /*
+@@ -740,7 +689,7 @@ static int rt_mutex_adjust_prio_chain(st
+ */
+ rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
+ rt_mutex_enqueue_pi(task, waiter);
+- __rt_mutex_adjust_prio(task);
++ rt_mutex_adjust_prio(task);
+
+ } else if (prerequeue_top_waiter == waiter) {
+ /*
+@@ -756,7 +705,7 @@ static int rt_mutex_adjust_prio_chain(st
+ rt_mutex_dequeue_pi(task, waiter);
+ waiter = rt_mutex_top_waiter(lock);
+ rt_mutex_enqueue_pi(task, waiter);
+- __rt_mutex_adjust_prio(task);
++ rt_mutex_adjust_prio(task);
+ } else {
+ /*
+ * Nothing changed. No need to do any priority
+@@ -964,7 +913,7 @@ static int task_blocks_on_rt_mutex(struc
+ return -EDEADLK;
+
+ raw_spin_lock(&task->pi_lock);
+- __rt_mutex_adjust_prio(task);
++ rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+ waiter->prio = task->prio;
+@@ -986,7 +935,7 @@ static int task_blocks_on_rt_mutex(struc
+ rt_mutex_dequeue_pi(owner, top_waiter);
+ rt_mutex_enqueue_pi(owner, waiter);
+
+- __rt_mutex_adjust_prio(owner);
++ rt_mutex_adjust_prio(owner);
+ if (owner->pi_blocked_on)
+ chain_walk = 1;
+ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+@@ -1038,13 +987,14 @@ static void mark_wakeup_next_waiter(stru
+ waiter = rt_mutex_top_waiter(lock);
+
+ /*
+- * Remove it from current->pi_waiters. We do not adjust a
+- * possible priority boost right now. We execute wakeup in the
+- * boosted mode and go back to normal after releasing
+- * lock->wait_lock.
++ * Remove it from current->pi_waiters and deboost.
++ *
++ * We must in fact deboost here in order to ensure we call
++ * rt_mutex_setprio() to update p->pi_top_task before the
++ * task unblocks.
+ */
+ rt_mutex_dequeue_pi(current, waiter);
+- __rt_mutex_adjust_prio(current);
++ rt_mutex_adjust_prio(current);
+
+ /*
+ * As we are waking up the top waiter, and the waiter stays
+@@ -1056,9 +1006,19 @@ static void mark_wakeup_next_waiter(stru
+ */
+ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+
+- raw_spin_unlock(¤t->pi_lock);
+-
++ /*
++ * We deboosted before waking the top waiter task such that we don't
++ * run two tasks with the 'same' priority (and ensure the
++ * p->pi_top_task pointer points to a blocked task). This however can
++ * lead to priority inversion if we would get preempted after the
++ * deboost but before waking our donor task, hence the preempt_disable()
++ * before unlock.
++ *
++ * Pairs with preempt_enable() in rt_mutex_postunlock();
++ */
++ preempt_disable();
+ wake_q_add(wake_q, waiter->task);
++ raw_spin_unlock(¤t->pi_lock);
+ }
+
+ /*
+@@ -1093,7 +1053,7 @@ static void remove_waiter(struct rt_mute
+ if (rt_mutex_has_waiters(lock))
+ rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
+
+- __rt_mutex_adjust_prio(owner);
++ rt_mutex_adjust_prio(owner);
+
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
+@@ -1132,8 +1092,7 @@ void rt_mutex_adjust_pi(struct task_stru
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || (waiter->prio == task->prio &&
+- !dl_prio(task->prio))) {
++ if (!waiter || (waiter->prio == task->prio && !dl_prio(task->prio))) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+@@ -1387,17 +1346,6 @@ static bool __sched rt_mutex_slowunlock(
+ * Queue the next waiter for wakeup once we release the wait_lock.
+ */
+ mark_wakeup_next_waiter(wake_q, lock);
+-
+- /*
+- * We should deboost before waking the top waiter task such that
+- * we don't run two tasks with the 'same' priority. This however
+- * can lead to prio-inversion if we would get preempted after
+- * the deboost but before waking our high-prio task, hence the
+- * preempt_disable before unlock. Pairs with preempt_enable() in
+- * rt_mutex_postunlock();
+- */
+- preempt_disable();
+-
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ return true; /* call rt_mutex_postunlock() */
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3629,10 +3629,25 @@ EXPORT_SYMBOL(default_wake_function);
+
+ #ifdef CONFIG_RT_MUTEXES
+
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
+ /*
+ * rt_mutex_setprio - set the current priority of a task
+- * @p: task
+- * @prio: prio value (kernel-internal form)
++ * @p: task to boost
++ * @pi_task: donor task
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+@@ -3640,16 +3655,40 @@ EXPORT_SYMBOL(default_wake_function);
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
+ */
+-void rt_mutex_setprio(struct task_struct *p, int prio)
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+ {
+- int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
++ int prio, oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
+ const struct sched_class *prev_class;
+ struct rq_flags rf;
+ struct rq *rq;
+
+- BUG_ON(prio > MAX_PRIO);
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
++ return;
+
+ rq = __task_rq_lock(p, &rf);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is plenty of trickiness in making this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio; if that matches, we're done.
++ */
++ if (prio == p->prio && !dl_prio(prio))
++ goto out_unlock;
+
+ /*
+ * Idle task boosting is a nono in general. There is one
+@@ -3669,9 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+ goto out_unlock;
+ }
+
+- rt_mutex_update_top_task(p);
+-
+- trace_sched_pi_setprio(p, prio);
++ trace_sched_pi_setprio(p, prio); /* broken */
+ oldprio = p->prio;
+
+ if (oldprio == prio)
+@@ -3695,7 +3732,6 @@ void rt_mutex_setprio(struct task_struct
+ * running task
+ */
+ if (dl_prio(prio)) {
+- struct task_struct *pi_task = rt_mutex_get_top_task(p);
+ if (!dl_prio(p->normal_prio) ||
+ (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ p->dl.dl_boosted = 1;
+@@ -3732,6 +3768,11 @@ void rt_mutex_setprio(struct task_struct
+ balance_callback(rq);
+ preempt_enable();
+ }
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
+ #endif
+
+ void set_user_nice(struct task_struct *p, long nice)
+@@ -3976,10 +4017,9 @@ static void __setscheduler(struct rq *rq
+ * Keep a potential priority boosting if called from
+ * sched_setscheduler().
+ */
++ p->prio = normal_prio(p);
+ if (keep_boost)
+- p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+- else
+- p->prio = normal_prio(p);
++ p->prio = rt_effective_prio(p, p->prio);
+
+ if (dl_prio(p->prio))
+ p->sched_class = &dl_sched_class;
+@@ -4266,7 +4306,7 @@ static int __sched_setscheduler(struct t
+ * the runqueue. This will be done when the task deboost
+ * itself.
+ */
+- new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
++ new_effective_prio = rt_effective_prio(p, newprio);
+ if (new_effective_prio == oldprio)
+ queue_flags &= ~DEQUEUE_MOVE;
+ }
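
As an aside, the core of the rt_effective_prio()/__rt_effective_prio() pair
added above reduces to taking the minimum of a task's normal priority and its
donor's priority, where a lower numeric value means higher priority. A minimal
user-space sketch of that idea follows; the names and types are made up for
illustration and are not the kernel's API.

#include <stdio.h>

struct task { int prio; };

/* lower numeric value == higher priority, as in the kernel */
static int effective_prio(const struct task *donor, int normal_prio)
{
        if (donor && donor->prio < normal_prio)
                return donor->prio;
        return normal_prio;
}

int main(void)
{
        struct task donor = { .prio = 10 };

        printf("%d\n", effective_prio(&donor, 120)); /* 10: boosted by donor */
        printf("%d\n", effective_prio(NULL, 120));   /* 120: no donor, no boost */
        return 0;
}

Passing the donor task (or NULL) rather than a bare number is what lets the
same call site also cover SCHED_DEADLINE, where a single prio value is not
enough to describe the boost.
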
diff --git a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
index 7a17d7d..ddd37fa 100644
--- a/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
+++ b/debian/patches/features/all/rt/0006-futex-Cleanup-refcounting.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:53 +0100
Subject: [PATCH] futex: Cleanup refcounting
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit bf92cf3a5100f5a0d5f9834787b130159397cb22
diff --git a/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
new file mode 100644
index 0000000..7fd765b
--- /dev/null
+++ b/debian/patches/features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
@@ -0,0 +1,109 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 23 Mar 2017 15:56:12 +0100
+Subject: [PATCH] sched,tracing: Update trace_sched_pi_setprio()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit b91473ff6e979c0028f02f90e40c844959c736d8
+
+Pass the PI donor task, instead of a numerical priority.
+
+Numerical priorities are not sufficient to describe state ever since
+SCHED_DEADLINE.
+
+Annotate all sched tracepoints that are currently broken; fixing them
+will bork userspace. *hate*.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: xlpang at redhat.com
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.353599881@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/trace/events/sched.h | 16 +++++++++-------
+ kernel/sched/core.c | 2 +-
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_templat
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+- __entry->prio = p->prio;
++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ __entry->success = 1; /* rudiment, kill when possible */
+ __entry->target_cpu = task_cpu(p);
+ ),
+@@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch,
+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ __entry->next_pid = next->pid;
+ __entry->next_prio = next->prio;
++ /* XXX SCHED_DEADLINE */
+ ),
+
+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
+@@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task,
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+- __entry->prio = p->prio;
++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ __entry->orig_cpu = task_cpu(p);
+ __entry->dest_cpu = dest_cpu;
+ ),
+@@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_templa
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+- __entry->prio = p->prio;
++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d",
+@@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait,
+ TP_fast_assign(
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __entry->pid = pid_nr(pid);
+- __entry->prio = current->prio;
++ __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d",
+@@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_s
+ */
+ TRACE_EVENT(sched_pi_setprio,
+
+- TP_PROTO(struct task_struct *tsk, int newprio),
++ TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
+
+- TP_ARGS(tsk, newprio),
++ TP_ARGS(tsk, pi_task),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+@@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio,
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->oldprio = tsk->prio;
+- __entry->newprio = newprio;
++ __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
++ /* XXX SCHED_DEADLINE bits missing */
+ ),
+
+ TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3708,7 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+ goto out_unlock;
+ }
+
+- trace_sched_pi_setprio(p, prio); /* broken */
++ trace_sched_pi_setprio(p, pi_task);
+ oldprio = p->prio;
+
+ if (oldprio == prio)
diff --git a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
index a9ad928..a305e4c 100644
--- a/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
+++ b/debian/patches/features/all/rt/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:54 +0100
Subject: [PATCH] futex: Rework inconsistent rt_mutex/futex_q state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 73d786bd043ebc855f349c81ea805f6b11cbf2aa
diff --git a/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
new file mode 100644
index 0000000..445e9df
--- /dev/null
+++ b/debian/patches/features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
@@ -0,0 +1,122 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 23 Mar 2017 15:56:13 +0100
+Subject: [PATCH] rtmutex: Fix PI chain order integrity
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc
+
+rt_mutex_waiter::prio is a copy of task_struct::prio which is updated
+during the PI chain walk, such that the PI chain order isn't messed up
+by (asynchronous) task state updates.
+
+Currently rt_mutex_waiter_less() uses task state for deadline tasks;
+this is broken, since the task state can, as said above, change
+asynchronously, causing the RB tree order to change without actual
+tree update -> FAIL.
+
+Fix this by also copying the deadline into the rt_mutex_waiter state
+and updating it along with its prio field.
+
+Ideally we would also force PI chain updates whenever DL tasks update
+their deadline parameter, but for first approximation this is less
+broken than it was.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: xlpang at redhat.com
+Cc: rostedt at goodmis.org
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.403992539@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 29 +++++++++++++++++++++++++++--
+ kernel/locking/rtmutex_common.h | 1 +
+ 2 files changed, 28 insertions(+), 2 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -236,8 +236,7 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+ * then right waiter has a dl_prio() too.
+ */
+ if (dl_prio(left->prio))
+- return dl_time_before(left->task->dl.deadline,
+- right->task->dl.deadline);
++ return dl_time_before(left->deadline, right->deadline);
+
+ return 0;
+ }
+@@ -648,7 +647,26 @@ static int rt_mutex_adjust_prio_chain(st
+
+ /* [7] Requeue the waiter in the lock waiter tree. */
+ rt_mutex_dequeue(lock, waiter);
++
++ /*
++ * Update the waiter prio fields now that we're dequeued.
++ *
++ * These values can have changed through either:
++ *
++ * sys_sched_set_scheduler() / sys_sched_setattr()
++ *
++ * or
++ *
++ * DL CBS enforcement advancing the effective deadline.
++ *
++ * Even though pi_waiters also uses these fields, and that tree is only
++ * updated in [11], we can do this here, since we hold [L], which
++ * serializes all pi_waiters access and rb_erase() does not care about
++ * the values of the node being removed.
++ */
+ waiter->prio = task->prio;
++ waiter->deadline = task->dl.deadline;
++
+ rt_mutex_enqueue(lock, waiter);
+
+ /* [8] Release the task */
+@@ -775,6 +793,8 @@ static int rt_mutex_adjust_prio_chain(st
+ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+ {
++ lockdep_assert_held(&lock->wait_lock);
++
+ /*
+ * Before testing whether we can acquire @lock, we set the
+ * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+@@ -900,6 +920,8 @@ static int task_blocks_on_rt_mutex(struc
+ struct rt_mutex *next_lock;
+ int chain_walk = 0, res;
+
++ lockdep_assert_held(&lock->wait_lock);
++
+ /*
+ * Early deadlock detection. We really don't want the task to
+ * enqueue on itself just to untangle the mess later. It's not
+@@ -917,6 +939,7 @@ static int task_blocks_on_rt_mutex(struc
+ waiter->task = task;
+ waiter->lock = lock;
+ waiter->prio = task->prio;
++ waiter->deadline = task->dl.deadline;
+
+ /* Get the top priority waiter on the lock */
+ if (rt_mutex_has_waiters(lock))
+@@ -1034,6 +1057,8 @@ static void remove_waiter(struct rt_mute
+ struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex *next_lock;
+
++ lockdep_assert_held(&lock->wait_lock);
++
+ raw_spin_lock(¤t->pi_lock);
+ rt_mutex_dequeue(lock, waiter);
+ current->pi_blocked_on = NULL;
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -33,6 +33,7 @@ struct rt_mutex_waiter {
+ struct rt_mutex *deadlock_lock;
+ #endif
+ int prio;
++ u64 deadline;
+ };
+
+ /*
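
Side note on the comparison used above: the waiter now carries its own copy of
the deadline, and rt_mutex_waiter_less() compares those copies with
dl_time_before(), which is, as far as I can tell, the usual wrap-safe
signed-difference comparison of u64 timestamps. A small standalone sketch of
that comparison style, assuming that idiom (the helper name here is invented):

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a is before b" for monotonically increasing 64-bit stamps */
static inline int before64(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        uint64_t near_wrap = UINT64_MAX - 5;

        /* still gives the right answer across the wrap-around */
        printf("%d\n", before64(near_wrap, near_wrap + 10)); /* 1 */
        printf("%d\n", before64(near_wrap + 10, near_wrap)); /* 0 */
        return 0;
}
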
diff --git a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
index 7c89ea2..8eab01d 100644
--- a/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
+++ b/debian/patches/features/all/rt/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:55 +0100
Subject: [PATCH] futex: Pull rt_mutex_futex_unlock() out from under hb->lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 16ffa12d742534d4ff73e8b3a4e81c1de39196f0
diff --git a/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
new file mode 100644
index 0000000..4cdcd6d
--- /dev/null
+++ b/debian/patches/features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
@@ -0,0 +1,102 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Thu, 23 Mar 2017 15:56:14 +0100
+Subject: [PATCH] rtmutex: Fix more prio comparisons
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit 19830e55247cddb3f46f1bf60b8e245593491bea
+
+There was a pure ->prio comparison left in try_to_take_rt_mutex();
+convert it to use rt_mutex_waiter_less(), noting that greater-or-equal
+is not-less (both in the kernel's priority view).
+
+This necessitated the introduction of task_to_waiter(), which creates a
+pointer to an unnamed stack variable of struct rt_mutex_waiter type to
+compare against tasks.
+
+With this, we can now also create and employ rt_mutex_waiter_equal().
+
+Reviewed-and-tested-by: Juri Lelli <juri.lelli at arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Reviewed-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: juri.lelli at arm.com
+Cc: bigeasy at linutronix.de
+Cc: xlpang at redhat.com
+Cc: rostedt at goodmis.org
+Cc: mathieu.desnoyers at efficios.com
+Cc: jdesfossez at efficios.com
+Cc: bristot at redhat.com
+Link: http://lkml.kernel.org/r/20170323150216.455584638@infradead.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 32 +++++++++++++++++++++++++++++---
+ 1 file changed, 29 insertions(+), 3 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -222,6 +222,12 @@ static inline bool unlock_rt_mutex_safe(
+ }
+ #endif
+
++/*
++ * Only use with rt_mutex_waiter_{less,equal}()
++ */
++#define task_to_waiter(p) \
++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ struct rt_mutex_waiter *right)
+@@ -241,6 +247,25 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+ return 0;
+ }
+
++static inline int
++rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
++ struct rt_mutex_waiter *right)
++{
++ if (left->prio != right->prio)
++ return 0;
++
++ /*
++ * If both waiters have dl_prio(), we check the deadlines of the
++ * associated tasks.
++ * If left waiter has a dl_prio(), and we didn't return 0 above,
++ * then right waiter has a dl_prio() too.
++ */
++ if (dl_prio(left->prio))
++ return left->deadline == right->deadline;
++
++ return 1;
++}
++
+ static void
+ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+ {
+@@ -551,7 +576,7 @@ static int rt_mutex_adjust_prio_chain(st
+ * enabled we continue, but stop the requeueing in the chain
+ * walk.
+ */
+- if (waiter->prio == task->prio && !dl_task(task)) {
++ if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ if (!detect_deadlock)
+ goto out_unlock_pi;
+ else
+@@ -854,7 +879,8 @@ static int try_to_take_rt_mutex(struct r
+ * the top waiter priority (kernel view),
+ * @task lost.
+ */
+- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
++ if (!rt_mutex_waiter_less(task_to_waiter(task),
++ rt_mutex_top_waiter(lock)))
+ return 0;
+
+ /*
+@@ -1117,7 +1143,7 @@ void rt_mutex_adjust_pi(struct task_stru
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || (waiter->prio == task->prio && !dl_prio(task->prio))) {
++ if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
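
The task_to_waiter() macro introduced above relies on a C99 compound literal:
it yields a pointer to an unnamed, stack-allocated rt_mutex_waiter, so a plain
task can be fed through the waiter comparison helpers. A standalone sketch of
that idiom with simplified, made-up types (not the kernel structures):

#include <stdio.h>

struct waiter { int prio; unsigned long long deadline; };
struct task   { int prio; unsigned long long deadline; };

/*
 * Pointer to an unnamed stack object (C99 compound literal); it lives
 * until the end of the enclosing block, so passing it straight into a
 * comparison helper is fine.
 */
#define task_to_waiter(t) \
        (&(struct waiter){ .prio = (t)->prio, .deadline = (t)->deadline })

static int waiter_less(const struct waiter *l, const struct waiter *r)
{
        return l->prio < r->prio; /* lower number == higher priority */
}

int main(void)
{
        struct task   t   = { .prio = 10, .deadline = 0 };
        struct waiter top = { .prio = 50, .deadline = 0 };

        printf("%d\n", waiter_less(task_to_waiter(&t), &top)); /* 1 */
        return 0;
}
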
diff --git a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
index 565f3c3..0a12332 100644
--- a/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
+++ b/debian/patches/features/all/rt/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:56 +0100
Subject: [PATCH] futex,rt_mutex: Introduce rt_mutex_init_waiter()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 50809358dd7199aa7ce232f6877dd09ec30ef374
diff --git a/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
new file mode 100644
index 0000000..91f7ae7
--- /dev/null
+++ b/debian/patches/features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
@@ -0,0 +1,43 @@
+From: Mike Galbraith <efault at gmx.de>
+Date: Wed, 5 Apr 2017 10:08:27 +0200
+Subject: [PATCH] rtmutex: Plug preempt count leak in
+ rt_mutex_futex_unlock()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Upstream commit def34eaae5ce04b324e48e1bfac873091d945213
+
+mark_wakeup_next_waiter() already disables preemption; doing so again
+leaves us with an unpaired preempt_disable().
+
+Fixes: 2a1c60299406 ("rtmutex: Deboost before waking up the top waiter")
+Signed-off-by: Mike Galbraith <efault at gmx.de>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: xlpang at redhat.com
+Cc: rostedt at goodmis.org
+Link: http://lkml.kernel.org/r/1491379707.6538.2.camel@gmx.de
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1579,13 +1579,13 @@ bool __sched __rt_mutex_futex_unlock(str
+ return false; /* done */
+ }
+
+- mark_wakeup_next_waiter(wake_q, lock);
+ /*
+- * We've already deboosted, retain preempt_disabled when dropping
+- * the wait_lock to avoid inversion until the wakeup. Matched
+- * by rt_mutex_postunlock();
++ * We've already deboosted, mark_wakeup_next_waiter() will
++ * retain preempt_disabled when we drop the wait_lock, to
++ * avoid inversion prior to the wakeup. preempt_disable()
++ * therein pairs with rt_mutex_postunlock().
+ */
+- preempt_disable();
++ mark_wakeup_next_waiter(wake_q, lock);
+
+ return true; /* call postunlock() */
+ }
diff --git a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
index 4c20d4f..8581b4a 100644
--- a/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
+++ b/debian/patches/features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:57 +0100
Subject: [PATCH] futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 38d589f2fd08f1296aea3ce62bebd185125c6d81
diff --git a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
index df88438..4ba2d46 100644
--- a/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
+++ b/debian/patches/features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:58 +0100
Subject: [PATCH] futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit cfafcd117da0216520568c195cb2f6cd1980c4bb
diff --git a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
index 3012845..b2a8bf3 100644
--- a/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
+++ b/debian/patches/features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:35:59 +0100
Subject: [PATCH] futex: Futex_unlock_pi() determinism
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit bebe5b514345f09be2c15e414d076b02ecb9cce8
diff --git a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
index 018a737..1ea9960 100644
--- a/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
+++ b/debian/patches/features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 22 Mar 2017 11:36:00 +0100
Subject: [PATCH] futex: Drop hb->lock before enqueueing on the rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit 56222b212e8edb1cf51f5dd73ff645809b082b40
diff --git a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index 8731f1d..fa9d875 100644
--- a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
From: "Yadi.hu" <yadi.hu at windriver.com>
Date: Wed, 10 Dec 2014 10:32:09 +0800
Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Probably happens on all ARM, with
CONFIG_PREEMPT_RT_FULL
diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
index ac90ae3..f9b3eff 100644
--- a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 21 Mar 2013 19:01:05 +0100
Subject: printk: Drop the logbuf_lock more often
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The lock is hold with irgs off. The latency drops 500us+ on my arm bugs
with a "full" buffer after executing "dmesg" on the shell.
diff --git a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 8fff88f..6fc13de 100644
--- a/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches/features/all/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc at ni.com>
Date: Thu, 11 Feb 2016 11:54:01 -0600
Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
the vgic and timer states to prevent the calling task from migrating to
diff --git a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index c350247..bda8d1c 100644
--- a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -1,7 +1,7 @@
From: Marcelo Tosatti <mtosatti at redhat.com>
Date: Wed, 8 Apr 2015 20:33:25 -0300
Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since lapic timer handler only wakes up a simple waitqueue,
it can be executed from hardirq context.
diff --git a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 4fbc69e..f65cf1d 100644
--- a/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/debian/patches/features/all/rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -5,7 +5,7 @@ Cc: Anna Schumaker <anna.schumaker at netapp.com>,
linux-nfs at vger.kernel.org, linux-kernel at vger.kernel.org,
tglx at linutronix.de
Subject: NFSv4: replace seqcount_t with a seqlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
because it maps to preempt_disable() in -RT which I can't have at this
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2698,7 +2698,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2736,7 +2736,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
diff --git a/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
new file mode 100644
index 0000000..ed7274e
--- /dev/null
+++ b/debian/patches/features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
@@ -0,0 +1,218 @@
+From 16145f9c01a2e671aceb731050de9fbf977d31d0 Mon Sep 17 00:00:00 2001
+From: Anna-Maria Gleixner <anna-maria at linutronix.de>
+Date: Fri, 26 May 2017 19:16:07 +0200
+Subject: [PATCH] Revert "timers: Don't wake ktimersoftd on every tick"
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+This reverts commit 032f93cae150a ("timers: Don't wake ktimersoftd on
+every tick").
+
+The problem is that the look-ahead optimization from the tick timer
+interrupt context can race with the softirq thread expiring timers. As
+a consequence the temporary hlist heads which hold the timers due to
+expire are overwritten, and the timers which have already been removed
+from the wheel bucket for expiry are now dangling without a list head.
+
+That means those timers never get expired. If one of those timers is
+canceled, the removal operation results in hlist corruption.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria at linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/time/timer.c | 96 +++++++++++++++-------------------------------------
+ 1 file changed, 29 insertions(+), 67 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -206,8 +206,6 @@ struct timer_base {
+ bool is_idle;
+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+ struct hlist_head vectors[WHEEL_SIZE];
+- struct hlist_head expired_lists[LVL_DEPTH];
+- int expired_count;
+ } ____cacheline_aligned;
+
+ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+@@ -1355,8 +1353,7 @@ static void call_timer_fn(struct timer_l
+ }
+ }
+
+-static inline void __expire_timers(struct timer_base *base,
+- struct hlist_head *head)
++static void expire_timers(struct timer_base *base, struct hlist_head *head)
+ {
+ while (!hlist_empty(head)) {
+ struct timer_list *timer;
+@@ -1387,38 +1384,21 @@ static inline void __expire_timers(struc
+ }
+ }
+
+-static void expire_timers(struct timer_base *base)
+-{
+- struct hlist_head *head;
+-
+- while (base->expired_count--) {
+- head = base->expired_lists + base->expired_count;
+- __expire_timers(base, head);
+- }
+- base->expired_count = 0;
+-}
+-
+-static void __collect_expired_timers(struct timer_base *base)
++static int __collect_expired_timers(struct timer_base *base,
++ struct hlist_head *heads)
+ {
+ unsigned long clk = base->clk;
+ struct hlist_head *vec;
+- int i;
++ int i, levels = 0;
+ unsigned int idx;
+
+- /*
+- * expire_timers() must be called at least once before we can
+- * collect more timers
+- */
+- if (WARN_ON(base->expired_count))
+- return;
+-
+ for (i = 0; i < LVL_DEPTH; i++) {
+ idx = (clk & LVL_MASK) + i * LVL_SIZE;
+
+ if (__test_and_clear_bit(idx, base->pending_map)) {
+ vec = base->vectors + idx;
+- hlist_move_list(vec,
+- &base->expired_lists[base->expired_count++]);
++ hlist_move_list(vec, heads++);
++ levels++;
+ }
+ /* Is it time to look at the next level? */
+ if (clk & LVL_CLK_MASK)
+@@ -1426,6 +1406,7 @@ static void __collect_expired_timers(str
+ /* Shift clock for the next level granularity */
+ clk >>= LVL_CLK_SHIFT;
+ }
++ return levels;
+ }
+
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -1618,7 +1599,8 @@ void timer_clear_idle(void)
+ base->is_idle = false;
+ }
+
+-static void collect_expired_timers(struct timer_base *base)
++static int collect_expired_timers(struct timer_base *base,
++ struct hlist_head *heads)
+ {
+ /*
+ * NOHZ optimization. After a long idle sleep we need to forward the
+@@ -1635,49 +1617,20 @@ static void collect_expired_timers(struc
+ if (time_after(next, jiffies)) {
+ /* The call site will increment clock! */
+ base->clk = jiffies - 1;
+- return;
++ return 0;
+ }
+ base->clk = next;
+ }
+- __collect_expired_timers(base);
++ return __collect_expired_timers(base, heads);
+ }
+ #else
+-static inline void collect_expired_timers(struct timer_base *base)
++static inline int collect_expired_timers(struct timer_base *base,
++ struct hlist_head *heads)
+ {
+- __collect_expired_timers(base);
++ return __collect_expired_timers(base, heads);
+ }
+ #endif
+
+-static int find_expired_timers(struct timer_base *base)
+-{
+- const unsigned long int end_clk = jiffies;
+-
+- while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
+- collect_expired_timers(base);
+- base->clk++;
+- }
+-
+- return base->expired_count;
+-}
+-
+-/* Called from CPU tick routine to quickly collect expired timers */
+-static int tick_find_expired(struct timer_base *base)
+-{
+- int count;
+-
+- raw_spin_lock(&base->lock);
+-
+- if (unlikely(time_after(jiffies, base->clk + HZ))) {
+- /* defer to ktimersoftd; don't spend too long in irq context */
+- count = -1;
+- } else
+- count = find_expired_timers(base);
+-
+- raw_spin_unlock(&base->lock);
+-
+- return count;
+-}
+-
+ /*
+ * Called from the timer interrupt handler to charge one tick to the current
+ * process. user_tick is 1 if the tick is user time, 0 for system.
+@@ -1704,11 +1657,22 @@ void update_process_times(int user_tick)
+ */
+ static inline void __run_timers(struct timer_base *base)
+ {
++ struct hlist_head heads[LVL_DEPTH];
++ int levels;
++
++ if (!time_after_eq(jiffies, base->clk))
++ return;
++
+ raw_spin_lock_irq(&base->lock);
+
+- while (find_expired_timers(base))
+- expire_timers(base);
++ while (time_after_eq(jiffies, base->clk)) {
++
++ levels = collect_expired_timers(base, heads);
++ base->clk++;
+
++ while (levels--)
++ expire_timers(base, heads + levels);
++ }
+ raw_spin_unlock_irq(&base->lock);
+ wakeup_timer_waiters(base);
+ }
+@@ -1734,12 +1698,12 @@ void run_local_timers(void)
+
+ hrtimer_run_queues();
+ /* Raise the softirq only if required. */
+- if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
++ if (time_before(jiffies, base->clk)) {
+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ return;
+ /* CPU is awake, so check the deferrable base. */
+ base++;
+- if (time_before(jiffies, base->clk) || !tick_find_expired(base))
++ if (time_before(jiffies, base->clk))
+ return;
+ }
+ raise_softirq(TIMER_SOFTIRQ);
+@@ -1909,7 +1873,6 @@ int timers_dead_cpu(unsigned int cpu)
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ BUG_ON(old_base->running_timer);
+- BUG_ON(old_base->expired_count);
+
+ for (i = 0; i < WHEEL_SIZE; i++)
+ migrate_timer_list(new_base, old_base->vectors + i);
+@@ -1936,7 +1899,6 @@ static void __init init_timer_cpu(int cp
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ init_swait_queue_head(&base->wait_for_running_timer);
+ #endif
+- base->expired_count = 0;
+ }
+ }
+
diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index d2b5111..6197f22 100644
--- a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Wed, 13 Feb 2013 09:26:05 -0500
Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We hit the following bug with 3.6-rt:
diff --git a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
index 497b218..65b6899 100644
--- a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
From: Anders Roxell <anders.roxell at linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 33aecf1..9c0e338 100644
--- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -1,7 +1,7 @@
From: Benedikt Spranger <b.spranger at linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Setup and remove the interrupt handler in clock event mode selection.
This avoids calling the (shared) interrupt handler when the device is
diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
index 33fb342..48369dc 100644
--- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 1 May 2010 18:29:35 +0200
Subject: ARM: at91: tclib: Default to tclib timer for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT is not too happy about the shared timer interrupt in AT91
devices. Default to tclib timer for RT.
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
index 5911e54..bc89fdf 100644
--- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
+++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
@@ -1,7 +1,7 @@
From: Frank Rowand <frank.rowand at am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: arm: Convert arm boot_lock to raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
index 275f14a..2cfebeb 100644
--- a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
Subject: arm: Enable highmem for rt
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
fixup highmem for ARM.
diff --git a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
index dd3bb5b..33ee4a4 100644
--- a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The tlb should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look
diff --git a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
index 8734cb9..e451b25 100644
--- a/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
+++ b/debian/patches/features/all/rt/arm-include-definition-for-cpumask_t.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 22 Dec 2016 17:28:33 +0100
Subject: [PATCH] arm: include definition for cpumask_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This definition gets pulled in by other files. With the (later) split of
RCU and spinlock.h it won't compile anymore.
diff --git a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
index 83af921..409fed9 100644
--- a/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ b/debian/patches/features/all/rt/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi at linaro.org>
Date: Thu, 10 Nov 2016 16:17:55 -0800
Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When running kprobe on -rt kernel, the below bug is caught:
diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
index 89c32ce..ed8ea0b 100644
--- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Implement the arm pieces for lazy preempt.
diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
index 72b6230..47b8b1d 100644
--- a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
+++ b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 20 Sep 2013 14:31:54 +0200
Subject: arm/unwind: use a raw_spin_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Mostly unwind is done with irqs enabled however SLUB may call it with
irqs disabled while creating a new SLUB cache.
diff --git a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 1586f23..0ff187b 100644
--- a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -1,7 +1,7 @@
Subject: arm64/xen: Make XEN depend on !RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 12 Oct 2015 11:18:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
It's not ready and probably never will be, unless xen folks have a
look at it.
diff --git a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
index 090a164..b079c50 100644
--- a/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
+++ b/debian/patches/features/all/rt/at91_dont_enable_disable_clock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 09 Mar 2016 10:51:06 +0100
Subject: arm: at91: do not disable/enable clocks in a row
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Currently the driver will disable the clock and enable it one line later
if it is switching from periodic mode into one shot.
diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
index 1c54718..5c38ffe 100644
--- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
+++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <srostedt at redhat.com>
Date: Fri, 3 Jul 2009 08:44:29 -0500
Subject: ata: Do not disable interrupts in ide code for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the local_irq_*_nort variants.
diff --git a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
index 8b8ada2..91e601a 100644
--- a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
+++ b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 13 Feb 2015 11:01:26 +0100
Subject: block: blk-mq: Use swait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
diff --git a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
index b4a53ca..072355d 100644
--- a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: block/mq: don't complete requests via IPI
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The IPI runs in hardirq context and there are sleeping locks. This patch
moves the completion into a workqueue.
diff --git a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
index 78919a7..7add8b9 100644
--- a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
preempt_disable() and get_cpu() don't play well together with the sleeping
locks it tries to allocate later.
diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
index e239fd5..eb4d61d 100644
--- a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
+++ b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: block: mq: use cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
there is a might sleep splat because get_cpu() disables preemption and
later we grab a lock. As a workaround for this we use get_cpu_light().
diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
index 2f4b575..77e4894 100644
--- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
@@ -1,7 +1,7 @@
Subject: block: Shorten interrupt disabled regions
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
@@ -48,7 +48,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3177,7 +3177,7 @@ static void queue_unplugged(struct reque
+@@ -3200,7 +3200,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -57,7 +57,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3225,7 +3225,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3248,7 +3248,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -65,7 +65,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3245,11 +3244,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3268,11 +3267,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -77,7 +77,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3262,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3285,7 +3279,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -86,7 +86,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3289,8 +3283,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3312,8 +3306,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch
index b8f0241..87b029a 100644
--- a/debian/patches/features/all/rt/block-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: block: Use cpu_chill() for retry loops
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 20 Dec 2012 18:28:26 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Steven also observed a live lock when there was a
diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
index 3373e94..91824ed 100644
--- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
+++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: bug: BUG_ON/WARN_ON variants dependent on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code.
diff --git a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index 341a02c..4415145 100644
--- a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sat, 21 Jun 2014 10:09:48 +0200
Subject: memcontrol: Prevent scheduling while atomic in cgroup code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
mm, memcg: make refill_stock() use get_cpu_light()
diff --git a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
index 015e3f0..5d19911 100644
--- a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
+++ b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 13 Feb 2015 15:52:24 +0100
Subject: cgroups: use simple wait in css_release()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
To avoid:
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -5040,10 +5040,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -5086,8 +5086,8 @@ static void css_release(struct percpu_re
+@@ -5087,8 +5087,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5739,6 +5739,7 @@ static int __init cgroup_wq_init(void)
+@@ -5740,6 +5740,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
index 487e27f..4ec8809 100644
--- a/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
+++ b/debian/patches/features/all/rt/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
@@ -1,7 +1,7 @@
From: Alexandre Belloni <alexandre.belloni at free-electrons.com>
Date: Thu, 17 Mar 2016 21:09:43 +0100
Subject: [PATCH] clockevents/drivers/timer-atmel-pit: fix double free_irq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
clockevents_exchange_device() changes the state from detached to shutdown
and so at that point the IRQ has not yet been requested.
diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
index a77517f..419cab1 100644
--- a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
+++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
@@ -1,7 +1,7 @@
From: Benedikt Spranger <b.spranger at linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
By default the TCLIB uses the 32KiHz base clock rate for clock events.
Add a compile-time selection to allow higher clock resolution.
diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
index 7bc038e..1f7fe06 100644
--- a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
+++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
@@ -1,7 +1,7 @@
Subject: completion: Use simple wait queues
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues which reduces the
diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
index 137bd69..b02e938 100644
--- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT does not increment preempt count when a 'sleeping' spinlock is
locked. Update PREEMPT_LOCK_OFFSET for that case.
diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
index 5b68522..2329dde 100644
--- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
+++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
@@ -1,7 +1,7 @@
Subject: sched: Take RT softirq semantics into account in cond_resched()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 14 Jul 2011 09:56:44 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The softirq semantics work differently on -RT. There is no SOFTIRQ_MASK in
the preemption counter which leads to the BUG_ON() statement in
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3367,12 +3367,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -3373,12 +3373,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5050,6 +5050,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5092,6 +5092,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -5063,6 +5064,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5105,6 +5106,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
index 6eb7369..f11c316 100644
--- a/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
+++ b/debian/patches/features/all/rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sun, 16 Oct 2016 05:11:54 +0200
Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock
on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
|in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep
diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index e3abc90..73d6865 100644
--- a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Thu, 5 Dec 2013 09:16:52 -0500
Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The patch:
diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index fba50d2..8253bae 100644
--- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -1,7 +1,7 @@
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt at goodmis.org>
Date: Fri, 02 Mar 2012 10:36:57 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
index 01227e9..0c6304e 100644
--- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
+++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <srostedt at redhat.com>
Date: Mon, 16 Jul 2012 08:07:43 +0000
Subject: cpu/rt: Rework cpu down for PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
tasks can be preempted in many more places than in non-RT. In
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2474,6 +2474,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2480,6 +2480,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2486,6 +2490,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2492,6 +2496,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 9c36a65..a4c164f 100644
--- a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Tue, 4 Mar 2014 12:28:32 -0500
Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We hit another bug that was caused by switching cpu_chill() from
msleep() to hrtimer_nanosleep().
diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
index 8efdc1e..804d04c 100644
--- a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
+++ b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
@@ -1,7 +1,7 @@
From: Tiejun Chen <tiejun.chen at windriver.com>
Subject: cpu_down: move migrate_enable() back
Date: Thu, 7 Nov 2013 10:06:07 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
use migrate_enable()/migrate_disable() to replace that combination
diff --git a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index bd9a872..8f7ced4 100644
--- a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 9 Apr 2015 15:23:01 +0200
Subject: cpufreq: drop K8's driver from being selected
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Ralf posted a picture of a backtrace from
diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
index 46d5d35..bf3e8f3 100644
--- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
+++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
@@ -1,7 +1,7 @@
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There are "valid" GFP_ATOMIC allocations such as
diff --git a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 0b93ec5..54dcfcd 100644
--- a/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/debian/patches/features/all/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <efault at gmx.de>
Date: Sun, 8 Jan 2017 09:32:25 +0100
Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The two commits below add up to a cpuset might_sleep() splat for RT:
diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 0729422..59a7d86 100644
--- a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch
index c108ecf..f507ccc 100644
--- a/debian/patches/features/all/rt/debugobjects-rt.patch
+++ b/debian/patches/features/all/rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
Subject: debugobjects: Make RT aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Avoid filling the pool / allocating memory with irqs off.
diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch
index 19c61ba..88ba319 100644
--- a/debian/patches/features/all/rt/dm-make-rt-aware.patch
+++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch
@@ -1,7 +1,7 @@
Subject: dm: Make rt aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 14 Nov 2011 23:06:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
interrupts legitimately enabled here as we can't deadlock against the
diff --git a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 738302f..f373ca8 100644
--- a/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches/features/all/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,14 +2,13 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.
Signed-off-by: Mike Galbraith <umgwanakikbuti at gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-[bwh: Adjust context for 4.9.24]
---
drivers/block/zram/zram_drv.c | 30 ++++++++++++++++--------------
drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
index b9eb2b4..4e24741 100644
--- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
+++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
Subject: drivers/net: Use disable_irq_nosync() in 8139too
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll.
diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
index 241ca91..0aabfbf 100644
--- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
+++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Fri, 3 Jul 2009 08:30:00 -0500
Subject: drivers/net: vortex fix locking issues
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Argh, cut and paste wasn't enough...
diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
index 481a836..15c6002 100644
--- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
+++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
Subject: drivers: random: Reduce preempt disabled region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
No need to keep preemption disabled across the whole function.
diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
index f30ac9b..126dfcf 100644
--- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/omap: Make the locking RT aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The lock is a sleeping lock and local_irq_save() is not the
optimisation we are looking for. Redo it to make it work on -RT and
diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
index 77a3c6f..4b2b43f 100644
--- a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/pl011: Make the locking work on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The lock is a sleeping lock and local_irq_save() is not the optimisation
we are looking for. Redo it to make it work on -RT and non-RT.
diff --git a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index d1dec6a..c204cc8 100644
--- a/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/debian/patches/features/all/rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Thu, 20 Oct 2016 11:15:22 +0200
Subject: [PATCH] drivers/zram: Don't disable preemption in
zcomp_stream_get/put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
In v4.7, the driver switched to percpu compression streams, disabling
preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
@@ -13,7 +13,6 @@ zram_bvec_write().
Signed-off-by: Mike Galbraith <umgwanakikbuti at gmail.com>
[bigeasy: get_locked_var() -> per zcomp_strm lock]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-[bwh: Adjust context for 4.9.24]
---
drivers/block/zram/zcomp.c | 12 ++++++++++--
drivers/block/zram/zcomp.h | 1 +
diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index 5085981..d44bcc2 100644
--- a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 25 Apr 2013 18:12:52 +0200
Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This tracepoint is responsible for:
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1537,7 +1537,9 @@ execbuf_submit(struct i915_execbuffer_pa
+@@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_pa
if (ret)
return ret;
diff --git a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index 8b8a16e..2969c88 100644
--- a/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/debian/patches/features/all/rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -1,7 +1,7 @@
Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
[ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
diff --git a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index fff86c6..2c6c20d 100644
--- a/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches/features/all/rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
DRM folks identified the spots, so use them.
diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
index 56427bc..38dc76e 100644
--- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
Subject: fs/epoll: Do not disable preemption on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 08 Jul 2011 16:35:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
ep_call_nested() takes a sleeping lock so we can't disable preemption.
The light version is enough since ep_call_nested() doesn't mind being
diff --git a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
index c8d2ba4..be5d78e 100644
--- a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
+++ b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 16 Feb 2015 18:49:10 +0100
Subject: fs/aio: simple simple work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch
index ac5f854..750bf27 100644
--- a/debian/patches/features/all/rt/fs-block-rt-support.patch
+++ b/debian/patches/features/all/rt/fs-block-rt-support.patch
@@ -1,7 +1,7 @@
Subject: block: Turn off warning which is bogus on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 14 Jun 2011 17:05:09 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On -RT the context is always with IRQs enabled. Ignore this warning on -RT.
diff --git a/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch b/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
index f17e444..d04baaf 100644
--- a/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
+++ b/debian/patches/features/all/rt/fs-dcache-include-wait.h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 14 Sep 2016 11:55:23 +0200
Subject: fs/dcache: include wait.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since commit d9171b934526 ("parallel lookups machinery, part 4 (and
last)") dcache.h is using but does not include wait.h. It works as long
diff --git a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
index 6431aa9..a13ee5d 100644
--- a/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
+++ b/debian/patches/features/all/rt/fs-dcache-init-in_lookup_hashtable.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 14 Sep 2016 17:57:03 +0200
Subject: [PATCH] fs/dcache: init in_lookup_hashtable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
in_lookup_hashtable was introduced in commit 94bdd655caba ("parallel
lookups machinery, part 3") and never initialized but since it is in
diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index b78351f..529d963 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
diff --git a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 359d744..a0458dc 100644
--- a/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/debian/patches/features/all/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 14 Sep 2016 14:35:49 +0200
Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
which disables preemption. As a workaround convert it to swait.
diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
index 8865658..f607cdb 100644
--- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
+++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
bit_spin_locks break under RT.
diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
index 33b32b8..ab35872 100644
--- a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
+++ b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 17 Feb 2014 17:30:03 +0100
Subject: fs: jbd2: pull your plug when waiting for space
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Two cps in parallel managed to stall the ext4 fs. It seems that
journal code is either waiting for locks or sleeping waiting for
diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
index 179cd13..b91177c 100644
--- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
+++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 19 Jul 2009 08:44:27 -0500
Subject: fs: namespace preemption fix
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On RT we cannot loop with preemption disabled here as
mnt_make_readonly() might have been preempted. We can safely enable
diff --git a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index b56f7b8..ddc78f9 100644
--- a/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/debian/patches/features/all/rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 15 Sep 2016 10:51:27 +0200
Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The RW semaphore had a reader side which used the _non_owner version
because it most likely took the reader lock in one thread and released it
diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
index 51427c7..2e02e85 100644
--- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <efault at gmx.de>
Date: Fri, 3 Jul 2009 08:44:12 -0500
Subject: fs: ntfs: disable interrupt only on !RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
> * Nick Piggin <nickpiggin at yahoo.com.au> wrote:
diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
index e88cbfc..e55a20e 100644
--- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
diff --git a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
index 8014a7d..70f93dc 100644
--- a/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
+++ b/debian/patches/features/all/rt/ftrace-Fix-trace-header-alignment.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sun, 16 Oct 2016 05:08:30 +0200
Subject: [PATCH] ftrace: Fix trace header alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Line up helper arrows to the right column.
diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
index db48440..a87c1f7 100644
--- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
diff --git a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index 5d94006..e6fa03f 100644
--- a/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/debian/patches/features/all/rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 1 Mar 2013 11:17:42 +0100
Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
In exit_pi_state_list() we have the following locking construct:
diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
index 708e2bc..ca48902 100644
--- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
+++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Requeue with timeout causes a bug with PREEMPT_RT_FULL.
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
/*
-@@ -1696,6 +1697,35 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1712,6 +1713,35 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
RT_MUTEX_FULL_CHAINWALK);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -99,6 +99,7 @@ enum rtmutex_chainwalk {
+@@ -100,6 +100,7 @@ enum rtmutex_chainwalk {
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
diff --git a/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
new file mode 100644
index 0000000..abf5bda
--- /dev/null
+++ b/debian/patches/features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
@@ -0,0 +1,126 @@
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Mon, 22 May 2017 13:04:50 -0700
+Subject: [PATCH] futex,rt_mutex: Fix rt_mutex_cleanup_proxy_lock()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+Markus reported that the glibc/nptl/tst-robustpi8 test was failing after
+commit:
+
+ cfafcd117da0 ("futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()")
+
+The following trace shows the problem:
+
+ ld-linux-x86-64-2161 [019] .... 410.760971: SyS_futex: 00007ffbeb76b028: 80000875 op=FUTEX_LOCK_PI
+ ld-linux-x86-64-2161 [019] ...1 410.760972: lock_pi_update_atomic: 00007ffbeb76b028: curval=80000875 uval=80000875 newval=80000875 ret=0
+ ld-linux-x86-64-2165 [011] .... 410.760978: SyS_futex: 00007ffbeb76b028: 80000875 op=FUTEX_UNLOCK_PI
+ ld-linux-x86-64-2165 [011] d..1 410.760979: do_futex: 00007ffbeb76b028: curval=80000875 uval=80000875 newval=80000871 ret=0
+ ld-linux-x86-64-2165 [011] .... 410.760980: SyS_futex: 00007ffbeb76b028: 80000871 ret=0000
+ ld-linux-x86-64-2161 [019] .... 410.760980: SyS_futex: 00007ffbeb76b028: 80000871 ret=ETIMEDOUT
+
+Task 2165 does an UNLOCK_PI, assigning the lock to the waiter task 2161
+which then returns with -ETIMEDOUT. That wrecks the lock state, because now
+the owner isn't aware it acquired the lock and removes the pending robust
+list entry.
+
+If 2161 is killed, the robust list will not clear out this futex and the
+subsequent acquire on this futex will then (correctly) result in -ESRCH,
+which glibc does not expect; it then triggers an internal assertion and dies.
+
+Task 2161 Task 2165
+
+rt_mutex_wait_proxy_lock()
+ timeout();
+ /* T2161 is still queued in the waiter list */
+ return -ETIMEDOUT;
+
+ futex_unlock_pi()
+ spin_lock(hb->lock);
+ rtmutex_unlock()
+ remove_rtmutex_waiter(T2161);
+ mark_lock_available();
+ /* Make the next waiter owner of the user space side */
+ futex_uval = 2161;
+ spin_unlock(hb->lock);
+spin_lock(hb->lock);
+rt_mutex_cleanup_proxy_lock()
+ if (rtmutex_owner() !== current)
+ ...
+ return FAIL;
+....
+return -ETIMEOUT;
+
+This means that rt_mutex_cleanup_proxy_lock() needs to call
+try_to_take_rt_mutex() so it can take over the rtmutex correctly which was
+assigned by the waker. If the rtmutex is owned by some other task then this
+call is harmless and just confirms that the waiter is not able to acquire
+it.
+
+While there, fix what looks like a merge error which resulted in
+rt_mutex_cleanup_proxy_lock() having two calls to
+fixup_rt_mutex_waiters() and rt_mutex_wait_proxy_lock() not having any.
+Both should have one, since both potentially touch the waiter list.
+
+Fixes: 38d589f2fd08 ("futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()")
+Reported-by: Markus Trippelsdorf <markus at trippelsdorf.de>
+Bug-Spotted-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
+Cc: Florian Weimer <fweimer at redhat.com>
+Cc: Darren Hart <dvhart at infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Cc: Markus Trippelsdorf <markus at trippelsdorf.de>
+Link: http://lkml.kernel.org/r/20170519154850.mlomgdsd26drq5j6@hirez.programming.kicks-ass.net
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1775,12 +1775,14 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+ int ret;
+
+ raw_spin_lock_irq(&lock->wait_lock);
+-
+- set_current_state(TASK_INTERRUPTIBLE);
+-
+ /* sleep on the mutex */
++ set_current_state(TASK_INTERRUPTIBLE);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+-
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
++ * have to fix that up.
++ */
++ fixup_rt_mutex_waiters(lock);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+@@ -1812,15 +1814,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ /*
++ * Do an unconditional try-lock, this deals with the lock stealing
++ * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
++ * sets a NULL owner.
++ *
++ * We're not interested in the return value, because the subsequent
++ * test on rt_mutex_owner() will infer that. If the trylock succeeded,
++ * we will own the lock and it will have removed the waiter. If we
++ * failed the trylock, we're still not owner and we need to remove
++ * ourselves.
++ */
++ try_to_take_rt_mutex(lock, current, waiter);
++ /*
+ * Unless we're the owner; we're still enqueued on the wait_list.
+ * So check if we became owner, if not, take us off the wait_list.
+ */
+ if (rt_mutex_owner(lock) != current) {
+ remove_waiter(lock, waiter);
+- fixup_rt_mutex_waiters(lock);
+ cleanup = true;
+ }
+-
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
diff --git a/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
new file mode 100644
index 0000000..c87e83b
--- /dev/null
+++ b/debian/patches/features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
@@ -0,0 +1,62 @@
+From 8a35f416ca9ff27e893cebcbe064a1f3c8e1de57 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 9 May 2017 17:11:10 +0200
+Subject: [PATCH] futex/rtmutex: Cure RT double blocking issue
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+RT has a problem when the wait on a futex/rtmutex got interrupted by a
+timeout or a signal. task->pi_blocked_on is still set when returning from
+rt_mutex_wait_proxy_lock(). The task must acquire the hash bucket lock
+after this.
+
+If the hash bucket lock is contended then the
+BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+task_blocks_on_rt_mutex() will trigger.
+
+This can be avoided by clearing task->pi_blocked_on in the return path of
+rt_mutex_wait_proxy_lock() which removes the task from the boosting chain
+of the rtmutex. That's correct because the task is no longer blocked on
+it.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Reported-by: Engleder Gerhard <eg at keba.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ kernel/locking/rtmutex.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -2388,6 +2388,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+ struct hrtimer_sleeper *to,
+ struct rt_mutex_waiter *waiter)
+ {
++ struct task_struct *tsk = current;
+ int ret;
+
+ raw_spin_lock_irq(&lock->wait_lock);
+@@ -2399,6 +2400,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+ * have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
++
++ /*
++ * RT has a problem here when the wait got interrupted by a timeout
++ * or a signal. task->pi_blocked_on is still set. The task must
++ * acquire the hash bucket lock when returning from this function.
++ *
++ * If the hash bucket lock is contended then the
++ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
++ * task_blocks_on_rt_mutex() will trigger. This can be avoided by
++ * clearing task->pi_blocked_on which removes the task from the
++ * boosting chain of the rtmutex. That's correct because the task
++ * is no longer blocked on it.
++ */
++ if (ret) {
++ raw_spin_lock(&tsk->pi_lock);
++ tsk->pi_blocked_on = NULL;
++ raw_spin_unlock(&tsk->pi_lock);
++ }
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
diff --git a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
index a57a99f..8d6d8ce 100644
--- a/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/debian/patches/features/all/rt/futex-workaround-migrate_disable-enable-in-different.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 8 Mar 2017 14:23:35 +0100
Subject: [PATCH] futex: workaround migrate_disable/enable in different context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
migrate_disable()/migrate_enable() takes a different path in atomic() vs
!atomic() context. These little hacks ensure that we don't underflow / overflow
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2667,9 +2667,18 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2669,9 +2669,18 @@ static int futex_lock_pi(u32 __user *uad
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
if (ret) {
if (ret == 1)
-@@ -2811,10 +2820,21 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2815,10 +2824,21 @@ static int futex_unlock_pi(u32 __user *u
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
index 2b798f2..c15dd7d 100644
--- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Creates long latencies for no value
diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index e0e1f84..e8b0570 100644
--- a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Joe Korty reported that __irq_set_affinity_locked() schedules a
workqueue while holding a rawlock which results in a might_sleep()
diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch
index 2ed6e86..8b807cb 100644
--- a/debian/patches/features/all/rt/genirq-force-threading.patch
+++ b/debian/patches/features/all/rt/genirq-force-threading.patch
@@ -1,7 +1,7 @@
Subject: genirq: Force interrupt thread on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Force threaded_irqs and optimize the code (force_irqthreads) in regard
to this.
diff --git a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
index 24ceb10..13ce7da 100644
--- a/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches/features/all/rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc at ni.com>
Date: Thu, 11 Feb 2016 11:54:00 -0600
Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On -rt kernels, the use of migrate_disable()/migrate_enable() is
sufficient to guarantee a task isn't moved to another CPU. Update the
diff --git a/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch b/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
index ac3f1c0..08fd8b2 100644
--- a/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
+++ b/debian/patches/features/all/rt/gpu_don_t_check_for_the_lock_owner.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: gpu: don't check for the lock owner.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
---
diff --git a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index 52f3b19..9a3ccf0 100644
--- a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Tue, 24 Mar 2015 08:14:49 +0100
Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
do_set_cpus_allowed() is not safe vs ->sched_class change.
diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
index 536504f..7bee10b 100644
--- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
+++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
@@ -1,7 +1,7 @@
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
get_online_cpus() is a heavy weight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 5a6bc23..5a956d7 100644
--- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -1,7 +1,7 @@
Subject: hotplug: sync_unplug: No "\n" in task name
From: Yong Zhang <yong.zhang0 at gmail.com>
Date: Sun, 16 Oct 2011 18:56:43 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Otherwise the output will look a little odd.
diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
index 6efab9e..84c6a61 100644
--- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
+++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
@@ -1,7 +1,7 @@
Subject: hotplug: Use migrate disable on unplug
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 19:35:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Migration needs to be disabled across the unplug handling to make
sure that the unplug thread is off the unplugged cpu.
diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index b2b5286..55d94db 100644
--- a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi at windriver.com>
Date: Mon, 16 Sep 2013 14:09:19 -0700
Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When run ltp leapsec_timer test, the following call trace is caught:
diff --git a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
index ab77f96..a3c9d7c 100644
--- a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
+++ b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 23 Dec 2015 20:57:41 +0100
Subject: hrtimer: enforce 64byte alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
a list_head expired to struct hrtimer_clock_base and with it we run into
diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 5389775..31ffc0e 100644
--- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
In preempt-rt we can not call the callbacks which take sleeping locks
from the timer interrupt context.
diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
index 1c8371e..2b18d82 100644
--- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
+++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: hrtimers: Prepare full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Make cancellation of a running callback in softirq context safe
against preemption.
diff --git a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 10beb33..14ecd11 100644
--- a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -1,7 +1,7 @@
From: Clark Williams <williams at redhat.com>
Date: Tue, 26 May 2015 10:43:43 -0500
Subject: i915: bogus warning from i915 when running on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
handler, which whines constantly on the RT kernel (since the interrupt
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12141,7 +12141,7 @@ void intel_check_page_flip(struct drm_i9
+@@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_flip_work *work;
diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
index 12018e5..12230a5 100644
--- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
+++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
Subject: ide: Do not disable interrupts for PREEMPT-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the local_irq_*_nort variants.
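As a rough illustration of the pattern the quoted header refers to (an aside, not part of the diff): the _nort helpers added by the -rt series behave like the plain local_irq_* operations on non-RT builds and compile away under PREEMPT_RT_FULL, so the section stays preemptible there. The device structure and register poke below are invented for the example.

#include <linux/io.h>
#include <linux/irqflags.h>

struct example_dev { void __iomem *regs; };	/* hypothetical device */

static void example_touch_hw(struct example_dev *dev)
{
	unsigned long flags;

	/* real IRQ-off section on !RT, no-op under PREEMPT_RT_FULL */
	local_irq_save_nort(flags);
	writel(0x1, dev->regs);			/* poke a device register */
	local_irq_restore_nort(flags);
}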
diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
index ff874e1..0686a35 100644
--- a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
+++ b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: idr: Use local lock instead of preempt enable/disable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We need to protect the per cpu variable and prevent migration.
diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
index ac23087..cca140d 100644
--- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
@@ -1,7 +1,7 @@
From: Sven-Thorsten Dietrich <sdietrich at novell.com>
Date: Fri, 3 Jul 2009 08:30:35 -0500
Subject: infiniband: Mellanox IB driver patch use _nort() primitives
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT
Kernel.
diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
index 29b7cfd..5b07aa8 100644
--- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the _nort() primitives.
diff --git a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
index 26d3ffd..8ff2fdf 100644
--- a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
+++ b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
@@ -1,7 +1,7 @@
Subject: Introduce migrate_disable() + cpu_light()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Jun 2011 15:42:38 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Introduce migrate_disable(). The task can't be pushed to another CPU but can
be preempted.
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1991,14 +1997,6 @@ static inline struct vm_struct *task_sta
+@@ -1997,14 +2003,6 @@ static inline struct vm_struct *task_sta
}
#endif
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3516,6 +3514,31 @@ static inline void set_task_cpu(struct t
+@@ -3522,6 +3520,31 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
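An illustrative aside (not part of the diff): the primitive introduced in the header above is meant to bracket per-CPU work so the task stays on its current CPU while remaining preemptible. Data and function names here are invented; a minimal sketch:

#include <linux/sched.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_counter);	/* hypothetical per-CPU data */

static void example_bump(void)
{
	/* keep the task on this CPU without disabling preemption */
	migrate_disable();
	this_cpu_inc(example_counter);
	migrate_enable();
}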
diff --git a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
index bcf592d..00f0814 100644
--- a/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/debian/patches/features/all/rt/iommu-amd--Use-WARN_ON_NORT.patch
@@ -1,7 +1,7 @@
Subject: iommu/amd: Use WARN_ON_NORT in __attach_device()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 27 Feb 2016 10:22:23 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT does not disable interrupts here, but the protection is still
correct. Fixup the WARN_ON so it won't yell on RT.
diff --git a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
index 1f22f12..c4040ec 100644
--- a/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+++ b/debian/patches/features/all/rt/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 15 Sep 2016 16:58:19 +0200
Subject: [PATCH] iommu/iova: don't disable preempt around this_cpu_ptr()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Commit 583248e6620a ("iommu/iova: Disable preemption around use of
this_cpu_ptr()") disables preemption while accessing a per-CPU variable.
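Sketching the alternative the -rt series prefers (an aside with invented names, not part of the diff): when the per-CPU data carries its own lock, the pointer can be taken with raw_cpu_ptr() and the lock, rather than a preempt-disabled section, provides the serialization.

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct example_rcache {			/* hypothetical per-CPU cache */
	spinlock_t lock;
	unsigned int depot;
};
static DEFINE_PER_CPU(struct example_rcache, example_rcaches);

static void example_stash(void)
{
	/* no preempt_disable(): the spinlock serializes the access even
	 * if the task migrates right after taking the pointer */
	struct example_rcache *rc = raw_cpu_ptr(&example_rcaches);

	spin_lock(&rc->lock);
	rc->depot++;
	spin_unlock(&rc->lock);
}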
diff --git a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
index 745df01..9318fbe 100644
--- a/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+++ b/debian/patches/features/all/rt/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 15 Sep 2016 17:16:44 +0200
Subject: [PATCH] iommu/vt-d: don't disable preemption while accessing
deferred_flush()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
get_cpu() disables preemption and returns the current CPU number. The
CPU number is later only used once while retrieving the address of the
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-@@ -3716,10 +3716,8 @@ static void add_unmap(struct dmar_domain
+@@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain
struct intel_iommu *iommu;
struct deferred_flush_entry *entry;
struct deferred_flush_data *flush_data;
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/* Flush all CPUs' entries to avoid deferring too much. If
* this becomes a bottleneck, can just flush us, and rely on
-@@ -3752,8 +3750,6 @@ static void add_unmap(struct dmar_domain
+@@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain
}
flush_data->size++;
spin_unlock_irqrestore(&flush_data->lock, flags);
diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
index 24fbfba..0a9f209 100644
--- a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
+++ b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
@@ -1,7 +1,7 @@
Subject: ipc/sem: Rework semaphore wakeups
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed, 14 Sep 2011 11:57:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Current sysv sems have a weird ass wakeup scheme that involves keeping
preemption disabled over a potential O(n^2) loop and busy waiting on
diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 6b27638..690c3aa 100644
--- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -1,7 +1,7 @@
Subject: genirq: Allow disabling of softirq processing in irq thread context
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 31 Jan 2012 13:01:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The processing of softirqs in irq thread context is a performance gain
for the non-rt workloads of a system, but it's counterproductive for
diff --git a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
index 49dc4b5..cae7511 100644
--- a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -1,7 +1,7 @@
Subject: irqwork: Move irq safe work to irq context
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 15 Nov 2015 18:40:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On architectures where arch_irq_work_has_interrupt() returns false, we
end up running the irq safe work from the softirq context. That
@@ -56,7 +56,7 @@ Cc: stable-rt at vger.kernel.org
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1691,7 +1691,7 @@ void update_process_times(int user_tick)
+@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -65,7 +65,7 @@ Cc: stable-rt at vger.kernel.org
if (in_irq())
irq_work_tick();
#endif
-@@ -1720,9 +1720,7 @@ static __latent_entropy void run_timer_s
+@@ -1684,9 +1684,7 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
index 7b8eb67..1a233be 100644
--- a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
Subject: irqwork: push most work into softirq context
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Initially we deferred all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1691,7 +1691,7 @@ void update_process_times(int user_tick)
+@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
if (in_irq())
irq_work_tick();
#endif
-@@ -1720,6 +1720,10 @@ static __latent_entropy void run_timer_s
+@@ -1684,6 +1684,10 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch
index 744d3ca..0169af8 100644
--- a/debian/patches/features/all/rt/jump-label-rt.patch
+++ b/debian/patches/features/all/rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
Subject: jump-label: disable if stop_machine() is used
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Some architectures are using stop_machine() while switching the opcode which
leads to latency spikes.
diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
index 94d433c..67de3bd 100644
--- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
Subject: kconfig: Disable config options which are not RT compatible
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Disable stuff which is known to have issues on RT
diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
index d80d86e..d60cedd 100644
--- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
+++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
@@ -1,7 +1,7 @@
Subject: kconfig: Add PREEMPT_RT_FULL
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 29 Jun 2011 14:58:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Introduce the final symbol for PREEMPT_RT_FULL.
diff --git a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
index d47a073..8b2aa70 100644
--- a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
+++ b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 19 Mar 2013 14:44:30 +0100
Subject: kernel/SRCU: provide a static initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There are macros for static initializer for the three out of four
possible notifier types, that are:
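The sentence above is cut off by the diff context; for orientation, a sketch (chain names invented, not part of the diff) of the three notifier types that have static initializers in mainline, next to the SRCU variant which, before this patch, needs runtime setup:

#include <linux/notifier.h>

/* the three types that already have static initializers */
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);
static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);
static RAW_NOTIFIER_HEAD(example_raw_chain);

/* the SRCU variant needs runtime initialization instead */
static struct srcu_notifier_head example_srcu_chain;
/* ... from init code: srcu_init_notifier_head(&example_srcu_chain); */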
diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 6fc287a..e9c999b 100644
--- a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 7 Jun 2013 22:37:06 +0200
Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If kthread is pinned to CPUx and CPUx is going down then we get into
trouble:
diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 6542152..08d9309 100644
--- a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 14 Jun 2013 17:16:35 +0200
Subject: kernel/hotplug: restore original cpu mask oncpu/down
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If a task which is allowed to run only on CPU X puts CPU Y down then it
will be allowed on all CPUs but the on CPU Y after it comes back from
diff --git a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index 459999f..8e8e83b 100644
--- a/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/debian/patches/features/all/rt/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 9 Feb 2016 18:18:01 +0100
Subject: kernel: migrate_disable() do fastpath in atomic &
irqs-off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
With interrupts off it makes no sense to do the long path since we can't
leave the CPU anyway. Also we might end up in a recursion with lockdep.
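The shape of that fast-path check, expressed as a hypothetical helper rather than the real function (a non-authoritative sketch, not part of the diff):

#include <linux/preempt.h>
#include <linux/irqflags.h>

/* mirrors the test described above: with IRQs off or inside an atomic
 * section the task cannot leave this CPU, so the heavy affinity
 * bookkeeping (which could also recurse into lockdep) is skipped */
static bool example_migrate_disable_fastpath(void)
{
	return in_atomic() || irqs_disabled();
}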
diff --git a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 892c481..7882587 100644
--- a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 4 Feb 2016 16:38:10 +0100
Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Otherwise we get a WARN_ON() backtrace and some events are reported as
"not counted".
diff --git a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index 69c6e6b..c35bec7 100644
--- a/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/debian/patches/features/all/rt/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 19 May 2016 17:45:27 +0200
Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On -RT we try to acquire sleeping locks which might lead to warnings
from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on
diff --git a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 9a26527..ee81c86 100644
--- a/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/debian/patches/features/all/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 21 Nov 2016 19:31:08 +0100
Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
__put_task_struct()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There is no need to free the stack before the task struct. This also
comes handy on -RT because we can't free memory in preempt disabled
diff --git a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
index d9b44d7..d5b6eb7 100644
--- a/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
+++ b/debian/patches/features/all/rt/kernel-softirq-unlock-with-irqs-on.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Tue, 9 Feb 2016 18:17:18 +0100
Subject: kernel: softirq: unlock with irqs on
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We unlock the lock while the interrupts are off. This isn't a problem
now but will get because the migrate_disable() + enable are not
diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
index ec4224e..c1db5f1 100644
--- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch
+++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch
@@ -1,7 +1,7 @@
From: Jason Wessel <jason.wessel at windriver.com>
Date: Thu, 28 Jul 2011 12:42:23 -0500
Subject: kgdb/serial: Short term workaround
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
> - KGDB (not yet disabled) is reportedly unusable on -rt right now due
diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch
index 027bd18..73c75c4 100644
--- a/debian/patches/features/all/rt/latency-hist.patch
+++ b/debian/patches/features/all/rt/latency-hist.patch
@@ -1,7 +1,7 @@
Subject: tracing: Add latency histograms
From: Carsten Emde <C.Emde at osadl.org>
Date: Tue, 19 Jul 2011 14:03:41 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This patch provides a recording mechanism to store data of potential
sources of system latencies. The recordings separately determine the
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
int start_pid;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1918,6 +1918,12 @@ struct task_struct {
+@@ -1924,6 +1924,12 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
diff --git a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
index 3d3f799..d0c863b 100644
--- a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
+++ b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
@@ -1,7 +1,7 @@
Subject: latency_hist: Update sched_wakeup probe
From: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
Date: Sun, 25 Oct 2015 18:06:05 -0400
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
"sched: Introduce the 'trace_sched_waking' tracepoint" introduces a
prototype change for the sched_wakeup probe: the "success" argument is
diff --git a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
index fb62d2b..3242593 100644
--- a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
+++ b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 4 Feb 2016 14:08:06 +0100
Subject: latencyhist: disable jump-labels
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
At least on X86 we die a recursive death
diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 33b5647..6e39e6c 100644
--- a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 23 Jan 2014 14:45:59 +0100
Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
as it triggers:
|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
diff --git a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
index 8ce7dfa..a99a0d8 100644
--- a/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/debian/patches/features/all/rt/list_bl-fixup-bogus-lockdep-warning.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc at ni.com>
Date: Thu, 31 Mar 2016 00:04:25 -0500
Subject: [PATCH] list_bl: fixup bogus lockdep warning
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
At first glance, the use of 'static inline' seems appropriate for
INIT_HLIST_BL_HEAD().
diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
index 077d1a3..4615b26 100644
--- a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -1,7 +1,7 @@
From: Paul Gortmaker <paul.gortmaker at windriver.com>
Date: Fri, 21 Jun 2013 15:07:25 -0400
Subject: list_bl: Make list head locking RT safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
As per changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
index 07fa123..08c280c 100644
--- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
+++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 22:34:14 +0200
Subject: rt: local_irq_* variants depending on RT/!RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Add local_irq_*_(no)rt variant which are mainly used to break
interrupt disabled sections on PREEMPT_RT or to explicitly disable
diff --git a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
index 0ac1fb0..e0b0f0e 100644
--- a/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
+++ b/debian/patches/features/all/rt/locallock-add-local_lock_on.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 27 May 2016 15:11:51 +0200
Subject: [PATCH] locallock: add local_lock_on()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
---
diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch
index b961f8c..b096ae9 100644
--- a/debian/patches/features/all/rt/localversion.patch
+++ b/debian/patches/features/all/rt/localversion.patch
@@ -1,7 +1,7 @@
Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt16
++-rt20
diff --git a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
index 840eb65..3ee2b08 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
@@ -2,7 +2,7 @@ From: Dan Murphy <dmurphy at ti.com>
Date: Fri, 24 Feb 2017 08:41:49 -0600
Subject: [PATCH] lockdep: Fix compilation error for !CONFIG_MODULES and
!CONFIG_SMP
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When CONFIG_MODULES is not set then it fails to compile in lockdep:
diff --git a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
index 59c51ec..369f8d9 100644
--- a/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
+++ b/debian/patches/features/all/rt/lockdep-Fix-per-cpu-static-objects.patch
@@ -2,7 +2,7 @@ From 8ce371f9846ef1e8b3cc8f6865766cb5c1f17e40 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz at infradead.org>
Date: Mon, 20 Mar 2017 12:26:55 +0100
Subject: [PATCH] lockdep: Fix per-cpu static objects
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since commit 383776fa7527 ("locking/lockdep: Handle statically initialized
PER_CPU locks properly") we try to collapse per-cpu locks into a single
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
#endif /* CONFIG_SMP */
/* Boot processor state steps */
-@@ -1923,6 +1925,10 @@ void __init boot_cpu_init(void)
+@@ -1924,6 +1926,10 @@ void __init boot_cpu_init(void)
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
diff --git a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
index 971d5a4..6991a28 100644
--- a/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
+++ b/debian/patches/features/all/rt/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Feb 2017 19:44:39 +0100
Subject: [PATCH] lockdep: Handle statically initialized PER_CPU locks proper
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If a PER_CPU struct which contains a spin_lock is statically initialized
via:
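The truncated sentence above is pointing at a statically initialized per-CPU lock; roughly this shape, with an invented struct (an illustrative aside, not part of the diff):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct example_pcp {
	spinlock_t lock;
	int value;
};

/* statically initialized per-CPU spinlock - the case lockdep must handle */
static DEFINE_PER_CPU(struct example_pcp, example_pcp) = {
	.lock = __SPIN_LOCK_UNLOCKED(example_pcp.lock),
};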
diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
index c947026..6668702 100644
--- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
Subject: lockdep: Make it RT aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
teach lockdep that we don't really do softirqs on -RT.
diff --git a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 48584fb..2bdd4d6 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <josh.cartwright at ni.com>
Date: Wed, 28 Jan 2015 13:08:45 -0600
Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
"lockdep: Selftest: Only do hardirq context test for raw spinlock"
disabled the execution of certain tests with PREEMPT_RT_FULL, but did
diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index 39503d8..f1cad36 100644
--- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
From: Yong Zhang <yong.zhang0 at gmail.com>
Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
From: Yong Zhang <yong.zhang at windriver.com>
diff --git a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index 99a0c42..994e906 100644
--- a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -1,7 +1,7 @@
From: "Wolfgang M. Reimer" <linuxball at gmail.com>
Date: Tue, 21 Jul 2015 16:20:07 +0200
Subject: locking: locktorture: Do NOT include rwlock.h directly
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Including rwlock.h directly will cause kernel builds to fail
if CONFIG_PREEMPT_RT_FULL is defined. The correct header file
diff --git a/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch b/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
index bd334ce..2865e74 100644
--- a/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
+++ b/debian/patches/features/all/rt/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 21 Nov 2016 19:26:15 +0100
Subject: [PATCH] locking/percpu-rwsem: use swait for the waiting writer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use struct swait_queue_head instead of wait_queue_head_t for the waiting
writer. The swait implementation is smaller and lightweight compared to
diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch
index 612d3dd..0beba70 100644
--- a/debian/patches/features/all/rt/md-disable-bcache.patch
+++ b/debian/patches/features/all/rt/md-disable-bcache.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 29 Aug 2013 11:48:57 +0200
Subject: md: disable bcache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
It uses anon semaphores
|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’:
diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
index f2d8f19..17a9071 100644
--- a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
@@ -42,7 +42,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
-@@ -6391,6 +6393,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6393,6 +6395,7 @@ static int raid456_cpu_up_prepare(unsign
__func__, cpu);
return -ENOMEM;
}
@@ -50,7 +50,7 @@ Tested-by: Udo van den Heuvel <udovdh at xs4all.nl>
return 0;
}
-@@ -6401,7 +6404,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6403,7 +6406,6 @@ static int raid5_alloc_percpu(struct r5c
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
index 2e57fe3..dd3d779 100644
--- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: mips: Disable highmem on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2514,7 +2514,7 @@ config MIPS_ASID_BITS_VARIABLE
+@@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE
#
config HIGHMEM
bool "High Memory Support"
diff --git a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
index 9d729c9..afba622 100644
--- a/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
+++ b/debian/patches/features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
@@ -1,7 +1,7 @@
Subject: mm: rt: Fix generic kmap_atomic for RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 19 Sep 2015 10:15:00 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The update to 4.1 brought in the mainline variant of the pagefault
disable distangling from preempt count. That introduced a
diff --git a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
index ce5b109..ac0e7b8 100644
--- a/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+++ b/debian/patches/features/all/rt/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 5 Feb 2016 12:17:14 +0100
Subject: mm: backing-dev: don't disable IRQs in wb_congested_put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
it triggers:
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
index d4a3342..41f43e6 100644
--- a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
+++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
@@ -1,7 +1,7 @@
Subject: mm: bounce: Use local_irq_save_nort
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 09 Jan 2013 10:33:09 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
kmap_atomic() is preemptible on RT.
diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
index c8402b9..59cd12b 100644
--- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
+++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: mm/swap: Convert to percpu locked
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Replace global locks (get_cpu + local_irq_save) with "local_locks()".
Currently there is one for "rotate" and one for "swap".
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6593,7 +6593,9 @@ static int page_alloc_cpu_notify(struct
+@@ -6594,7 +6594,9 @@ static int page_alloc_cpu_notify(struct
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
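A minimal sketch of the local-lock conversion described in the header above, using the -rt locallock API (lock and data names invented; an aside, not part of the diff). On !RT this degrades to local_irq_save()/restore(); under PREEMPT_RT_FULL it becomes a per-CPU sleeping lock, so the section stays preemptible.

#include <linux/locallock.h>	/* -rt only header */
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(example_lock);
static DEFINE_PER_CPU(int, example_pending);

static void example_queue(void)
{
	unsigned long flags;

	local_lock_irqsave(example_lock, flags);
	this_cpu_inc(example_pending);
	local_unlock_irqrestore(example_lock, flags);
}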
diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
index 03097b2..918d6fa 100644
--- a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
+++ b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:44:03 -0500
Subject: mm: Allow only slub on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs.
diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch
index 3426372..f5e942b 100644
--- a/debian/patches/features/all/rt/mm-enable-slub.patch
+++ b/debian/patches/features/all/rt/mm-enable-slub.patch
@@ -1,7 +1,7 @@
Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Make SLUB RT aware by converting locks to raw and using free lists to
move the freeing out of the lock held region.
diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
index 0eb093e..69e4786 100644
--- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Disable preemption on -RT for the vmstat code. On vanilla the code runs in
IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 99423f4..a3cc82f 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi at windriver.com>
Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The following trace is triggered when running ltp oom test cases:
diff --git a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
index 38dcf52..544c331 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Subject: mm/memcontrol: Replace local_irq_disable with local locks
Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There are a few local_irq_disable() which then take sleeping locks. This
patch converts them to local locks.
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4553,12 +4556,12 @@ static int mem_cgroup_move_account(struc
+@@ -4555,12 +4558,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5433,10 +5436,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5435,10 +5438,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5492,14 +5495,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5494,14 +5497,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5837,6 +5840,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5850,6 +5853,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5877,12 +5881,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5890,12 +5894,16 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
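
A minimal sketch of the conversion this patch applies (illustrative only; account_event() is a made-up stand-in for the real mm/memcontrol.c sites, which take the event_lock local lock):

    #include <linux/locallock.h>

    static DEFINE_LOCAL_IRQ_LOCK(event_lock);

    static void account_event(void)
    {
        local_lock_irq(event_lock);     /* was: local_irq_disable() */
        /* ... update per-CPU memcg state; on -RT this section may now
         * legally take sleeping locks ... */
        local_unlock_irq(event_lock);   /* was: local_irq_enable() */
    }

On !RT the local lock maps back to plain interrupt disabling, so non-RT behaviour is unchanged.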
diff --git a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 1ef02b1..986f56a 100644
--- a/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/debian/patches/features/all/rt/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sun, 5 Jun 2016 08:11:13 +0200
Subject: [PATCH] mm/memcontrol: mem_cgroup_migrate() - replace another
local_irq_disable() w. local_lock_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
v4.6 grew a local_irq_disable() in mm/memcontrol.c::mem_cgroup_migrate().
Convert it to use the existing local lock (event_lock) like the others.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5657,10 +5657,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5659,10 +5659,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 28cc2a7..e3e776f 100644
--- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -1,7 +1,7 @@
Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 27 Sep 2012 11:11:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The plain spinlock, while sufficient, does not update the local_lock
internals. Use a proper local_lock function instead to ease debugging.
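
A rough illustration, assuming the pa_lock local lock used by the -RT page allocator changes (function and lock names here are only for illustration):

    static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

    static void drain_remote_pages(unsigned int cpu)
    {
        /* take the target CPU's instance of the local lock instead of a
         * plain spinlock, so the local_lock debug state stays consistent */
        local_lock_on(pa_lock, cpu);
        /* ... drain that CPU's per-cpu page lists ... */
        local_unlock_on(pa_lock, cpu);
    }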
diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
index d38156d..3878f43 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri Jul 3 08:44:37 2009 -0500
Subject: mm: page_alloc: Reduce lock sections further
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Split out the pages which are to be freed into a separate list and
call free_pages_bulk() outside of the percpu page allocator locks.
diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 870ca86..d30d26c 100644
--- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
return NULL;
}
-@@ -6557,6 +6581,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6558,6 +6582,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -184,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
/*
-@@ -7385,7 +7410,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7386,7 +7411,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -193,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7394,7 +7419,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7395,7 +7420,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
index a1bc816..a8da1e2 100644
--- a/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
+++ b/debian/patches/features/all/rt/mm-perform-lru_add_drain_all-remotely.patch
@@ -1,7 +1,7 @@
From: Luiz Capitulino <lcapitulino at redhat.com>
Date: Fri, 27 May 2016 15:03:28 +0200
Subject: [PATCH] mm: perform lru_add_drain_all() remotely
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
index 7c091bd..daa6d92 100644
--- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
+++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
@@ -1,7 +1,7 @@
From: Yong Zhang <yong.zhang0 at gmail.com>
Date: Tue, 15 May 2012 13:53:56 +0800
Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use preempt_*_rt instead of local_irq_*_rt, otherwise there will be a
warning on ARM like below:
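
A sketch of the guard being added (simplified; the real hunk wraps the activate_mm() call in fs/exec.c, and the helper name here is hypothetical):

    static void exec_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
    {
        preempt_disable_rt();   /* no-op on !RT, preempt_disable() on -RT */
        activate_mm(active_mm, mm);
        preempt_enable_rt();
    }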
diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
index 79bad78..09bdb60 100644
--- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
Subject: mm, rt: kmap_atomic scheduling
From: Peter Zijlstra <peterz at infradead.org>
Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
In fact, with migrate_disable() existing one could play games with
kmap_atomic. You could save/restore the kmap_atomic slots on context
@@ -230,7 +230,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1980,6 +1981,12 @@ struct task_struct {
+@@ -1986,6 +1987,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index f276713..40ee4b6 100644
--- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The local_irq_save() is not only used to get things done "fast" but
also to ensure that in case of SG_MITER_ATOMIC we are in "atomic"
diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
index a773188..a41bd7b 100644
--- a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
Subject: mm/vmalloc: Another preempt disable region which sucks
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Avoid the preempt disable version of get_cpu_var(). The inner-lock should
provide enough serialisation.
diff --git a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 07d0cd3..b2a9de1 100644
--- a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 29 Jan 2015 17:19:44 +0100
Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
workingset_shadow_nodes is protected by local_irq_disable(). Some users
use spin_lock_irq().
@@ -9,8 +9,6 @@ Replace the irq/on with a local_lock(). Rename workingset_shadow_nodes
so I catch users of it which will be introduced later.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
-[bwh: Forward-ported to 4.9.22: workingset_init() now uses
- __list_lru_init() instead of list_lru_init_key()]
---
include/linux/swap.h | 4 +++-
mm/filemap.c | 13 +++++++++----
diff --git a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index b990b1b..7857361 100644
--- a/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/debian/patches/features/all/rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Tue, 22 Mar 2016 11:16:09 +0100
Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
get_cpu_var() disables preemption and triggers a might_sleep() splat later.
This is replaced with get_locked_var().
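
The replacement in rough form (helper names are illustrative; the same get_locked_var() pattern appears verbatim in the new random patch further down in this diff):

    static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
    static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

    static struct mapping_area *zs_map_get(void)
    {
        /* was: &get_cpu_var(zs_map_area), which disables preemption and
         * later trips might_sleep() on -RT */
        return &get_locked_var(zs_map_area_lock, zs_map_area);
    }

    static void zs_map_put(void)
    {
        put_locked_var(zs_map_area_lock, zs_map_area);
    }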
diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
index 7037cdb..d71239d 100644
--- a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
+++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
@@ -1,7 +1,7 @@
Subject: mmci: Remove bogus local_irq_save()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 09 Jan 2013 12:11:12 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On !RT the interrupt handler runs with interrupts disabled. On RT it runs in a
thread, so there is no need to disable interrupts at all.
diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
index 26e97a7..632b8ef 100644
--- a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
+++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
@@ -1,7 +1,7 @@
Date: Wed, 26 Jun 2013 15:28:11 -0400
From: Steven Rostedt <rostedt at goodmis.org>
Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The ntp code for notify_cmos_timer() is called from a hard interrupt
context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks
diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
index 9d347a9..e9db74b 100644
--- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
+++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:51:45 +0200
Subject: locking: Disable spin on owner for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Drop spin on owner for mutex / rwsem. We are most likely not using it
but…
diff --git a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 980890d..ee63189 100644
--- a/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/debian/patches/features/all/rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -2,7 +2,7 @@ From: Steven Rostedt <rostedt at goodmis.org>
Date: Tue, 6 Dec 2016 17:50:30 -0500
Subject: [PATCH] net: Have __napi_schedule_irqoff() disable interrupts on
RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
A customer hit a crash where the napi sd->poll_list became corrupted.
The customer had the bnx2x driver, which does a
diff --git a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index 5123ddc..7d54f13 100644
--- a/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/debian/patches/features/all/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 14 Sep 2016 17:36:35 +0200
Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The seqcount disables preemption on -RT while it is held, which we can't
remove. Also we don't want the reader to spin for ages if the writer is
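
For reference, the generic difference between the two primitives (plain kernel seqcount/seqlock API, not the Qdisc-specific wrappers this patch introduces; variable names are illustrative):

    static seqcount_t qd_running_sc = SEQCNT_ZERO(qd_running_sc);
    static DEFINE_SEQLOCK(qd_running_sl);

    static void update_with_seqcount(void)
    {
        /* writer brings its own serialization; per the description above,
         * the write side ends up non-preemptible on -RT */
        write_seqcount_begin(&qd_running_sc);
        /* ... update state ... */
        write_seqcount_end(&qd_running_sc);
    }

    static void update_with_seqlock(void)
    {
        /* the embedded lock serializes writers; on -RT it is a sleeping
         * lock, which avoids the unbounded reader spinning described above */
        write_seqlock(&qd_running_sl);
        /* ... update state ... */
        write_sequnlock(&qd_running_sl);
    }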
diff --git a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
index 3610740..e36982e 100644
--- a/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
+++ b/debian/patches/features/all/rt/net-add-a-lock-around-icmp_sk.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 31 Aug 2016 17:54:09 +0200
Subject: [PATCH] net: add a lock around icmp_sk()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
It looks like the this_cpu_ptr() access in icmp_sk() is protected with
local_bh_disable(). To avoid missing serialization in -RT I am adding
diff --git a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 1d80c75..0770712 100644
--- a/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/debian/patches/features/all/rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] net: add back the missing serialization in
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Some time ago Sami Pietikäinen reported a crash on -RT in
ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire
diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
index 727a72c..792b1b6 100644
--- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 26 Sep 2012 16:21:08 +0200
Subject: net: Another local_irq_disable/kmalloc headache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Replace it by a local lock. Though that's pretty inefficient :(
diff --git a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 1d6c363..963fea9 100644
--- a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -1,7 +1,7 @@
Subject: net/core/cpuhotplug: Drain input_pkt_queue lockless
From: Grygorii Strashko <grygorii.strashko at ti.com>
Date: Fri, 9 Oct 2015 09:25:49 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
I can constantly see below error report with 4.1 RT-kernel on TI ARM dra7-evm
if I'm trying to unplug cpu1:
diff --git a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 88dc924..107aa07 100644
--- a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: net/core: protect users of napi_alloc_cache against
reentrance
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On -RT the code running in BH cannot be moved to another CPU, so CPU
local variables remain local. However the code can be preempted
diff --git a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 6e86e33..23a4a6e 100644
--- a/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches/features/all/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 30 Mar 2016 13:36:29 +0200
Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The root-lock is dropped before dev_hard_start_xmit() is invoked and after
setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index a7d405c..302eb0a 100644
--- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -1,7 +1,7 @@
Subject: net: netfilter: Serialize xt_write_recseq sections on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 28 Oct 2012 11:18:08 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The netfilter code relies only on the implicit semantics of
local_bh_disable() for serializing xt_write_recseq sections. RT breaks
diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
index e4b0b40..0fb3aa6 100644
--- a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
+++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 20 Mar 2013 18:06:20 +0100
Subject: net: Add a mutex around devnet_rename_seq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On RT write_seqcount_begin() disables preemption and device_rename()
allocates memory with GFP_KERNEL and later grabs the sysfs_mutex
diff --git a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index e5644a5..678b0fc 100644
--- a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 13 Jan 2016 15:55:02 +0100
Subject: net: move xmit_recursion to per-task variable on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
A softirq on -RT can be preempted. That means one task is in
__dev_queue_xmit(), gets preempted and another task may enter
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1983,6 +1983,9 @@ struct task_struct {
+@@ -1989,6 +1989,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
diff --git a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
index 14902cf..5a3442e 100644
--- a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
+++ b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
@@ -1,7 +1,7 @@
Subject: net-flip-lock-dep-thingy.patch
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 28 Jun 2011 10:59:58 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
=======================================================
[ INFO: possible circular locking dependency detected ]
diff --git a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index cf7fc1b..dac816e 100644
--- a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 20 Jan 2016 15:39:05 +0100
Subject: net: provide a way to delegate processing a softirq to
ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If NET_RX uses up all of its budget it moves the following NAPI
invocations into the `ksoftirqd`. On -RT it does not do so. Instead it
diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 48b4407..3931a53 100644
--- a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -1,7 +1,7 @@
From: Marc Kleine-Budde <mkl at pengutronix.de>
Date: Wed, 5 Mar 2014 00:49:47 +0100
Subject: net: sched: Use msleep() instead of yield()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On PREEMPT_RT enabled systems the interrupt handlers run as threads at prio 50
(by default). If a high priority userspace process tries to shut down a busy
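
Roughly, the wait loop in dev_deactivate_many() (net/sched/sch_generic.c) ends up as the sketch below:

    #include <linux/delay.h>

    static void wait_for_qdiscs_idle(struct net_device *dev)
    {
        /* yield() from a high-priority caller can starve the prio-50 irq
         * threads that would make the qdiscs idle; msleep(1) lets them run */
        while (some_qdisc_is_busy(dev))
            msleep(1);      /* was: yield() */
    }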
diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch
index 7100162..4608db7 100644
--- a/debian/patches/features/all/rt/net-use-cpu-chill.patch
+++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: net: Use cpu_chill() instead of cpu_relax()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 07 Mar 2012 21:10:04 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
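
In miniature, with a made-up busy flag (cpu_chill() is provided by the -RT tree and maps to cpu_relax() on !RT):

    static void wait_for_update(atomic_t *busy)
    {
        /* on -RT cpu_chill() sleeps briefly, so a preempted updater can
         * run and clear the flag; cpu_relax() could spin forever here */
        while (atomic_read(busy))
            cpu_chill();
    }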
diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
index 41c4c32..4925333 100644
--- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch
+++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch
@@ -1,7 +1,7 @@
Subject: net/wireless: Use WARN_ON_NORT()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 21 Jul 2011 21:05:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The softirq counter is meaningless on RT, so the check triggers a
false positive.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -4180,7 +4180,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4230,7 +4230,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
index efe6b89..6b2e55d 100644
--- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
+++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
From: Oleg Nesterov <oleg at redhat.com>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On x86_64 we must disable preemption before we enable interrupts
for stack faults, int3 and debugging, because the current task is using
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1696,6 +1696,10 @@ struct task_struct {
+@@ -1700,6 +1700,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
index bce2557..5180da5 100644
--- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
+++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Disable on -RT. If this is invoked from irq context we will have problems
acquiring the sleeping lock.
diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 56c9fe2..991f13c 100644
--- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -1,7 +1,7 @@
Subject: rcu: Make ksoftirqd do RCU quiescent states
From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
Date: Wed, 5 Oct 2011 11:45:18 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
to network-based denial-of-service attacks. This patch therefore
diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
index 0f6b958..c0c6e89 100644
--- a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
+++ b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
@@ -1,7 +1,7 @@
Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 01 Dec 2011 00:07:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The waitqueue is protected by the pci_lock, so we can just avoid taking
the waitqueue lock itself. That prevents the
diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
index d3d94f6..1a97494 100644
--- a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
+++ b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 9 Apr 2014 11:58:17 +0200
Subject: percpu_ida: Use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The local_irq_save() + spin_lock() combination does not work that well on -RT
diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
index 9cd1ca4..a5e8ce2 100644
--- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
From: Yong Zhang <yong.zhang at windriver.com>
Date: Wed, 11 Jul 2012 22:05:21 +0000
Subject: perf: Make swevent hrtimer run in irq instead of softirq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Otherwise we get a deadlock like below:
diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
index bf0883d..af2e5e1 100644
--- a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
+++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
@@ -1,7 +1,7 @@
Subject: rcu: Frob softirq test
From: Peter Zijlstra <a.p.zijlstra at chello.nl>
Date: Sat Aug 13 00:23:17 CEST 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
With RT_FULL we get the below wreckage:
diff --git a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
index a42e1a9..d872b13 100644
--- a/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
+++ b/debian/patches/features/all/rt/peterz-percpu-rwsem-rt.patch
@@ -1,7 +1,7 @@
Subject: locking/percpu-rwsem: Remove preempt_disable variants
From: Peter Zijlstra <peterz at infradead.org>
Date: Wed Nov 23 16:29:32 CET 2016
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Effective revert commit:
diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
index 7d358c1..87c1f69 100644
--- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
+++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
@@ -1,7 +1,7 @@
Subject: crypto: Convert crypto notifier chain to SRCU
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri, 05 Oct 2012 09:03:24 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The crypto notifier chain deadlocks on RT, though this can be a real deadlock
on mainline as well due to FIFO-fair rwsems.
diff --git a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
index 2781b3a..ae4fd7c 100644
--- a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Grygorii Strashko <Grygorii.Strashko at linaro.org>
Date: Tue, 21 Jul 2015 19:43:56 +0300
Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This patch fixes a build error:
CC kernel/pid_namespace.o
diff --git a/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch b/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
index dec0b08..8119658 100644
--- a/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
+++ b/debian/patches/features/all/rt/pinctrl-qcom-Use-raw-spinlock-variants.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia at ni.com>
Date: Fri, 20 Jan 2017 10:13:47 -0600
Subject: [PATCH] pinctrl: qcom: Use raw spinlock variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The MSM pinctrl driver currently implements an irq_chip for handling
GPIO interrupts; due to how irq_chip handling is done, it's necessary
diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch
index 9a64842..60f6f7a 100644
--- a/debian/patches/features/all/rt/ping-sysrq.patch
+++ b/debian/patches/features/all/rt/ping-sysrq.patch
@@ -1,7 +1,7 @@
Subject: net: sysrq via icmp
From: Carsten Emde <C.Emde at osadl.org>
Date: Tue, 19 Jul 2011 13:51:17 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There are (probably rare) situations when a system has crashed and the system
console becomes unresponsive but the network ICMP layer is still alive.
diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
index a3ef4b7..550871f 100644
--- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
+++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:29:20 -0500
Subject: posix-timers: Prevent broadcast signals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
POSIX timers should not send broadcast signals or kernel-only
signals. Prevent it.
diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 79b354e..91a0f27 100644
--- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -1,7 +1,7 @@
From: John Stultz <johnstul at us.ibm.com>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: posix-timers: Thread posix-cpu-timers on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The posix-cpu-timer code takes non-RT-safe locks in hard irq
context. Move it to a thread.
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
-@@ -250,6 +256,7 @@ extern struct task_group root_task_group
+@@ -251,6 +257,7 @@ extern struct task_group root_task_group
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1660,6 +1660,9 @@ struct task_struct {
+@@ -1664,6 +1664,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1426,6 +1426,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1427,6 +1427,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
index 76f47fa..45b1b47 100644
--- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: powerpc: Disable highmem on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
index f627af8..b159179 100644
--- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
+++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: powerpc: Use generic rwsem on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the generic code, which uses rtmutex.
diff --git a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index dd52539..6650349 100644
--- a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
From: Bogdan Purcareata <bogdan.purcareata at freescale.com>
Date: Fri, 24 Apr 2015 15:53:13 +0000
Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
While converting the openpic emulation code to use a raw_spinlock_t enables
guests to run on RT, there's still a performance issue. For interrupts sent in
diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
index aa03f9c..df5966b 100644
--- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Implement the powerpc pieces for lazy preempt.
diff --git a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
index 686bd2f..61930db 100644
--- a/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
+++ b/debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
@@ -1,7 +1,7 @@
From: Paul Gortmaker <paul.gortmaker at windriver.com>
Date: Sun, 31 May 2015 14:44:42 -0400
Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
To fix:
diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch
index 1072a21..ef8ca2c 100644
--- a/debian/patches/features/all/rt/preempt-lazy-support.patch
+++ b/debian/patches/features/all/rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: sched: Add support for lazy preemption
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3343,6 +3343,43 @@ static inline int test_tsk_need_resched(
+@@ -3349,6 +3349,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5481,7 +5547,9 @@ void init_idle(struct task_struct *idle,
+@@ -5523,7 +5589,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
index b726b96..aa00aa6 100644
--- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
+++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 24 Jul 2009 12:38:56 +0200
Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT needs a few preempt_disable/enable points which are not necessary
otherwise. Implement variants to avoid #ifdeffery.
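
Roughly, the variants boil down to (simplified from the patch body):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define preempt_disable_rt()           preempt_disable()
    # define preempt_enable_rt()            preempt_enable()
    # define preempt_disable_nort()         barrier()
    # define preempt_enable_nort()          barrier()
    #else
    # define preempt_disable_rt()           barrier()
    # define preempt_enable_rt()            barrier()
    # define preempt_disable_nort()         preempt_disable()
    # define preempt_enable_nort()          preempt_enable()
    #endif

The _rt variants are only active on PREEMPT_RT_FULL and the _nort variants only on everything else, so callers avoid open-coded #ifdefs.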
diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index 3e0d833..56c0689 100644
--- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -1,7 +1,7 @@
Subject: printk: Add "force_early_printk" boot param to help with debugging
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri, 02 Sep 2011 14:41:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Gives me an option to screw printk and actually see what the machine
says.
diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch
index a4e72d0..d52b2af 100644
--- a/debian/patches/features/all/rt/printk-kill.patch
+++ b/debian/patches/features/all/rt/printk-kill.patch
@@ -1,7 +1,7 @@
Subject: printk: Add a printk kill switch
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 22 Jul 2011 17:58:40 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Add a printk kill switch. This is used from the (NMI) watchdog to ensure that
it does not deadlock with the early printk code.
diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch
index 3f8b36b..2649909 100644
--- a/debian/patches/features/all/rt/printk-rt-aware.patch
+++ b/debian/patches/features/all/rt/printk-rt-aware.patch
@@ -1,7 +1,7 @@
Subject: printk: Make rt aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 19 Sep 2012 14:50:37 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Drop the lock before calling the console driver and do not disable
interrupts while printing to a serial console.
diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 5020ef4..04fb098 100644
--- a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 29 Aug 2013 18:21:04 +0200
Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
As explained by Alexander Fyodorov <halcy at yandex.ru>:
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
-@@ -3360,6 +3357,51 @@ static inline int signal_pending_state(l
+@@ -3366,6 +3363,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
diff --git a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
index 6a732fc..ccb8b44 100644
--- a/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
+++ b/debian/patches/features/all/rt/radix-tree-use-local-locks.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 25 Jan 2017 16:34:27 +0100
Subject: [PATCH] radix-tree: use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The preload functionality uses per-CPU variables and preempt-disable to
ensure that it does not switch CPUs during its usage. This patch adds
diff --git a/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
new file mode 100644
index 0000000..8b551a2
--- /dev/null
+++ b/debian/patches/features/all/rt/random-avoid-preempt_disable-ed-section.patch
@@ -0,0 +1,75 @@
+From 81e7296af883a58c3e5609842e129de01442198d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+Date: Fri, 12 May 2017 15:46:17 +0200
+Subject: [PATCH] random: avoid preempt_disable()ed section
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
+
+extract_crng() will use sleeping locks while in a preempt_disable()
+section due to get_cpu_var().
+Work around it with local_locks.
+
+Cc: stable-rt at vger.kernel.org # where it applies to
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
+---
+ drivers/char/random.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -262,6 +262,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/completion.h>
+ #include <linux/uuid.h>
++#include <linux/locallock.h>
+ #include <crypto/chacha20.h>
+
+ #include <asm/processor.h>
+@@ -2052,6 +2053,7 @@ struct batched_entropy {
+ * goal of being quite fast and not depleting entropy.
+ */
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock);
+ unsigned long get_random_long(void)
+ {
+ unsigned long ret;
+@@ -2060,13 +2062,13 @@ unsigned long get_random_long(void)
+ if (arch_get_random_long(&ret))
+ return ret;
+
+- batch = &get_cpu_var(batched_entropy_long);
++ batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long);
+ if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
+ extract_crng((u8 *)batch->entropy_long);
+ batch->position = 0;
+ }
+ ret = batch->entropy_long[batch->position++];
+- put_cpu_var(batched_entropy_long);
++ put_locked_var(batched_entropy_long_lock, batched_entropy_long);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_long);
+@@ -2078,6 +2080,8 @@ unsigned int get_random_int(void)
+ }
+ #else
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock);
++
+ unsigned int get_random_int(void)
+ {
+ unsigned int ret;
+@@ -2086,13 +2090,13 @@ unsigned int get_random_int(void)
+ if (arch_get_random_int(&ret))
+ return ret;
+
+- batch = &get_cpu_var(batched_entropy_int);
++ batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int);
+ if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
+ extract_crng((u8 *)batch->entropy_int);
+ batch->position = 0;
+ }
+ ret = batch->entropy_int[batch->position++];
+- put_cpu_var(batched_entropy_int);
++ put_locked_var(batched_entropy_int_lock, batched_entropy_int);
+ return ret;
+ }
+ #endif
diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
index 6057a90..2650c7d 100644
--- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
+++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
Subject: random: Make it work on rt
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Delegate the random insertion to the forced threaded interrupt
handler. Store the return IP of the hard interrupt handler in the irq
diff --git a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
index 87b1947..40523a0 100644
--- a/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
+++ b/debian/patches/features/all/rt/rbtree-include-rcu.h-because-we-use-it.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 14 Sep 2016 11:52:17 +0200
Subject: rbtree: include rcu.h because we use it
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since commit c1adf20052d8 ("Introduce rb_replace_node_rcu()")
rbtree_augmented.h uses RCU related data structures but does not include
diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
index 6b30fa3..dd0a389 100644
--- a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -1,7 +1,7 @@
From: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
Date: Mon, 4 Nov 2013 13:21:10 -0800
Subject: rcu: Eliminate softirq processing from rcutree
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Running RCU out of softirq is a problem for some workloads that would
like to manage RCU core processing independently of other softirq work,
diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
index 28aaa79..b5a6b4c 100644
--- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -1,7 +1,7 @@
Subject: rcu: Disable RCU_FAST_NO_HZ on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 28 Oct 2012 13:26:09 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This uses a timer_list timer from the irq disabled guts of the idle
code. Disable it for now to prevent wreckage.
diff --git a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 9ac29b0..627bbb1 100644
--- a/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/debian/patches/features/all/rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia at ni.com>
Date: Wed, 12 Oct 2016 11:21:14 -0500
Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The forcing of an expedited grace period is an expensive and very
RT-application unfriendly operation, as it forcibly preempts all running
diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
index 830eacc..cfb094b 100644
--- a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 21 Mar 2014 20:19:05 +0100
Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since it is no longer invoked from the softirq people run into OOM more
often if the priority of the RCU thread is too low. Making boosting
diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 12ce78a..8f5da62 100644
--- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -1,7 +1,7 @@
Subject: rcu: Merge RCU-bh into RCU-preempt
Date: Wed, 5 Oct 2011 11:59:38 -0700
From: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The Linux kernel has long RCU-bh read-side critical sections that
intolerably increase scheduling latency under mainline's RCU-bh rules,
diff --git a/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch b/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
index b903bf0..a0234c9 100644
--- a/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+++ b/debian/patches/features/all/rt/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 2 Nov 2016 16:45:58 +0100
Subject: [PATCH] rcu: update: make RCU_EXPEDITE_BOOT default
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RCU_EXPEDITE_BOOT should speed up the boot process by enforcing
synchronize_rcu_expedited() instead of synchronize_rcu() during the boot
diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 36c01d3..9e7d2fc 100644
--- a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -1,7 +1,7 @@
From: Tiejun Chen <tiejun.chen at windriver.com>
Date: Wed, 18 Dec 2013 17:51:49 +0800
Subject: rcutree/rcu_bh_qs: Disable irq while calling rcu_preempt_qs()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Any callers to the function rcu_preempt_qs() must disable irqs in
order to protect the assignment to ->rcu_read_unlock_special. In
diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 658ee89..1553d20 100644
--- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -1,7 +1,7 @@
From: Yong Zhang <yong.zhang0 at gmail.com>
Date: Thu, 28 Jul 2011 11:16:00 +0800
Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When retry happens, it's likely that the task has been migrated to
another cpu (except unplug failed), but it still dereferences the
diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 2d99dcd..ed40a06 100644
--- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -1,7 +1,7 @@
Subject: ARM: Initialize split page table locks for vector page
From: Frank Rowand <frank.rowand at am.sony.com>
Date: Sat, 1 Oct 2011 18:58:13 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if
PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
diff --git a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index 7edb69b..9da4799 100644
--- a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
Subject: ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die()
From: Grygorii Strashko <grygorii.strashko at ti.com>
Date: Fri, 11 Sep 2015 21:21:23 +0300
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When running with the RT-kernel (4.1.5-rt5) on TI OMAP dra7-evm and trying
to do Suspend to RAM, the following backtrace occurs:
diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch
index 2b22795..d0e1eb2 100644
--- a/debian/patches/features/all/rt/rt-add-rt-locks.patch
+++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 26 Jul 2009 19:39:56 +0200
Subject: rt: Add the preempt-rt lock replacement APIs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
based locking functions for preempt-rt.
@@ -25,15 +25,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
include/linux/spinlock_rt.h | 162 ++++++++++++
include/linux/spinlock_types.h | 11
include/linux/spinlock_types_rt.h | 48 +++
- kernel/futex.c | 9
+ kernel/futex.c | 11
kernel/locking/Makefile | 9
kernel/locking/rt.c | 498 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 463 +++++++++++++++++++++++++++++++++--
- kernel/locking/rtmutex_common.h | 6
+ kernel/locking/rtmutex.c | 479 +++++++++++++++++++++++++++++++++---
+ kernel/locking/rtmutex_common.h | 9
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
kernel/sched/core.c | 7
- 23 files changed, 1653 insertions(+), 55 deletions(-)
+ 23 files changed, 1663 insertions(+), 66 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -916,32 +916,32 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+#endif
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1396,6 +1396,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1398,6 +1398,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
- bool deboost = false;
+ bool postunlock = false;
WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1455,13 +1456,15 @@ static int wake_futex_pi(u32 __user *uad
- /*
- * We've updated the uservalue, this unlock cannot fail.
- */
-- deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
-+ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
-+ &wake_sleeper_q);
+@@ -1459,13 +1460,13 @@ static int wake_futex_pi(u32 __user *uad
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+-
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++ &wake_sleeper_q);
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- if (deboost) {
- wake_up_q(&wake_q);
-+ wake_up_q_sleeper(&wake_sleeper_q);
- rt_mutex_adjust_prio(current);
- }
+ if (postunlock)
+- rt_mutex_postunlock(&wake_q);
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-@@ -2664,7 +2667,7 @@ static int futex_lock_pi(u32 __user *uad
+ return ret;
+ }
+@@ -2666,7 +2667,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -950,7 +950,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3029,7 +3032,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3033,7 +3034,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -1508,7 +1508,53 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -420,6 +425,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -228,6 +233,8 @@ static inline bool unlock_rt_mutex_safe(
+ }
+ #endif
+
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
+ /*
+ * Only use with rt_mutex_waiter_{less,equal}()
+ */
+@@ -236,10 +243,15 @@ static inline bool unlock_rt_mutex_safe(
+
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+- struct rt_mutex_waiter *right)
++ struct rt_mutex_waiter *right, int mode)
+ {
+- if (left->prio < right->prio)
+- return 1;
++ if (mode == STEAL_NORMAL) {
++ if (left->prio < right->prio)
++ return 1;
++ } else {
++ if (left->prio <= right->prio)
++ return 1;
++ }
+
+ /*
+ * If both waiters have dl_prio(), we check the deadlines of the
+@@ -283,7 +295,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+- if (rt_mutex_waiter_less(waiter, entry)) {
++ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+@@ -322,7 +334,7 @@ rt_mutex_enqueue_pi(struct task_struct *
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+- if (rt_mutex_waiter_less(waiter, entry)) {
++ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+@@ -388,6 +400,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -1523,7 +1569,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -726,13 +739,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -713,13 +733,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -1542,33 +1588,15 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -825,6 +841,25 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -812,6 +835,7 @@ static int rt_mutex_adjust_prio_chain(st
return ret;
}
+
-+#define STEAL_NORMAL 0
-+#define STEAL_LATERAL 1
-+
-+/*
-+ * Note that RT tasks are excluded from lateral-steals to prevent the
-+ * introduction of an unbounded latency
-+ */
-+static inline int lock_is_stealable(struct task_struct *task,
-+ struct task_struct *pendowner, int mode)
-+{
-+ if (mode == STEAL_NORMAL || rt_task(task)) {
-+ if (task->prio >= pendowner->prio)
-+ return 0;
-+ } else if (task->prio > pendowner->prio)
-+ return 0;
-+ return 1;
-+}
-+
/*
* Try to take an rt-mutex
*
-@@ -835,8 +870,9 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -822,8 +846,9 @@ static int rt_mutex_adjust_prio_chain(st
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
@@ -1578,39 +1606,50 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+ struct task_struct *task,
+ struct rt_mutex_waiter *waiter, int mode)
{
- /*
- * Before testing whether we can acquire @lock, we set the
-@@ -873,8 +909,10 @@ static int try_to_take_rt_mutex(struct r
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -862,8 +887,10 @@ static int try_to_take_rt_mutex(struct r
* If waiter is not the highest priority waiter of
* @lock, give up.
*/
- if (waiter != rt_mutex_top_waiter(lock))
+ if (waiter != rt_mutex_top_waiter(lock)) {
-+ /* XXX lock_is_stealable() ? */
++ /* XXX rt_mutex_waiter_less() ? */
return 0;
+ }
/*
* We can acquire the lock. Remove the waiter from the
-@@ -892,14 +930,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -881,15 +908,26 @@ static int try_to_take_rt_mutex(struct r
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
-- /*
-- * If @task->prio is greater than or equal to
-- * the top waiter priority (kernel view),
-- * @task lost.
-- */
-- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
-- return 0;
+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
-+ if (task != pown && !lock_is_stealable(task, pown, mode))
++
++ if (task != pown)
+ return 0;
++
++ /*
++ * Note that RT tasks are excluded from lateral-steals
++ * to prevent the introduction of an unbounded latency.
++ */
++ if (rt_task(task))
++ mode = STEAL_NORMAL;
+ /*
+ * If @task->prio is greater than or equal to
+ * the top waiter priority (kernel view),
+ * @task lost.
+ */
+ if (!rt_mutex_waiter_less(task_to_waiter(task),
+- rt_mutex_top_waiter(lock)))
++ rt_mutex_top_waiter(lock),
++ mode))
+ return 0;
+-
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -946,6 +980,350 @@ static int try_to_take_rt_mutex(struct r
+@@ -936,6 +974,339 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -1764,9 +1803,9 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
-+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
-+ struct wake_q_head *wake_sleeper_q,
-+ struct rt_mutex *lock);
++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper);
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
@@ -1775,25 +1814,14 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+ unsigned long flags;
+ WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
++ bool postunlock;
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+
-+ debug_rt_mutex_unlock(lock);
-+
-+ if (!rt_mutex_has_waiters(lock)) {
-+ lock->owner = NULL;
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+ return;
-+ }
-+
-+ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
-+
++ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+ wake_up_q(&wake_q);
-+ wake_up_q_sleeper(&wake_sleeper_q);
+
-+ /* Undo pi boosting.when necessary */
-+ rt_mutex_adjust_prio(current);
++ if (postunlock)
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+}
+
+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
@@ -1961,7 +1989,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1058,6 +1436,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1051,6 +1422,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -1969,19 +1997,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1086,7 +1465,10 @@ static void mark_wakeup_next_waiter(stru
-
- raw_spin_unlock(&current->pi_lock);
-
+@@ -1090,7 +1462,10 @@ static void mark_wakeup_next_waiter(stru
+ * Pairs with preempt_enable() in rt_mutex_postunlock();
+ */
+ preempt_disable();
- wake_q_add(wake_q, waiter->task);
+ if (waiter->savestate)
+ wake_q_add(wake_sleeper_q, waiter->task);
+ else
+ wake_q_add(wake_q, waiter->task);
+ raw_spin_unlock(&current->pi_lock);
}
- /*
-@@ -1167,21 +1549,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1174,21 +1549,22 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -2006,7 +2034,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
/**
-@@ -1261,7 +1644,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1268,7 +1644,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -2015,8 +2043,8 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1355,7 +1738,8 @@ static inline int rt_mutex_slowtrylock(s
- * Return whether the current task needs to undo a potential priority boosting.
+@@ -1363,7 +1739,8 @@ static inline int rt_mutex_slowtrylock(s
+ * Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- struct wake_q_head *wake_q)
@@ -2025,16 +2053,29 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
{
unsigned long flags;
-@@ -1409,7 +1793,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1417,7 +1794,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
- mark_wakeup_next_waiter(wake_q, lock);
+ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
-
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1463,17 +1847,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+ return true; /* call rt_mutex_postunlock() */
+@@ -1469,9 +1846,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+ /*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+-void rt_mutex_postunlock(struct wake_q_head *wake_q)
++void rt_mutex_postunlock(struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper)
+ {
+ wake_up_q(wake_q);
++ wake_up_q_sleeper(wq_sleeper);
+
+ /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+ preempt_enable();
+@@ -1480,15 +1859,17 @@ void rt_mutex_postunlock(struct wake_q_h
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -2044,56 +2085,72 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
{
WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
- bool deboost;
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
return;
-- deboost = slowfn(lock, &wake_q);
-+ deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
+- if (slowfn(lock, &wake_q))
+- rt_mutex_postunlock(&wake_q);
++ if (slowfn(lock, &wake_q, &wake_sleeper_q))
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+ }
- wake_up_q(&wake_q);
-+ wake_up_q_sleeper(&wake_sleeper_q);
+ /**
+@@ -1607,12 +1988,9 @@ void __sched rt_mutex_unlock(struct rt_m
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
- /* Undo pi boosting if necessary: */
- if (deboost)
-@@ -1601,7 +1988,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
- * simple and will not need to retry.
- */
- bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+-/**
+- * Futex variant, that since futex variants do not use the fast-path, can be
+- * simple and will not need to retry.
+- */
+-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
- struct wake_q_head *wake_q)
-+ struct wake_q_head *wake_q,
-+ struct wake_q_head *wq_sleeper)
++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper)
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1612,21 +2000,23 @@ bool __sched __rt_mutex_futex_unlock(str
- return false; /* done */
- }
-
+@@ -1629,22 +2007,34 @@ bool __sched __rt_mutex_futex_unlock(str
+ * avoid inversion prior to the wakeup. preempt_disable()
+ * therein pairs with rt_mutex_postunlock().
+ */
- mark_wakeup_next_waiter(wake_q, lock);
+ mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
- return true; /* deboost and wakeups */
+
+ return true; /* call postunlock() */
}
++/**
++ * Futex variant, that since futex variants do not use the fast-path, can be
++ * simple and will not need to retry.
++ */
++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper)
++{
++ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
++}
++
void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
- bool deboost;
+ bool postunlock;
raw_spin_lock_irq(&lock->wait_lock);
-- deboost = __rt_mutex_futex_unlock(lock, &wake_q);
-+ deboost = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
+- postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
++ postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
raw_spin_unlock_irq(&lock->wait_lock);
- if (deboost) {
- wake_up_q(&wake_q);
-+ wake_up_q_sleeper(&wake_sleeper_q);
- rt_mutex_adjust_prio(current);
- }
+ if (postunlock)
+- rt_mutex_postunlock(&wake_q);
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
}
-@@ -1661,13 +2051,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+
+ /**
+@@ -1677,13 +2067,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -2108,7 +2165,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1682,7 +2071,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1698,7 +2087,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -2117,7 +2174,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1888,3 +2277,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1916,3 +2305,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -2153,7 +2210,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -106,7 +107,7 @@ extern void rt_mutex_init_proxy_locked(s
+@@ -107,7 +108,7 @@ extern void rt_mutex_init_proxy_locked(s
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -2162,7 +2219,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -123,7 +124,8 @@ extern int rt_mutex_futex_trylock(struct
+@@ -124,9 +125,11 @@ extern int rt_mutex_futex_trylock(struct
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -2170,8 +2227,12 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+ struct wake_q_head *wqh,
+ struct wake_q_head *wq_sleeper);
- extern void rt_mutex_adjust_prio(struct task_struct *task);
+-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
++extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper);
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
diff --git a/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch b/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
index 7aafec2..de86fcb 100644
--- a/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
+++ b/debian/patches/features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 10 Feb 2017 18:21:04 +0100
Subject: rt: Drop mutex_disable() on !DEBUG configs and the GPL suffix from export symbol
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Alex Goins reported that mutex_destroy() on RT will force a GPL only symbol
which won't link and therefore fail on a non-GPL kernel module.
diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
index 847544d..eb05f88 100644
--- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: rt: Introduce cpu_chill()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch
index 5bbffb0..9fc4f30 100644
--- a/debian/patches/features/all/rt/rt-local-irq-lock.patch
+++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
Subject: rt: Add local irq locks
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable() so there is not much that changes. For RT this will
diff --git a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
index 76a9303..9cca067 100644
--- a/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/debian/patches/features/all/rt/rt-locking-Reenable-migration-accross-schedule.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 8 Feb 2016 16:15:28 +0100
Subject: rt/locking: Reenable migration accross schedule
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We currently disable migration across lock acquisition. That includes the part
where we block on the lock and schedule out. We cannot disable migration after
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -986,14 +986,19 @@ static int __try_to_take_rt_mutex(struct
+@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
* preemptible spin_lock functions:
*/
static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
}
static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1051,7 +1056,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
* We store the current state under p->pi_lock in p->saved_state and
* the try_to_wake_up() code handles this accordingly.
*/
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
{
struct task_struct *lock_owner, *self = current;
struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1095,8 +1101,13 @@ static void noinline __sched rt_spin_lo
+@@ -1089,8 +1095,13 @@ static void noinline __sched rt_spin_lo
debug_rt_mutex_print_deadlock(&waiter);
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
raw_spin_lock_irqsave(&lock->wait_lock, flags);
-@@ -1165,38 +1176,35 @@ static void noinline __sched rt_spin_lo
+@@ -1148,38 +1159,35 @@ static void noinline __sched rt_spin_lo
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
{
diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch
index ecbcba2..ed64104 100644
--- a/debian/patches/features/all/rt/rt-preempt-base-config.patch
+++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch
@@ -1,7 +1,7 @@
Subject: rt: Provide PREEMPT_RT_BASE config switch
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 17 Jun 2011 12:39:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Introduce PREEMPT_RT_BASE which enables parts of
PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
index b2d8bd8..e9a9431 100644
--- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch
+++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch
@@ -1,7 +1,7 @@
Subject: rt: Improve the serial console PASS_LIMIT
From: Ingo Molnar <mingo at elte.hu>
Date: Wed Dec 14 13:05:54 CET 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Beyond the warning:
diff --git a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 67db568..50815e7 100644
--- a/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/debian/patches/features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Handle non enqueued waiters gracefully
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 06 Nov 2015 18:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Yimin debugged that in case of a PI wakeup in progress when
rt_mutex_start_proxy_lock() calls task_blocks_on_rt_mutex() the latter
@@ -22,7 +22,7 @@ Cc: stable-rt at vger.kernel.org
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1682,7 +1682,7 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1697,7 +1697,7 @@ int __rt_mutex_start_proxy_lock(struct r
ret = 0;
}
diff --git a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
index 0b0e327..676e9d2 100644
--- a/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
+++ b/debian/patches/features/all/rt/rtmutex-Make-lock_killable-work.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 1 Apr 2017 12:50:59 +0200
Subject: [PATCH] rtmutex: Make lock_killable work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Locking an rt mutex killable does not work because signal handling is
restricted to TASK_INTERRUPTIBLE.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1206,18 +1206,13 @@ static int __sched
+@@ -1213,18 +1213,13 @@ static int __sched
if (try_to_take_rt_mutex(lock, current, waiter))
break;
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
index be36085..b3cc618 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-locked-slowpath.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 1 Apr 2017 12:51:01 +0200
Subject: [PATCH] rtmutex: Provide locked slowpath
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The new rt rwsem implementation needs rtmutex::wait_lock to protect struct
rw_semaphore. Dropping the lock and reacquiring it for locking the rtmutex
@@ -132,9 +132,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -129,6 +129,15 @@ extern bool __rt_mutex_futex_unlock(stru
-
- extern void rt_mutex_adjust_prio(struct task_struct *task);
+@@ -131,6 +131,15 @@ extern bool __rt_mutex_futex_unlock(stru
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+ struct wake_q_head *wq_sleeper);
+/* RW semaphore special interface */
+struct ww_acquire_ctx;
diff --git a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
index 47af3a2..126d80f 100644
--- a/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
+++ b/debian/patches/features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 1 Apr 2017 12:51:00 +0200
Subject: [PATCH] rtmutex: Provide rt_mutex_lock_state()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Allow rtmutex to be locked with arbitrary states. Preparatory patch for the
rt rwsem rework.
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2003,21 +2003,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -2008,21 +2008,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
}
/**
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
* @lock: the rt_mutex to be locked
*
* Returns:
-@@ -2026,20 +2037,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -2031,20 +2042,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
/**
* rt_mutex_lock_killable - lock a rt_mutex killable
*
-@@ -2049,16 +2050,21 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -2054,16 +2055,21 @@ int __sched rt_mutex_futex_trylock(struc
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
index c01461e..51811b7 100644
--- a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
Date: Mon, 28 Oct 2013 09:36:37 +0100
Subject: rtmutex: Add RT aware ww locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
lockdep says:
| --------------------------------------------------------------------------
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
#include "rtmutex_common.h"
-@@ -1317,6 +1318,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1300,6 +1301,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
}
/*
-@@ -1809,29 +1940,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1808,29 +1939,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -265,7 +265,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
}
static inline int
-@@ -1876,7 +2011,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1881,7 +2016,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -274,7 +274,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1893,7 +2028,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1898,7 +2033,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1920,7 +2055,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1925,7 +2060,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -292,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1944,6 +2079,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1949,6 +2084,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -300,16 +300,16 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian at breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2225,7 +2361,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
- set_current_state(TASK_INTERRUPTIBLE);
-
+@@ -2239,7 +2375,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+ raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
+ set_current_state(TASK_INTERRUPTIBLE);
- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
-
- raw_spin_unlock_irq(&lock->wait_lock);
-
-@@ -2278,24 +2414,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+@@ -2306,24 +2442,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
index bd20494..e6476be 100644
--- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
+++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Avoid include hell
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Include only the required raw types. This avoids pulling in the
complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
index a5a15a4..07ab8fd 100644
--- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
+++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Handle the various new futex race conditions
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT opens a few new interesting race conditions in the rtmutex/futex
combo due to futex hash bucket lock being a 'sleeping' spinlock and
@@ -10,13 +10,13 @@ therefore not disabling preemption.
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
kernel/futex.c | 77 ++++++++++++++++++++++++++++++++--------
- kernel/locking/rtmutex.c | 36 +++++++++++++++---
+ kernel/locking/rtmutex.c | 37 ++++++++++++++++---
kernel/locking/rtmutex_common.h | 2 +
- 3 files changed, 94 insertions(+), 21 deletions(-)
+ 3 files changed, 95 insertions(+), 21 deletions(-)
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2009,6 +2009,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2011,6 +2011,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2992,7 +3002,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2996,7 +3006,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3048,20 +3058,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3052,20 +3062,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3070,7 +3115,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3074,7 +3119,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3081,7 +3127,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3085,7 +3131,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3095,7 +3141,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3099,7 +3145,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -421,7 +426,8 @@ int max_lock_depth = 1024;
+@@ -389,7 +394,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
/*
-@@ -557,7 +563,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -525,7 +531,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
goto out_unlock_pi;
/*
-@@ -969,6 +975,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -961,6 +967,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -192,29 +192,29 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+
+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
- __rt_mutex_adjust_prio(task);
+ rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -992,7 +1015,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -985,7 +1008,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
- __rt_mutex_adjust_prio(owner);
+ rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1076,7 +1099,7 @@ static void remove_waiter(struct rt_mute
+@@ -1081,7 +1104,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock;
+ struct rt_mutex *next_lock = NULL;
- raw_spin_lock(¤t->pi_lock);
- rt_mutex_dequeue(lock, waiter);
-@@ -1100,7 +1123,8 @@ static void remove_waiter(struct rt_mute
- __rt_mutex_adjust_prio(owner);
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -1107,7 +1130,8 @@ static void remove_waiter(struct rt_mute
+ rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
- next_lock = task_blocked_on_lock(owner);
@@ -223,18 +223,19 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
raw_spin_unlock(&owner->pi_lock);
-@@ -1136,7 +1160,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1143,7 +1167,8 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
-- if (!waiter || (waiter->prio == task->prio &&
-+ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
- !dl_prio(task->prio))) {
+- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
++ if (!rt_mutex_real_waiter(waiter) ||
++ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
+ }
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -98,6 +98,8 @@ enum rtmutex_chainwalk {
+@@ -99,6 +99,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
index 95ea34d..56cf9f6 100644
--- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch
+++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Add rtmutex_lock_killable()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 09 Jun 2011 11:43:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Add "killable" type to rtmutex. We need this since rtmutex are used as
"normal" mutexes which do use this type.
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1524,6 +1524,25 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -1535,6 +1535,25 @@ int __sched rt_mutex_futex_trylock(struc
}
/**
diff --git a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
index 37278af..637a412 100644
--- a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
+++ b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed 02 Dec 2015 11:34:07 +0100
Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
-RT we don't run softirqs in IRQ context but in thread context so it is
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1535,7 +1535,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1545,7 +1545,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
diff --git a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
index 1f066f5..be7709f 100644
--- a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
+++ b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The RCU header pulls in spinlock.h and fails due to not yet defined types:
diff --git a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
index 267cfe1..e1dbc4e 100644
--- a/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
+++ b/debian/patches/features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 1 Apr 2017 12:51:02 +0200
Subject: [PATCH] rwsem/rt: Lift single reader restriction
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The RT specific R/W semaphore implementation restricts the number of readers
to one because a writer cannot block on multiple readers and inherit its
diff --git a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
index 56be550..1045320 100644
--- a/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
+++ b/debian/patches/features/all/rt/rxrpc-remove-unused-static-variables.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 21 Oct 2016 10:54:50 +0200
Subject: [PATCH] rxrpc: remove unused static variables
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The rxrpc_security_methods and rxrpc_security_sem user has been removed
in 648af7fca159 ("rxrpc: Absorb the rxkad security module"). This was
diff --git a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
index e54594b..70087e6 100644
--- a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
+++ b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -1,7 +1,7 @@
From: Paul Gortmaker <paul.gortmaker at windriver.com>
Date: Sat, 14 Feb 2015 11:01:16 -0500
Subject: sas-ata/isci: dont't disable interrupts in qc_issue handler
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On 3.14-rt we see the following trace on Canoe Pass for
SCSI_ISCI "Intel(R) C600 Series Chipset SAS Controller"
diff --git a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index f4778b2..021bd5a 100644
--- a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -1,7 +1,7 @@
From: Juri Lelli <juri.lelli at gmail.com>
Date: Tue, 13 May 2014 15:30:20 +0200
Subject: sched/deadline: dl_task_timer has to be irqsafe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
As for rt_period_timer, dl_task_timer has to be irqsafe.
diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch
index f8f62f1..36a33b7 100644
--- a/debian/patches/features/all/rt/sched-delay-put-task.patch
+++ b/debian/patches/features/all/rt/sched-delay-put-task.patch
@@ -1,7 +1,7 @@
Subject: sched: Move task_struct cleanup to RCU
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 31 May 2011 16:59:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
__put_task_struct() does quite some expensive work. We don't want to
burden random tasks with that.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1962,6 +1962,9 @@ struct task_struct {
+@@ -1968,6 +1968,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -2219,6 +2222,15 @@ extern struct pid *cad_pid;
+@@ -2225,6 +2228,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2226,6 +2238,7 @@ static inline void put_task_struct(struc
+@@ -2232,6 +2244,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
index 23f71fe..30d07c8 100644
--- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Carsten reported problems when running:
diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
index 12d41b7..e819d3e 100644
--- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable TTWU_QUEUE on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The queued remote wakeup mechanism can introduce rather large
latencies if the number of migrated tasks is high. Disable it for RT.
diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
index d7e9c3f..09a4732 100644
--- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
+++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
Subject: sched: Limit the number of task migrations per batch
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Put an upper limit on the number of tasks which are migrated per batch
to avoid large latencies.
diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
index a9eda0e..d715f17 100644
--- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT changes the rcu_preempt_depth semantics, so we cannot check for it
in might_sleep().
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7820,7 +7820,7 @@ void __init sched_init(void)
+@@ -7862,7 +7862,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
index 5ccbb9c..a13e2ae 100644
--- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
+++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
Subject: sched: Move mmdrop to RCU on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Takes sleeping locks and calls into the memory allocator, so nothing
we want to do in task switch and other atomic contexts.
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2906,6 +2906,17 @@ static inline void mmdrop(struct mm_stru
+@@ -2912,6 +2912,17 @@ static inline void mmdrop(struct mm_stru
__mmdrop(mm);
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5545,6 +5549,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5587,6 +5591,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5559,7 +5565,12 @@ void idle_task_exit(void)
+@@ -5601,7 +5607,12 @@ void idle_task_exit(void)
switch_mm_irqs_off(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
}
/*
-@@ -7505,6 +7516,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7547,6 +7558,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
index 1e5815e..95ae8e3 100644
--- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
+++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
@@ -1,7 +1,7 @@
Subject: sched: Add saved_state for tasks blocked on sleeping locks
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Spinlocks are state preserving in !RT. RT changes the state when a
task gets blocked on a lock. So we need to remember the state before
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2698,6 +2699,7 @@ extern void xtime_update(unsigned long t
+@@ -2704,6 +2705,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
index 801ee0c..fdd3f0c 100644
--- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
@@ -1,7 +1,7 @@
Subject: sched: ttwu: Return success when only changing the saved_state value
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 13 Dec 2011 21:42:19 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When a task blocks on a rt lock, it saves the current state in
p->saved_state, so a lock related wake up will not destroy the
diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index e49d2ab..e5d637f 100644
--- a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Mon, 18 Mar 2013 15:12:49 -0400
Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
In -rt, most spin_locks() turn into mutexes. One of these spin_lock
conversions is performed on the workqueue gcwq->lock. When the idle
diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
index 9d51edf..7399920 100644
--- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
@@ -1,7 +1,7 @@
Subject: scsi/fcoe: Make RT aware.
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Do not disable preemption while taking sleeping locks. All users look safe
for migrate_disable() only.
diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
index da4ed4c..d6d8f0f 100644
--- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
+++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -1,7 +1,7 @@
Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll
From: John Kacur <jkacur at redhat.com>
Date: Fri, 27 Apr 2012 12:48:46 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
RT triggers the following:
diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
index edc719c..56cb803 100644
--- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
+++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
@@ -1,7 +1,7 @@
Subject: seqlock: Prevent rt starvation
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 22 Feb 2012 12:03:30 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.
diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
index 1fb20f5..0cf77ab 100644
--- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
+++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
@@ -1,7 +1,7 @@
Subject: signal: Make __lock_task_sighand() RT aware
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 22 Jul 2011 08:07:08 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
local_irq_save() + spin_lock(&sighand->siglock) does not work on
-RT. Use the nort variants.
diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
index c0efaa2..e55cee7 100644
--- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
Subject: signal: Revert ptrace preempt magic
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
than a bandaid around the ptrace design trainwreck. It's not a
diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index f27d0b1..80a7420 100644
--- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 3 Jul 2009 08:44:56 -0500
Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
To avoid allocation allow rt tasks to cache one sigqueue struct in
task struct.
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1685,6 +1685,7 @@ struct task_struct {
+@@ -1689,6 +1689,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1552,6 +1552,7 @@ static __latent_entropy struct task_stru
+@@ -1553,6 +1553,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
index 0713db3..a14afde 100644
--- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch
+++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 12 Jul 2011 15:38:34 +0200
Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the rps lock as rawlock so we can keep irq-off regions. It looks low
latency. However we can't kfree() from this context therefore we defer this
diff --git a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
index a5dd61c..7a1e0a2 100644
--- a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 15 Apr 2015 19:00:47 +0200
Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
index ebb6554..7ca6270 100644
--- a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
Subject: slub: Enable irqs for __GFP_WAIT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
with GFP_WAIT can happen before that. So use this as an indicator.
diff --git a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
index 8a1b1cf..63167b1 100644
--- a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
+++ b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Wed, 18 Feb 2015 15:09:23 +0100
Subject: snd/pcm: fix snd_pcm_stream_lock*() irqs_disabled() splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Locking functions previously using read_lock_irq()/read_lock_irqsave() were
changed to local_irq_disable/save(), leading to gripes. Use nort variants.
diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
index 4444241..bddc934 100644
--- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
Subject: softirq: Disable softirq stacks for RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Disable extra stacks for softirqs. We want to preempt softirqs and
having them on special IRQ-stack does not make this easier.
diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
index 4e753c7..d29afc0 100644
--- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
Subject: softirq: Check preemption after reenabling interrupts
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch
index 7db04c8..b635255 100644
--- a/debian/patches/features/all/rt/softirq-split-locks.patch
+++ b/debian/patches/features/all/rt/softirq-split-locks.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 04 Oct 2012 14:20:47 +0100
Subject: softirq: Split softirq locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The 3.x RT series removed the split softirq implementation in favour
of pushing softirq processing into the context of the thread which
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1965,6 +1965,8 @@ struct task_struct {
+@@ -1971,6 +1971,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2281,6 +2283,7 @@ extern void thread_group_cputime_adjuste
+@@ -2287,6 +2289,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
diff --git a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 2425f69..4051ee6 100644
--- a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 20 Jan 2016 16:34:17 +0100
Subject: softirq: split timer softirqs out of ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
timer wakeup which can not happen in hardirq context. The prio has been
diff --git a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
index 280b564..0afe6c9 100644
--- a/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
+++ b/debian/patches/features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <efault at gmx.de>
Date: Fri, 20 Jan 2017 18:10:20 +0100
Subject: [PATCH] softirq: wake the timer softirq if needed
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The irq-exit path only checks the "normal"-softirq thread if it is
running and ignores the state of the "timer"-softirq thread. It is possible
diff --git a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 07c203c..4ba595a 100644
--- a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
From: Allen Pais <allen.pais at oracle.com>
Date: Fri, 13 Dec 2013 09:44:41 +0530
Subject: sparc64: use generic rwsem spinlocks rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Signed-off-by: Allen Pais <allen.pais at oracle.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
index df365f3..d10dd0a 100644
--- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
+++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch
@@ -1,7 +1,7 @@
Subject: spinlock: Split the lock types header
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 29 Jun 2011 19:34:01 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Split raw_spinlock into its own file and the remaining spinlock_t into
its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches/features/all/rt/stop-machine-raw-lock.patch b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
index d9e5ed7..bf19ce2 100644
--- a/debian/patches/features/all/rt/stop-machine-raw-lock.patch
+++ b/debian/patches/features/all/rt/stop-machine-raw-lock.patch
@@ -1,7 +1,7 @@
Subject: stop_machine: Use raw spinlocks
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 29 Jun 2011 11:01:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use raw-locks in stomp_machine() to allow locking in irq-off regions.
diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index 80ed18b..d2444fe 100644
--- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:30:27 -0500
Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Instead of playing with non-preemption, introduce explicit
startup serialization. This is more robust and cleaner as
diff --git a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 1c7e96f..8ee1922 100644
--- a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Wed, 18 Feb 2015 16:05:28 +0100
Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
index 72750bf..8e0b8cc 100644
--- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
+++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 15 Jul 2010 10:29:00 +0200
Subject: suspend: Prevent might sleep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
timekeeping suspend/resume calls read_persistent_clock() which takes
rtc_lock. That results in might sleep warnings because at that point
diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
index 115b2fd..ffb42b3 100644
--- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch
+++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
Subject: sysfs: Add /sys/kernel/realtime entry
From: Clark Williams <williams at redhat.com>
Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Add a /sys/kernel entry to indicate that the kernel is a
realtime kernel.
diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index f5192a2..9e88420 100644
--- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -1,7 +1,7 @@
Subject: tasklet: Prevent tasklets from going into infinite spin in RT
From: Ingo Molnar <mingo at elte.hu>
Date: Tue Nov 29 20:18:22 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
and spinlocks turn into mutexes. But this can cause issues with
diff --git a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
index 3ce54fd..4827b5e 100644
--- a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
@@ -1,7 +1,7 @@
From: Daniel Wagner <wagi at monom.org>
Date: Tue, 17 Feb 2015 09:37:44 +0100
Subject: thermal: Defer thermal wakups to threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
On RT the spin lock in pkg_temp_thermal_platform_thermal_notify will
call schedule while we run in irq context.
diff --git a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
index 04840ed..ffc1f71 100644
--- a/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ b/debian/patches/features/all/rt/tick-broadcast--Make-hrtimer-irqsafe.patch
@@ -1,7 +1,7 @@
Subject: tick/broadcast: Make broadcast hrtimer irqsafe
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 27 Feb 2016 10:47:10 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Otherwise we end up with the following:
diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
index 35cd247..26597cc 100644
--- a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
+++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
@@ -1,7 +1,7 @@
Subject: timekeeping: Split jiffies seqlock
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 14 Feb 2013 22:36:59 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
it can be taken in atomic context on RT.
diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index f7f2d9b..defe14d 100644
--- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz at infradead.org>
Date: Fri, 21 Aug 2009 11:56:45 +0200
Subject: timer: delay waking softirqs from the jiffy tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
People were complaining about broken balancing with the recent -rt
series.
diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
index 03b71ef..5ae2694 100644
--- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
+++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
@@ -1,7 +1,7 @@
Subject: timer-fd: Prevent live lock
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 25 Jan 2012 11:08:40 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
If hrtimer_try_to_cancel() requires a retry, then depending on the
priority setting the retry loop might prevent timer callback completion
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
-@@ -460,7 +460,10 @@ static int do_timerfd_settime(int ufd, i
+@@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, i
break;
}
spin_unlock_irq(&ctx->wqh.lock);
diff --git a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
index 499fd8b..e295d6b 100644
--- a/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
+++ b/debian/patches/features/all/rt/timer-hrtimer-check-properly-for-a-running-timer.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 1 Mar 2017 16:30:49 +0100
Subject: [PATCH] timer/hrtimer: check properly for a running timer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
hrtimer_callback_running() checks only whether a timer is running on a
CPU in hardirq-context. This is okay for !RT. For RT environment we move
diff --git a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
index c993e64..af13f96 100644
--- a/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
+++ b/debian/patches/features/all/rt/timer-make-the-base-lock-raw.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Wed, 13 Jul 2016 18:22:23 +0200
Subject: [PATCH] timer: make the base lock raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The part where the base lock is held got more predictable / shorter after the
timer rework. One reason is the lack of re-cascading.
diff --git a/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
index 51207f3..b886b1d 100644
--- a/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+++ b/debian/patches/features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
@@ -1,7 +1,7 @@
From: Haris Okanovic <haris.okanovic at ni.com>
Date: Fri, 3 Feb 2017 17:26:44 +0100
Subject: [PATCH] timers: Don't wake ktimersoftd on every tick
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We recently upgraded from 4.1 to 4.6 and noticed a minor latency
regression caused by an additional thread wakeup (ktimersoftd) in
diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
index 21a0c7b..e8f233a 100644
--- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
+++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: timers: Prepare for full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When softirqs can be preempted we need to make sure that cancelling
the timer from the active thread can not deadlock vs. a running timer
diff --git a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
index 1a3f987..867d681 100644
--- a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
+++ b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
@@ -2,7 +2,7 @@ From: Carsten Emde <C.Emde at osadl.org>
Date: Tue, 5 Jan 2016 10:21:59 +0100
Subject: trace/latency-hist: Consider new argument when probing the
sched_switch tracer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The sched_switch tracer has got a new argument. Fix the latency tracer
accordingly.
diff --git a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
index 02709e8..3ce95fc 100644
--- a/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ b/debian/patches/features/all/rt/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
@@ -1,7 +1,7 @@
Subject: trace: Use rcuidle version for preemptoff_hist trace point
From: Yang Shi <yang.shi at windriver.com>
Date: Tue, 23 Feb 2016 13:23:23 -0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When running -rt kernel with both PREEMPT_OFF_HIST and LOCKDEP enabled,
the below error is reported:
diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 9f925e9..e842e15 100644
--- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt at goodmis.org>
Date: Thu, 29 Sep 2011 12:24:30 -0500
Subject: tracing: Account for preempt off in preempt_schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
The preempt_schedule() uses the preempt_disable_notrace() version
because it can cause infinite recursion by the function tracer as
diff --git a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 560143e..3a702d0 100644
--- a/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/debian/patches/features/all/rt/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 11 Apr 2016 16:55:02 +0200
Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
An oops with irqs off (panic() from irqsafe hrtimer like the watchdog
timer) will lead to a lockdep warning on each invocation and as such
diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 4b0ce4c..ccea3f7 100644
--- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
Subject: net: Remove preemption disabling in netif_rx()
From: Priyanka Jain <Priyanka.Jain at freescale.com>
Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
1) enqueue_to_backlog() (called from netif_rx) should be
bound to a particular CPU. This can be achieved by
diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
index 2b5e118..48ecb34 100644
--- a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
+++ b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 8 Nov 2013 17:34:54 +0100
Subject: usb: Use _nort in giveback function
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
context") I see
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
-@@ -1761,9 +1761,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1764,9 +1764,9 @@ static void __usb_hcd_giveback_urb(struc
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
index 9faad4a..18a471c 100644
--- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch
+++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Tue, 21 Jul 2009 23:06:05 +0200
Subject: core: Do not disable interrupts on RT in kernel/users.c
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use the local_irq_*_nort variants to reduce latencies in RT. The code
is serialized by the locks. No need to disable interrupts.
diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
index 1ffd9ba..64d2354 100644
--- a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
+++ b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 28 Oct 2013 12:19:57 +0100
Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
| CC init/main.o
|In file included from include/linux/mmzone.h:9:0,
diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
index 52a43e3..6883546 100644
--- a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 01 Jul 2013 11:02:42 +0200
Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
An Intel i7 system regularly detected rcu_preempt stalls after the kernel
was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no
diff --git a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
index 03f6319..18136bd 100644
--- a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
+++ b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
@@ -1,7 +1,7 @@
From: Daniel Wagner <daniel.wagner at bmw-carit.de>
Date: Fri, 11 Jul 2014 15:26:11 +0200
Subject: work-simple: Simple work queue implemenation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Provides a framework for enqueuing callbacks from irq context
PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
index f0d215e..383a9f2 100644
--- a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
+++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
@@ -22,7 +22,7 @@ Cc: Jens Axboe <axboe at kernel.dk>
Cc: Linus Torvalds <torvalds at linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
---
kernel/sched/core.c | 81 ++++++++------------------------------------
diff --git a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
index b733f99..acc0878 100644
--- a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
+++ b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
@@ -1,7 +1,7 @@
Subject: workqueue: Prevent deadlock/stall on RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Austin reported an XFS deadlock/stall on RT where scheduled work never
gets executed and tasks are waiting for each other forever.
diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch
index 42e748e..e4410f9 100644
--- a/debian/patches/features/all/rt/workqueue-use-locallock.patch
+++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch
@@ -1,7 +1,7 @@
Subject: workqueue: Use local irq lock instead of irq disable regions
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 17 Jul 2011 21:42:26 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Use a local_irq_lock as a replacement for irq off regions. We keep the
semantic of irq-off in regard to the pool->lock and remain preemptible.
diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch
index fadfb72..eab4783 100644
--- a/debian/patches/features/all/rt/workqueue-use-rcu.patch
+++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch
@@ -1,7 +1,7 @@
Subject: workqueue: Use normal rcu
From: Thomas Gleixner <tglx at linutronix.de>
Date: Wed, 24 Jul 2013 15:26:54 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
There is no need for sched_rcu. The undocumented reason why sched_rcu
is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
diff --git a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
index 32254ba..5563990 100644
--- a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
+++ b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti at gmail.com>
Date: Sun, 2 Nov 2014 08:31:37 +0100
Subject: x86: UV: raw_spinlock conversion
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Shrug. Lots of hobbyists have a beast in their basement, right?
diff --git a/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch b/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
index 929568d..29012dd 100644
--- a/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
+++ b/debian/patches/features/all/rt/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Fri, 21 Oct 2016 10:29:11 +0200
Subject: [PATCH] x86/apic: get rid of "warning: 'acpi_ioapic_lock' defined but
not used"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
kbuild test robot reported this against the -RT tree:
diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
index 2404f55..aae9a68 100644
--- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz at infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Restrict the preempt disabled regions to the actual floating point
operations and enable preemption for the administrative actions.
diff --git a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
index a34253f..7718665 100644
--- a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Mon, 11 Mar 2013 17:09:55 +0100
Subject: x86/highmem: Add a "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
This is a copy from kmap_atomic_prot().
diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
index 736d8e3..e751d4a 100644
--- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
+++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo at elte.hu>
Date: Fri, 3 Jul 2009 08:29:27 -0500
Subject: x86/ioapic: Do not unmask io_apic when interrupt is in progress
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
With threaded interrupts we might see an interrupt in progress on
migration. Do not unmask it when this is the case.
diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
index 6ae2705..79390c1 100644
--- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
Subject: x86: kvm Require const tsc for RT
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Non constant TSC is a nightmare on bare metal already, but with
virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5933,6 +5933,13 @@ int kvm_arch_init(void *opaque)
+@@ -5958,6 +5958,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
index 0a1fc99..5a16bb9 100644
--- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
+++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Mon, 13 Dec 2010 16:33:39 +0100
Subject: x86: Convert mce timer to hrtimer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
mce_timer is started in atomic contexts of cpu bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
@@ -35,7 +35,7 @@ fold in:
#include <linux/jump_label.h>
#include <asm/processor.h>
-@@ -1317,7 +1318,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1307,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 sta
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -44,7 +44,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1326,32 +1327,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1316,32 +1317,18 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -83,7 +83,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1374,7 +1361,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1364,7 +1351,7 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -92,7 +92,7 @@ fold in:
}
/*
-@@ -1382,7 +1369,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1372,7 +1359,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -101,7 +101,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1397,7 +1384,7 @@ static void mce_timer_delete_all(void)
+@@ -1387,7 +1374,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -110,7 +110,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1732,7 +1719,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1722,7 +1709,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -119,7 +119,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1741,16 +1728,17 @@ static void mce_start_timer(unsigned int
+@@ -1731,16 +1718,17 @@ static void mce_start_timer(unsigned int
per_cpu(mce_next_interval, cpu) = iv;
@@ -141,7 +141,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2475,6 +2463,8 @@ static void mce_disable_cpu(void *h)
+@@ -2465,6 +2453,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -150,7 +150,7 @@ fold in:
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2497,6 +2487,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2487,6 +2477,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(msr_ops.ctl(i), b->ctl);
}
@@ -158,7 +158,7 @@ fold in:
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2504,7 +2495,6 @@ static int
+@@ -2494,7 +2485,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -166,7 +166,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2524,11 +2514,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2514,11 +2504,9 @@ mce_cpu_callback(struct notifier_block *
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
diff --git a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
index fdee753..f3d0cba 100644
--- a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -1,7 +1,7 @@
Subject: x86/mce: use swait queue for mce wakeups
From: Steven Rostedt <rostedt at goodmis.org>
Date: Fri, 27 Feb 2015 15:20:37 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
We had a customer report a lockup on a 3.0-rt kernel that had the
following backtrace:
@@ -69,7 +69,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
#include <linux/jump_label.h>
#include <asm/processor.h>
-@@ -1394,6 +1395,56 @@ static void mce_do_trigger(struct work_s
+@@ -1384,6 +1385,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -126,7 +126,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1401,19 +1452,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1391,19 +1442,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -147,7 +147,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner at bmw-carit.de>
return 1;
}
return 0;
-@@ -2555,6 +2595,10 @@ static __init int mcheck_init_device(voi
+@@ -2545,6 +2585,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}
diff --git a/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch b/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
index e025935..6dc764e 100644
--- a/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
+++ b/debian/patches/features/all/rt/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness at linutronix.de>
Date: Mon, 30 Jan 2017 09:41:21 +0100
Subject: [PATCH] x86/mm/cpa: avoid wbinvd() for PREEMPT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Although wbinvd() is faster than flushing many individual pages, it
blocks the memory bus for "long" periods of time (>100us), thus
diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch
index ab1339b..96e5aa5 100644
--- a/debian/patches/features/all/rt/x86-preempt-lazy.patch
+++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
Subject: x86: Support for lazy preemption
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Implement the x86 pieces for lazy preempt.
diff --git a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
index a60e3f6..8643aa7 100644
--- a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi at linaro.org>
Date: Thu, 10 Dec 2015 10:58:51 -0800
Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
When running some ptrace single step tests on x86-32 machine, the below problem
is triggered:
diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
index f174fb3..ef5c799 100644
--- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Thu, 16 Dec 2010 14:25:18 +0100
Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
CPU bringup calls into the random pool to initialize the stack
canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
index a039203..b753c58 100644
--- a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sun, 26 Jul 2009 02:21:32 +0200
Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.20-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt20.tar.xz
Simplifies the separation of anon_rw_semaphores and rw_semaphores for
-rt.
diff --git a/debian/patches/series-rt b/debian/patches/series-rt
index 2c4ee85..880a5f8 100644
--- a/debian/patches/series-rt
+++ b/debian/patches/series-rt
@@ -31,6 +31,20 @@ features/all/rt/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
features/all/rt/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
features/all/rt/0012-futex-Futex_unlock_pi-determinism.patch
features/all/rt/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
+features/all/rt/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
+features/all/rt/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
+features/all/rt/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
+features/all/rt/0004-rtmutex-Clean-up.patch
+features/all/rt/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
+features/all/rt/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
+features/all/rt/0007-rtmutex-Fix-PI-chain-order-integrity.patch
+features/all/rt/0008-rtmutex-Fix-more-prio-comparisons.patch
+features/all/rt/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
+features/all/rt/0001-futex-Avoid-freeing-an-active-timer.patch
+features/all/rt/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
+features/all/rt/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
+features/all/rt/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
+features/all/rt/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
# Those two should vanish soon (not use PIT during bootup)
features/all/rt/at91_dont_enable_disable_clock.patch
@@ -327,6 +341,7 @@ features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.
features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
features/all/rt/softirq-wake-the-timer-softirq-if-needed.patch
features/all/rt/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+features/all/rt/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
# compile fix due to rtmutex locks
@@ -352,6 +367,7 @@ features/all/rt/rt-drop_mutex_disable_on_not_debug.patch
features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
features/all/rt/rtmutex-Provide-rt_mutex_lock_state.patch
features/all/rt/rtmutex-Provide-locked-slowpath.patch
+features/all/rt/futex-rtmutex-Cure-RT-double-blocking-issue.patch
features/all/rt/rwsem-rt-Lift-single-reader-restriction.patch
features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -518,6 +534,7 @@ features/all/rt/cpumask-disable-offstack-on-rt.patch
# RANDOM
features/all/rt/random-make-it-work-on-rt.patch
+features/all/rt/random-avoid-preempt_disable-ed-section.patch
# HOTPLUG
features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git