[kernel] r22019 - in dists/squeeze-backports/linux: . debian debian/config debian/config/s390 debian/config/s390x debian/installer debian/patches debian/patches/bugfix/all debian/patches/bugfix/alpha debian/patches/bugfix/mips debian/patches/bugfix/s390 debian/patches/bugfix/x86 debian/patches/debian debian/patches/features/all/drm debian/patches/features/all/igb debian/patches/features/all/rt debian/patches/features/arm debian/patches/features/x86/hyperv
Ben Hutchings
benh@moszumanska.debian.org
Sun Nov 2 01:27:57 UTC 2014
Author: benh
Date: Sun Nov 2 01:27:57 2014
New Revision: 22019
Log:
Merge changes from wheezy-security up to 3.2.63-2+deb7u1
Added:
dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch
dists/squeeze-backports/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
dists/squeeze-backports/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch
dists/squeeze-backports/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch
dists/squeeze-backports/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch
dists/squeeze-backports/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch
dists/squeeze-backports/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch
dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch
dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch
dists/squeeze-backports/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch
dists/squeeze-backports/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch
- copied unchanged from r22011, dists/wheezy-security/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch
Deleted:
dists/squeeze-backports/linux/debian/patches/bugfix/all/0001-ptrace-x86-force-IRET-path-after-a-ptrace_stop.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ALSA-control-Don-t-access-controls-outside-of-protec.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ALSA-control-Fix-replacing-user-controls.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ALSA-control-Handle-numid-overflow.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ALSA-control-Make-sure-that-id-index-does-not-overfl.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/ALSA-control-Protect-user-controls-against-concurren.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/auditsc-audit_krule-mask-accesses-need-bounds-checki.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/net-l2tp-don-t-fall-back-on-UDP-get-set-sockopt.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/revert-net-ip-ipv6-handle-gso-skbs-in-forwarding-pat.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/revert-net-ipv4-ip_forward-fix-inverted-local_df-tes.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/sctp-fix-sk_ack_backlog-wrap-around-problem.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/skbuff-add-an-api-to-orphan-frags.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/skbuff-export-skb_copy_ubufs.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/skbuff-skb_segment-orphan-frags-before-copying.patch
dists/squeeze-backports/linux/debian/patches/bugfix/all/target-explicitly-clear-ramdisk_mcp-backend-pages.patch
dists/squeeze-backports/linux/debian/patches/bugfix/alpha/alpha-add-io-read-write-16-32-be-functions.patch
dists/squeeze-backports/linux/debian/patches/bugfix/mips/MIPS-Cleanup-flags-in-syscall-flags-handlers.patch
dists/squeeze-backports/linux/debian/patches/bugfix/mips/MIPS-asm-thread_info-Add-_TIF_SECCOMP-flag.patch
dists/squeeze-backports/linux/debian/patches/bugfix/s390/s390-ptrace-fix-PSW-mask-check.patch
dists/squeeze-backports/linux/debian/patches/bugfix/x86/x86_32-entry-Do-syscall-exit-work-on-badsys-CVE-2014.patch
Modified:
dists/squeeze-backports/linux/ (props changed)
dists/squeeze-backports/linux/debian/changelog
dists/squeeze-backports/linux/debian/config/defines
dists/squeeze-backports/linux/debian/config/s390/defines
dists/squeeze-backports/linux/debian/config/s390x/defines
dists/squeeze-backports/linux/debian/installer/package-list
dists/squeeze-backports/linux/debian/patches/debian/inetpeer-avoid-abi-change-in-3.2.52.patch
dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch
dists/squeeze-backports/linux/debian/patches/features/all/igb/0008-igb-add-basic-runtime-PM-support.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch
dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch
dists/squeeze-backports/linux/debian/patches/features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0003-Staging-hv-storvsc-Use-mempools-to-allocate-struct-s.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0025-Staging-hv-storvsc-Implement-per-device-memory-pools.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0050-Staging-hv-storvsc-Cleanup-storvsc_queuecommand.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch
dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch
dists/squeeze-backports/linux/debian/patches/series
dists/squeeze-backports/linux/debian/patches/series-rt
Modified: dists/squeeze-backports/linux/debian/changelog
==============================================================================
--- dists/squeeze-backports/linux/debian/changelog Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/changelog Sun Nov 2 01:27:57 2014 (r22019)
@@ -1,4 +1,4 @@
-linux (3.2.60-1+deb7u3~bpo60+1) squeeze-backports; urgency=medium
+linux (3.2.63-2+deb7u1~bpo60+1) squeeze-backports; urgency=medium
* Rebuild for squeeze:
- Use gcc-4.4 for all architectures
@@ -11,7 +11,245 @@
- Make build target depend on build-arch only, so we don't redundantly
build documentation on each architecture
- -- Ben Hutchings <ben at decadent.org.uk> Thu, 31 Jul 2014 04:07:00 +0100
+ -- Ben Hutchings <ben at decadent.org.uk> Sun, 02 Nov 2014 01:08:06 +0000
+
+linux (3.2.63-2+deb7u1) wheezy-security; urgency=high
+
+ * drivers/net,ipv6: Fix virtio/IPv6 regression in 3.2.63:
+ - ipv6: reuse ip6_frag_id from ip6_ufo_append_data (Closes: #766195)
+ (CVE-2014-7207)
+ - drivers/net: Disable UFO through virtio
+ - drivers/net,ipv6: Select IPv6 fragment idents for virtio UFO packets
+ * [x86] KVM: Check non-canonical addresses upon WRMSR (CVE-2014-3610)
+ * [x86] KVM: Improve thread safety in pit (CVE-2014-3611)
+ * [x86] KVM: nEPT: Nested INVEPT (CVE-2014-3645)
+ * [x86] kvm: vmx: handle invvpid vm exit gracefully (CVE-2014-3646)
+ * [x86] KVM: emulator: Use opcode::execute for CALL
+ * [x86] KVM: Fix wrong masking on relative jump/call
+ * [x86] KVM: Emulator fixes for eip canonical checks on near branches
+ (CVE-2014-3647)
+ * [x86] KVM: use new CS.RPL as CPL during task switch
+ * [x86] KVM: Handle errors when RIP is set during far jumps (CVE-2014-3647)
+ * net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
+ (CVE-2014-3673)
+ * net: sctp: fix panic on duplicate ASCONF chunks (CVE-2014-3687)
+ * net: sctp: fix remote memory pressure from excessive queueing
+ (CVE-2014-3688)
+ * [x86] kvm,vmx: Preserve CR4 across VM entry (CVE-2014-3690)
+
+ -- Ben Hutchings <ben at decadent.org.uk> Wed, 29 Oct 2014 23:35:20 +0000
+
+linux (3.2.63-2) wheezy; urgency=medium
+
+ * [s390*] Ignore ABI change in lowcore structure (fixes FTBFS)
+
+ -- Ben Hutchings <ben at decadent.org.uk> Mon, 29 Sep 2014 22:35:33 +0100
+
+linux (3.2.63-1) wheezy; urgency=medium
+
+ * New upstream stable update:
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.61
+ - mm: highmem: don't treat PKMAP_ADDR(LAST_PKMAP) as a highmem address
+ - UBIFS: fix an mmap and fsync race condition
+ - HID: core: fix validation of report id 0
+ - IB/srp: Fix a sporadic crash triggered by cable pulling
+ - reiserfs: drop vmtruncate
+ - reiserfs: call truncate_setsize under tailpack mutex
+ - [arm] 8051/1: put_user: fix possible data corruption in put_user
+ - ext4: fix zeroing of page during writeback
+ - ext4: fix wrong assert in ext4_mb_normalize_request()
+ - USB: sierra: fix remote wakeup
+ - USB: option: fix runtime PM handling
+ - USB: usb_wwan: fix race between write and resume
+ - USB: usb_wwan: fix write and suspend race
+ - USB: usb_wwan: fix urb leak at shutdown
+ - USB: cdc-acm: Fix various bugs in power management
+ - USB: io_ti: fix firmware download on big-endian machines (part 2)
+ - md: always set MD_RECOVERY_INTR when aborting a reshape or other
+ "resync".
+ - [s390] lowcore: reserve 96 bytes for IRB in lowcore
+ - rtmutex: Fix deadlock detector for real
+ - xhci: delete endpoints from bandwidth list before freeing whole device
+ - IB/umad: Fix error handling
+ - RDMA/cxgb4: Fix four byte info leak in c4iw_create_cq()
+ - nfsd: getattr for FATTR4_WORD0_FILES_AVAIL needs the statfs buffer
+ - UBIFS: Remove incorrect assertion in shrink_tnc()
+ - nfsd4: use recall_lock for delegation hashing
+ - iscsi-target: Reject mutual authentication with reflected CHAP_C
+ - ptrace: fix fork event messages across pid namespaces
+ - idr: fix overflow bug during maximum ID calculation at maximum height
+ - Input: synaptics - fix resolution for manually provided min/max
+ (regression in 3.2.57)
+ - nfsd4: fix FREE_STATEID lockowner leak (regression in 3.2.60)
+ - Btrfs: fix double free in find_lock_delalloc_range
+ - mm: rmap: fix use-after-free in __put_anon_vma
+ - rtmutex: Handle deadlock detection smarter
+ - rtmutex: Detect changes in the pi lock chain
+ - rtmutex: Plug slow unlock race
+ - Bluetooth: Fix check for connection encryption
+ - Bluetooth: Fix SSP acceptor just-works confirmation without MITM
+ - tracing: Fix syscall_*regfunc() vs copy_process() race
+ - lib/lzo: Update LZO compression to current upstream version
+ - lzo: properly check for overruns (CVE-2014-4608)
+ - hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned
+ entry
+ - mm: fix crashes from mbind() merging vmas
+ - [mips] MSC: Prevent out-of-bounds writes to MIPS SC ioremap'd region
+ - SCSI: Stop accepting SCSI requests before removing a device
+ - SCSI: fix our current target reap infrastructure
+ - SCSI: dual scan thread bug fix
+ - perf: Fix race in removing an event
+ - netlink: rate-limit leftover bytes warning and print process name
+ - net: tunnels - enable module autoloading
+ - net: fix inet_getid() and ipv6_select_ident() bugs
+ - target: Explicitly clear ramdisk_mcp backend pages
+ - iommu/vt-d: Fix missing IOTLB flush in intel_iommu_unmap()
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.62
+ - ibmvscsi: Add memory barriers for send / receive
+ - cpuset,mempolicy: fix sleeping function called from invalid context
+ - nfsd: fix rare symlink decoding bug
+ - md: flush writes before starting a recovery.
+ - drm/vmwgfx: Fix incorrect write to read-only register v2:
+ (regression in 3.2.58)
+ - ACPI / EC: Avoid race condition related to advance_transaction()
+ - ACPI / EC: Fix race condition in ec_transaction_completed()
+ - hwmon: (adm1031) Fix writes to limit registers
+ - alarmtimer: Fix bug where relative alarm timers were treated as absolute
+ - dm io: fix a race condition in the wake up code for sync_io
+ - sched: Fix possible divide by zero in avg_atom() calculation
+ - locking/mutex: Disable optimistic spinning on some architectures
+ - hwmon: (adt7470) Fix writes to temperature limit registers
+ - usb: Check if port status is equal to RxDetect (regression in 3.2.38)
+ - tcp: fix tcp_match_skb_to_sack() for unaligned SACK at end of an skb
+ - igmp: fix the problem when mc leave group
+ - appletalk: Fix socket referencing in skb
+ - net: sctp: fix information leaks in ulpevent layer
+ - dns_resolver: assure that dns_query() result is null-terminated
+ - dns_resolver: Null-terminate the right string
+ - rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
+ (regression in 3.2.45)
+ - netfilter: ipt_ULOG: fix info leaks
+ - xfs: fix allocbt cursor leak in xfs_alloc_ag_vextent_near
+ - xfs: really fix the cursor leak in xfs_alloc_ag_vextent_near
+ - shmem: fix faulting into a hole, not taking i_mutex (CVE-2014-4171)
+ - shmem: fix splicing from a hole while it's punched (CVE-2014-4171)
+ - [x86] x86-32, espfix: Remove filter for espfix32 due to race
+ - sym53c8xx_2: Set DID_REQUEUE return code when aborting squeue
+ - mm: hugetlb: fix copy_hugetlb_page_range() (regression in 3.2.61)
+ - [arm*] 7668/1: fix memset-related crashes caused by recent GCC (4.7.2)
+ optimizations
+ - [arm*] 7670/1: fix the memset fix
+ - ceph: fix overflow check in build_snap_context()
+ - libata: support the ata host which implements a queue depth less than 32
+ (regression in 3.2.59)
+ - libata: introduce ata_host->n_tags to avoid oops on SAS controllers
+ - [x86] x86_32, entry: Store badsys error code in %eax
+ (regression in 3.2.60-1)
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.63
+ - [x86] KVM: Inter-privilege level ret emulation is not implemented
+ - block: don't assume last put of shared tags is for the host
+ - debugfs: Fix corrupted loop in debugfs_remove_recursive
+ - mtd/ftl: fix the double free of the buffers allocated in build_maps()
+ - [x86] don't exclude low BIOS area when allocating address space for
+ non-PCI cards (regression in 2.6.37)
+ - scsi: handle flush errors properly
+ - hwmon: (smsc47m192) Fix temperature limit and vrm write operations
+ - staging: vt6655: Fix Warning on boot handle_irq_event_percpu.
+ - [mips,powerpc] bfa: Fix undefined bit shift on big-endian architectures
+ with 32-bit DMA address
+ - Drivers: scsi: storvsc: Implement an eh_timed_out handler
+ - iommu/vt-d: Exclude devices using RMRRs from IOMMU API domains
+ - net: sendmsg: fix NULL pointer dereference
+ - hwmon: (ads1015) Fix off-by-one for valid channel index checking
+ - [mips*] tlbex: Fix a missing statement for HUGETLB
+ - [mips*] Prevent user from setting FCSR cause bits
+ - md/raid1,raid10: always abort recover on write error.
+ - ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa
+ struct
+ - hwmon: (lm85) Fix various errors on attribute writes
+ - hwmon: (amc6821) Fix possible race condition bug
+ - crypto: af_alg - properly label AF_ALG socket
+ - mnt: Change the default remount atime from relatime to the existing value
+ - netlabel: fix a problem when setting bits below the previously lowest bit
+ - ALSA: virtuoso: Xonar DSX support (Closes: #721346)
+ - hwmon: (ads1015) Fix out-of-bounds array access
+ - [s390*] locking: Reenable optimistic spinning
+ - ring-buffer: Always reset iterator to reader page
+ - reiserfs: Fix use after free in journal teardown
+ - [powerpc*] mm: Use read barrier when creating real_pte
+ - Btrfs: fix csum tree corruption, duplicate and outdated checksums
+ - CIFS: Fix wrong directory attributes after rename
+ - md/raid6: avoid data corruption during recovery of double-degraded RAID6
+ - iommu/amd: Fix cleanup_domain for mass device removal
+ - pata_scc: propagate return value of scc_wait_after_reset
+ - kvm: iommu: fix the third parameter of kvm_iommu_put_pages
+ (CVE-2014-3601)
+ - [mips*/octeon] make get_system_type() thread-safe
+ - xhci: rework cycle bit checking for new dequeue pointers
+ (regression in 3.2.59)
+ - isofs: Fix unbounded recursion when processing relocated directories
+ (CVE-2014-5471, CVE-2014-5472)
+ - HID: logitech: perform bounds checking on device_id early enough
+ (CVE-2014-3182)
+ - USB: whiteheat: Added bounds checking for bulk command response
+ (CVE-2014-3183, CVE-2014-3184, CVE-2014-3185)
+ - HID: logitech-dj: prevent false errors to be shown
+ - ACPI / EC: Add support to disallow QR_EC to be issued when SCI_EVT isn't
+ set (regression in 3.2.62)
+ - HID: magicmouse: sanity check report size in raw_event() callback
+ (CVE-2014-3181)
+ - HID: picolcd: sanity check report size in raw_event() callback
+ (CVE-2014-3186)
+ - [armhf] 8128/1: abort: don't clear the exclusive monitors
+ - [armhf] 8129/1: errata: work around Cortex-A15 erratum 830321 using dummy
+ strex
+ - USB: serial: fix potential stack buffer overflow
+ - USB: serial: fix potential heap buffer overflow
+ - [mips*] Fix accessing to per-cpu data when flushing the cache
+ - inetpeer: get rid of ip_id_count
+ - ip: make IP identifiers less predictable
+ - tcp: Fix integer-overflows in TCP veno
+ - tcp: Fix integer-overflow in TCP vegas
+ - net: sctp: inherit auth_capable on INIT collisions (CVE-2014-5077)
+ - iovec: make sure the caller actually wants anything in
+ memcpy_fromiovecend
+ - sctp: fix possible seqlock deadlock in sctp_packet_transmit()
+ - [sparc] Fix argument sign extension for compat_sys_futex().
+ - [sparc] Handle 32-bit tasks properly in compute_effective_address().
+ - [sparc] Fix top-level fault handling bugs.
+ - [sparc] Don't bark so loudly about 32-bit tasks generating 64-bit fault
+ addresses.
+ - [sparc] Fix huge TSB mapping on pre-UltraSPARC-III cpus.
+ - [sparc] Add membar to Niagara2 memcpy code.
+ - [sparc] Do not insert non-valid PTEs into the TSB hash table.
+ - [sparc] arch/sparc/math-emu/math_32.c: drop stray break operator
+ - [amd64] Revert "x86-64, modify_ldt: Make support for 16-bit segments a
+ runtime option"
+ - [amd64] x86-64, espfix: Don't leak bits 31:16 of %esp returning to 16-bit
+ stack
+ - [amd64] x86_64/entry/xen: Do not invoke espfix64 on Xen
+ - [amd64] x86/espfix/xen: Fix allocation of pages for paravirt page tables
+
+ [ Ben Hutchings ]
+ * drm, agp: Update to 3.4.103:
+ - drm/radeon: only apply hdmi bpc pll flags when encoder mode is hdmi
+ - drm/radeon: fix typo in radeon_connector_is_dp12_capable()
+ - drm/radeon/atom: fix dithering on certain panels
+ - drm/vmwgfx: Fix incorrect write to read-only register v2:
+ - drm/radeon: stop poisoning the GART TLB
+ * nfsd: Fix ACL null pointer deref (thanks to Sergio Gelato)
+ (Closes: #754420)
+ * ext4: fix BUG_ON in mb_free_blocks() (regression in 3.2.63)
+ * udf: Avoid infinite loop when processing indirect ICBs (CVE-2014-6410)
+ * libceph: do not hard code max auth ticket len (CVE-2014-6416,
+ CVE-2014-6417, CVE-2014-6418)
+ * sp5100_tco: Reject SB8x0 chips (Closes: #726150)
+ * udeb: Add pata_rdc to pata-modules (Closes: #633128)
+
+ [ Cyril Brulebois ]
+ * udeb: Add virtio_scsi to virtio-modules (Closes: #756249).
+
+ -- Ben Hutchings <ben at decadent.org.uk> Sat, 27 Sep 2014 13:36:53 +0100
linux (3.2.60-1+deb7u3) wheezy-security; urgency=medium
Modified: dists/squeeze-backports/linux/debian/config/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/defines Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/config/defines Sun Nov 2 01:27:57 2014 (r22019)
@@ -52,6 +52,7 @@
module:drivers/usb/serial/*
module:arch/s390/kvm/kvm
module:drivers/scsi/esp_scsi
+ module:drivers/char/tpm/tpm
# Only exported for tidspbridge, disabled for security reasons in 3.2.54
omap_dsp_get_mempool_base
# Should only be used by ioatdma
Modified: dists/squeeze-backports/linux/debian/config/s390/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/s390/defines Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/config/s390/defines Sun Nov 2 01:27:57 2014 (r22019)
@@ -2,6 +2,8 @@
ignore-changes:
# Seems to be exported by mistake; no other architecture does
arch_pick_mmap_layout
+# Not used in any in-tree module, nor obviously used OOT either
+ lowcore_ptr
[base]
flavours:
Modified: dists/squeeze-backports/linux/debian/config/s390x/defines
==============================================================================
--- dists/squeeze-backports/linux/debian/config/s390x/defines Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/config/s390x/defines Sun Nov 2 01:27:57 2014 (r22019)
@@ -2,6 +2,8 @@
ignore-changes:
# Seems to be exported by mistake; no other architecture does
arch_pick_mmap_layout
+# Not used in any in-tree module, nor obviously used OOT either
+ lowcore_ptr
[base]
flavours:
Modified: dists/squeeze-backports/linux/debian/installer/package-list
==============================================================================
--- dists/squeeze-backports/linux/debian/installer/package-list Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/installer/package-list Sun Nov 2 01:27:57 2014 (r22019)
@@ -414,7 +414,7 @@
This package contains RTC modules.
Package: virtio-modules
-Depends: kernel-image
+Depends: kernel-image, scsi-core-modules
Priority: extra
Description: virtio modules
This package contains virtio modules.
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch)
@@ -0,0 +1,176 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 23 Oct 2014 00:15:30 +0100
+Subject: drivers/net: Disable UFO through virtio
+Forwarded: http://patchwork.ozlabs.org/patch/403358/
+
+IPv6 does not allow fragmentation by routers, so there is no
+fragmentation ID in the fixed header. UFO for IPv6 requires the ID to
+be passed separately, but there is no provision for this in the virtio
+net protocol.
+
+Until recently our software implementation of UFO/IPv6 generated a new
+ID, but this was a bug. Now we will use ID=0 for any UFO/IPv6 packet
+passed through a tap, which is even worse.
+
+Unfortunately there is no distinction between UFO/IPv4 and v6
+features, so disable UFO on taps and virtio_net completely until we
+have a proper solution.
+
+We cannot depend on VM managers respecting the tap feature flags, so
+keep accepting UFO packets but log a warning the first time we do
+this.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Fixes: 916e4cf46d02 ("ipv6: reuse ip6_frag_id from ip6_ufo_append_data")
+---
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -576,6 +576,8 @@ static int macvtap_skb_from_vnet_hdr(str
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
++ current->comm);
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+@@ -621,8 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (sinfo->gso_type & SKB_GSO_UDP)
+- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -960,7 +960,7 @@ static long macvtap_ioctl(struct file *f
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+- TUN_F_TSO_ECN | TUN_F_UFO))
++ TUN_F_TSO_ECN))
+ return -EINVAL;
+
+ /* TODO: only accept frames with the features that
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -125,7 +125,7 @@ struct tun_struct {
+ struct net_device *dev;
+ u32 set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+- NETIF_F_TSO6|NETIF_F_UFO)
++ NETIF_F_TSO6)
+ struct fasync_struct *fasync;
+
+ struct tap_filter txflt;
+@@ -705,8 +705,17 @@ static ssize_t tun_get_user(struct tun_s
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ {
++ static bool warned;
++ if (!warned) {
++ warned = true;
++ netdev_warn(tun->dev,
++ "%s: using disabled UFO feature; please fix this program\n",
++ current->comm);
++ }
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ break;
++ }
+ default:
+ tun->dev->stats.rx_frame_errors++;
+ kfree_skb(skb);
+@@ -792,8 +801,6 @@ static ssize_t tun_put_user(struct tun_s
+ gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (sinfo->gso_type & SKB_GSO_UDP)
+- gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else {
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1217,11 +1224,6 @@ static int set_offload(struct tun_struct
+ features |= NETIF_F_TSO6;
+ arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+ }
+-
+- if (arg & TUN_F_UFO) {
+- features |= NETIF_F_UFO;
+- arg &= ~TUN_F_UFO;
+- }
+ }
+
+ /* This gives the user a way to test for new features in future by
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -315,8 +315,16 @@ static void receive_buf(struct net_devic
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ {
++ static bool warned;
++ if (!warned) {
++ warned = true;
++ netdev_warn(dev,
++ "host using disabled UFO feature; please fix it\n");
++ }
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ break;
++ }
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+@@ -581,8 +589,6 @@ static int xmit_skb(struct virtnet_info
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -986,7 +992,7 @@ static int virtnet_probe(struct virtio_d
+ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+- dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
++ dev->hw_features |= NETIF_F_TSO
+ | NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ }
+ /* Individual feature bits: what can host handle? */
+@@ -996,11 +1002,9 @@ static int virtnet_probe(struct virtio_d
+ dev->hw_features |= NETIF_F_TSO6;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->hw_features |= NETIF_F_TSO_ECN;
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+- dev->hw_features |= NETIF_F_UFO;
+
+ if (gso)
+- dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
++ dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ /* (!csum && gso) case will be fixed by register_netdev() */
+ }
+
+@@ -1029,8 +1033,7 @@ static int virtnet_probe(struct virtio_d
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ vi->big_packets = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1147,9 +1150,9 @@ static struct virtio_device_id id_table[
+ static unsigned int features[] = {
+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+ VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
++ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
++ VIRTIO_NET_F_GUEST_ECN,
+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+ };
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch)
@@ -0,0 +1,121 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sun, 26 Oct 2014 22:59:29 +0000
+Subject: drivers/net,ipv6: Select IPv6 fragment idents for virtio UFO packets
+Forwarded: http://patchwork.ozlabs.org/patch/403359/
+
+UFO is now disabled on all drivers that work with virtio net headers,
+but userland may try to send UFO/IPv6 packets anyway. Instead of
+sending with ID=0, we should select identifiers on their behalf (as we
+used to).
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Fixes: 916e4cf46d02 ("ipv6: reuse ip6_frag_id from ip6_ufo_append_data")
+---
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -15,6 +15,7 @@
+ #include <linux/cdev.h>
+ #include <linux/fs.h>
+
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -579,6 +580,8 @@ static int macvtap_skb_from_vnet_hdr(str
+ pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+ current->comm);
+ gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ return -EINVAL;
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -64,6 +64,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -695,6 +696,8 @@ static ssize_t tun_get_user(struct tun_s
+ break;
+ }
+
++ skb_reset_network_header(skb);
++
+ if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ pr_debug("GSO!\n");
+ switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -714,6 +717,8 @@ static ssize_t tun_get_user(struct tun_s
+ current->comm);
+ }
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ }
+ default:
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -481,6 +481,7 @@ static inline int ipv6_addr_diff(const s
+ }
+
+ extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
++void ipv6_proxy_select_ident(struct sk_buff *skb);
+
+ /*
+ * Prototypes exported by ipv6
+--- /dev/null
++++ b/net/ipv6/output_core.c
+@@ -0,0 +1,38 @@
++#include <linux/export.h>
++#include <linux/skbuff.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++ static u32 ip6_proxy_idents_hashrnd __read_mostly;
++ static bool hashrnd_initialized = false;
++ struct in6_addr buf[2];
++ struct in6_addr *addrs;
++ u32 hash, id;
++
++ addrs = skb_header_pointer(skb,
++ skb_network_offset(skb) +
++ offsetof(struct ipv6hdr, saddr),
++ sizeof(buf), buf);
++ if (!addrs)
++ return;
++
++ if (unlikely(!hashrnd_initialized)) {
++ hashrnd_initialized = true;
++ get_random_bytes(&ip6_proxy_idents_hashrnd,
++ sizeof(ip6_proxy_idents_hashrnd));
++ }
++ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++ hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++ id = ip_idents_reserve(hash, 1);
++ skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+--- a/net/ipv6/Makefile
++++ b/net/ipv6/Makefile
+@@ -37,6 +37,6 @@ obj-$(CONFIG_NETFILTER) += netfilter/
+ obj-$(CONFIG_IPV6_SIT) += sit.o
+ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+
+-obj-y += addrconf_core.o exthdrs_core.o
++obj-y += addrconf_core.o exthdrs_core.o output_core.o
+
+ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
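
For readers without the kernel tree at hand, here is a rough userspace
model of the ipv6_proxy_select_ident() added above: hash the destination
and then the source address with a private random seed, and reserve the
next ID from the matching bucket. The mix() function below is a toy
stand-in for __ipv6_addr_jhash()/ip_idents_reserve(), so treat this as an
illustration of the structure, not the real algorithm:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t seed;          /* models ip6_proxy_idents_hashrnd      */
static uint32_t buckets[256];  /* models the kernel's ident buckets    */

static uint32_t mix(const uint8_t *addr, size_t len, uint32_t h)
{
	/* toy stand-in for jhash; NOT the kernel's hash */
	for (size_t i = 0; i < len; i++)
		h = (h ^ addr[i]) * 0x01000193u;
	return h;
}

static uint32_t select_ident(const uint8_t saddr[16], const uint8_t daddr[16])
{
	uint32_t hash = mix(daddr, 16, seed); /* daddr first, as above */
	hash = mix(saddr, 16, hash);
	return ++buckets[hash % 256];         /* "reserve" the next ID */
}

int main(void)
{
	uint8_t a[16] = { 0x20, 0x01, 1 }, b[16] = { 0x20, 0x01, 2 };

	seed = (uint32_t)time(NULL);          /* models get_random_bytes() */
	printf("id1=%u id2=%u\n", select_ident(a, b), select_ident(a, b));
	return 0;
}
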
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch)
@@ -0,0 +1,58 @@
+From: Theodore Ts'o <tytso at mit.edu>
+Date: Sat, 23 Aug 2014 17:47:28 -0400
+Subject: ext4: fix BUG_ON in mb_free_blocks()
+Origin: https://git.kernel.org/linus/c99d1e6e83b06744c75d9f5e491ed495a7086b7b
+
+If we suffer a block allocation failure (for example due to a memory
+allocation failure), it's possible that we will call
+ext4_discard_allocated_blocks() before we've actually allocated any
+blocks. In that case, fe_len and fe_start in ac->ac_f_ex will still
+be zero, and this will result in mb_free_blocks(inode, e4b, 0, 0)
+triggering the BUG_ON on mb_free_blocks():
+
+ BUG_ON(last >= (sb->s_blocksize << 3));
+
+Fix this by bailing out of ext4_discard_allocated_blocks() if fe_len
+is zero.
+
+Also fix a missing ext4_mb_unload_buddy() call in
+ext4_discard_allocated_blocks().
+
+Google-Bug-Id: 16844242
+
+Fixes: 86f0afd463215fc3e58020493482faa4ac3a4d69
+Signed-off-by: Theodore Ts'o <tytso at mit.edu>
+Cc: stable at vger.kernel.org
+[bwh: Backported to 3.2: adjust context]
+---
+ fs/ext4/mballoc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1312,6 +1312,8 @@ static void mb_free_blocks(struct inode
+ void *buddy2;
+ struct super_block *sb = e4b->bd_sb;
+
++ if (WARN_ON(count == 0))
++ return;
+ BUG_ON(first + count > (sb->s_blocksize << 3));
+ assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ mb_check_buddy(e4b);
+@@ -3132,6 +3134,8 @@ static void ext4_discard_allocated_block
+ int err;
+
+ if (pa == NULL) {
++ if (ac->ac_f_ex.fe_len == 0)
++ return;
+ err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+ if (err) {
+ /*
+@@ -3146,6 +3150,7 @@ static void ext4_discard_allocated_block
+ mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+ ac->ac_f_ex.fe_len);
+ ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++ ext4_mb_unload_buddy(&e4b);
+ return;
+ }
+ if (pa->pa_type == MB_INODE_PA)
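
The arithmetic behind the BUG_ON quoted in the commit message can be seen
in isolation: with first == count == 0, last underflows to -1, and the
comparison against the unsigned block count promotes it to ULONG_MAX. A
minimal demonstration (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long s_blocksize = 4096; /* typical ext4 block size */
	int first = 0, count = 0;         /* the failure case        */
	int last = first + count - 1;     /* -1                      */

	/* int vs. unsigned long: last converts to ULONG_MAX here, so the
	 * check fires even though "nothing" is being freed. Prints 1.
	 */
	printf("BUG_ON condition: %d\n", last >= (s_blocksize << 3));
	return 0;
}
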
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch)
@@ -0,0 +1,34 @@
+From: Hannes Frederic Sowa <hannes at stressinduktion.org>
+Date: Fri, 21 Feb 2014 02:55:35 +0100
+Subject: ipv6: reuse ip6_frag_id from ip6_ufo_append_data
+Origin: https://git.kernel.org/linus/916e4cf46d0204806c062c8c6c4d1f633852c5b6
+
+Currently we generate a new fragmentation id on UFO segmentation. It
+is pretty hairy to identify the correct net namespace and dst there.
+Especially tunnels use IFF_XMIT_DST_RELEASE and thus have no skb_dst
+available at all.
+
+This causes unreliable or very predictable ipv6 fragmentation id
+generation during segmentation.
+
+Luckily we already have pregenerated the ip6_frag_id in
+ip6_ufo_append_data and can use it here.
+
+Signed-off-by: Hannes Frederic Sowa <hannes at stressinduktion.org>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backported to 3.2: adjust filename, indentation]
+---
+ net/ipv6/udp_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1362,7 +1362,7 @@ static struct sk_buff *udp6_ufo_fragment
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
++ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
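
For context, these are the structures involved in the one-line change,
simplified from the kernel headers; the skb_shared_info_slice stand-in for
skb_shinfo() is hypothetical and only illustrates where the pregenerated
ID lives:

#include <stdint.h>

struct frag_hdr {                 /* IPv6 fragment extension header */
	uint8_t  nexthdr;
	uint8_t  reserved;
	uint16_t frag_off;
	uint32_t identification;  /* the field being set            */
};

struct skb_shared_info_slice {    /* stand-in for skb_shinfo(skb)   */
	uint32_t ip6_frag_id;     /* pregenerated in append_data    */
};

static void fill_frag_hdr(struct frag_hdr *fptr,
			  const struct skb_shared_info_slice *shinfo)
{
	/* before: a fresh ID was generated here; after: reuse it */
	fptr->identification = shinfo->ip6_frag_id;
}

int main(void)
{
	struct skb_shared_info_slice shinfo = { .ip6_frag_id = 0xdeadbeef };
	struct frag_hdr fh = { .nexthdr = 17 /* UDP */ };

	fill_frag_hdr(&fh, &shinfo);
	return fh.identification == shinfo.ip6_frag_id ? 0 : 1;
}
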
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-add-process_one_ticket-helper.patch)
@@ -0,0 +1,275 @@
+From: Ilya Dryomov <ilya.dryomov at inktank.com>
+Date: Mon, 8 Sep 2014 17:25:34 +0400
+Subject: libceph: add process_one_ticket() helper
+Origin: https://git.kernel.org/linus/597cda357716a3cf8d994cb11927af917c8d71fa
+
+Add a helper for processing individual cephx auth tickets. Needed for
+the next commit, which deals with allocating ticket buffers. (Most of
+the diff here is whitespace - view with git diff -b).
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Ilya Dryomov <ilya.dryomov at inktank.com>
+Reviewed-by: Sage Weil <sage at redhat.com>
+---
+ net/ceph/auth_x.c | 228 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 124 insertions(+), 104 deletions(-)
+
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index 96238ba..0eb146d 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -129,17 +129,131 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
+ kfree(th);
+ }
+
++static int process_one_ticket(struct ceph_auth_client *ac,
++ struct ceph_crypto_key *secret,
++ void **p, void *end,
++ void *dbuf, void *ticket_buf)
++{
++ struct ceph_x_info *xi = ac->private;
++ int type;
++ u8 tkt_struct_v, blob_struct_v;
++ struct ceph_x_ticket_handler *th;
++ void *dp, *dend;
++ int dlen;
++ char is_enc;
++ struct timespec validity;
++ struct ceph_crypto_key old_key;
++ void *tp, *tpend;
++ struct ceph_timespec new_validity;
++ struct ceph_crypto_key new_session_key;
++ struct ceph_buffer *new_ticket_blob;
++ unsigned long new_expires, new_renew_after;
++ u64 new_secret_id;
++ int ret;
++
++ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
++
++ type = ceph_decode_32(p);
++ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
++
++ tkt_struct_v = ceph_decode_8(p);
++ if (tkt_struct_v != 1)
++ goto bad;
++
++ th = get_ticket_handler(ac, type);
++ if (IS_ERR(th)) {
++ ret = PTR_ERR(th);
++ goto out;
++ }
++
++ /* blob for me */
++ dlen = ceph_x_decrypt(secret, p, end, dbuf,
++ TEMP_TICKET_BUF_LEN);
++ if (dlen <= 0) {
++ ret = dlen;
++ goto out;
++ }
++ dout(" decrypted %d bytes\n", dlen);
++ dp = dbuf;
++ dend = dp + dlen;
++
++ tkt_struct_v = ceph_decode_8(&dp);
++ if (tkt_struct_v != 1)
++ goto bad;
++
++ memcpy(&old_key, &th->session_key, sizeof(old_key));
++ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
++ if (ret)
++ goto out;
++
++ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
++ ceph_decode_timespec(&validity, &new_validity);
++ new_expires = get_seconds() + validity.tv_sec;
++ new_renew_after = new_expires - (validity.tv_sec / 4);
++ dout(" expires=%lu renew_after=%lu\n", new_expires,
++ new_renew_after);
++
++ /* ticket blob for service */
++ ceph_decode_8_safe(p, end, is_enc, bad);
++ tp = ticket_buf;
++ if (is_enc) {
++ /* encrypted */
++ dout(" encrypted ticket\n");
++ dlen = ceph_x_decrypt(&old_key, p, end, ticket_buf,
++ TEMP_TICKET_BUF_LEN);
++ if (dlen < 0) {
++ ret = dlen;
++ goto out;
++ }
++ dlen = ceph_decode_32(&tp);
++ } else {
++ /* unencrypted */
++ ceph_decode_32_safe(p, end, dlen, bad);
++ ceph_decode_need(p, end, dlen, bad);
++ ceph_decode_copy(p, ticket_buf, dlen);
++ }
++ tpend = tp + dlen;
++ dout(" ticket blob is %d bytes\n", dlen);
++ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
++ blob_struct_v = ceph_decode_8(&tp);
++ new_secret_id = ceph_decode_64(&tp);
++ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
++ if (ret)
++ goto out;
++
++ /* all is well, update our ticket */
++ ceph_crypto_key_destroy(&th->session_key);
++ if (th->ticket_blob)
++ ceph_buffer_put(th->ticket_blob);
++ th->session_key = new_session_key;
++ th->ticket_blob = new_ticket_blob;
++ th->validity = new_validity;
++ th->secret_id = new_secret_id;
++ th->expires = new_expires;
++ th->renew_after = new_renew_after;
++ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
++ type, ceph_entity_type_name(type), th->secret_id,
++ (int)th->ticket_blob->vec.iov_len);
++ xi->have_keys |= th->service;
++
++out:
++ return ret;
++
++bad:
++ ret = -EINVAL;
++ goto out;
++}
++
+ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+ void *buf, void *end)
+ {
+- struct ceph_x_info *xi = ac->private;
+- int num;
+ void *p = buf;
+- int ret;
+ char *dbuf;
+ char *ticket_buf;
+ u8 reply_struct_v;
++ u32 num;
++ int ret;
+
+ dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+ if (!dbuf)
+@@ -150,112 +264,18 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+ if (!ticket_buf)
+ goto out_dbuf;
+
+- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
+- reply_struct_v = ceph_decode_8(&p);
++ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+ if (reply_struct_v != 1)
+- goto bad;
+- num = ceph_decode_32(&p);
+- dout("%d tickets\n", num);
+- while (num--) {
+- int type;
+- u8 tkt_struct_v, blob_struct_v;
+- struct ceph_x_ticket_handler *th;
+- void *dp, *dend;
+- int dlen;
+- char is_enc;
+- struct timespec validity;
+- struct ceph_crypto_key old_key;
+- void *tp, *tpend;
+- struct ceph_timespec new_validity;
+- struct ceph_crypto_key new_session_key;
+- struct ceph_buffer *new_ticket_blob;
+- unsigned long new_expires, new_renew_after;
+- u64 new_secret_id;
+-
+- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
+-
+- type = ceph_decode_32(&p);
+- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+-
+- tkt_struct_v = ceph_decode_8(&p);
+- if (tkt_struct_v != 1)
+- goto bad;
+-
+- th = get_ticket_handler(ac, type);
+- if (IS_ERR(th)) {
+- ret = PTR_ERR(th);
+- goto out;
+- }
+-
+- /* blob for me */
+- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
+- TEMP_TICKET_BUF_LEN);
+- if (dlen <= 0) {
+- ret = dlen;
+- goto out;
+- }
+- dout(" decrypted %d bytes\n", dlen);
+- dend = dbuf + dlen;
+- dp = dbuf;
+-
+- tkt_struct_v = ceph_decode_8(&dp);
+- if (tkt_struct_v != 1)
+- goto bad;
++ return -EINVAL;
+
+- memcpy(&old_key, &th->session_key, sizeof(old_key));
+- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+- if (ret)
+- goto out;
++ ceph_decode_32_safe(&p, end, num, bad);
++ dout("%d tickets\n", num);
+
+- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+- ceph_decode_timespec(&validity, &new_validity);
+- new_expires = get_seconds() + validity.tv_sec;
+- new_renew_after = new_expires - (validity.tv_sec / 4);
+- dout(" expires=%lu renew_after=%lu\n", new_expires,
+- new_renew_after);
+-
+- /* ticket blob for service */
+- ceph_decode_8_safe(&p, end, is_enc, bad);
+- tp = ticket_buf;
+- if (is_enc) {
+- /* encrypted */
+- dout(" encrypted ticket\n");
+- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
+- TEMP_TICKET_BUF_LEN);
+- if (dlen < 0) {
+- ret = dlen;
+- goto out;
+- }
+- dlen = ceph_decode_32(&tp);
+- } else {
+- /* unencrypted */
+- ceph_decode_32_safe(&p, end, dlen, bad);
+- ceph_decode_need(&p, end, dlen, bad);
+- ceph_decode_copy(&p, ticket_buf, dlen);
+- }
+- tpend = tp + dlen;
+- dout(" ticket blob is %d bytes\n", dlen);
+- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+- blob_struct_v = ceph_decode_8(&tp);
+- new_secret_id = ceph_decode_64(&tp);
+- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
++ while (num--) {
++ ret = process_one_ticket(ac, secret, &p, end,
++ dbuf, ticket_buf);
+ if (ret)
+ goto out;
+-
+- /* all is well, update our ticket */
+- ceph_crypto_key_destroy(&th->session_key);
+- if (th->ticket_blob)
+- ceph_buffer_put(th->ticket_blob);
+- th->session_key = new_session_key;
+- th->ticket_blob = new_ticket_blob;
+- th->validity = new_validity;
+- th->secret_id = new_secret_id;
+- th->expires = new_expires;
+- th->renew_after = new_renew_after;
+- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+- type, ceph_entity_type_name(type), th->secret_id,
+- (int)th->ticket_blob->vec.iov_len);
+- xi->have_keys |= th->service;
+ }
+
+ ret = 0;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch)
@@ -0,0 +1,193 @@
+From: Ilya Dryomov <ilya.dryomov at inktank.com>
+Date: Tue, 9 Sep 2014 19:39:15 +0400
+Subject: libceph: do not hard code max auth ticket len
+Origin: https://git.kernel.org/linus/c27a3e4d667fdcad3db7b104f75659478e0c68d8
+
+We hard code cephx auth ticket buffer size to 256 bytes. This isn't
+enough for any moderate setups and, in case tickets themselves are not
+encrypted, leads to buffer overflows (ceph_x_decrypt() errors out, but
+ceph_decode_copy() doesn't - it's just a memcpy() wrapper). Since the
+buffer is allocated dynamically anyway, allocate it a bit later, at
+the point where we know how much is going to be needed.
+
+Fixes: http://tracker.ceph.com/issues/8979
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Ilya Dryomov <ilya.dryomov at inktank.com>
+Reviewed-by: Sage Weil <sage at redhat.com>
+---
+ net/ceph/auth_x.c | 64 +++++++++++++++++++++++++------------------------------
+ 1 file changed, 29 insertions(+), 35 deletions(-)
+
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -13,8 +13,6 @@
+ #include "auth_x.h"
+ #include "auth_x_protocol.h"
+
+-#define TEMP_TICKET_BUF_LEN 256
+-
+ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
+
+ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
+@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_cr
+ }
+
+ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+- void **p, void *end, void *obuf, size_t olen)
++ void **p, void *end, void **obuf, size_t olen)
+ {
+ struct ceph_x_encrypt_header head;
+ size_t head_len = sizeof(head);
+@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_cr
+ return -EINVAL;
+
+ dout("ceph_x_decrypt len %d\n", len);
+- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
+- *p, len);
++ if (*obuf == NULL) {
++ *obuf = kmalloc(len, GFP_NOFS);
++ if (!*obuf)
++ return -ENOMEM;
++ olen = len;
++ }
++
++ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
+ if (ret)
+ return ret;
+ if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
+@@ -131,18 +135,19 @@ static void remove_ticket_handler(struct
+
+ static int process_one_ticket(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+- void **p, void *end,
+- void *dbuf, void *ticket_buf)
++ void **p, void *end)
+ {
+ struct ceph_x_info *xi = ac->private;
+ int type;
+ u8 tkt_struct_v, blob_struct_v;
+ struct ceph_x_ticket_handler *th;
++ void *dbuf = NULL;
+ void *dp, *dend;
+ int dlen;
+ char is_enc;
+ struct timespec validity;
+ struct ceph_crypto_key old_key;
++ void *ticket_buf = NULL;
+ void *tp, *tpend;
+ struct ceph_timespec new_validity;
+ struct ceph_crypto_key new_session_key;
+@@ -167,8 +172,7 @@ static int process_one_ticket(struct cep
+ }
+
+ /* blob for me */
+- dlen = ceph_x_decrypt(secret, p, end, dbuf,
+- TEMP_TICKET_BUF_LEN);
++ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+ if (dlen <= 0) {
+ ret = dlen;
+ goto out;
+@@ -195,20 +199,25 @@ static int process_one_ticket(struct cep
+
+ /* ticket blob for service */
+ ceph_decode_8_safe(p, end, is_enc, bad);
+- tp = ticket_buf;
+ if (is_enc) {
+ /* encrypted */
+ dout(" encrypted ticket\n");
+- dlen = ceph_x_decrypt(&old_key, p, end, ticket_buf,
+- TEMP_TICKET_BUF_LEN);
++ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out;
+ }
++ tp = ticket_buf;
+ dlen = ceph_decode_32(&tp);
+ } else {
+ /* unencrypted */
+ ceph_decode_32_safe(p, end, dlen, bad);
++ ticket_buf = kmalloc(dlen, GFP_NOFS);
++ if (!ticket_buf) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ tp = ticket_buf;
+ ceph_decode_need(p, end, dlen, bad);
+ ceph_decode_copy(p, ticket_buf, dlen);
+ }
+@@ -237,6 +246,8 @@ static int process_one_ticket(struct cep
+ xi->have_keys |= th->service;
+
+ out:
++ kfree(ticket_buf);
++ kfree(dbuf);
+ return ret;
+
+ bad:
+@@ -249,21 +260,10 @@ static int ceph_x_proc_ticket_reply(stru
+ void *buf, void *end)
+ {
+ void *p = buf;
+- char *dbuf;
+- char *ticket_buf;
+ u8 reply_struct_v;
+ u32 num;
+ int ret;
+
+- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+- if (!dbuf)
+- return -ENOMEM;
+-
+- ret = -ENOMEM;
+- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+- if (!ticket_buf)
+- goto out_dbuf;
+-
+ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+ if (reply_struct_v != 1)
+ return -EINVAL;
+@@ -272,22 +272,15 @@ static int ceph_x_proc_ticket_reply(stru
+ dout("%d tickets\n", num);
+
+ while (num--) {
+- ret = process_one_ticket(ac, secret, &p, end,
+- dbuf, ticket_buf);
++ ret = process_one_ticket(ac, secret, &p, end);
+ if (ret)
+- goto out;
++ return ret;
+ }
+
+- ret = 0;
+-out:
+- kfree(ticket_buf);
+-out_dbuf:
+- kfree(dbuf);
+- return ret;
++ return 0;
+
+ bad:
+- ret = -EINVAL;
+- goto out;
++ return -EINVAL;
+ }
+
+ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+@@ -583,13 +576,14 @@ static int ceph_x_verify_authorizer_repl
+ struct ceph_x_ticket_handler *th;
+ int ret = 0;
+ struct ceph_x_authorize_reply reply;
++ void *preply = &reply;
+ void *p = au->reply_buf;
+ void *end = p + sizeof(au->reply_buf);
+
+ th = get_ticket_handler(ac, au->service);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
++ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(reply))
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch)
@@ -0,0 +1,87 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:32 +0200
+Subject: net: sctp: fix panic on duplicate ASCONF chunks
+Origin: https://git.kernel.org/linus/b69040d8e39f20d5215a03502a8e8b4c6ab78395
+
+When receiving e.g. a semi-well-formed connection scan in the
+form of ...
+
+ -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+ <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+ -------------------- COOKIE-ECHO -------------------->
+ <-------------------- COOKIE-ACK ---------------------
+ ---------------- ASCONF_a; ASCONF_b ----------------->
+
+... where the ASCONF_a chunk equals ASCONF_b (at least both serials
+need to be equal), we can panic an SCTP server!
+
+The problem is that well-formed ASCONF chunks to which we reply
+with ASCONF_ACK chunks are cached per serial. Thus, when we receive
+the same ASCONF chunk twice (e.g. through a lost ASCONF_ACK), we do
+not need to process it again on the server side (that was the
+idea, also proposed in the RFC). Instead, we know it was cached
+and just resend the cached chunk. So far, so good.
+
+Where things get nasty is in SCTP's side effect interpreter, that
+is, sctp_cmd_interpreter():
+
+While incoming ASCONF_a (chunk = event_arg) is being marked
+!end_of_packet and !singleton, and we have an association context,
+we do not flush the outqueue the first time after processing the
+ASCONF_ACK singleton chunk via SCTP_CMD_REPLY. Instead, we keep it
+queued up, although we set local_cork to 1. Commit 2e3216cd54b1
+changed the precedence, so that as long as we get bundled incoming
+chunks, we try possible bundling on the outgoing queue as well. Before
+this commit, we would just flush the output queue.
+
+Now, while ASCONF_a's ASCONF_ACK sits in the corked outq, we
+continue to process the same ASCONF_b chunk from the packet. As
+we have cached the previous ASCONF_ACK, we find it, grab it and
+do another SCTP_CMD_REPLY command on it. So, effectively, we rip
+the chunk->list pointers and requeue the same ASCONF_ACK chunk
+another time. Since we process ASCONF_b, it's correctly marked
+with end_of_packet and we enforce an uncork, and thus flush, thus
+crashing the kernel.
+
+Fix it by testing if the ASCONF_ACK is currently pending and if
+that is the case, do not requeue it. When flushing the output
+queue we may relink the chunk for preparing an outgoing packet,
+but eventually unlink it when it's copied into the skb right
+before transmission.
+
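+To see why re-adding a still-queued chunk is fatal, consider this
+minimal userspace model of the list corruption (an illustrative
+sketch only; the real code uses the kernel's struct list_head, and
+pending() mirrors the sctp_chunk_pending() helper added below):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct node { struct node *next, *prev; };
+
+    static void list_init(struct node *h) { h->next = h->prev = h; }
+
+    static void list_add_tail(struct node *n, struct node *h)
+    {
+        /* blindly rewrites n->next/n->prev without unlinking n */
+        n->prev = h->prev;
+        n->next = h;
+        h->prev->next = n;
+        h->prev = n;
+    }
+
+    /* linked into some list <=> node does not point at itself */
+    static bool pending(const struct node *n) { return n->next != n; }
+
+    int main(void)
+    {
+        struct node outq, ack;
+
+        list_init(&outq);
+        list_init(&ack);
+        list_add_tail(&ack, &outq);      /* first SCTP_CMD_REPLY */
+        if (!pending(&ack))              /* the fix: skip if queued */
+            list_add_tail(&ack, &outq);  /* double-add would corrupt */
+        printf("ack pending: %d\n", pending(&ack));
+        return 0;
+    }
+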
+Joint work with Vlad Yasevich.
+
+Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ include/net/sctp/sctp.h | 5 +++++
+ net/sctp/associola.c | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -524,6 +524,11 @@ static inline void sctp_assoc_pending_pm
+ asoc->pmtu_pending = 0;
+ }
+
++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
++{
++ return !list_empty(&chunk->list);
++}
++
+ /* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1638,6 +1638,8 @@ struct sctp_chunk *sctp_assoc_lookup_asc
+ * ack chunk whose serial number matches that of the request.
+ */
+ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
++ if (sctp_chunk_pending(ack))
++ continue;
+ if (ack->subh.addip_hdr->serial == serial) {
+ sctp_chunk_hold(ack);
+ return ack;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch)
@@ -0,0 +1,145 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:33 +0200
+Subject: net: sctp: fix remote memory pressure from excessive queueing
+Origin: https://git.kernel.org/linus/26b87c7881006311828bb0ab271a551a62dcceb4
+
+This scenario is not limited to ASCONF; it is just taken as one
+example triggering the issue. When receiving ASCONF probes
+in the form of ...
+
+ -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+ <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+ -------------------- COOKIE-ECHO -------------------->
+ <-------------------- COOKIE-ACK ---------------------
+ ---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
+ [...]
+ ---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>
+
+... where ASCONF_a, ASCONF_b, ..., ASCONF_z are good-formed
+ASCONFs and have increasing serial numbers, we process such
+ASCONF chunk(s) marked with !end_of_packet and !singleton,
+since we have not yet reached the SCTP packet end. SCTP only
+does verification on a chunk-by-chunk basis, as an SCTP
+packet is nothing more than just a container of a stream of
+chunks which it eats up one by one.
+
+We could run into the case that we receive a packet with a
+malformed tail, above marked as trailing JUNK. All previous
+chunks here are well-formed, so the stack will eat up all
+previous chunks up to this point. In case JUNK does not fit
+into a chunk header and there are no more other chunks in
+the input queue, or in case JUNK contains a garbage chunk
+header, but the encoded chunk length would exceed the skb
+tail, or we came here from an entirely different scenario
+and the chunk has the pdiscard=1 mark (without having had a flush
+point), it can happen that we excessively queue up
+the association's output queue (a correct final chunk may
+then turn it into a response flood when flushing the
+queue ;)): I ran a simple script with incremental ASCONF
+serial numbers and could see the server side consuming an
+excessive amount of RAM [before/after: up to 2GB and more].
+
+The issue at heart is that the chunk train basically ends
+with !end_of_packet and !singleton markers; since commit
+2e3216cd54b1 ("sctp: Follow security requirement of responding
+with 1 packet") this prevents an output queue flush
+point in sctp_do_sm() -> sctp_cmd_interpreter() on the input
+chunk (chunk = event_arg) even though local_cork is set, as
+that commit changed the flush's precedence. In the normal
+case, the last chunk with end_of_packet=1 would trigger the
+queue flush to accommodate possible outgoing bundling.
+
+In the input queue, sctp_inq_pop() seems to do the right thing
+in terms of discarding invalid chunks. So, the above JUNK will
+not enter the state machine; instead it is released and we exit
+the sctp_assoc_bh_rcv() chunk processing loop. It's simply
+the flush point being missing at loop exit. Adding a try-flush
+approach on the output queue might not work as the underlying
+infrastructure might be long gone at this point due to the
+side-effect interpreter run.
+
+One possibility, albeit a bit of a kludge, would be to defer
+invalid chunk freeing into the state machine in order to
+possibly trigger packet discards and thus indirectly a queue
+flush on error. It would surely be better to discard chunks
+as in the current, perhaps better controlled environment, but
+going back and forth, it's simply architecturally not possible.
+I tried various trailing JUNK attack cases and it seems to
+look good now.
+
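+Condensed, the deferred-discard pattern the diff below switches to
+is (a simplified sketch; fields as in the hunks):
+
+    /* inqueue: do not free a partial chunk inline; mark it and clamp
+     * its end so the state machine can reject it and, via the packet
+     * discard, reach a proper outqueue flush point.
+     */
+    if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+        chunk->pdiscard = 1;
+        chunk->chunk_end = skb_tail_pointer(chunk->skb);
+    }
+
+    /* state machine: a marked chunk always fails the length check */
+    if (unlikely(chunk->pdiscard))
+        return 0;
+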
+Joint work with Vlad Yasevich.
+
+Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ net/sctp/inqueue.c | 33 +++++++--------------------------
+ net/sctp/sm_statefuns.c | 3 +++
+ 2 files changed, 10 insertions(+), 26 deletions(-)
+
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -152,18 +152,9 @@ struct sctp_chunk *sctp_inq_pop(struct s
+ } else {
+ /* Nothing to do. Next chunk in the packet, please. */
+ ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+-
+ /* Force chunk->skb->data to chunk->chunk_end. */
+- skb_pull(chunk->skb,
+- chunk->chunk_end - chunk->skb->data);
+-
+- /* Verify that we have at least chunk headers
+- * worth of buffer left.
+- */
+- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+- }
++ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
++ /* We are guaranteed to pull an SCTP header. */
+ }
+ }
+
+@@ -199,24 +190,14 @@ struct sctp_chunk *sctp_inq_pop(struct s
+ skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++ skb_tail_pointer(chunk->skb)) {
+ /* This is not a singleton */
+ chunk->singleton = 0;
+ } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+- /* RFC 2960, Section 6.10 Bundling
+- *
+- * Partial chunks MUST NOT be placed in an SCTP packet.
+- * If the receiver detects a partial chunk, it MUST drop
+- * the chunk.
+- *
+- * Since the end of the chunk is past the end of our buffer
+- * (which contains the whole packet, we can freely discard
+- * the whole packet.
+- */
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+-
+- return NULL;
++ /* Discard inside state machine. */
++ chunk->pdiscard = 1;
++ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ } else {
+ /* We are at the end of the packet, so mark the chunk
+ * in case we need to send a SACK.
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -163,6 +163,9 @@ sctp_chunk_length_valid(struct sctp_chun
+ {
+ __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+
++ /* Previously already marked? */
++ if (unlikely(chunk->pdiscard))
++ return 0;
+ if (unlikely(chunk_length < required_length))
+ return 0;
+
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch)
@@ -0,0 +1,333 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:31 +0200
+Subject: net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
+Origin: https://git.kernel.org/linus/9de7922bc709eee2f609cd01d98aaedc4cf5ea74
+
+Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for
+ASCONF chunk") added basic verification of ASCONF chunks, however,
+it is still possible to remotely crash a server by sending a
+special crafted ASCONF chunk, even up to pre 2.6.12 kernels:
+
+skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768
+ head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950
+ end:0x440 dev:<NULL>
+ ------------[ cut here ]------------
+kernel BUG at net/core/skbuff.c:129!
+[...]
+Call Trace:
+ <IRQ>
+ [<ffffffff8144fb1c>] skb_put+0x5c/0x70
+ [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp]
+ [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp]
+ [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20
+ [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp]
+ [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
+ [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0
+ [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp]
+ [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp]
+ [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
+ [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
+ [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
+ [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
+ [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
+ [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
+ [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
+ [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0
+ [<ffffffff81497078>] ip_local_deliver+0x98/0xa0
+ [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440
+ [<ffffffff81496ac5>] ip_rcv+0x275/0x350
+ [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750
+ [<ffffffff81460588>] netif_receive_skb+0x58/0x60
+
+This can be triggered, e.g., through a simple scripted nmap
+connection scan injecting the chunk after the handshake, as
+follows ...
+
+ -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+ <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+ -------------------- COOKIE-ECHO -------------------->
+ <-------------------- COOKIE-ACK ---------------------
+ ------------------ ASCONF; UNKNOWN ------------------>
+
+... where an ASCONF chunk of length 280 contains 2 parameters ...
+
+ 1) Add IP address parameter (param length: 16)
+ 2) Add/del IP address parameter (param length: 255)
+
+... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the
+Address Parameter in the ASCONF chunk is even missing.
+This is just an example and similarly-crafted ASCONF chunks
+could be used just as well.
+
+The ASCONF chunk passes through sctp_verify_asconf() as all
+parameters pass the sanity checks, and after walking we end
+up successfully at the chunk end boundary, and thus may invoke
+sctp_process_asconf(). Parameter walking is done with
+WORD_ROUND() to take padding into account.
+
+In sctp_process_asconf()'s TLV processing, we may fail in
+sctp_process_asconf_param() e.g., due to removal of the IP
+address that is also the source address of the packet containing
+the ASCONF chunk, and thus we need to add all TLVs after the
+failure to our ASCONF response to the remote side via helper function
+sctp_add_asconf_response(), which basically invokes a
+sctp_addto_chunk() adding the error parameters to the given
+skb.
+
+When walking to the next parameter this time, we proceed
+with ...
+
+ length = ntohs(asconf_param->param_hdr.length);
+ asconf_param = (void *)asconf_param + length;
+
+... instead of the WORD_ROUND()'ed length, thus resulting here
+in an off-by-one that leads to reading the follow-up garbage
+parameter length of 12336, and thus throwing an skb_over_panic
+for the reply when trying to sctp_addto_chunk() next time,
+which implicitly calls the skb_put() with that length.
+
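+For concreteness, with the kernel's padding macro (as defined in
+include/net/sctp/sctp.h):
+
+    #define WORD_ROUND(s) (((s) + 3) & ~3)   /* pad to 4 bytes */
+
+the verification walk advances by WORD_ROUND(255) = 256 over the
+255-byte parameter above, while the processing loop advanced by the
+raw 255, reading the next parameter header one byte short, inside
+the padding, and picking up the garbage length.
+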
+Fix it by using the sctp_walk_params() macro [ which is also used
+in INIT parameter processing ] in the verification *and*
+in ASCONF processing: it will make sure we don't spill over and
+that we walk parameters WORD_ROUND()'ed. Moreover, we're being
+more defensive and guard against unknown parameter types and
+missized addresses.
+
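+The shape of a safe TLV walk, roughly what sctp_walk_params()
+expands to (a sketch, not the verbatim macro; handle() stands in
+for the per-parameter processing):
+
+    sctp_paramhdr_t *hdr;
+    void *p = params, *end = chunk_end;
+
+    while (p + sizeof(*hdr) <= end) {             /* header fits   */
+        size_t len;
+
+        hdr = p;
+        len = ntohs(hdr->length);
+        if (len < sizeof(*hdr) || p + len > end)  /* bad length    */
+            break;
+        handle(hdr);                              /* process TLV   */
+        p += WORD_ROUND(len);                     /* padded stride */
+    }
+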
+Joint work with Vlad Yasevich.
+
+Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Acked-by: Neil Horman <nhorman at tuxdriver.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - sctp_sf_violation_paramlen() doesn't take a struct net * parameter]
+---
+ include/net/sctp/sm.h | 6 +--
+ net/sctp/sm_make_chunk.c | 99 +++++++++++++++++++++++++++---------------------
+ net/sctp/sm_statefuns.c | 18 +--------
+ 3 files changed, 60 insertions(+), 63 deletions(-)
+
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -251,9 +251,9 @@ struct sctp_chunk *sctp_make_asconf_upda
+ int, __be16);
+ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp);
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp);
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+ int sctp_process_asconf_ack(struct sctp_association *asoc,
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -3068,50 +3068,63 @@ static __be16 sctp_process_asconf_param(
+ return SCTP_ERROR_NO_ERROR;
+ }
+
+-/* Verify the ASCONF packet before we process it. */
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp) {
+- sctp_addip_param_t *asconf_param;
++/* Verify the ASCONF packet before we process it. */
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp)
++{
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
+ union sctp_params param;
+- int length, plen;
+-
+- param.v = (sctp_paramhdr_t *) param_hdr;
+- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+- length = ntohs(param.p->length);
+- *errp = param.p;
++ bool addr_param_seen = false;
+
+- if (param.v > chunk_end - length ||
+- length < sizeof(sctp_paramhdr_t))
+- return 0;
++ sctp_walk_params(param, addip, addip_hdr.params) {
++ size_t length = ntohs(param.p->length);
+
++ *errp = param.p;
+ switch (param.p->type) {
++ case SCTP_PARAM_ERR_CAUSE:
++ break;
++ case SCTP_PARAM_IPV4_ADDRESS:
++ if (length != sizeof(sctp_ipv4addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
++ case SCTP_PARAM_IPV6_ADDRESS:
++ if (length != sizeof(sctp_ipv6addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+- asconf_param = (sctp_addip_param_t *)param.v;
+- plen = ntohs(asconf_param->param_hdr.length);
+- if (plen < sizeof(sctp_addip_param_t) +
+- sizeof(sctp_paramhdr_t))
+- return 0;
++ /* In ASCONF chunks, these need to be first. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ length = ntohs(param.addip->param_hdr.length);
++ if (length < sizeof(sctp_addip_param_t) +
++ sizeof(sctp_paramhdr_t))
++ return false;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(sctp_addip_param_t))
+- return 0;
+-
++ return false;
+ break;
+ default:
+- break;
++ /* This is unknown to us, reject! */
++ return false;
+ }
+-
+- param.v += WORD_ROUND(length);
+ }
+
+- if (param.v != chunk_end)
+- return 0;
++ /* Remaining sanity checks. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ if (!addr_param_needed && addr_param_seen)
++ return false;
++ if (param.v != chunk->chunk_end)
++ return false;
+
+- return 1;
++ return true;
+ }
+
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
+@@ -3120,16 +3133,17 @@ int sctp_verify_asconf(const struct sctp
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf)
+ {
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
++ bool all_param_pass = true;
++ union sctp_params param;
+ sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
+ sctp_addip_param_t *asconf_param;
+ struct sctp_chunk *asconf_ack;
+-
+ __be16 err_code;
+ int length = 0;
+ int chunk_len;
+ __u32 serial;
+- int all_param_pass = 1;
+
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ hdr = (sctp_addiphdr_t *)asconf->skb->data;
+@@ -3157,9 +3171,14 @@ struct sctp_chunk *sctp_process_asconf(s
+ goto done;
+
+ /* Process the TLVs contained within the ASCONF chunk. */
+- while (chunk_len > 0) {
++ sctp_walk_params(param, addip, addip_hdr.params) {
++ /* Skip preceding address parameters. */
++ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
++ continue;
++
+ err_code = sctp_process_asconf_param(asoc, asconf,
+- asconf_param);
++ param.addip);
+ /* ADDIP 4.1 A7)
+ * If an error response is received for a TLV parameter,
+ * all TLVs with no response before the failed TLV are
+@@ -3167,28 +3186,20 @@ struct sctp_chunk *sctp_process_asconf(s
+ * the failed response are considered unsuccessful unless
+ * a specific success indication is present for the parameter.
+ */
+- if (SCTP_ERROR_NO_ERROR != err_code)
+- all_param_pass = 0;
+-
++ if (err_code != SCTP_ERROR_NO_ERROR)
++ all_param_pass = false;
+ if (!all_param_pass)
+- sctp_add_asconf_response(asconf_ack,
+- asconf_param->crr_id, err_code,
+- asconf_param);
++ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
++ err_code, param.addip);
+
+ /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ * an IP address sends an 'Out of Resource' in its response, it
+ * MUST also fail any subsequent add or delete requests bundled
+ * in the ASCONF.
+ */
+- if (SCTP_ERROR_RSRC_LOW == err_code)
++ if (err_code == SCTP_ERROR_RSRC_LOW)
+ goto done;
+-
+- /* Move to the next ASCONF param. */
+- length = ntohs(asconf_param->param_hdr.length);
+- asconf_param = (void *)asconf_param + length;
+- chunk_len -= length;
+ }
+-
+ done:
+ asoc->peer.addip_serial++;
+
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3516,9 +3516,7 @@ sctp_disposition_t sctp_sf_do_asconf(con
+ struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_paramhdr *err_param = NULL;
+ sctp_addiphdr_t *hdr;
+- union sctp_addr_param *addr_param;
+ __u32 serial;
+- int length;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+@@ -3543,17 +3541,8 @@ sctp_disposition_t sctp_sf_do_asconf(con
+ hdr = (sctp_addiphdr_t *)chunk->skb->data;
+ serial = ntohl(hdr->serial);
+
+- addr_param = (union sctp_addr_param *)hdr->params;
+- length = ntohs(addr_param->p.length);
+- if (length < sizeof(sctp_paramhdr_t))
+- return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+- (void *)addr_param, commands);
+-
+ /* Verify the ASCONF chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)((void *)addr_param + length),
+- (void *)chunk->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+ (void *)err_param, commands);
+
+@@ -3670,10 +3659,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack
+ rcvd_serial = ntohl(addip_hdr->serial);
+
+ /* Verify the ASCONF-ACK chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)addip_hdr->params,
+- (void *)asconf_ack->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+ (void *)err_param, commands);
+
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/nfsd-fix-acl-null-pointer-deref.patch)
@@ -0,0 +1,37 @@
+From: Sergio Gelato <Sergio.Gelato at astro.su.se>
+Date: Wed, 24 Sep 2014 08:47:24 +0200
+Subject: Re: NFS regression in 3.2.60
+Bug-Debian: https://bugs.debian.org/754420
+Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1348670
+Origin: http://mid.gmane.org/20140924064724.GB4025@ebisu.astro.su.se
+
+BugLink: http://bugs.launchpad.net/bugs/1348670
+
+Fix regression introduced in pre-3.14 kernels by cherry-picking
+aa07c713ecfc0522916f3cd57ac628ea6127c0ec
+(NFSD: Call ->set_acl with a NULL ACL structure if no entries).
+
+The affected code was removed in 3.14 by commit
+4ac7249ea5a0ceef9f8269f63f33cc873c3fac61
+(nfsd: use get_acl and ->set_acl).
+The ->set_acl methods are already able to cope with a NULL argument.
+
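+Without such a check, the first statement of the affected helper
+dereferences the NULL ACL; the failing line (visible in the hunk
+below) is:
+
+    buflen = posix_acl_xattr_size(pacl->a_count);  /* pacl == NULL: oops */
+
+Mapping the no-entries case to a NULL-value vfs_setxattr() call, as
+done below, is the 3.2 equivalent of passing a NULL ACL to ->set_acl.
+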
+Signed-off-by: Sergio Gelato <Sergio.Gelato at astro.su.se>
+---
+ fs/nfsd/vfs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 446dc01..fc208e4 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -508,6 +508,9 @@
+ char *buf = NULL;
+ int error = 0;
+
++ if (!pacl)
++ return vfs_setxattr(dentry, key, NULL, 0, 0);
++
+ buflen = posix_acl_xattr_size(pacl->a_count);
+ buf = kmalloc(buflen, GFP_KERNEL);
+ error = -ENOMEM;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch)
@@ -0,0 +1,85 @@
+From: Jan Kara <jack at suse.cz>
+Date: Thu, 4 Sep 2014 14:06:55 +0200
+Subject: udf: Avoid infinite loop when processing indirect ICBs
+Origin: https://git.kernel.org/linus/c03aa9f6e1f938618e6db2e23afef0574efeeb65
+
+We did not implement any bound on the number of indirect ICBs we follow
+when loading an inode. Thus a corrupted medium could cause the kernel to
+go into an infinite loop, possibly causing a stack overflow.
+
+Fix the possible stack overflow by removing recursion from
+__udf_read_inode() and limiting the number of indirect ICBs we follow
+to avoid infinite loops.
+
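+The rewrite is the standard recursion-to-iteration transform with a
+depth bound; schematically (icb_is_indirect() and follow_indirect()
+are hypothetical stand-ins for the buffer-head handling in the real
+function):
+
+    unsigned int indirections = 0;
+
+    reread:
+    if (icb_is_indirect(inode)) {
+        if (++indirections > UDF_MAX_ICB_NESTING) {
+            make_bad_inode(inode);   /* corrupted media: give up */
+            return;
+        }
+        follow_indirect(inode);      /* updates iinfo->i_location */
+        goto reread;                 /* was: __udf_read_inode(inode) */
+    }
+
+Each iteration now costs constant stack instead of one call frame
+per indirect ICB.
+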
+Signed-off-by: Jan Kara <jack at suse.cz>
+[bwh: Backported to 3.2: adjust context]
+---
+ fs/udf/inode.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1176,13 +1176,22 @@ update_time:
+ return 0;
+ }
+
++/*
++ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
++ * arbitrary - just that we hopefully don't limit any real use of rewritten
++ * inode on write-once media but avoid looping for too long on corrupted media.
++ */
++#define UDF_MAX_ICB_NESTING 1024
++
+ static void __udf_read_inode(struct inode *inode)
+ {
+ struct buffer_head *bh = NULL;
+ struct fileEntry *fe;
+ uint16_t ident;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ unsigned int indirections = 0;
+
++reread:
+ /*
+ * Set defaults, but the inode is still incomplete!
+ * Note: get_new_inode() sets the following on a new inode:
+@@ -1219,28 +1228,26 @@ static void __udf_read_inode(struct inod
+ ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
+ &ident);
+ if (ident == TAG_IDENT_IE && ibh) {
+- struct buffer_head *nbh = NULL;
+ struct kernel_lb_addr loc;
+ struct indirectEntry *ie;
+
+ ie = (struct indirectEntry *)ibh->b_data;
+ loc = lelb_to_cpu(ie->indirectICB.extLocation);
+
+- if (ie->indirectICB.extLength &&
+- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
+- &ident))) {
+- if (ident == TAG_IDENT_FE ||
+- ident == TAG_IDENT_EFE) {
+- memcpy(&iinfo->i_location,
+- &loc,
+- sizeof(struct kernel_lb_addr));
+- brelse(bh);
+- brelse(ibh);
+- brelse(nbh);
+- __udf_read_inode(inode);
++ if (ie->indirectICB.extLength) {
++ brelse(bh);
++ brelse(ibh);
++ memcpy(&iinfo->i_location, &loc,
++ sizeof(struct kernel_lb_addr));
++ if (++indirections > UDF_MAX_ICB_NESTING) {
++ udf_err(inode->i_sb,
++ "too many ICBs in ICB hierarchy"
++ " (max %d supported)\n",
++ UDF_MAX_ICB_NESTING);
++ make_bad_inode(inode);
+ return;
+ }
+- brelse(nbh);
++ goto reread;
+ }
+ }
+ brelse(ibh);
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch)
@@ -0,0 +1,137 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Tue, 16 Sep 2014 03:24:05 +0300
+Subject: KVM: x86: Check non-canonical addresses upon WRMSR
+Origin: https://git.kernel.org/linus/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
+
+Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
+written to certain MSRs. The behavior is "almost" identical for AMD and Intel
+(ignoring MSRs that are not implemented in either architecture since they would
+anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a
+non-canonical address is written on Intel but not on AMD (which ignores the top
+32-bits).
+
+Accordingly, this patch injects a #GP on the MSRs which behave identically on
+Intel and AMD. To eliminate the differences between the architectures, the
+value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned
+into a canonical value before writing instead of injecting a #GP.
+
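+On 48-bit virtual-address hardware, "canonical" means that bits 63:47
+all equal bit 47; sign-extending from bit 47 both tests and repairs
+this. A short worked example against the helper added below:
+
+    static inline u64 get_canonical(u64 la)
+    {
+        return ((int64_t)la << 16) >> 16;    /* sign-extend bit 47 */
+    }
+
+    u64 x = get_canonical(0x0000800000000000ULL);
+    /* x == 0xffff800000000000: it differs from the input, so the  */
+    /* input is non-canonical and a WRMSR of it to e.g. MSR_LSTAR  */
+    /* must inject #GP.                                            */
+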
+Some references from Intel and AMD manuals:
+
+According to Intel SDM description of WRMSR instruction #GP is expected on
+WRMSR "If the source register contains a non-canonical address and ECX
+specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
+IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
+
+According to the AMD instruction manual:
+LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
+LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
+form, a general-protection exception (#GP) occurs."
+IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
+base field must be in canonical form or a #GP fault will occur."
+IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
+be in canonical form."
+
+This patch fixes CVE-2014-3610.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - The various set_msr() functions all separate msr_index and data parameters]
+---
+ arch/x86/include/asm/kvm_host.h | 14 ++++++++++++++
+ arch/x86/kvm/svm.c | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ arch/x86/kvm/x86.c | 27 ++++++++++++++++++++++++++-
+ 4 files changed, 42 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -821,6 +821,20 @@ static inline void kvm_inject_gp(struct
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+
++static inline u64 get_canonical(u64 la)
++{
++ return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++ return get_canonical(la) != la;
++#else
++ return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3109,7 +3109,7 @@ static int wrmsr_interception(struct vcp
+
+
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+- if (svm_set_msr(&svm->vcpu, ecx, data)) {
++ if (kvm_set_msr(&svm->vcpu, ecx, data)) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(&svm->vcpu, 0);
+ } else {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4548,7 +4548,7 @@ static int handle_wrmsr(struct kvm_vcpu
+ u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+ | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+- if (vmx_set_msr(vcpu, ecx, data) != 0) {
++ if (kvm_set_msr(vcpu, ecx, data) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -893,7 +893,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
+-
+ /*
+ * Writes msr value into into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+@@ -901,8 +900,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+ {
++ switch (msr_index) {
++ case MSR_FS_BASE:
++ case MSR_GS_BASE:
++ case MSR_KERNEL_GS_BASE:
++ case MSR_CSTAR:
++ case MSR_LSTAR:
++ if (is_noncanonical_address(data))
++ return 1;
++ break;
++ case MSR_IA32_SYSENTER_EIP:
++ case MSR_IA32_SYSENTER_ESP:
++ /*
++ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++ * non-canonical address is written on Intel but not on
++ * AMD (which ignores the top 32-bits, because it does
++ * not implement 64-bit SYSENTER).
++ *
++ * 64-bit code should hence be able to write a non-canonical
++ * value on AMD. Making the address canonical ensures that
++ * vmentry does not fail on Intel after writing a non-canonical
++ * value, and that something deterministic happens if the guest
++ * invokes 64-bit SYSENTER.
++ */
++ data = get_canonical(data);
++ }
+ return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+
+ /*
+ * Adapt set_msr() to msr_io()'s calling convention
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch)
@@ -0,0 +1,232 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:38 +0300
+Subject: KVM: x86: Emulator fixes for eip canonical checks on near branches
+Origin: https://git.kernel.org/linus/234f3ce485d54017f15cf5e0699cff4100121601
+
+Before changing rip (during jmp, call, ret, etc.) the target should be asserted
+to be a canonical one, as real CPUs do. During sysret, both target rsp and rip
+should be canonical. If any of these values is noncanonical, a #GP exception
+should occur. The exceptions to this rule are syscall and sysenter instructions
+in which the assigned rip is checked during the assignment to the relevant
+MSRs.
+
+This patch fixes the emulator to behave as real CPUs do for near branches.
+Far branches are handled by the next patch.
+
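+Condensed, the rule applied to a near-branch target dst is (a sketch
+mirroring the assign_eip_far() added in the diff):
+
+    switch (ctxt->op_bytes) {
+    case 2: ctxt->_eip = (u16)dst; break;      /* always fits   */
+    case 4: ctxt->_eip = (u32)dst; break;      /* always fits   */
+    case 8:                                    /* 64-bit target */
+        if ((cs_l && is_noncanonical_address(dst)) ||
+            (!cs_l && (dst & ~(u32)-1)))       /* high bits set */
+            return emulate_gp(ctxt, 0);        /* #GP, as on HW */
+        ctxt->_eip = dst;
+        break;
+    }
+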
+This fixes CVE-2014-3647.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Use ctxt->regs[] instead of reg_read(), reg_write(), reg_rmw()]
+---
+ arch/x86/kvm/emulate.c | 78 ++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 54 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -529,7 +529,8 @@ static int emulate_nm(struct x86_emulate
+ return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+
+-static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++ int cs_l)
+ {
+ switch (ctxt->op_bytes) {
+ case 2:
+@@ -539,16 +540,25 @@ static inline void assign_eip_near(struc
+ ctxt->_eip = (u32)dst;
+ break;
+ case 8:
++ if ((cs_l && is_noncanonical_address(dst)) ||
++ (!cs_l && (dst & ~(u32)-1)))
++ return emulate_gp(ctxt, 0);
+ ctxt->_eip = dst;
+ break;
+ default:
+ WARN(1, "unsupported eip assignment size\n");
+ }
++ return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
+ }
+
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ {
+- assign_eip_near(ctxt, ctxt->_eip + rel);
++ return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+@@ -1787,13 +1797,15 @@ static int em_grp45(struct x86_emulate_c
+ case 2: /* call near abs */ {
+ long int old_eip;
+ old_eip = ctxt->_eip;
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
++ if (rc != X86EMUL_CONTINUE)
++ break;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
+ break;
+ }
+ case 4: /* jmp abs */
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
+ break;
+ case 5: /* jmp far */
+ rc = em_jmp_far(ctxt);
+@@ -1825,10 +1837,14 @@ static int em_grp9(struct x86_emulate_ct
+
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- return em_pop(ctxt);
++ int rc;
++ unsigned long eip;
++
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ return assign_eip_near(ctxt, eip);
+ }
+
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+@@ -2060,7 +2076,7 @@ static int em_sysexit(struct x86_emulate
+ {
+ struct x86_emulate_ops *ops = ctxt->ops;
+ struct desc_struct cs, ss;
+- u64 msr_data;
++ u64 msr_data, rcx, rdx;
+ int usermode;
+ u16 cs_sel = 0, ss_sel = 0;
+
+@@ -2076,6 +2092,9 @@ static int em_sysexit(struct x86_emulate
+ else
+ usermode = X86EMUL_MODE_PROT32;
+
++ rcx = ctxt->regs[VCPU_REGS_RCX];
++ rdx = ctxt->regs[VCPU_REGS_RDX];
++
+ cs.dpl = 3;
+ ss.dpl = 3;
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2093,6 +2112,9 @@ static int em_sysexit(struct x86_emulate
+ ss_sel = cs_sel + 8;
+ cs.d = 0;
+ cs.l = 1;
++ if (is_noncanonical_address(rcx) ||
++ is_noncanonical_address(rdx))
++ return emulate_gp(ctxt, 0);
+ break;
+ }
+ cs_sel |= SELECTOR_RPL_MASK;
+@@ -2101,8 +2123,8 @@ static int em_sysexit(struct x86_emulate
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+- ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
+- ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
++ ctxt->_eip = rdx;
++ ctxt->regs[VCPU_REGS_RSP] = rcx;
+
+ return X86EMUL_CONTINUE;
+ }
+@@ -2555,10 +2577,13 @@ static int em_das(struct x86_emulate_ctx
+
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc;
+ long rel = ctxt->src.val;
+
+ ctxt->src.val = (unsigned long)ctxt->_eip;
+- jmp_rel(ctxt, rel);
++ rc = jmp_rel(ctxt, rel);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+ return em_push(ctxt);
+ }
+
+@@ -2590,11 +2615,12 @@ static int em_call_far(struct x86_emulat
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
++ unsigned long eip;
+
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_near(ctxt, eip);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+@@ -2840,20 +2866,24 @@ static int em_lmsw(struct x86_emulate_ct
+
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+ if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_cli(struct x86_emulate_ctxt *ctxt)
+@@ -3946,7 +3976,7 @@ special_insn:
+ break;
+ case 0x70 ... 0x7f: /* jcc (short) */
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x8d: /* lea r16/r32, m */
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -3994,7 +4024,7 @@ special_insn:
+ goto do_io_out;
+ case 0xe9: /* jmp rel */
+ case 0xeb: /* jmp rel short */
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xec: /* in al,dx */
+@@ -4160,7 +4190,7 @@ twobyte_insn:
+ break;
+ case 0x80 ... 0x8f: /* jnz rel, etc*/
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x90 ... 0x9f: /* setcc r/m8 */
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch)
@@ -0,0 +1,60 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:37 +0300
+Subject: KVM: x86: Fix wrong masking on relative jump/call
+Origin: https://git.kernel.org/linus/05c83ec9b73c8124555b706f6af777b10adf0862
+
+Relative jumps and calls do the masking according to the operand size, and not
+according to the address size as the KVM emulator does today.
+
+This patch fixes KVM behavior.
+
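+Concretely: a near jump with a 16-bit operand size in a 32-bit code
+segment must truncate the target to 16 bits, independent of the
+32-bit address size (worked example):
+
+    /* eip = 0x00011000, rel = +0x10, op_bytes = 2 */
+    new_eip = (u16)(0x00011000 + 0x10);  /* 0x1010, not 0x00011010 */
+
+The old jmp_rel() went through register_address_increment(), i.e.
+masked with the address size, and so would have kept the high bits.
+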
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/x86/kvm/emulate.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -456,11 +456,6 @@ register_address_increment(struct x86_em
+ *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ }
+
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+- register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ u32 limit = get_desc_limit(desc);
+@@ -534,6 +529,28 @@ static int emulate_nm(struct x86_emulate
+ return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+
++static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++ switch (ctxt->op_bytes) {
++ case 2:
++ ctxt->_eip = (u16)dst;
++ break;
++ case 4:
++ ctxt->_eip = (u32)dst;
++ break;
++ case 8:
++ ctxt->_eip = dst;
++ break;
++ default:
++ WARN(1, "unsupported eip assignment size\n");
++ }
++}
++
++static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++ assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ u16 selector;
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch)
@@ -0,0 +1,244 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:39 +0300
+Subject: KVM: x86: Handle errors when RIP is set during far jumps
+Origin: https://git.kernel.org/linus/d1442d85cc30ea75f7d399474ca738e0bc96f715
+
+Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not
+handle this case, which may result in a failed vm-entry once the assignment is
+done. The tricky part of doing so is that loading the new CS affects the
+VMCS/VMCB state, so if we fail during loading the new RIP, we are left in an
+inconsistent state. Therefore, on 64-bit, this patch saves the old CS
+descriptor and restores it if loading the RIP failed.
+
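+The fix follows a snapshot/attempt/rollback pattern (a sketch of the
+64-bit path, since only there can the RIP assignment itself fail):
+
+    ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS);
+
+    rc = __load_segment_descriptor(..., &new_desc);  /* loads new CS */
+    if (rc != X86EMUL_CONTINUE)
+        return rc;                  /* CS unchanged, nothing to undo */
+
+    rc = assign_eip_far(ctxt, dst, new_desc.l);
+    if (rc != X86EMUL_CONTINUE)     /* e.g. non-canonical target */
+        ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
+    return rc;
+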
+This fixes CVE-2014-3647.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - __load_segment_descriptor() does not take an in_task_switch parameter]
+---
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1234,7 +1234,8 @@ static int write_segment_descriptor(stru
+
+ /* Does not support long mode */
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+- u16 selector, int seg, u8 cpl)
++ u16 selector, int seg, u8 cpl,
++ struct desc_struct *desc)
+ {
+ struct desc_struct seg_desc;
+ u8 dpl, rpl;
+@@ -1342,6 +1343,8 @@ static int __load_segment_descriptor(str
+ }
+ load:
+ ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
++ if (desc)
++ *desc = seg_desc;
+ return X86EMUL_CONTINUE;
+ exception:
+ emulate_exception(ctxt, err_vec, err_code, true);
+@@ -1352,7 +1355,7 @@ static int load_segment_descriptor(struc
+ u16 selector, int seg)
+ {
+ u8 cpl = ctxt->ops->cpl(ctxt);
+- return __load_segment_descriptor(ctxt, selector, seg, cpl);
++ return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
+ }
+
+ static void write_register_operand(struct operand *op)
+@@ -1694,17 +1697,31 @@ static int em_iret(struct x86_emulate_ct
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned short sel;
++ unsigned short sel, old_sel;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ u8 cpl = ctxt->ops->cpl(ctxt);
++
++ /* Assignment of RIP may only fail in 64-bit mode */
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++ VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+
+- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
++ &new_desc);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+- return X86EMUL_CONTINUE;
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ /* assigning eip failed; restore the old cs */
++ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++ }
++ return rc;
+ }
+
+ static int em_grp1a(struct x86_emulate_ctxt *ctxt)
+@@ -1856,21 +1873,34 @@ static int em_ret(struct x86_emulate_ctx
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned long cs;
++ unsigned long eip, cs;
++ u16 old_cs;
+ int cpl = ctxt->ops->cpl(ctxt);
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++ VCPU_SREG_CS);
+
+- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- if (ctxt->op_bytes == 4)
+- ctxt->_eip = (u32)ctxt->_eip;
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ /* Outer-privilege level return is not implemented */
+ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ return X86EMUL_UNHANDLEABLE;
+- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_far(ctxt, eip, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ }
+ return rc;
+ }
+
+@@ -2248,19 +2278,24 @@ static int load_state_from_tss16(struct
+ * Now load segment descriptors. If fault happenes at this stage
+ * it is handled in a context of new task
+ */
+- ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2373,25 +2408,32 @@ static int load_state_from_tss32(struct
+ * Now load segment descriptors. If fault happenes at this stage
+ * it is handled in a context of new task
+ */
+- ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++ cpl, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
++ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
++ NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2605,24 +2647,39 @@ static int em_call_far(struct x86_emulat
+ u16 sel, old_cs;
+ ulong old_eip;
+ int rc;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ int cpl = ctxt->ops->cpl(ctxt);
+
+- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ old_eip = ctxt->_eip;
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_CONTINUE;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
+
+ ctxt->src.val = old_cs;
+ rc = em_push(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+- return rc;
++ goto fail;
+
+ ctxt->src.val = old_eip;
+- return em_push(ctxt);
++ rc = em_push(ctxt);
++ /* If we failed, we tainted the memory, but at the very least we should
++ restore cs */
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
++ return rc;
++fail:
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++
+ }
+
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch)
@@ -0,0 +1,33 @@
+From: Andy Honig <ahonig at google.com>
+Date: Wed, 27 Aug 2014 14:42:54 -0700
+Subject: KVM: x86: Improve thread safety in pit
+Origin: https://git.kernel.org/linus/2febc839133280d5a5e8e1179c94ea674489dae2
+
+There's a race condition in the PIT emulation code in KVM. In
+__kvm_migrate_pit_timer the pit_timer object is accessed without
+synchronization. If the race condition occurs at the wrong time this
+can crash the host kernel.
+
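+Schematically, the race is two contexts manipulating the one hrtimer
+at once (a sketch, not a trace):
+
+    /* CPU0 (unlocked, before the fix):   CPU1 (holds pit_state.lock):
+     *   hrtimer_cancel(timer);
+     *                                      tears down / re-creates timer
+     *   hrtimer_start_expires(timer, ...);   <- restarts stale timer
+     */
+
+Taking pit->pit_state.lock around the cancel/restart pair, as in the
+hunk below, closes that window.
+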
+This fixes CVE-2014-3611.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Andrew Honig <ahonig at google.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ arch/x86/kvm/i8254.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -264,8 +264,10 @@ void __kvm_migrate_pit_timer(struct kvm_
+ return;
+
+ timer = &pit->pit_state.pit_timer.timer;
++ mutex_lock(&pit->pit_state.lock);
+ if (hrtimer_cancel(timer))
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++ mutex_unlock(&pit->pit_state.lock);
+ }
+
+ static void destroy_pit_timer(struct kvm_pit *pit)
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch)
@@ -0,0 +1,55 @@
+From: Takuya Yoshikawa <yoshikawa.takuya at oss.ntt.co.jp>
+Date: Tue, 22 Nov 2011 15:18:35 +0900
+Subject: KVM: x86 emulator: Use opcode::execute for CALL
+Origin: https://git.kernel.org/linus/d4ddafcdf2201326ec9717172767cfad0ede1472
+
+CALL: E8
+
+Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya at oss.ntt.co.jp>
+Signed-off-by: Marcelo Tosatti <mtosatti at redhat.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ arch/x86/kvm/emulate.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2536,6 +2536,15 @@ static int em_das(struct x86_emulate_ctx
+ return X86EMUL_CONTINUE;
+ }
+
++static int em_call(struct x86_emulate_ctxt *ctxt)
++{
++ long rel = ctxt->src.val;
++
++ ctxt->src.val = (unsigned long)ctxt->_eip;
++ jmp_rel(ctxt, rel);
++ return em_push(ctxt);
++}
++
+ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ {
+ u16 sel, old_cs;
+@@ -3271,7 +3280,7 @@ static struct opcode opcode_table[256] =
+ D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
+ D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
+ /* 0xE8 - 0xEF */
+- D(SrcImm | Stack), D(SrcImm | ImplicitOps),
++ I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
+ I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+ D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+ D2bvIP(SrcAcc | DstDX, out, check_perm_out),
+@@ -3966,13 +3975,6 @@ special_insn:
+ case 0xe6: /* outb */
+ case 0xe7: /* out */
+ goto do_io_out;
+- case 0xe8: /* call (near) */ {
+- long int rel = ctxt->src.val;
+- ctxt->src.val = (unsigned long) ctxt->_eip;
+- jmp_rel(ctxt, rel);
+- rc = em_push(ctxt);
+- break;
+- }
+ case 0xe9: /* jmp rel */
+ case 0xeb: /* jmp rel short */
+ jmp_rel(ctxt, ctxt->src.val);
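
The refactoring above moves the 0xE8 case out of a giant switch and into the opcode table's execute hook. A minimal sketch of that table-driven dispatch, with simplified stand-in types (struct ctxt and its fields are assumptions for illustration only):

    #include <stdio.h>

    struct ctxt { long src; long eip; };
    typedef int (*op_execute)(struct ctxt *);

    static int em_call(struct ctxt *c)          /* 0xE8: near CALL */
    {
        long rel = c->src;
        c->src = c->eip;                        /* return address to push */
        c->eip += rel;                          /* jmp_rel() */
        return 0;                               /* em_push() omitted */
    }

    /* One execute hook per opcode replaces a case in the switch. */
    static const op_execute opcode_table[256] = {
        [0xE8] = em_call,
    };

    int main(void)
    {
        struct ctxt c = { 0x10, 0x100 };
        if (opcode_table[0xE8])
            opcode_table[0xE8](&c);
        printf("eip = %#lx\n", c.eip);          /* prints 0x110 */
        return 0;
    }
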
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch)
@@ -0,0 +1,162 @@
+From: Paolo Bonzini <pbonzini at redhat.com>
+Date: Thu, 15 May 2014 17:56:57 +0200
+Subject: KVM: x86: use new CS.RPL as CPL during task switch
+Origin: https://git.kernel.org/linus/2356aaeb2f58f491679dc0c38bc3f6dbe54e7ded
+
+During a task switch, all of CS.DPL, CS.RPL and SS.DPL must match (in addition
+to all the other requirements) and will be the new CPL. So far this
+worked by carefully setting the CS selector and flag before doing the
+task switch; setting CS.selector will already change the CPL.
+
+However, this will not work once we get the CPL from SS.DPL, because
+then you will have to set the full segment descriptor cache to change
+the CPL. ctxt->ops->cpl(ctxt) will then return the old CPL during the
+task switch, and the check that SS.DPL == CPL will fail.
+
+Temporarily assume that the CPL comes from CS.RPL during task switch
+to a protected-mode task. This is the same approach used in QEMU's
+emulation code, which (until version 2.0) manually tracks the CPL.
+
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - load_state_from_tss32() does not support VM86 mode]
+---
+ arch/x86/kvm/emulate.c | 60 +++++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 27 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1233,11 +1233,11 @@ static int write_segment_descriptor(stru
+ }
+
+ /* Does not support long mode */
+-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+- u16 selector, int seg)
++static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++ u16 selector, int seg, u8 cpl)
+ {
+ struct desc_struct seg_desc;
+- u8 dpl, rpl, cpl;
++ u8 dpl, rpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+@@ -1286,7 +1286,6 @@ static int load_segment_descriptor(struc
+
+ rpl = selector & 3;
+ dpl = seg_desc.dpl;
+- cpl = ctxt->ops->cpl(ctxt);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+@@ -1349,6 +1348,13 @@ exception:
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
++static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++ u16 selector, int seg)
++{
++ u8 cpl = ctxt->ops->cpl(ctxt);
++ return __load_segment_descriptor(ctxt, selector, seg, cpl);
++}
++
+ static void write_register_operand(struct operand *op)
+ {
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+@@ -2213,6 +2219,7 @@ static int load_state_from_tss16(struct
+ struct tss_segment_16 *tss)
+ {
+ int ret;
++ u8 cpl;
+
+ ctxt->_eip = tss->ip;
+ ctxt->eflags = tss->flag | 2;
+@@ -2235,23 +2242,25 @@ static int load_state_from_tss16(struct
+ set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+ set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+
++ cpl = tss->cs & 3;
++
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+- ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
++ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2330,6 +2339,7 @@ static int load_state_from_tss32(struct
+ struct tss_segment_32 *tss)
+ {
+ int ret;
++ u8 cpl;
+
+ if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
+ return emulate_gp(ctxt, 0);
+@@ -2346,7 +2356,8 @@ static int load_state_from_tss32(struct
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+- * descriptors
++ * descriptors. This is important because CPL checks will
++ * use CS.RPL.
+ */
+ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+ set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+@@ -2356,29 +2367,31 @@ static int load_state_from_tss32(struct
+ set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
+ set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
+
++ cpl = tss->cs & 3;
++
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+- ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
++ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
++ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
++ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
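
The key line in the hunks above is "cpl = tss->cs & 3;": the low two bits of an x86 segment selector are its RPL, and during the switch they are taken as the new CPL. A self-contained sketch (selector values chosen for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bits 1:0 of a selector are the RPL; bit 2 is the table indicator
     * and bits 15:3 the descriptor index. */
    static uint8_t cpl_from_selector(uint16_t cs)
    {
        return cs & 3;
    }

    int main(void)
    {
        assert(cpl_from_selector(0x0008) == 0); /* typical kernel CS */
        assert(cpl_from_selector(0x0033) == 3); /* typical user CS */
        printf("ok\n");
        return 0;
    }
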
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch)
@@ -0,0 +1,67 @@
+From: Petr Matousek <pmatouse at redhat.com>
+Date: Tue, 23 Sep 2014 20:22:30 +0200
+Subject: kvm: vmx: handle invvpid vm exit gracefully
+Origin: https://git.kernel.org/linus/a642fc305053cc1c6e47e4f4df327895747ab485
+
+On systems with invvpid instruction support (the corresponding bit in
+the IA32_VMX_EPT_VPID_CAP MSR is set), guest invocation of invvpid
+causes a vm exit, which is currently not handled and results in
+propagation of an unknown exit to userspace.
+
+Fix this by installing an invvpid vm exit handler.
+
+This is CVE-2014-3646.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Petr Matousek <pmatouse at redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust filename
+ - Drop inapplicable change to exit reason string array]
+---
+ arch/x86/include/asm/vmx.h | 2 ++
+ arch/x86/kvm/vmx.c | 9 ++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -280,6 +280,7 @@ enum vmcs_field {
+ #define EXIT_REASON_EPT_VIOLATION 48
+ #define EXIT_REASON_EPT_MISCONFIG 49
+ #define EXIT_REASON_INVEPT 50
++#define EXIT_REASON_INVVPID 53
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5624,6 +5624,12 @@ static int handle_invept(struct kvm_vcpu
+ return 1;
+ }
+
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -5666,6 +5672,7 @@ static int (*kvm_vmx_exit_handlers[])(st
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_INVEPT] = handle_invept,
++ [EXIT_REASON_INVVPID] = handle_invvpid,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -5850,7 +5857,7 @@ static bool nested_vmx_exit_handled(stru
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+- case EXIT_REASON_INVEPT:
++ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
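
The fix is an instance of the exit-handler-table pattern: any exit reason without a handler propagates as an unknown exit, so the cure is to install a handler — here one that reflects #UD into the guest. A minimal user-space sketch (the constants and dispatch loop are simplified assumptions):

    #include <stdio.h>

    enum { EXIT_REASON_INVVPID = 53, NR_EXIT_REASONS = 64 };

    typedef int (*exit_handler)(void);

    static int handle_invvpid(void)
    {
        printf("inject #UD into the guest\n");
        return 1;                       /* handled; guest may resume */
    }

    static exit_handler handlers[NR_EXIT_REASONS] = {
        [EXIT_REASON_INVVPID] = handle_invvpid,
    };

    static int dispatch(int reason)
    {
        if (reason >= 0 && reason < NR_EXIT_REASONS && handlers[reason])
            return handlers[reason]();
        return -1;                      /* unknown exit -> userspace */
    }

    int main(void)
    {
        return dispatch(EXIT_REASON_INVVPID) == 1 ? 0 : 1;
    }
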
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch)
@@ -0,0 +1,193 @@
+From: Nadav Har'El <nyh at il.ibm.com>
+Date: Mon, 5 Aug 2013 11:07:17 +0300
+Subject: nEPT: Nested INVEPT
+Origin: https://git.kernel.org/linus/bfd0a56b90005f8c8a004baf407ad90045c2b11e
+
+If we let L1 use EPT, we should probably also support the INVEPT instruction.
+
+In our current nested EPT implementation, when L1 changes its EPT table
+for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
+the course of this modification already calls INVEPT. But if the last
+level of the shadow page is unsync, not all of L1's changes to EPT12
+are intercepted, which means roots need to be synced when L1 calls
+INVEPT. Global INVEPT
+should not be different since roots are synced by kvm_mmu_load() each
+time EPTP02 changes.
+
+Reviewed-by: Xiao Guangrong <xiaoguangrong at linux.vnet.ibm.com>
+Signed-off-by: Nadav Har'El <nyh at il.ibm.com>
+Signed-off-by: Jun Nakajima <jun.nakajima at intel.com>
+Signed-off-by: Xinhao Xu <xinhao.xu at intel.com>
+Signed-off-by: Yang Zhang <yang.z.zhang at Intel.com>
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context, filename
+ - Add definition of nested_ept_get_cr3(), added upstream by commit
+ 155a97a3d7c7 ("nEPT: MMU context for nested EPT")]
+---
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -279,6 +279,7 @@ enum vmcs_field {
+ #define EXIT_REASON_APIC_ACCESS 44
+ #define EXIT_REASON_EPT_VIOLATION 48
+ #define EXIT_REASON_EPT_MISCONFIG 49
++#define EXIT_REASON_INVEPT 50
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+
+@@ -397,6 +398,7 @@ enum vmcs_field {
+ #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
+ #define VMX_EPT_EXTENT_CONTEXT 1
+ #define VMX_EPT_EXTENT_GLOBAL 2
++#define VMX_EPT_EXTENT_SHIFT 24
+
+ #define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
+ #define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
+@@ -404,6 +406,7 @@ enum vmcs_field {
+ #define VMX_EPTP_WB_BIT (1ull << 14)
+ #define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
+ #define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
++#define VMX_EPT_INVEPT_BIT (1ull << 20)
+ #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
+ #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
+ #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2869,6 +2869,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu
+ mmu_sync_roots(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ }
++EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
+
+ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+ u32 access, struct x86_exception *exception)
+@@ -3131,6 +3132,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *
+ ++vcpu->stat.tlb_flush;
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
++EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
+
+ static void paging_new_cr3(struct kvm_vcpu *vcpu)
+ {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -602,6 +602,7 @@ static void nested_release_page_clean(st
+ kvm_release_page_clean(page);
+ }
+
++static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
+ static u64 construct_eptp(unsigned long root_hpa);
+ static void kvm_cpu_vmxon(u64 addr);
+ static void kvm_cpu_vmxoff(void);
+@@ -1899,6 +1900,7 @@ static u32 nested_vmx_secondary_ctls_low
+ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
+ static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+ static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
++static u32 nested_vmx_ept_caps;
+ static __init void nested_vmx_setup_ctls_msrs(void)
+ {
+ /*
+@@ -5554,6 +5556,74 @@ static int handle_vmptrst(struct kvm_vcp
+ return 1;
+ }
+
++/* Emulate the INVEPT instruction */
++static int handle_invept(struct kvm_vcpu *vcpu)
++{
++ u32 vmx_instruction_info, types;
++ unsigned long type;
++ gva_t gva;
++ struct x86_exception e;
++ struct {
++ u64 eptp, gpa;
++ } operand;
++ u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
++
++ if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
++ !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++ }
++
++ if (!nested_vmx_check_permission(vcpu))
++ return 1;
++
++ if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++ }
++
++ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
++ type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
++
++ types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
++
++ if (!(types & (1UL << type))) {
++ nested_vmx_failValid(vcpu,
++ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++ return 1;
++ }
++
++ /* According to the Intel VMX instruction reference, the memory
++ * operand is read even if it isn't needed (e.g., for type==global)
++ */
++ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
++ vmx_instruction_info, &gva))
++ return 1;
++ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
++ sizeof(operand), &e)) {
++ kvm_inject_page_fault(vcpu, &e);
++ return 1;
++ }
++
++ switch (type) {
++ case VMX_EPT_EXTENT_CONTEXT:
++ if ((operand.eptp & eptp_mask) !=
++ (nested_ept_get_cr3(vcpu) & eptp_mask))
++ break;
++ case VMX_EPT_EXTENT_GLOBAL:
++ kvm_mmu_sync_roots(vcpu);
++ kvm_mmu_flush_tlb(vcpu);
++ nested_vmx_succeed(vcpu);
++ break;
++ default:
++ BUG_ON(1);
++ break;
++ }
++
++ skip_emulated_instruction(vcpu);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -5595,6 +5665,7 @@ static int (*kvm_vmx_exit_handlers[])(st
+ [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
++ [EXIT_REASON_INVEPT] = handle_invept,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -5779,6 +5850,7 @@ static bool nested_vmx_exit_handled(stru
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
++ case EXIT_REASON_INVEPT:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -6440,6 +6512,12 @@ static void vmx_set_supported_cpuid(u32
+ entry->ecx |= bit(X86_FEATURE_VMX);
+ }
+
++static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
++{
++ /* return the page table to be shadowed - in our case, EPT12 */
++ return get_vmcs12(vcpu)->ept_pointer;
++}
++
+ /*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
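
One detail worth unpacking from handle_invept() above is the supported-types test: shifting the capability MSR right by VMX_EPT_EXTENT_SHIFT lines the CONTEXT/GLOBAL capability bits up with the type values 1 and 2, and masking with 6 keeps exactly those two. A self-contained sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define VMX_EPT_EXTENT_CONTEXT      1
    #define VMX_EPT_EXTENT_GLOBAL       2
    #define VMX_EPT_EXTENT_SHIFT        24
    #define VMX_EPT_EXTENT_CONTEXT_BIT  (1u << 25)
    #define VMX_EPT_EXTENT_GLOBAL_BIT   (1u << 26)

    /* (caps >> 24) & 6 keeps only the CONTEXT (bit 1) and GLOBAL (bit 2)
     * capability bits, aligned with the guest's requested type value. */
    static int invept_type_supported(uint32_t ept_caps, unsigned long type)
    {
        uint32_t types = (ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
        return (types & (1UL << type)) != 0;
    }

    int main(void)
    {
        uint32_t caps = VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_EXTENT_GLOBAL_BIT;
        printf("context=%d global=%d individual=%d\n",
               invept_type_supported(caps, VMX_EPT_EXTENT_CONTEXT),
               invept_type_supported(caps, VMX_EPT_EXTENT_GLOBAL),
               invept_type_supported(caps, 0));
        return 0;
    }
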
Copied: dists/squeeze-backports/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch (from r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch)
@@ -0,0 +1,105 @@
+From: Andy Lutomirski <luto at amacapital.net>
+Date: Wed, 8 Oct 2014 09:02:13 -0700
+Subject: x86,kvm,vmx: Preserve CR4 across VM entry
+Origin: https://git.kernel.org/linus/d974baa398f34393db76be45f7d4d04fbdbb4a0a
+
+CR4 isn't constant; at least the TSD and PCE bits can vary.
+
+TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
+like it's correct.
+
+This adds a branch and a read from cr4 to each vm entry. Because it is
+extremely likely that consecutive entries into the same vcpu will have
+the same host cr4 value, this fixes up the vmcs instead of restoring cr4
+after the fact. A subsequent patch will add a kernel-wide cr4 shadow,
+reducing the overhead in the common case to just two memory reads and a
+branch.
+
+Signed-off-by: Andy Lutomirski <luto at amacapital.net>
+Acked-by: Paolo Bonzini <pbonzini at redhat.com>
+Cc: stable at vger.kernel.org
+Cc: Petr Matousek <pmatouse at redhat.com>
+Cc: Gleb Natapov <gleb at kernel.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Add struct vcpu_vmx *vmx parameter to vmx_set_constant_host_state(), done
+ upstream in commit a547c6db4d2f ("KVM: VMX: Enable acknowledge interupt
+ on vmexit")]
+---
+ arch/x86/kvm/vmx.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -390,6 +390,7 @@ struct vcpu_vmx {
+ u16 fs_sel, gs_sel, ldt_sel;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
++ unsigned long vmcs_host_cr4; /* May not match real cr4 */
+ } host_state;
+ struct {
+ int vm86_active;
+@@ -3631,16 +3632,21 @@ static void vmx_disable_intercept_for_ms
+ * Note that host-state that does change is set elsewhere. E.g., host-state
+ * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+ */
+-static void vmx_set_constant_host_state(void)
++static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ {
+ u32 low32, high32;
+ unsigned long tmpl;
+ struct desc_ptr dt;
++ unsigned long cr4;
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+
++ /* Save the most likely value for this task's CR4 in the VMCS. */
++ cr4 = read_cr4();
++ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
++ vmx->host_state.vmcs_host_cr4 = cr4;
++
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+@@ -3762,7 +3768,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
+- vmx_set_constant_host_state();
++ vmx_set_constant_host_state(vmx);
+ #ifdef CONFIG_X86_64
+ rdmsrl(MSR_FS_BASE, a);
+ vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
+@@ -6176,6 +6182,7 @@ static void atomic_switch_perf_msrs(stru
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ unsigned long cr4;
+
+ if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -6206,6 +6213,12 @@ static void __noclone vmx_vcpu_run(struc
+ if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
++ cr4 = read_cr4();
++ if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++ vmcs_writel(HOST_CR4, cr4);
++ vmx->host_state.vmcs_host_cr4 = cr4;
++ }
++
+ /* When single-stepping over STI and MOV SS, we must clear the
+ * corresponding interruptibility bits in the guest state. Otherwise
+ * vmentry fails as it then expects bit 14 (BS) in pending debug
+@@ -6670,7 +6683,7 @@ static void prepare_vmcs02(struct kvm_vc
+ * Other fields are different per CPU, and will be set later when
+ * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ */
+- vmx_set_constant_host_state();
++ vmx_set_constant_host_state(vmx);
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
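
The change above is a classic cache-and-compare: keep a shadow of the last value written to the VMCS and only rewrite when the live CR4 diverges. A minimal user-space sketch of that pattern (read_cr4()/vmcs_writel() are simulated here, not the real primitives):

    #include <stdio.h>

    static unsigned long live_cr4 = 0x1234;  /* simulated hardware CR4 */
    static unsigned long vmcs_host_cr4;      /* last value written to VMCS */
    static unsigned long vmcs_writes;

    static unsigned long read_cr4(void) { return live_cr4; }
    static void vmcs_writel(unsigned long v) { vmcs_host_cr4 = v; vmcs_writes++; }

    static void vcpu_run_once(void)
    {
        unsigned long cr4 = read_cr4();
        if (cr4 != vmcs_host_cr4)        /* unlikely() in the real code */
            vmcs_writel(cr4);
    }

    int main(void)
    {
        vcpu_run_once();                 /* first entry: one write */
        vcpu_run_once();                 /* unchanged CR4: no write */
        live_cr4 |= 1ul << 2;            /* e.g. the TSD bit flips */
        vcpu_run_once();                 /* changed: one more write */
        printf("vmcs writes: %lu\n", vmcs_writes); /* prints 2 */
        return 0;
    }
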
Copied: dists/squeeze-backports/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch)
@@ -0,0 +1,35 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 27 Oct 2014 00:17:13 +0000
+Subject: drivers/net: Avoid ABI change for UFO/IPv6 fix
+Forwarded: not-needed
+
+Hide new header inclusion from genksyms.
+
+The functions exported from these drivers (macvtap_get_socket() and
+tun_get_socket()) don't seem to be used by anything in wheezy but
+it looks like they could be used by a backport of vhost.
+
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -15,7 +15,9 @@
+ #include <linux/cdev.h>
+ #include <linux/fs.h>
+
++#ifndef __GENKSYMS__
+ #include <net/ipv6.h>
++#endif
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -64,7 +64,9 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#ifndef __GENKSYMS__
+ #include <net/ipv6.h>
++#endif
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
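
The #ifndef __GENKSYMS__ idiom works because genksyms runs the preprocessor with __GENKSYMS__ defined when computing symbol-version CRCs, so the guarded include is invisible to the hash yet still compiled into the real object. A toy sketch that shows the two views (build once plainly and once with -D__GENKSYMS__):

    #include <stdio.h>

    /* The second build reproduces what the CRC generator sees. */
    #ifndef __GENKSYMS__
    #define VIEW "full build (hidden include compiled in)"
    #else
    #define VIEW "genksyms view (hidden include invisible)"
    #endif

    int main(void)
    {
        puts(VIEW);
        return 0;
    }
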
Modified: dists/squeeze-backports/linux/debian/patches/debian/inetpeer-avoid-abi-change-in-3.2.52.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/debian/inetpeer-avoid-abi-change-in-3.2.52.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/debian/inetpeer-avoid-abi-change-in-3.2.52.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -23,8 +23,8 @@
- };
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
-@@ -64,6 +60,13 @@ struct inet_peer {
+ * are not available: rid, tcp_ts, tcp_ts_stamp
+@@ -63,6 +59,13 @@ struct inet_peer {
/* following fields might be frequently dirtied */
__u32 dtime; /* the time of last use of not referenced entries */
atomic_t refcnt;
Copied: dists/squeeze-backports/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/ip-ident-avoid-abi-change-in-3.2.63.patch)
@@ -0,0 +1,53 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 16 Sep 2014 02:44:19 +0100
+Subject: ip_ident: Avoid ABI change in 3.2.63
+
+Commits 64b5c251d5b2 ('inetpeer: get rid of ip_id_count') and
+04ca6973f7c1 ('ip: make IP identifiers less predictable'),
+backported into 3.2.63, removed a field from struct inetpeer
+and changed the parameters of __ip_select_ident(). Restore the
+field as unused padding and the old parameter as unused.
+
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -49,6 +49,7 @@ struct inet_peer {
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
++ atomic_t ip_id_count; /* now unused */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -265,7 +265,8 @@ int ip_dont_fragment(struct sock *sk, st
+ }
+
+ u32 ip_idents_reserve(u32 hash, int segs);
+-void __ip_select_ident(struct iphdr *iph, int segs);
++void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst /*unused*/,
++ int segs);
+
+ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+ {
+@@ -284,7 +285,7 @@ static inline void ip_select_ident_segs(
+ iph->id = 0;
+ }
+ } else {
+- __ip_select_ident(iph, segs);
++ __ip_select_ident(iph, NULL, segs);
+ }
+ }
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1374,7 +1374,8 @@ u32 ip_idents_reserve(u32 hash, int segs
+ }
+ EXPORT_SYMBOL(ip_idents_reserve);
+
+-void __ip_select_ident(struct iphdr *iph, int segs)
++void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst /*unused*/,
++ int segs)
+ {
+ static u32 ip_idents_hashrnd __read_mostly;
+ static bool hashrnd_initialized = false;
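
Keeping the dropped dst parameter, unused, is what preserves the function's ABI for already-built callers. A minimal sketch of the same keep-but-ignore shape (the types and the stand-in hash are illustrative assumptions):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dst_entry;                   /* opaque; never dereferenced */

    /* The new implementation no longer needs dst, but callers built
     * against the old prototype still pass it, so it stays in the
     * signature and is ignored. */
    static void select_ident(uint16_t *id,
                             struct dst_entry *dst /*unused*/, int segs)
    {
        (void)dst;
        *id = (uint16_t)(0x1234 + segs); /* stand-in for the real hash */
    }

    int main(void)
    {
        uint16_t id;
        select_ident(&id, NULL, 1);
        printf("id=%#x\n", id);
        return 0;
    }
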
Copied: dists/squeeze-backports/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/irq-avoid-abi-change-in-3.2.61.patch)
@@ -0,0 +1,32 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 07 Jul 2014 01:09:53 +0100
+Subject: irq: Avoid ABI change in 3.2.61
+Forwarded: not-needed
+
+Move the new fields to the end of struct irq_desc and hide them from
+genksyms. Although struct irq_desc is normally allocated as part of a
+static array, access is abstracted through the irq_to_desc() function
+so modules don't depend on the size of the structure.
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -55,8 +55,6 @@ struct irq_desc {
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
+- atomic_t threads_handled;
+- int threads_handled_last;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+@@ -74,6 +72,10 @@ struct irq_desc {
+ #endif
+ struct module *owner;
+ const char *name;
++#ifndef __GENKSYMS__
++ atomic_t threads_handled;
++ int threads_handled_last;
++#endif
+ } ____cacheline_internodealigned_in_smp;
+
+ #ifndef CONFIG_SPARSE_IRQ
Copied: dists/squeeze-backports/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/libata-avoid-abi-change-in-3.2.62.patch)
@@ -0,0 +1,77 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 05 Aug 2014 09:51:04 +0100
+Subject: libata: Avoid ABI change in 3.2.62
+
+Commit 1a112d10f03e ('libata: introduce ata_host->n_tags to avoid oops
+on SAS controllers') added a field in a structure which may be
+driver-allocated. So we can't simply move it to the end. However
+there is a flags field with many free bits, and we only need 5 bits
+(ATA_MAX_QUEUE == 32 and n_tags must be less than this). Add a
+pair of accessors to make this slightly less ugly.
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -171,6 +171,17 @@ static bool ata_sstatus_online(u32 sstat
+ return (sstatus & 0xf) == 0x3;
+ }
+
++static unsigned int ata_host_get_n_tags(const struct ata_host *host)
++{
++ return (host->flags & ATA_HOST_N_TAGS_MASK) >> ATA_HOST_N_TAGS_SHIFT;
++}
++
++static void ata_host_set_n_tags(struct ata_host *host, unsigned int n_tags)
++{
++ host->flags = ((host->flags & ~ATA_HOST_N_TAGS_MASK) |
++ (n_tags << ATA_HOST_N_TAGS_SHIFT));
++}
++
+ /**
+ * ata_link_next - link iteration helper
+ * @link: the previous link, NULL to start
+@@ -4728,7 +4739,7 @@ void swap_buf_le16(u16 *buf, unsigned in
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+ struct ata_queued_cmd *qc = NULL;
+- unsigned int max_queue = ap->host->n_tags;
++ unsigned int max_queue = ata_host_get_n_tags(ap->host);
+ unsigned int i, tag;
+
+ /* no command while frozen */
+@@ -5929,7 +5940,7 @@ void ata_host_init(struct ata_host *host
+ {
+ spin_lock_init(&host->lock);
+ mutex_init(&host->eh_mutex);
+- host->n_tags = ATA_MAX_QUEUE - 1;
++ ata_host_set_n_tags(host, ATA_MAX_QUEUE - 1);
+ host->dev = dev;
+ host->flags = flags;
+ host->ops = ops;
+@@ -6010,7 +6021,7 @@ int ata_host_register(struct ata_host *h
+ {
+ int i, rc;
+
+- host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
++ ata_host_set_n_tags(host, clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1));
+
+ /* host must have been started */
+ if (!(host->flags & ATA_HOST_STARTED)) {
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -248,6 +248,8 @@ enum {
+ ATA_HOST_STARTED = (1 << 1), /* Host started */
+ ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
+ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
++ ATA_HOST_N_TAGS_SHIFT = 4,
++ ATA_HOST_N_TAGS_MASK = (ATA_MAX_QUEUE - 1) << ATA_HOST_N_TAGS_SHIFT,
+
+ /* bits 24:31 of host->flags are reserved for LLD specific flags */
+
+@@ -541,7 +543,6 @@ struct ata_host {
+ struct device *dev;
+ void __iomem * const *iomap;
+ unsigned int n_ports;
+- unsigned int n_tags; /* nr of NCQ tags */
+ void *private_data;
+ struct ata_port_operations *ops;
+ unsigned long flags;
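
The accessors above pack a 5-bit count into free bits of the existing flags word, so the struct layout is unchanged. A self-contained round-trip of the same get/set pair (same constants, simplified struct):

    #include <assert.h>
    #include <stdio.h>

    #define ATA_MAX_QUEUE         32
    #define ATA_HOST_N_TAGS_SHIFT 4
    #define ATA_HOST_N_TAGS_MASK  ((ATA_MAX_QUEUE - 1) << ATA_HOST_N_TAGS_SHIFT)

    struct ata_host { unsigned long flags; };

    static unsigned int get_n_tags(const struct ata_host *host)
    {
        return (host->flags & ATA_HOST_N_TAGS_MASK) >> ATA_HOST_N_TAGS_SHIFT;
    }

    static void set_n_tags(struct ata_host *host, unsigned int n_tags)
    {
        host->flags = (host->flags & ~(unsigned long)ATA_HOST_N_TAGS_MASK) |
                      ((unsigned long)n_tags << ATA_HOST_N_TAGS_SHIFT);
    }

    int main(void)
    {
        struct ata_host host = { .flags = 0x3 };  /* unrelated flag bits */
        set_n_tags(&host, ATA_MAX_QUEUE - 1);
        assert(get_n_tags(&host) == 31);
        assert((host.flags & 0xf) == 0x3);        /* low bits untouched */
        set_n_tags(&host, 7);
        assert(get_n_tags(&host) == 7);
        printf("ok\n");
        return 0;
    }
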
Copied: dists/squeeze-backports/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/nlattr-avoid-abi-change-in-3.2.61.patch)
@@ -0,0 +1,21 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 16 Sep 2014 14:19:33 +0100
+Subject: nlattr: Avoid ABI change in 3.2.61
+
+Commit bfc5184b69cf ('netlink: rate-limit leftover bytes warning and
+print process name') added new #includes, and these changed symbol
+hashes on some architectures. Hide them from genksyms.
+
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -12,8 +12,10 @@
+ #include <linux/netdevice.h>
+ #include <linux/skbuff.h>
+ #include <linux/string.h>
++#ifndef __GENKSYMS__
+ #include <linux/ratelimit.h>
+ #include <linux/sched.h>
++#endif
+ #include <linux/types.h>
+ #include <net/netlink.h>
+
Copied: dists/squeeze-backports/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/ptrace-avoid-abi-change-in-3.2.61.patch)
@@ -0,0 +1,20 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 07 Jul 2014 09:52:12 +0100
+Subject: ptrace: Avoid ABI change in 3.2.61
+Forwarded: not-needed
+
+Hide a new #include from genksyms, which changed the symbol hashes for
+most of networking.
+
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -112,7 +112,9 @@
+
+ #include <linux/compiler.h> /* For unlikely. */
+ #include <linux/sched.h> /* For struct task_struct. */
++#ifndef __GENKSYMS__
+ #include <linux/pid_namespace.h> /* For task_active_pid_ns. */
++#endif
+
+
+ extern long arch_ptrace(struct task_struct *child, long request,
Copied: dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.61.patch)
@@ -0,0 +1,43 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sun, 01 Jun 2014 20:47:46 +0100
+Subject: SCSI: Avoid ABI change in 3.2.61
+
+Commit e63ed0d7a980 ("scsi: fix our current target reap infrastructure")
+removed one field (ew) and changed the type of another (reap_ref).
+
+Put back 'ew' and hide the type change to 'reap_ref', which remains
+the same size and is only used within the SCSI core.
+
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -445,6 +445,8 @@ static struct scsi_target *scsi_alloc_ta
+ }
+ dev = &starget->dev;
+ device_initialize(dev);
++ /* bwh: assert binary compatibility */
++ BUILD_BUG_ON(sizeof(starget->reap_ref) != sizeof(unsigned int));
+ kref_init(&starget->reap_ref);
+ dev->parent = get_device(parent);
+ dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -239,7 +239,11 @@ struct scsi_target {
+ struct list_head siblings;
+ struct list_head devices;
+ struct device dev;
++#ifdef __GENKSYMS__
++ unsigned int reap_ref;
++#else
+ struct kref reap_ref; /* last put renders target invisible */
++#endif
+ unsigned int channel;
+ unsigned int id; /* target id ... replace
+ * scsi_device.id eventually */
+@@ -261,6 +265,7 @@ struct scsi_target {
+ #define SCSI_DEFAULT_TARGET_BLOCKED 3
+
+ char scsi_level;
++ struct execute_work ew; /* bwh: unused, for binary compatibility */
+ enum scsi_target_state state;
+ void *hostdata; /* available to low-level driver */
+ unsigned long starget_data[0]; /* for the transport */
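
The BUILD_BUG_ON() added above makes the type swap self-checking: the build breaks if struct kref ever stops being the size of an unsigned int. A user-space sketch of the same compile-time assertion (the macro is a common stand-in for the kernel's, and the struct kref layout is an assumption for the sketch):

    #include <stdio.h>

    /* A negative array size fails the build when cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct kref { unsigned int refcount; }; /* assumed layout */

    int main(void)
    {
        /* The type of reap_ref changed from unsigned int to struct kref;
         * the swap is only ABI-safe if the sizes match, and the compiler
         * now enforces that. */
        BUILD_BUG_ON(sizeof(struct kref) != sizeof(unsigned int));
        printf("sizes match\n");
        return 0;
    }
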
Copied: dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/scsi-avoid-abi-change-in-3.2.62.patch)
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 16 Sep 2014 02:51:20 +0100
+Subject: SCSI: Avoid ABI change in 3.2.62
+
+Commit b14bf2d0c035 ('usb-storage/SCSI: Add broken_fua blacklist flag')
+adds a new 1-bit field to struct scsi_device. This is fine because it
+had 29 bits of padding to spare, but we need to hide the new field
+from genksyms.
+
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -151,7 +151,10 @@ struct scsi_device {
+ unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */
+ unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
+ unsigned is_visible:1; /* is the device visible in sysfs */
++#ifndef __GENKSYMS__
+ unsigned broken_fua:1; /* Don't set FUA bit */
++ /* 28 bits of padding */
++#endif
+
+ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
+ struct list_head event_list; /* asserted events */
Copied: dists/squeeze-backports/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/sp5100_tco-reject-sb8x0-chips.patch)
@@ -0,0 +1,27 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Wed, 24 Sep 2014 03:02:28 +0100
+Subject: sp5100_tco: Reject SB8x0 chips
+Bug-Debian: https://bugs.debian.org/726150
+Forwarded: not-needed
+
+The SMBus functions of SB8x0 chips have the same PCI ID as on the
+SP5100, but the TCO timer has a different register set! They can be
+distinguished by PCI revision.
+
+Upstream commit 740fbddf5c3f ('watchdog: sp5100_tco: Add SB8x0 chipset
+support') adds support for the newer chips, but we're playing safe
+here by rejecting them completely.
+
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -287,6 +287,10 @@ static unsigned char __devinit sp5100_tc
+ if (!sp5100_tco_pci)
+ return 0;
+
++ /* Higher revisions are SB8x0 which have a different register set */
++ if (sp5100_tco_pci->revision >= 0x40)
++ return 0;
++
+ /* Request the IO ports used by this driver */
+ pm_iobase = SP5100_IO_PM_INDEX_REG;
+ if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
Copied: dists/squeeze-backports/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/debian/trace-syscall-avoid-abi-change-in-3.2.61.patch)
@@ -0,0 +1,19 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Fri, 11 Jul 2014 09:07:00 +0100
+Subject: trace/syscall: Avoid ABI change in 3.2.61
+Forwarded: not-needed
+
+Hide the added #include from genksyms.
+
+--- a/include/trace/syscall.h
++++ b/include/trace/syscall.h
+@@ -4,7 +4,9 @@
+ #include <linux/tracepoint.h>
+ #include <linux/unistd.h>
+ #include <linux/ftrace_event.h>
++#ifndef __GENKSYMS__
+ #include <linux/thread_info.h>
++#endif
+
+ #include <asm/ptrace.h>
+
Modified: dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/all/drm/drm-3.4.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -2156,7 +2156,7 @@
+}
+EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index acfe567..e9f1ef5 100644
+index 0731d43..e9f1ef5 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
@@ -2226,6 +2226,17 @@
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
+@@ -456,9 +468,8 @@ long drm_ioctl(struct file *filp,
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+- } else if (cmd & IOC_OUT) {
++ } else
+ memset(kdata, 0, usize);
+- }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 72f460e..d75dccb 100644
--- a/drivers/gpu/drm/drm_edid.c
@@ -45455,7 +45466,7 @@
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index b1bb734..e97ed61 100644
+index a0b69ae..e97ed61 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
@@ -45470,62 +45481,18 @@
}
struct eb_objects {
-@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (unlikely(target_offset == 0)) {
-- DRM_ERROR("No GTT space found for object %d\n",
-+ DRM_DEBUG("No GTT space found for object %d\n",
- reloc->target_handle);
- return ret;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-- DRM_ERROR("reloc with multiple write domains: "
-+ DRM_DEBUG("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
-- DRM_ERROR("reloc with read/write CPU domains: "
+- DRM_DEBUG("reloc with read/write CPU domains: "
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
-@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- }
- if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain)) {
-- DRM_ERROR("Write domain conflict: "
-+ DRM_DEBUG("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
-@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-
- /* Check that the relocation address is valid... */
- if (unlikely(reloc->offset > obj->base.size - 4)) {
-- DRM_ERROR("Relocation beyond object bounds: "
-+ DRM_DEBUG("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
-@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- return ret;
- }
- if (unlikely(reloc->offset & 3)) {
-- DRM_ERROR("Relocation not 4-byte aligned: "
-+ DRM_DEBUG("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
@@ -45703,13 +45670,13 @@
- i915_gem_object_unpin(obj);
+ list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
-+
-+ if (!obj->gtt_space)
-+ continue;
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
++ if (!obj->gtt_space)
++ continue;
++
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
@@ -45720,15 +45687,19 @@
}
return ret;
-@@ -704,7 +754,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
-- DRM_ERROR("Invalid object handle %d at index %d\n",
-+ DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- ret = -ENOENT;
- goto err;
+@@ -679,9 +729,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+- if (__copy_to_user(&user_relocs[j].presumed_offset,
+- &invalid_offset,
+- sizeof(invalid_offset))) {
++ if (copy_to_user(&user_relocs[j].presumed_offset,
++ &invalid_offset,
++ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
@@ -998,6 +1048,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
@@ -45761,88 +45732,6 @@
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
-@@ -1015,7 +1090,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- int ret, mode, i;
-
- if (!i915_gem_check_execbuffer(args)) {
-- DRM_ERROR("execbuf with invalid offset/length\n");
-+ DRM_DEBUG("execbuf with invalid offset/length\n");
- return -EINVAL;
- }
-
-@@ -1030,20 +1105,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- break;
- case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
-- DRM_ERROR("execbuf with invalid ring (BSD)\n");
-+ DRM_DEBUG("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
- ring = &dev_priv->ring[VCS];
- break;
- case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
-- DRM_ERROR("execbuf with invalid ring (BLT)\n");
-+ DRM_DEBUG("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
- ring = &dev_priv->ring[BCS];
- break;
- default:
-- DRM_ERROR("execbuf with unknown ring: %d\n",
-+ DRM_DEBUG("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-@@ -1069,18 +1144,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- }
- break;
- default:
-- DRM_ERROR("execbuf with unknown constants: %d\n", mode);
-+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
- return -EINVAL;
- }
-
- if (args->buffer_count < 1) {
-- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- if (args->num_cliprects != 0) {
- if (ring != &dev_priv->ring[RCS]) {
-- DRM_ERROR("clip rectangles are only valid with the render ring\n");
-+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
- return -EINVAL;
- }
-
-@@ -1130,7 +1205,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
-- DRM_ERROR("Invalid object handle %d at index %d\n",
-+ DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- /* prevent error path from reading uninitialized data */
- ret = -ENOENT;
-@@ -1138,7 +1213,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- }
-
- if (!list_empty(&obj->exec_list)) {
-- DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
-+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
- obj, exec[i].handle, i);
- ret = -EINVAL;
- goto err;
-@@ -1176,7 +1251,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- if (batch_obj->base.pending_write_domain) {
-- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
-+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
@@ -1193,7 +1268,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
@@ -45865,78 +45754,71 @@
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
-@@ -1275,7 +1356,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- int ret, i;
-
- if (args->buffer_count < 1) {
-- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
+@@ -1325,21 +1406,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
-@@ -1283,7 +1364,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
-- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
-@@ -1294,7 +1375,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
-- DRM_ERROR("copy %d exec entries failed %d\n",
-+ DRM_DEBUG("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
-@@ -1335,7 +1416,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
-- DRM_ERROR("failed to copy %d exec entries "
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+- struct drm_i915_gem_exec_object __user *user_exec_list =
+- (void __user *)(uintptr_t)args->buffers_ptr;
+-
+ /* Copy the new buffer offsets back to the user's exec list. */
+- for (i = 0; i < args->buffer_count; i++) {
+- ret = __copy_to_user(&user_exec_list[i].offset,
+- &exec2_list[i].offset,
+- sizeof(user_exec_list[i].offset));
+- if (ret) {
+- ret = -EFAULT;
+- DRM_DEBUG("failed to copy %d exec entries "
+- "back to user (%d)\n",
+- args->buffer_count, ret);
+- break;
+- }
++ for (i = 0; i < args->buffer_count; i++)
++ exec_list[i].offset = exec2_list[i].offset;
++ /* ... and back out to userspace */
++ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++ (uintptr_t) args->buffers_ptr,
++ exec_list,
++ sizeof(*exec_list) * args->buffer_count);
++ if (ret) {
++ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
++ "back to user (%d)\n",
++ args->buffer_count, ret);
}
-@@ -1356,7 +1437,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
-
- if (args->buffer_count < 1 ||
- args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
-- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
-+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
}
-@@ -1366,7 +1447,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
- exec2_list = drm_malloc_ab(sizeof(*exec2_list),
- args->buffer_count);
- if (exec2_list == NULL) {
-- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
-@@ -1375,7 +1456,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
-- DRM_ERROR("copy %d exec entries failed %d\n",
-+ DRM_DEBUG("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
-@@ -1390,7 +1471,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
-- DRM_ERROR("failed to copy %d exec entries "
+@@ -1386,21 +1465,15 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+- struct drm_i915_gem_exec_object2 __user *user_exec_list =
+- (void __user *)(uintptr_t)args->buffers_ptr;
+- int i;
+-
+- for (i = 0; i < args->buffer_count; i++) {
+- ret = __copy_to_user(&user_exec_list[i].offset,
+- &exec2_list[i].offset,
+- sizeof(user_exec_list[i].offset));
+- if (ret) {
+- ret = -EFAULT;
+- DRM_DEBUG("failed to copy %d exec entries "
+- "back to user\n",
+- args->buffer_count);
+- break;
+- }
++ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++ (uintptr_t) args->buffers_ptr,
++ exec2_list,
++ sizeof(*exec2_list) * args->buffer_count);
++ if (ret) {
++ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
++ "back to user (%d)\n",
++ args->buffer_count, ret);
}
+ }
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6042c5e..99a7855 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -67890,7 +67772,7 @@
shl b32 $r14 8
sub b32 $r15 $r14 $r15
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
-index 636fe98..91d44ea 100644
+index 636fe98..91d44ea6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
@@ -71874,7 +71756,7 @@
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index cd98c06..dc612ef 100644
+index 927d170..d51c08d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -231,6 +231,22 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
@@ -72073,7 +71955,7 @@
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
-@@ -935,7 +968,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -939,7 +972,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
@@ -72084,7 +71966,7 @@
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
-@@ -956,13 +991,10 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -960,13 +995,10 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID1);
@@ -72099,7 +71981,7 @@
}
break;
case ATOM_ENCODER_MODE_LVDS:
-@@ -1007,7 +1039,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -1011,7 +1043,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
@@ -72108,7 +71990,7 @@
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
-@@ -1030,7 +1062,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+@@ -1034,7 +1066,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
@@ -72117,7 +71999,7 @@
}
}
-@@ -1047,6 +1079,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1051,6 +1083,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
@@ -72125,7 +72007,7 @@
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
-@@ -1137,20 +1170,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1141,20 +1174,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
break;
}
@@ -72152,7 +72034,7 @@
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
-@@ -1195,7 +1221,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1199,7 +1225,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
@@ -72161,7 +72043,7 @@
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
-@@ -1364,7 +1390,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+@@ -1368,7 +1394,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
@@ -72170,7 +72052,7 @@
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
-@@ -1466,7 +1492,36 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1470,7 +1496,36 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
struct drm_crtc *test_crtc;
uint32_t pll_in_use = 0;
@@ -72208,7 +72090,7 @@
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
-@@ -1481,6 +1536,8 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1485,6 +1540,8 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
@@ -72217,7 +72099,7 @@
else if (ASIC_IS_DCE5(rdev))
return ATOM_DCPLL;
}
-@@ -1507,6 +1564,26 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+@@ -1511,6 +1568,26 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
@@ -72244,7 +72126,7 @@
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
-@@ -1528,19 +1605,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+@@ -1532,19 +1609,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
@@ -72264,7 +72146,7 @@
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
-@@ -1574,18 +1638,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+@@ -1578,18 +1642,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -72293,7 +72175,7 @@
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
-@@ -1597,6 +1671,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+@@ -1601,6 +1675,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -72302,7 +72184,7 @@
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
-@@ -1617,6 +1693,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+@@ -1621,6 +1697,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
@@ -72316,7 +72198,7 @@
break;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
-index 3254d51e..8d1724c 100644
+index e8a3c31..8d1724c 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,6 +45,41 @@ static char *pre_emph_names[] = {
@@ -72378,6 +72260,15 @@
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
+@@ -89,7 +124,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+ /* flags not zero */
+ if (args.v1.ucReplyStatus == 2) {
+ DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+- return -EIO;
++ return -EBUSY;
+ }
+
+ /* error */
@@ -103,7 +138,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
@@ -72412,7 +72303,7 @@
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
-index 475a275..072229d 100644
+index 286f1fa..dd5c14e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -57,22 +57,6 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
@@ -72834,7 +72725,7 @@
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
-@@ -1738,13 +1778,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+@@ -1741,13 +1781,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
@@ -72874,7 +72765,7 @@
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
-@@ -1832,7 +1893,7 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
+@@ -1835,7 +1896,7 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
break;
}
@@ -73043,7 +72934,7 @@
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
-index 5efba47..df62c39 100644
+index 9b3f787..df62c39 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -50,6 +50,39 @@ static const u32 crtc_offsets[6] =
@@ -73581,7 +73472,15 @@
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
-@@ -3014,11 +3166,24 @@ restart_ih:
+@@ -2765,7 +2917,6 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+- wptr &= ~RB_OVERFLOW;
+ }
+ return (wptr & rdev->ih.ptr_mask);
+ }
+@@ -3015,11 +3166,24 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
@@ -73608,7 +73507,7 @@
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
-@@ -3048,6 +3213,7 @@ restart_ih:
+@@ -3049,6 +3213,7 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
@@ -73616,7 +73515,7 @@
int r;
/* enable pcie gen2 link */
-@@ -3094,7 +3260,7 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3095,7 +3260,7 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
@@ -73625,7 +73524,7 @@
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
-@@ -3103,6 +3269,12 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3104,6 +3269,12 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
@@ -73638,7 +73537,7 @@
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
-@@ -3118,7 +3290,9 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3119,7 +3290,9 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
@@ -73649,7 +73548,7 @@
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
-@@ -3128,6 +3302,23 @@ static int evergreen_startup(struct radeon_device *rdev)
+@@ -3129,6 +3302,23 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
@@ -73673,7 +73572,7 @@
return 0;
}
-@@ -3147,15 +3338,11 @@ int evergreen_resume(struct radeon_device *rdev)
+@@ -3148,15 +3338,11 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
@@ -73691,7 +73590,7 @@
return r;
}
-@@ -3165,13 +3352,17 @@ int evergreen_resume(struct radeon_device *rdev)
+@@ -3166,13 +3352,17 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
@@ -73711,7 +73610,7 @@
return 0;
}
-@@ -3242,8 +3433,8 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3243,8 +3433,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
@@ -73722,7 +73621,7 @@
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
-@@ -3252,29 +3443,24 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3253,29 +3443,24 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
@@ -73759,7 +73658,7 @@
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
-@@ -3292,15 +3478,17 @@ int evergreen_init(struct radeon_device *rdev)
+@@ -3293,15 +3478,17 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
@@ -79258,7 +79157,7 @@
if (r) {
/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
-index f7e3cc0..1555cd6 100644
+index ab46a99..1555cd6 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -49,6 +49,7 @@
@@ -79900,7 +79799,15 @@
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
-@@ -3465,11 +3493,11 @@ restart_ih:
+@@ -3280,7 +3308,6 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+- wptr &= ~RB_OVERFLOW;
+ }
+ return (wptr & rdev->ih.ptr_mask);
+ }
+@@ -3466,11 +3493,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
@@ -79914,7 +79821,7 @@
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
-@@ -3502,30 +3530,6 @@ restart_ih:
+@@ -3503,30 +3530,6 @@ restart_ih:
*/
#if defined(CONFIG_DEBUG_FS)
@@ -79945,7 +79852,7 @@
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
-@@ -3539,7 +3543,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+@@ -3540,7 +3543,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
@@ -85425,7 +85332,7 @@
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
-index e4e455e..6d0c32b 100644
+index 6d9c32b..6d0c32b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -56,6 +56,10 @@ extern void
@@ -85450,6 +85357,24 @@
reg = EVERGREEN_DC_GPIO_HPD_A;
else
reg = AVIVO_DC_GPIO_HPD_A;
+@@ -1873,7 +1879,7 @@ static const char *thermal_controller_names[] = {
+ "adm1032",
+ "adm1030",
+ "max6649",
+- "lm63", /* lm64 */
++ "lm64",
+ "f75375",
+ "asc7xxx",
+ };
+@@ -1884,7 +1890,7 @@ static const char *pp_lib_thermal_controller_names[] = {
+ "adm1032",
+ "adm1030",
+ "max6649",
+- "lm63", /* lm64 */
++ "lm64",
+ "f75375",
+ "RV6xx",
+ "RV770",
@@ -1895,6 +1901,8 @@ static const char *pp_lib_thermal_controller_names[] = {
"emc2103",
"Sumo",
@@ -86004,7 +85929,7 @@
break;
default:
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index b101843..1334dbd 100644
+index 683cede..9184bbe 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -846,6 +846,27 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
@@ -86827,7 +86752,7 @@
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
-index 3291ab8..60404f4 100644
+index ad5d774..adc9bfd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -303,8 +303,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
@@ -86887,7 +86812,7 @@
};
static const char *connector_names[15] = {
-@@ -1084,29 +1096,36 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+@@ -1089,29 +1101,36 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
@@ -86930,7 +86855,7 @@
return ERR_PTR(-ENOENT);
}
-@@ -1116,7 +1135,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+@@ -1121,7 +1140,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
@@ -86944,7 +86869,7 @@
return &radeon_fb->base;
}
-@@ -1132,11 +1156,6 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
+@@ -1137,11 +1161,6 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
.output_poll_changed = radeon_output_poll_changed
};
@@ -86956,7 +86881,7 @@
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
-@@ -1161,86 +1180,53 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+@@ -1166,86 +1185,53 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
@@ -87059,7 +86984,7 @@
return 0;
}
-@@ -1286,6 +1272,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
+@@ -1291,6 +1277,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.max_height = 4096;
}
@@ -87069,7 +86994,7 @@
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
ret = radeon_modeset_create_props(rdev);
-@@ -1313,9 +1302,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
+@@ -1318,9 +1307,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
@@ -91633,7 +91558,7 @@
if (r) {
/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
-index cea482a..5248001 100644
+index dc00155..739eb0d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,25 @@
@@ -91698,7 +91623,23 @@
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
-@@ -549,7 +559,7 @@ int rs600_irq_set(struct radeon_device *rdev)
+@@ -529,11 +539,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+ return -EINVAL;
+ }
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+- if (addr == rdev->dummy_page.addr)
+- addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+- else
+- addr |= (R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
+- R600_PTE_READABLE | R600_PTE_WRITEABLE);
++ if (addr != rdev->dummy_page.addr)
++ addr |= R600_PTE_VALID | R600_PTE_READABLE |
++ R600_PTE_WRITEABLE;
++ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ writeq(addr, ptr + (i * 8));
+ return 0;
+ }
+@@ -552,7 +561,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
@@ -91707,7 +91648,7 @@
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
-@@ -642,7 +652,7 @@ int rs600_irq_process(struct radeon_device *rdev)
+@@ -645,7 +654,7 @@ int rs600_irq_process(struct radeon_device *rdev)
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
@@ -91716,7 +91657,7 @@
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
-@@ -847,6 +857,12 @@ static int rs600_startup(struct radeon_device *rdev)
+@@ -850,6 +859,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
@@ -91729,7 +91670,7 @@
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
-@@ -862,15 +878,21 @@ static int rs600_startup(struct radeon_device *rdev)
+@@ -865,15 +880,21 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
@@ -91755,7 +91696,7 @@
return r;
}
-@@ -879,6 +901,8 @@ static int rs600_startup(struct radeon_device *rdev)
+@@ -882,6 +903,8 @@ static int rs600_startup(struct radeon_device *rdev)
int rs600_resume(struct radeon_device *rdev)
{
@@ -91764,7 +91705,7 @@
/* Make sur GART are not working */
rs600_gart_disable(rdev);
/* Resume clock before doing reset */
-@@ -895,11 +919,18 @@ int rs600_resume(struct radeon_device *rdev)
+@@ -898,11 +921,18 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
@@ -91784,7 +91725,7 @@
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
-@@ -977,7 +1008,14 @@ int rs600_init(struct radeon_device *rdev)
+@@ -980,7 +1010,14 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
@@ -99055,7 +98996,7 @@
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-index 9e4313e..578207e 100644
+index 508c64c..578207e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,6 +30,9 @@
@@ -99110,7 +99051,38 @@
return 0;
}
-@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
+@@ -398,18 +394,13 @@ static int ttm_pool_get_num_unused_pages(void)
+ static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+ {
+- static DEFINE_MUTEX(lock);
+- static unsigned start_pool;
++ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+- unsigned pool_offset;
++ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+ int shrink_pages = sc->nr_to_scan;
+
+- if (shrink_pages == 0)
+- goto out;
+- if (!mutex_trylock(&lock))
+- return -1;
+- pool_offset = ++start_pool % NUM_POOLS;
++ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+@@ -418,8 +409,6 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+- mutex_unlock(&lock);
+-out:
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+ }
+@@ -445,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
@@ -99129,7 +99101,7 @@
break;
default:
break;
-@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
+@@ -499,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
@@ -99139,7 +99111,7 @@
return -ENOMEM;
}
-@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
+@@ -508,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
@@ -99148,7 +99120,7 @@
/* store already allocated pages in the pool after
* setting the caching state */
-@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+@@ -606,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
@@ -99158,7 +99130,7 @@
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
-@@ -619,8 +609,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+@@ -626,8 +609,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
@@ -99171,7 +99143,7 @@
{
unsigned long irq_flags;
struct list_head *p;
-@@ -660,17 +652,63 @@ out:
+@@ -667,17 +652,63 @@ out:
return count;
}
@@ -99238,7 +99210,7 @@
int r;
/* set zero flag for page allocation if required */
-@@ -684,30 +722,33 @@ int ttm_get_pages(struct list_head *pages, int flags,
+@@ -691,30 +722,33 @@ int ttm_get_pages(struct list_head *pages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
@@ -99279,7 +99251,7 @@
if (PageHighMem(p))
clear_highpage(p);
else
-@@ -716,67 +757,27 @@ int ttm_get_pages(struct list_head *pages, int flags,
+@@ -723,67 +757,27 @@ int ttm_get_pages(struct list_head *pages, int flags,
}
/* If pool didn't have enough pages allocate new one. */
@@ -99355,7 +99327,7 @@
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
-@@ -794,7 +795,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+@@ -801,7 +795,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager);
@@ -99364,7 +99336,7 @@
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
-@@ -829,7 +830,7 @@ void ttm_page_alloc_fini(void)
+@@ -836,7 +830,7 @@ void ttm_page_alloc_fini(void)
{
int i;
@@ -99373,7 +99345,7 @@
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
-@@ -839,6 +840,62 @@ void ttm_page_alloc_fini(void)
+@@ -846,6 +840,62 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
@@ -104327,10 +104299,10 @@
if (unlikely(ret != 0))
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
-index 907c26f..6651cb3 100644
+index 7f16ff2..7fc3dc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
-@@ -417,10 +417,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+@@ -416,10 +416,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
@@ -104341,7 +104313,7 @@
fb_bpp = 32;
fb_depth = 24;
-@@ -428,8 +424,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+@@ -427,8 +423,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
@@ -104352,7 +104324,7 @@
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
-@@ -518,19 +514,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+@@ -517,19 +513,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres = initial_width;
info->var.yres = initial_height;
Modified: dists/squeeze-backports/linux/debian/patches/features/all/igb/0008-igb-add-basic-runtime-PM-support.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/igb/0008-igb-add-basic-runtime-PM-support.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/all/igb/0008-igb-add-basic-runtime-PM-support.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -18,13 +18,13 @@
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Adjust to apply after backported commit 76252723e886
+ 'igb: do a reset on SR-IOV re-init if device is down']
---
drivers/net/ethernet/intel/igb/igb_ethtool.c | 16 ++++
drivers/net/ethernet/intel/igb/igb_main.c | 136 +++++++++++++++++++++++----
2 files changed, 133 insertions(+), 19 deletions(-)
-diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
-index 40110c5..75a560c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -36,6 +36,7 @@
@@ -35,7 +35,7 @@
#include "igb.h"
-@@ -2168,6 +2169,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+@@ -2168,6 +2169,19 @@ static void igb_get_strings(struct net_d
}
}
@@ -55,7 +55,7 @@
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
-@@ -2194,6 +2208,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
+@@ -2194,6 +2208,8 @@ static const struct ethtool_ops igb_etht
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
@@ -64,8 +64,6 @@
};
void igb_set_ethtool_ops(struct net_device *netdev)
-diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
-index 2706d41..4527f7f 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -53,6 +53,7 @@
@@ -76,7 +74,7 @@
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
-@@ -172,8 +173,18 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter);
+@@ -172,8 +173,18 @@ static int igb_check_vf_assignment(struc
#endif
#ifdef CONFIG_PM
@@ -108,7 +106,7 @@
#endif
.shutdown = igb_shutdown,
.err_handler = &igb_err_handler
-@@ -2121,6 +2130,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
+@@ -2121,6 +2130,8 @@ static int __devinit igb_probe(struct pc
default:
break;
}
@@ -117,7 +115,7 @@
return 0;
err_register:
-@@ -2160,6 +2171,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
+@@ -2160,6 +2171,8 @@ static void __devexit igb_remove(struct
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -126,7 +124,7 @@
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
-@@ -2482,16 +2495,22 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
+@@ -2482,16 +2495,22 @@ static int __devinit igb_sw_init(struct
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
@@ -151,7 +149,7 @@
netif_carrier_off(netdev);
-@@ -2537,6 +2556,9 @@ static int igb_open(struct net_device *netdev)
+@@ -2537,6 +2556,9 @@ static int igb_open(struct net_device *n
netif_tx_start_all_queues(netdev);
@@ -214,7 +212,7 @@
/**
* igb_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
-@@ -3642,6 +3682,9 @@ static void igb_watchdog_task(struct work_struct *work)
+@@ -3642,6 +3682,9 @@ static void igb_watchdog_task(struct wor
link = igb_has_link(adapter);
if (link) {
@@ -224,7 +222,7 @@
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
hw->mac.ops.get_speed_and_duplex(hw,
-@@ -3713,6 +3756,9 @@ static void igb_watchdog_task(struct work_struct *work)
+@@ -3713,6 +3756,9 @@ static void igb_watchdog_task(struct wor
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -251,16 +249,16 @@
#ifdef CONFIG_PM
int retval = 0;
#endif
-@@ -6612,7 +6659,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+@@ -6612,7 +6659,7 @@ static int __igb_shutdown(struct pci_dev
netif_device_detach(netdev);
if (netif_running(netdev))
- igb_close(netdev);
+ __igb_close(netdev, true);
+ else
+ igb_reset(adapter);
- igb_clear_interrupt_scheme(adapter);
-
-@@ -6671,12 +6718,13 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+@@ -6673,12 +6720,13 @@ static int __igb_shutdown(struct pci_dev
}
#ifdef CONFIG_PM
@@ -276,7 +274,7 @@
if (retval)
return retval;
-@@ -6690,8 +6738,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+@@ -6692,8 +6740,9 @@ static int igb_suspend(struct pci_dev *p
return 0;
}
@@ -287,7 +285,7 @@
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
-@@ -6712,7 +6761,18 @@ static int igb_resume(struct pci_dev *pdev)
+@@ -6714,7 +6763,18 @@ static int igb_resume(struct pci_dev *pd
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -307,7 +305,7 @@
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
-@@ -6725,23 +6785,61 @@ static int igb_resume(struct pci_dev *pdev)
+@@ -6727,23 +6787,61 @@ static int igb_resume(struct pci_dev *pd
wr32(E1000_WUS, ~0);
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0180-mutex-no-spin-on-rt.patch.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -4,17 +4,17 @@
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6f372b425516d80347ddb16c9d0c99de748cd4bc
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bwh: Adjust to apply after backported commit 4badad352a6b
+ 'locking/mutex: Disable optimistic spinning on some architectures']
---
kernel/Kconfig.locks | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
-index 5068e2a..7bd0598 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
-@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
- def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+@@ -203,4 +203,4 @@ config ARCH_SUPPORTS_ATOMIC_RMW
config MUTEX_SPIN_ON_OWNER
-- def_bool SMP && !DEBUG_MUTEXES
-+ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
+ def_bool y
+- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
Modified: dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/0278-random-Make-it-work-on-rt.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -11,6 +11,7 @@
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+[bwh: Adjust context after 3.2.61]
---
drivers/char/random.c | 10 ++++++----
include/linux/irqdesc.h | 1 +
@@ -19,8 +20,6 @@
kernel/irq/manage.c | 6 ++++++
5 files changed, 19 insertions(+), 7 deletions(-)
-diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 94f53fe..fab8f25 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -767,18 +767,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -44,7 +43,7 @@
input[2] = ip;
input[3] = ip >> 32;
}
-@@ -792,7 +790,11 @@ void add_interrupt_randomness(int irq, int irq_flags)
+@@ -792,7 +790,11 @@ void add_interrupt_randomness(int irq, i
fast_pool->last = now;
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
@@ -56,11 +55,9 @@
/*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
-diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
-index f1e2527..5f4f091 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -53,6 +53,7 @@ struct irq_desc {
+@@ -55,6 +55,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
@@ -68,11 +65,9 @@
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
-diff --git a/include/linux/random.h b/include/linux/random.h
-index f5e1311..1622993 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -53,7 +53,7 @@ extern void rand_initialize_irq(int irq);
+@@ -53,7 +53,7 @@ extern void rand_initialize_irq(int irq)
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
@@ -81,11 +76,9 @@
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
-index a768885..f6b91bc 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
-@@ -116,6 +116,8 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+@@ -116,6 +116,8 @@ static void irq_wake_thread(struct irq_d
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
@@ -94,7 +87,7 @@
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
-@@ -157,8 +159,9 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+@@ -157,8 +159,9 @@ handle_irq_event_percpu(struct irq_desc
} while (action);
#ifndef CONFIG_PREEMPT_RT_FULL
@@ -106,14 +99,12 @@
#endif
if (!noirqdebug)
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 1ec20f5..8178df7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -825,6 +825,12 @@ static int irq_thread(void *data)
action_ret = handler_fn(desc, action);
- if (!noirqdebug)
- note_interrupt(action->irq, desc, action_ret);
+ if (action_ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+ add_interrupt_randomness(action->irq, 0,
Copied: dists/squeeze-backports/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch (from r22011, dists/wheezy-security/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze-backports/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch Sun Nov 2 01:27:57 2014 (r22019, copy of r22011, dists/wheezy-security/linux/debian/patches/features/all/rt/revert-rtmutex-changes-in-3.2.61.patch)
@@ -0,0 +1,473 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 07 Jul 2014 01:18:47 +0100
+Subject: Revert rtmutex changes in 3.2.61
+
+These conflict with the rt changes and need to be resolved by an
+expert.
+
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 1928f3d..f9d8482 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -81,47 +81,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ owner = *p;
+ } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+ }
+-
+-/*
+- * Safe fastpath aware unlock:
+- * 1) Clear the waiters bit
+- * 2) Drop lock->wait_lock
+- * 3) Try to unlock the lock with cmpxchg
+- */
+-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+- __releases(lock->wait_lock)
+-{
+- struct task_struct *owner = rt_mutex_owner(lock);
+-
+- clear_rt_mutex_waiters(lock);
+- raw_spin_unlock(&lock->wait_lock);
+- /*
+- * If a new waiter comes in between the unlock and the cmpxchg
+- * we have two situations:
+- *
+- * unlock(wait_lock);
+- * lock(wait_lock);
+- * cmpxchg(p, owner, 0) == owner
+- * mark_rt_mutex_waiters(lock);
+- * acquire(lock);
+- * or:
+- *
+- * unlock(wait_lock);
+- * lock(wait_lock);
+- * mark_rt_mutex_waiters(lock);
+- *
+- * cmpxchg(p, owner, 0) != owner
+- * enqueue_waiter();
+- * unlock(wait_lock);
+- * lock(wait_lock);
+- * wake waiter();
+- * unlock(wait_lock);
+- * lock(wait_lock);
+- * acquire(lock);
+- */
+- return rt_mutex_cmpxchg(lock, owner, NULL);
+-}
+-
+ #else
+ # define rt_mutex_cmpxchg(l,c,n) (0)
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -129,17 +88,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+ }
+-
+-/*
+- * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+- */
+-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+- __releases(lock->wait_lock)
+-{
+- lock->owner = NULL;
+- raw_spin_unlock(&lock->wait_lock);
+- return true;
+-}
+ #endif
+
+ /*
+@@ -193,36 +141,14 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
+ */
+ int max_lock_depth = 1024;
+
+-static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+-{
+- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+-}
+-
+ /*
+ * Adjust the priority chain. Also used for deadlock detection.
+ * Decreases task's usage by one - may thus free the task.
+- *
+- * @task: the task owning the mutex (owner) for which a chain walk is
+- * probably needed
+- * @deadlock_detect: do we have to carry out deadlock detection?
+- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+- * things for a task that has just got its priority adjusted, and
+- * is waiting on a mutex)
+- * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+- * we dropped its pi_lock. Is never dereferenced, only used for
+- * comparison to detect lock chain changes.
+- * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
+- * its priority to the mutex owner (can be NULL in the case
+- * depicted above or if the top waiter is gone away and we are
+- * actually deboosting the owner)
+- * @top_task: the current top waiter
+- *
+ * Returns 0 or -EDEADLK.
+ */
+ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ int deadlock_detect,
+ struct rt_mutex *orig_lock,
+- struct rt_mutex *next_lock,
+ struct rt_mutex_waiter *orig_waiter,
+ struct task_struct *top_task)
+ {
+@@ -256,7 +182,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ }
+ put_task_struct(task);
+
+- return -EDEADLK;
++ return deadlock_detect ? -EDEADLK : 0;
+ }
+ retry:
+ /*
+@@ -281,32 +207,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ goto out_unlock_pi;
+
+ /*
+- * We dropped all locks after taking a refcount on @task, so
+- * the task might have moved on in the lock chain or even left
+- * the chain completely and blocks now on an unrelated lock or
+- * on @orig_lock.
+- *
+- * We stored the lock on which @task was blocked in @next_lock,
+- * so we can detect the chain change.
+- */
+- if (next_lock != waiter->lock)
+- goto out_unlock_pi;
+-
+- /*
+ * Drop out, when the task has no waiters. Note,
+ * top_waiter can be NULL, when we are in the deboosting
+ * mode!
+ */
+- if (top_waiter) {
+- if (!task_has_pi_waiters(task))
+- goto out_unlock_pi;
+- /*
+- * If deadlock detection is off, we stop here if we
+- * are not the top pi waiter of the task.
+- */
+- if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+- goto out_unlock_pi;
+- }
++ if (top_waiter && (!task_has_pi_waiters(task) ||
++ top_waiter != task_top_pi_waiter(task)))
++ goto out_unlock_pi;
+
+ /*
+ * When deadlock detection is off then we check, if further
+@@ -322,16 +229,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ goto retry;
+ }
+
+- /*
+- * Deadlock detection. If the lock is the same as the original
+- * lock which caused us to walk the lock chain or if the
+- * current lock is owned by the task which initiated the chain
+- * walk, we detected a deadlock.
+- */
++ /* Deadlock detection */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ raw_spin_unlock(&lock->wait_lock);
+- ret = -EDEADLK;
++ ret = deadlock_detect ? -EDEADLK : 0;
+ goto out_unlock_pi;
+ }
+
+@@ -378,26 +280,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ __rt_mutex_adjust_prio(task);
+ }
+
+- /*
+- * Check whether the task which owns the current lock is pi
+- * blocked itself. If yes we store a pointer to the lock for
+- * the lock chain change detection above. After we dropped
+- * task->pi_lock next_lock cannot be dereferenced anymore.
+- */
+- next_lock = task_blocked_on_lock(task);
+-
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ raw_spin_unlock(&lock->wait_lock);
+
+- /*
+- * We reached the end of the lock chain. Stop right here. No
+- * point to go back just to figure that out.
+- */
+- if (!next_lock)
+- goto out_put_task;
+-
+ if (!detect_deadlock && waiter != top_waiter)
+ goto out_put_task;
+
+@@ -508,21 +395,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ {
+ struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex_waiter *top_waiter = waiter;
+- struct rt_mutex *next_lock;
+- int chain_walk = 0, res;
+ unsigned long flags;
+-
+- /*
+- * Early deadlock detection. We really don't want the task to
+- * enqueue on itself just to untangle the mess later. It's not
+- * only an optimization. We drop the locks, so another waiter
+- * can come in before the chain walk detects the deadlock. So
+- * the other will detect the deadlock and return -EDEADLOCK,
+- * which is wrong, as the other waiter is not in a deadlock
+- * situation.
+- */
+- if (owner == task)
+- return -EDEADLK;
++ int chain_walk = 0, res;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ __rt_mutex_adjust_prio(task);
+@@ -543,28 +417,20 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ if (!owner)
+ return 0;
+
+- raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ if (waiter == rt_mutex_top_waiter(lock)) {
++ raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
+ plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ __rt_mutex_adjust_prio(owner);
+ if (owner->pi_blocked_on)
+ chain_walk = 1;
+- } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+- chain_walk = 1;
++ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
++ else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
++ chain_walk = 1;
+
+- /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
+-
+- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+- /*
+- * Even if full deadlock detection is on, if the owner is not
+- * blocked itself, we can avoid finding this out in the chain
+- * walk.
+- */
+- if (!chain_walk || !next_lock)
++ if (!chain_walk)
+ return 0;
+
+ /*
+@@ -576,8 +442,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+
+ raw_spin_unlock(&lock->wait_lock);
+
+- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+- next_lock, waiter, task);
++ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
++ task);
+
+ raw_spin_lock(&lock->wait_lock);
+
+@@ -587,8 +453,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ /*
+ * Wake up the next waiter on the lock.
+ *
+- * Remove the top waiter from the current tasks pi waiter list and
+- * wake it up.
++ * Remove the top waiter from the current tasks waiter list and wake it up.
+ *
+ * Called with lock->wait_lock held.
+ */
+@@ -609,23 +474,10 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
+ */
+ plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+
+- /*
+- * As we are waking up the top waiter, and the waiter stays
+- * queued on the lock until it gets the lock, this lock
+- * obviously has waiters. Just set the bit here and this has
+- * the added benefit of forcing all new tasks into the
+- * slow path making sure no task of lower priority than
+- * the top waiter can steal this lock.
+- */
+- lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
++ rt_mutex_set_owner(lock, NULL);
+
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+- /*
+- * It's safe to dereference waiter as it cannot go away as
+- * long as we hold lock->wait_lock. The waiter task needs to
+- * acquire it in order to dequeue the waiter.
+- */
+ wake_up_process(waiter->task);
+ }
+
+@@ -640,8 +492,8 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ int first = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex *next_lock = NULL;
+ unsigned long flags;
++ int chain_walk = 0;
+
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
+ plist_del(&waiter->list_entry, &lock->wait_list);
+@@ -665,15 +517,15 @@ static void remove_waiter(struct rt_mutex *lock,
+ }
+ __rt_mutex_adjust_prio(owner);
+
+- /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
++ if (owner->pi_blocked_on)
++ chain_walk = 1;
+
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+
+ WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+
+- if (!next_lock)
++ if (!chain_walk)
+ return;
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+@@ -681,7 +533,7 @@ static void remove_waiter(struct rt_mutex *lock,
+
+ raw_spin_unlock(&lock->wait_lock);
+
+- rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
++ rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+
+ raw_spin_lock(&lock->wait_lock);
+ }
+@@ -694,7 +546,6 @@ static void remove_waiter(struct rt_mutex *lock,
+ void rt_mutex_adjust_pi(struct task_struct *task)
+ {
+ struct rt_mutex_waiter *waiter;
+- struct rt_mutex *next_lock;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -704,13 +555,12 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+- next_lock = waiter->lock;
++
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+-
+- rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
++ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+ }
+
+ /**
+@@ -770,26 +620,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ return ret;
+ }
+
+-static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+- struct rt_mutex_waiter *w)
+-{
+- /*
+- * If the result is not -EDEADLOCK or the caller requested
+- * deadlock detection, nothing to do here.
+- */
+- if (res != -EDEADLOCK || detect_deadlock)
+- return;
+-
+- /*
+- * Yell lowdly and stop the task right here.
+- */
+- rt_mutex_print_deadlock(w);
+- while (1) {
+- set_current_state(TASK_INTERRUPTIBLE);
+- schedule();
+- }
+-}
+-
+ /*
+ * Slow path lock function:
+ */
+@@ -827,10 +657,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+
+ set_current_state(TASK_RUNNING);
+
+- if (unlikely(ret)) {
++ if (unlikely(ret))
+ remove_waiter(lock, &waiter);
+- rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+- }
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+@@ -886,49 +714,12 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
+
+ rt_mutex_deadlock_account_unlock(current);
+
+- /*
+- * We must be careful here if the fast path is enabled. If we
+- * have no waiters queued we cannot set owner to NULL here
+- * because of:
+- *
+- * foo->lock->owner = NULL;
+- * rtmutex_lock(foo->lock); <- fast path
+- * free = atomic_dec_and_test(foo->refcnt);
+- * rtmutex_unlock(foo->lock); <- fast path
+- * if (free)
+- * kfree(foo);
+- * raw_spin_unlock(foo->lock->wait_lock);
+- *
+- * So for the fastpath enabled kernel:
+- *
+- * Nothing can set the waiters bit as long as we hold
+- * lock->wait_lock. So we do the following sequence:
+- *
+- * owner = rt_mutex_owner(lock);
+- * clear_rt_mutex_waiters(lock);
+- * raw_spin_unlock(&lock->wait_lock);
+- * if (cmpxchg(&lock->owner, owner, 0) == owner)
+- * return;
+- * goto retry;
+- *
+- * The fastpath disabled variant is simple as all access to
+- * lock->owner is serialized by lock->wait_lock:
+- *
+- * lock->owner = NULL;
+- * raw_spin_unlock(&lock->wait_lock);
+- */
+- while (!rt_mutex_has_waiters(lock)) {
+- /* Drops lock->wait_lock ! */
+- if (unlock_rt_mutex_safe(lock) == true)
+- return;
+- /* Relock the rtmutex and try again */
+- raw_spin_lock(&lock->wait_lock);
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock(&lock->wait_lock);
++ return;
+ }
+
+- /*
+- * The wakeup next waiter path does not suffer from the above
+- * race. See the comments there.
+- */
+ wakeup_next_waiter(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+@@ -1175,8 +966,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ return 1;
+ }
+
+- /* We enforce deadlock detection for futexes */
+- ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
++ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+
+ if (ret && !rt_mutex_owner(lock)) {
+ /*
Modified: dists/squeeze-backports/linux/debian/patches/features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -24,11 +24,9 @@
net/Kconfig | 7 ++++---
5 files changed, 8 insertions(+), 7 deletions(-)
-Index: linux/arch/arm/Kconfig
-===================================================================
---- linux.orig/arch/arm/Kconfig 2012-06-24 23:41:24.000000000 +0200
-+++ linux/arch/arm/Kconfig 2012-06-24 23:49:03.000000000 +0200
-@@ -30,7 +30,7 @@
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -30,7 +30,7 @@ config ARM
select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -37,11 +35,9 @@
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
-Index: linux/arch/powerpc/Kconfig
-===================================================================
---- linux.orig/arch/powerpc/Kconfig 2012-06-20 00:18:30.000000000 +0200
-+++ linux/arch/powerpc/Kconfig 2012-06-24 23:49:03.000000000 +0200
-@@ -134,7 +134,7 @@
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -134,7 +134,7 @@ config PPC
select GENERIC_IRQ_SHOW_LEVEL
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
@@ -49,12 +45,10 @@
+ select HAVE_BPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
-
-Index: linux/arch/x86/Kconfig
-===================================================================
---- linux.orig/arch/x86/Kconfig 2012-06-23 17:09:51.000000000 +0200
-+++ linux/arch/x86/Kconfig 2012-06-24 23:49:03.000000000 +0200
-@@ -72,7 +72,7 @@
+ select ARCH_SUPPORTS_ATOMIC_RMW
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -72,7 +72,7 @@ config X86
select GENERIC_CLOCKEVENTS_MIN_ADJUST
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
@@ -62,12 +56,10 @@
+ select HAVE_BPF_JIT if X86_64
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
-
-Index: linux/net/Kconfig
-===================================================================
---- linux.orig/net/Kconfig 2012-06-20 00:18:30.000000000 +0200
-+++ linux/net/Kconfig 2012-06-24 23:49:03.000000000 +0200
-@@ -232,9 +232,6 @@
+ select ARCH_SUPPORTS_ATOMIC_RMW
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -232,9 +232,6 @@ config XPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
@@ -77,7 +69,7 @@
config BPF_JIT
bool "enable BPF Just In Time compiler"
depends on HAVE_BPF_JIT
-@@ -326,3 +323,7 @@
+@@ -326,3 +323,7 @@ source "net/nfc/Kconfig"
endif # if NET
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0003-Staging-hv-storvsc-Use-mempools-to-allocate-struct-s.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0003-Staging-hv-storvsc-Use-mempools-to-allocate-struct-s.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0003-Staging-hv-storvsc-Use-mempools-to-allocate-struct-s.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -11,12 +11,12 @@
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+[bwh: Adjusted to apply after backported commit 56b26e69c828
+ 'Drivers: scsi: storvsc: Implement a eh_timed_out handler']
---
drivers/staging/hv/storvsc_drv.c | 30 +++++++++++++++++++++++++-----
1 file changed, 25 insertions(+), 5 deletions(-)
-diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
-index ae8c33e..6a255e9 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -32,6 +32,7 @@
@@ -24,10 +24,10 @@
#include <linux/device.h>
#include <linux/hyperv.h>
+#include <linux/mempool.h>
+ #include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
- #include <scsi/scsi_host.h>
-@@ -42,6 +43,7 @@
+@@ -43,6 +44,7 @@
#include <scsi/scsi_dbg.h>
@@ -35,7 +35,7 @@
#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
-@@ -287,6 +289,7 @@ struct storvsc_device {
+@@ -288,6 +290,7 @@ struct storvsc_device {
struct hv_host_device {
struct hv_device *dev;
struct kmem_cache *request_pool;
@@ -43,7 +43,7 @@
unsigned int port;
unsigned char path;
unsigned char target;
-@@ -974,8 +977,10 @@ static int storvsc_remove(struct hv_device *dev)
+@@ -976,8 +979,10 @@ static int storvsc_remove(struct hv_devi
storvsc_dev_remove(dev);
if (host_dev->request_pool) {
@@ -54,7 +54,7 @@
}
return 0;
}
-@@ -1120,7 +1125,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
+@@ -1127,7 +1132,7 @@ static void storvsc_command_completion(s
scsi_done_fn(scmnd);
@@ -62,8 +62,8 @@
+ mempool_free(cmd_request, host_dev->request_mempool);
}
- static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
-@@ -1176,12 +1181,13 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
+ /*
+@@ -1193,12 +1198,13 @@ static int storvsc_queuecommand_lck(stru
request_size = sizeof(struct storvsc_cmd_request);
@@ -78,7 +78,7 @@
/* Setup the cmd request */
cmd_request->bounce_sgl_count = 0;
-@@ -1235,8 +1241,8 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
+@@ -1252,8 +1258,8 @@ static int storvsc_queuecommand_lck(stru
if (!cmd_request->bounce_sgl) {
scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
@@ -89,7 +89,7 @@
return SCSI_MLQUEUE_HOST_BUSY;
}
-@@ -1278,7 +1284,7 @@ retry_request:
+@@ -1295,7 +1301,7 @@ retry_request:
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
@@ -98,7 +98,7 @@
scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
-@@ -1348,6 +1354,7 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1366,6 +1372,7 @@ static int storvsc_probe(struct hv_devic
const struct hv_vmbus_device_id *dev_id)
{
int ret;
@@ -106,7 +106,7 @@
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
-@@ -1376,8 +1383,19 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1394,8 +1401,19 @@ static int storvsc_probe(struct hv_devic
return -ENOMEM;
}
@@ -126,7 +126,7 @@
kmem_cache_destroy(host_dev->request_pool);
scsi_host_put(host);
return -ENOMEM;
-@@ -1392,6 +1410,7 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1410,6 +1428,7 @@ static int storvsc_probe(struct hv_devic
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
if (ret) {
@@ -134,7 +134,7 @@
kmem_cache_destroy(host_dev->request_pool);
scsi_host_put(host);
kfree(stor_device);
-@@ -1431,6 +1450,7 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1449,6 +1468,7 @@ static int storvsc_probe(struct hv_devic
err_out:
storvsc_dev_remove(device);
@@ -142,6 +142,3 @@
kmem_cache_destroy(host_dev->request_pool);
scsi_host_put(host);
return -ENODEV;
---
-1.7.9.5
-
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0025-Staging-hv-storvsc-Implement-per-device-memory-pools.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0025-Staging-hv-storvsc-Implement-per-device-memory-pools.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0025-Staging-hv-storvsc-Implement-per-device-memory-pools.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -18,11 +18,9 @@
drivers/staging/hv/storvsc_drv.c | 106 ++++++++++++++++++++++----------------
1 file changed, 62 insertions(+), 44 deletions(-)
-diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
-index c22de06..18f8771 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
-@@ -285,10 +285,13 @@ struct storvsc_device {
+@@ -286,10 +286,13 @@ struct storvsc_device {
struct hv_storvsc_request reset_request;
};
@@ -38,7 +36,7 @@
unsigned int port;
unsigned char path;
unsigned char target;
-@@ -790,7 +793,48 @@ static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
+@@ -791,7 +794,48 @@ static void storvsc_get_ide_info(struct
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
@@ -87,7 +85,7 @@
}
static int storvsc_device_configure(struct scsi_device *sdevice)
-@@ -1031,19 +1075,13 @@ static int storvsc_remove(struct hv_device *dev)
+@@ -1033,19 +1077,13 @@ static int storvsc_remove(struct hv_devi
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
@@ -108,7 +106,7 @@
return 0;
}
-@@ -1139,6 +1177,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
+@@ -1146,6 +1184,7 @@ static void storvsc_command_completion(s
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
struct storvsc_scan_work *wrk;
@@ -116,7 +114,7 @@
vm_srb = &request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
-@@ -1201,7 +1240,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
+@@ -1208,7 +1247,7 @@ static void storvsc_command_completion(s
scsi_done_fn(scmnd);
@@ -124,8 +122,8 @@
+ mempool_free(cmd_request, memp->request_mempool);
}
- static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
-@@ -1236,6 +1275,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ /*
+@@ -1253,6 +1292,7 @@ static int storvsc_queuecommand(struct S
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
@@ -133,7 +131,7 @@
if (storvsc_check_scsi_cmd(scmnd) == false) {
scmnd->scsi_done(scmnd);
-@@ -1253,7 +1293,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1270,7 +1310,7 @@ static int storvsc_queuecommand(struct S
request_size = sizeof(struct storvsc_cmd_request);
@@ -142,7 +140,7 @@
GFP_ATOMIC);
if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
-@@ -1312,7 +1352,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1329,7 +1369,7 @@ static int storvsc_queuecommand(struct S
if (!cmd_request->bounce_sgl) {
scmnd->host_scribble = NULL;
mempool_free(cmd_request,
@@ -151,7 +149,7 @@
return SCSI_MLQUEUE_HOST_BUSY;
}
-@@ -1354,7 +1394,7 @@ retry_request:
+@@ -1371,7 +1411,7 @@ retry_request:
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
@@ -160,15 +158,15 @@
scmnd->host_scribble = NULL;
-@@ -1372,6 +1412,7 @@ static struct scsi_host_template scsi_driver = {
- .queuecommand = storvsc_queuecommand,
+@@ -1390,6 +1430,7 @@ static struct scsi_host_template scsi_dr
.eh_host_reset_handler = storvsc_host_reset_handler,
+ .eh_timed_out = storvsc_eh_timed_out,
.slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 1,
/* 64 max_queue * 1 target */
-@@ -1413,7 +1454,6 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1431,7 +1472,6 @@ static int storvsc_probe(struct hv_devic
const struct hv_vmbus_device_id *dev_id)
{
int ret;
@@ -176,7 +174,7 @@
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
-@@ -1432,29 +1472,11 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1450,29 +1490,11 @@ static int storvsc_probe(struct hv_devic
host_dev->port = host->host_no;
host_dev->dev = device;
@@ -207,7 +205,7 @@
}
stor_device->destroy = false;
-@@ -1466,7 +1488,7 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1484,7 +1506,7 @@ static int storvsc_probe(struct hv_devic
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
if (ret)
@@ -216,7 +214,7 @@
if (dev_is_ide)
storvsc_get_ide_info(device, &target, &path);
-@@ -1486,7 +1508,7 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1504,7 +1526,7 @@ static int storvsc_probe(struct hv_devic
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
@@ -225,7 +223,7 @@
if (!dev_is_ide) {
scsi_scan_host(host);
-@@ -1495,28 +1517,24 @@ static int storvsc_probe(struct hv_device *device,
+@@ -1513,28 +1535,24 @@ static int storvsc_probe(struct hv_devic
ret = scsi_add_device(host, 0, target, 0);
if (ret) {
scsi_remove_host(host);
@@ -259,6 +257,3 @@
scsi_host_put(host);
return ret;
}
---
-1.7.9.5
-
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0050-Staging-hv-storvsc-Cleanup-storvsc_queuecommand.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0050-Staging-hv-storvsc-Cleanup-storvsc_queuecommand.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0050-Staging-hv-storvsc-Cleanup-storvsc_queuecommand.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -14,12 +14,10 @@
drivers/staging/hv/storvsc_drv.c | 48 +++++++++++++++++---------------------
1 file changed, 22 insertions(+), 26 deletions(-)
-diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
-index 7561d29..71e50c3 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
-@@ -1239,13 +1239,16 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
- mempool_free(cmd_request, memp->request_mempool);
+@@ -1256,13 +1256,16 @@ static enum blk_eh_timer_return storvsc_
+ return BLK_EH_RESET_TIMER;
}
-static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
@@ -37,7 +35,7 @@
case SET_WINDOW:
scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
-@@ -1270,32 +1273,26 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1287,32 +1290,26 @@ static int storvsc_queuecommand(struct S
struct vmscsi_request *vm_srb;
struct stor_mem_pools *memp = scmnd->device->hostdata;
@@ -76,7 +74,7 @@
cmd_request->cmd = scmnd;
scmnd->host_scribble = (unsigned char *)cmd_request;
-@@ -1344,11 +1341,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1361,11 +1358,8 @@ static int storvsc_queuecommand(struct S
scsi_bufflen(scmnd),
vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
@@ -90,7 +88,7 @@
}
cmd_request->bounce_sgl_count =
-@@ -1377,24 +1371,26 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+@@ -1394,24 +1388,26 @@ static int storvsc_queuecommand(struct S
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
@@ -124,6 +122,3 @@
return ret;
}
---
-1.7.9.5
-
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0055-Staging-hv-storvsc-Get-rid-of-the-on_io_completion-i.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -10,15 +10,17 @@
Signed-off-by: K. Y. Srinivasan <kys at microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
-[bwh: Adjusted to apply after backported commit 9d2696e658ef
- '[SCSI] storvsc: Initialize the sglist']
+[bwh: Adjusted to apply after backported commits 9d2696e658ef
+ '[SCSI] storvsc: Initialize the sglist' and 56b26e69c828
+ 'Drivers: scsi: storvsc: Implement a eh_timed_out handler']
+
---
drivers/staging/hv/storvsc_drv.c | 630 +++++++++++++++++++-------------------
1 file changed, 313 insertions(+), 317 deletions(-)
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
-@@ -276,7 +276,6 @@ struct hv_storvsc_request {
+@@ -277,7 +277,6 @@ struct hv_storvsc_request {
unsigned char *sense_buffer;
void *context;
@@ -26,7 +28,7 @@
struct hv_multipage_buffer data_buffer;
struct vstor_packet vstor_packet;
-@@ -436,6 +435,228 @@ get_in_err:
+@@ -437,6 +436,228 @@ get_in_err:
}
@@ -255,7 +257,7 @@
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
-@@ -562,23 +783,100 @@ cleanup:
+@@ -563,23 +784,100 @@ cleanup:
return ret;
}
@@ -369,7 +371,7 @@
* We do this so we can distinguish truly fatal failues
* (srb status == 0x4) and off-line the device in that case.
*/
-@@ -625,7 +923,7 @@ static void storvsc_on_io_completion(str
+@@ -626,7 +924,7 @@ static void storvsc_on_io_completion(str
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
@@ -378,7 +380,7 @@
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
-@@ -875,230 +1173,6 @@ static int storvsc_device_configure(stru
+@@ -876,230 +1174,6 @@ static int storvsc_device_configure(stru
return 0;
}
@@ -609,7 +611,7 @@
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
sector_t capacity, int *info)
{
-@@ -1172,83 +1246,6 @@ static int storvsc_host_reset_handler(st
+@@ -1173,83 +1247,6 @@ static int storvsc_host_reset_handler(st
return SUCCESS;
}
@@ -690,10 +692,10 @@
- mempool_free(cmd_request, memp->request_mempool);
-}
-
- static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
- {
- bool allowed = true;
-@@ -1324,7 +1321,6 @@ static int storvsc_queuecommand(struct S
+ /*
+ * The host guarantees to respond to each command, although I/O latencies might
+ * be unbounded on Azure. Reset the timer unconditionally to give the host a
+@@ -1335,7 +1332,6 @@ static int storvsc_queuecommand(struct S
break;
}
Modified: dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/features/x86/hyperv/0067-Staging-hv-storvsc-Move-the-storage-driver-out-of-th.patch Sun Nov 2 01:27:57 2014 (r22019)
@@ -21,7 +21,8 @@
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
[bwh: Adjusted to apply after backported commits 5c1b10ab7f93
'[SCSI] storvsc: Account for in-transit packets in the RESET path' and
- 9d2696e658ef '[SCSI] storvsc: Initialize the sglist']
+ 9d2696e658ef '[SCSI] storvsc: Initialize the sglist' and 56b26e69c828
+ 'Drivers: scsi: storvsc: Implement a eh_timed_out handler']
---
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -60,7 +61,7 @@
sd_mod-objs := sd.o
--- /dev/null
+++ b/drivers/scsi/storvsc_drv.c
-@@ -0,0 +1,1554 @@
+@@ -0,0 +1,1566 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
@@ -96,6 +97,7 @@
+#include <linux/device.h>
+#include <linux/hyperv.h>
+#include <linux/mempool.h>
++#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
@@ -1285,6 +1287,16 @@
+ return SUCCESS;
+}
+
++/*
++ * The host guarantees to respond to each command, although I/O latencies might
++ * be unbounded on Azure. Reset the timer unconditionally to give the host a
++ * chance to perform EH.
++ */
++static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
++{
++ return BLK_EH_RESET_TIMER;
++}
++
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+{
+ bool allowed = true;
@@ -1442,6 +1454,7 @@
+ .bios_param = storvsc_get_chs,
+ .queuecommand = storvsc_queuecommand,
+ .eh_host_reset_handler = storvsc_host_reset_handler,
++ .eh_timed_out = storvsc_eh_timed_out,
+ .slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
+ .slave_configure = storvsc_device_configure,
@@ -1660,7 +1673,7 @@
-Haiyang Zhang <haiyangz at microsoft.com>, and K. Y. Srinivasan <kys at microsoft.com>
--- a/drivers/staging/hv/storvsc_drv.c
+++ /dev/null
-@@ -1,1554 +0,0 @@
+@@ -1,1566 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
@@ -1696,6 +1709,7 @@
-#include <linux/device.h>
-#include <linux/hyperv.h>
-#include <linux/mempool.h>
+-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
@@ -2885,6 +2899,16 @@
- return SUCCESS;
-}
-
+-/*
+- * The host guarantees to respond to each command, although I/O latencies might
+- * be unbounded on Azure. Reset the timer unconditionally to give the host a
+- * chance to perform EH.
+- */
+-static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+-{
+- return BLK_EH_RESET_TIMER;
+-}
+-
-static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
-{
- bool allowed = true;
@@ -3042,6 +3066,7 @@
- .bios_param = storvsc_get_chs,
- .queuecommand = storvsc_queuecommand,
- .eh_host_reset_handler = storvsc_host_reset_handler,
+- .eh_timed_out = storvsc_eh_timed_out,
- .slave_alloc = storvsc_device_alloc,
- .slave_destroy = storvsc_device_destroy,
- .slave_configure = storvsc_device_configure,
Modified: dists/squeeze-backports/linux/debian/patches/series
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/series Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/series Sun Nov 2 01:27:57 2014 (r22019)
@@ -48,7 +48,6 @@
bugfix/all/snapshot-Implement-compat_ioctl.patch
debian/ARM-Remove-use-of-possibly-undefined-BUILD_BUG_ON-in.patch
bugfix/arm/ARM-topdown-mmap.patch
-bugfix/alpha/alpha-add-io-read-write-16-32-be-functions.patch
features/arm/ARM-kirkwood-6282A1.patch
features/all/Input-ALPS-move-protocol-information-to-Documentatio.patch
features/all/Input-ALPS-add-protocol-version-field-in-alps_model_.patch
@@ -663,8 +662,6 @@
debian/mm-avoid-ABI-change-in-3.2.55.patch
features/all/sound-usb-emu0204-frontselection.patch
debian/net-avoid-abi-change-in-3.2.57.patch
-bugfix/all/skbuff-add-an-api-to-orphan-frags.patch
-bugfix/all/skbuff-skb_segment-orphan-frags-before-copying.patch
debian/revert-alsa-enable-config_zone_dma.patch
# Support for e1000e and igb backports
@@ -1108,13 +1105,10 @@
features/all/igb/0180-igb-disable-eee.patch
bugfix/x86/crypto-ghash-clmulni-intel-use-C-implementation-for-.patch
-bugfix/all/skbuff-export-skb_copy_ubufs.patch
bugfix/x86/revert-perf-x86-amd-ibs-fix-waking-up-from-s3-for-amd-family-10h.patch
debian/libata-avoid-abi-change-in-3.2.59.patch
debian/dmi-avoid-abi-change-in-3.2.59.patch
-bugfix/mips/MIPS-Cleanup-flags-in-syscall-flags-handlers.patch
-bugfix/mips/MIPS-asm-thread_info-Add-_TIF_SECCOMP-flag.patch
bugfix/all/netfilter-ipv4-defrag-set-local_df-flag-on-defragmen.patch
features/all/msi-sysfs/PCI-sysfs-add-per-pci-device-msi-x-irq-listing-v5.patch
features/all/msi-sysfs/PCI-msi-fix-imbalanced-refcount-of-msi-irq-sysfs-obj.patch
@@ -1124,22 +1118,39 @@
features/all/msi-sysfs/PCI-MSI-Fix-leak-of-msi_attrs.patch
features/all/msi-sysfs/PCI-MSI-Fix-memory-leak-in-free_msi_irqs.patch
features/all/msi-sysfs/pci-msi_desc-remove-unused-kobject.patch
-bugfix/all/auditsc-audit_krule-mask-accesses-need-bounds-checki.patch
bugfix/all/mm-add-pte_present-check-on-existing-hugetlb_entry-c.patch
-bugfix/x86/x86_32-entry-Do-syscall-exit-work-on-badsys-CVE-2014.patch
-bugfix/all/ALSA-control-Protect-user-controls-against-concurren.patch
-bugfix/all/ALSA-control-Don-t-access-controls-outside-of-protec.patch
-bugfix/all/ALSA-control-Fix-replacing-user-controls.patch
-bugfix/all/ALSA-control-Make-sure-that-id-index-does-not-overfl.patch
-bugfix/all/ALSA-control-Handle-numid-overflow.patch
-bugfix/all/target-explicitly-clear-ramdisk_mcp-backend-pages.patch
debian/alsa-avoid-abi-change-for-cve-2014-4652-fix.patch
# CVE-2014-4699
-bugfix/all/0001-ptrace-x86-force-IRET-path-after-a-ptrace_stop.patch
-bugfix/all/revert-net-ipv4-ip_forward-fix-inverted-local_df-tes.patch
-bugfix/all/revert-net-ip-ipv6-handle-gso-skbs-in-forwarding-pat.patch
-bugfix/all/net-l2tp-don-t-fall-back-on-UDP-get-set-sockopt.patch
-bugfix/all/sctp-fix-sk_ack_backlog-wrap-around-problem.patch
-bugfix/s390/s390-ptrace-fix-PSW-mask-check.patch
+debian/irq-avoid-abi-change-in-3.2.61.patch
+debian/ptrace-avoid-abi-change-in-3.2.61.patch
+debian/trace-syscall-avoid-abi-change-in-3.2.61.patch
+debian/scsi-avoid-abi-change-in-3.2.61.patch
+debian/libata-avoid-abi-change-in-3.2.62.patch
+debian/ip-ident-avoid-abi-change-in-3.2.63.patch
+debian/scsi-avoid-abi-change-in-3.2.62.patch
+debian/nlattr-avoid-abi-change-in-3.2.61.patch
+bugfix/all/nfsd-fix-acl-null-pointer-deref.patch
+bugfix/all/ext4-fix-BUG_ON-in-mb_free_blocks.patch
+bugfix/all/udf-Avoid-infinite-loop-when-processing-indirect-ICB.patch
+bugfix/all/libceph-add-process_one_ticket-helper.patch
+bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch
+debian/sp5100_tco-reject-sb8x0-chips.patch
+bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
+bugfix/all/drivers-net-disable-ufo-through-virtio.patch
+bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
+debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
+bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
+bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
+bugfix/x86/nEPT-Nested-INVEPT.patch
+bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
+bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
+bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
+bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
+bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
+bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
+bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
+bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
+bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
+bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch
Modified: dists/squeeze-backports/linux/debian/patches/series-rt
==============================================================================
--- dists/squeeze-backports/linux/debian/patches/series-rt Sun Nov 2 01:20:39 2014 (r22018)
+++ dists/squeeze-backports/linux/debian/patches/series-rt Sun Nov 2 01:27:57 2014 (r22019)
@@ -1,3 +1,4 @@
+features/all/rt/revert-rtmutex-changes-in-3.2.61.patch
features/all/rt/0001-Revert-workqueue-skip-nr_running-sanity-check-in-wor.patch
features/all/rt/0002-x86-Call-idle-notifier-after-irq_enter.patch
features/all/rt/0003-slab-lockdep-Annotate-all-slab-caches.patch