[kernel] r15619 - in dists/trunk/linux-2.6/debian: . patches/bugfix/all/stable patches/series
Ben Hutchings
benh at alioth.debian.org
Tue May 4 02:16:08 UTC 2010
Author: benh
Date: Tue May 4 02:16:05 2010
New Revision: 15619
Log:
Add stable 2.6.33.3
Added:
dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.33.3.patch
Modified:
dists/trunk/linux-2.6/debian/changelog
dists/trunk/linux-2.6/debian/patches/series/base
Modified: dists/trunk/linux-2.6/debian/changelog
==============================================================================
--- dists/trunk/linux-2.6/debian/changelog Tue May 4 02:12:52 2010 (r15618)
+++ dists/trunk/linux-2.6/debian/changelog Tue May 4 02:16:05 2010 (r15619)
@@ -10,6 +10,9 @@
* [x86] Enable ramzswap driver (Closes: #573912)
* [x86] Re-enable rt2860sta and rt2870sta drivers which were accidentally
disabled when moving to Linux 2.6.33 (Closes: #576723)
+ * Add stable 2.6.33.3:
+ - ACPI: EC: Allow multibyte access to EC; fixes temperature monitoring
+ on some Dell laptops (Closes: #563313)
[ Aurelien Jarno ]
* Add support for sh4 architecture, patch by Nobuhiro Iwamatsu
Added: dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.33.3.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.33.3.patch Tue May 4 02:16:05 2010 (r15619)
@@ -0,0 +1,5186 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index 81c0c59..e1bb5b2 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -15,7 +15,8 @@ Supported adapters:
+ * Intel 82801I (ICH9)
+ * Intel EP80579 (Tolapai)
+ * Intel 82801JI (ICH10)
+- * Intel PCH
++ * Intel 3400/5 Series (PCH)
++ * Intel Cougar Point (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ Authors:
+diff --git a/Makefile b/Makefile
+index 35160e3..de3e66c 100644
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 6b84a04..cbeb6e0 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -172,7 +172,7 @@ not_angel:
+ adr r0, LC0
+ ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
+ THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
+- THUMB( ldr sp, [r0, #28] )
++ THUMB( ldr sp, [r0, #32] )
+ subs r0, r0, r1 @ calculate the delta offset
+
+ @ if delta is zero, we are
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 5fdeec5..d76279a 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ {
+ struct kvm_memory_slot *memslot;
+ int r, i;
+- long n, base;
++ long base;
++ unsigned long n;
+ unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+ offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
+
+@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ base = memslot->base_gfn / BITS_PER_LONG;
+
+ for (i = 0; i < n/sizeof(long); ++i) {
+@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+ {
+ int r;
+- int n;
++ unsigned long n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ if (is_dirty) {
+ kvm_flush_remote_tlbs(kvm);
+ memslot = &kvm->memslots[log->slot];
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
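
The same dirty-log sizing fix recurs below for powerpc and x86: the open-coded ALIGN(npages, BITS_PER_LONG) / 8 becomes kvm_dirty_bitmap_bytes() and n is widened to unsigned long so a very large memslot cannot overflow a signed int. A quick worked example of the sizing, as a standalone sketch (BITS_PER_LONG = 64 and the ALIGN definition are the only assumptions):

    #include <stdio.h>

    #define BITS_PER_LONG 64
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        /* a 4 GiB slot of 4 KiB pages: one dirty bit per page */
        unsigned long npages = 1UL << 20;
        unsigned long n = ALIGN(npages, BITS_PER_LONG) / 8;

        printf("%lu pages -> %lu bitmap bytes\n", npages, n); /* -> 131072 */
        return 0;
    }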
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 3e294bd..e6dc595 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_vcpu *vcpu;
+ ulong ga, ga_end;
+ int is_dirty = 0;
+- int r, n;
++ int r;
++ unsigned long n;
+
+ down_write(&kvm->slots_lock);
+
+@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ kvm_for_each_vcpu(n, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
+index 22574e0..202d869 100644
+--- a/arch/powerpc/platforms/pseries/offline_states.h
++++ b/arch/powerpc/platforms/pseries/offline_states.h
+@@ -9,10 +9,30 @@ enum cpu_state_vals {
+ CPU_MAX_OFFLINE_STATES
+ };
+
++#ifdef CONFIG_HOTPLUG_CPU
+ extern enum cpu_state_vals get_cpu_current_state(int cpu);
+ extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
+-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
+ extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
+ extern void set_default_offline_state(int cpu);
++#else
++static inline enum cpu_state_vals get_cpu_current_state(int cpu)
++{
++ return CPU_STATE_ONLINE;
++}
++
++static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
++{
++}
++
++static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
++{
++}
++
++static inline void set_default_offline_state(int cpu)
++{
++}
++#endif
++
++extern enum cpu_state_vals get_preferred_offline_state(int cpu);
+ extern int start_secondary(void);
+ #endif
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 300ab01..5f91a38 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
+ pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+ if (!pte)
+ return NULL;
+- if (MACHINE_HAS_HPAGE)
+- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
+- PTRS_PER_PTE * sizeof(pte_t));
+- else
+- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+- PTRS_PER_PTE * sizeof(pte_t));
++ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
++ PTRS_PER_PTE * sizeof(pte_t));
+ return pte;
+ }
+
+@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+ if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+ (address + HPAGE_SIZE <= start + size) &&
+ (address >= HPAGE_SIZE)) {
+- pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
+- _SEGMENT_ENTRY_CO;
++ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = pte_val(pte);
+ address += HPAGE_SIZE - PAGE_SIZE;
+ continue;
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index ac04255..ce830fa 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
+
+ #define VSYSCALL_AUX_ENT \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
++ else \
++ NEW_AUX_ENT(AT_IGNORE, 0);
+ #else
+ #define VSYSCALL_AUX_ENT
+ #endif /* CONFIG_VSYSCALL */
+@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
+ #ifdef CONFIG_SH_FPU
+ #define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
+ #else
+-#define FPU_AUX_ENT
++#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif
+
+ extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
+index 983e079..1d19c19 100644
+--- a/arch/sh/kernel/smp.c
++++ b/arch/sh/kernel/smp.c
+@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
+ unsigned int cpu;
+ struct mm_struct *mm = &init_mm;
+
++ enable_mmu();
+ atomic_inc(&mm->mm_count);
+ atomic_inc(&mm->mm_users);
+ current->active_mm = mm;
+diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
+index 7e3dfd9..e608f39 100644
+--- a/arch/sparc/kernel/ptrace_32.c
++++ b/arch/sparc/kernel/ptrace_32.c
+@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
+ }
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+ put_user(reg, u++))
+@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
+ }
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
+ put_user(reg, &reg_window[pos++]))
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 2f6524d..aa90da0 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
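
In both sparc files the bug is the same: positions 0-15 come from pt_regs, while positions 16-31 live in the register window that %i6 (UREG_I6) points at, whose slots start at offset 0. Biasing the window pointer by -16 lets the loops keep indexing with the absolute position. A minimal sketch of the corrected indexing (the array is a stand-in for the user-space save area):

    #include <stdio.h>

    int main(void)
    {
        unsigned long window[16] = { 0 };   /* stand-in for the window save area */
        unsigned long *reg_window = window;

        reg_window -= 16;                   /* the fix: bias by 16 slots */
        for (int pos = 16; pos < 32; pos++)
            reg_window[pos] = pos;          /* lands in window[pos - 16] */

        printf("%lu ... %lu\n", window[0], window[15]);  /* 16 ... 31 */
        return 0;
    }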
+diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
+index 2201e9c..c1ea9eb 100644
+--- a/arch/um/sys-x86_64/Makefile
++++ b/arch/um/sys-x86_64/Makefile
+@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
+ setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
+ sysrq.o ksyms.o tls.o
+
+-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
++subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
++ lib/rwsem_64.o
+ subarch-obj-$(CONFIG_MODULES) += kernel/module.o
+
+ ldt-y = ../sys-i386/ldt.o
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index f20ddf8..a198293 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
+
+ config X86_XADD
+ def_bool y
+- depends on X86_32 && !M386
++ depends on X86_64 || !M386
+
+ config X86_PPRO_FENCE
+ bool "PentiumPro memory ordering errata workaround"
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index ca7517d..606ede1 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -41,6 +41,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
++#include <asm/asm.h>
+
+ struct rwsem_waiter;
+
+@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
+
+ /*
+ * the semaphore definition
++ *
++ * The bias values and the counter type limits the number of
++ * potential readers/writers to 32767 for 32 bits and 2147483647
++ * for 64 bits.
+ */
+
+-#define RWSEM_UNLOCKED_VALUE 0x00000000
+-#define RWSEM_ACTIVE_BIAS 0x00000001
+-#define RWSEM_ACTIVE_MASK 0x0000ffff
+-#define RWSEM_WAITING_BIAS (-0x00010000)
++#ifdef CONFIG_X86_64
++# define RWSEM_ACTIVE_MASK 0xffffffffL
++#else
++# define RWSEM_ACTIVE_MASK 0x0000ffffL
++#endif
++
++#define RWSEM_UNLOCKED_VALUE 0x00000000L
++#define RWSEM_ACTIVE_BIAS 0x00000001L
++#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+ #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+ #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
++typedef signed long rwsem_count_t;
++
+ struct rw_semaphore {
+- signed long count;
++ rwsem_count_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -105,7 +117,7 @@ do { \
+ static inline void __down_read(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning down_read\n\t"
+- LOCK_PREFIX " incl (%%eax)\n\t"
++ LOCK_PREFIX _ASM_INC "(%1)\n\t"
+ /* adds 0x00000001, returns the old value */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ */
+ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ {
+- __s32 result, tmp;
++ rwsem_count_t result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+- " movl %0,%1\n\t"
++ " mov %0,%1\n\t"
+ "1:\n\t"
+- " movl %1,%2\n\t"
+- " addl %3,%2\n\t"
++ " mov %1,%2\n\t"
++ " add %3,%2\n\t"
+ " jle 2f\n\t"
+- LOCK_PREFIX " cmpxchgl %2,%0\n\t"
++ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ "# ending __down_read_trylock\n\t"
+@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ */
+ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+ {
+- int tmp;
++ rwsem_count_t tmp;
+
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtract 0x0000ffff, returns the old value */
+- " testl %%edx,%%edx\n\t"
++ " test %1,%1\n\t"
+ /* was the count 0 before? */
+ " jz 1f\n"
+ " call call_rwsem_down_write_failed\n"
+@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
+ */
+ static inline int __down_write_trylock(struct rw_semaphore *sem)
+ {
+- signed long ret = cmpxchg(&sem->count,
+- RWSEM_UNLOCKED_VALUE,
+- RWSEM_ACTIVE_WRITE_BIAS);
++ rwsem_count_t ret = cmpxchg(&sem->count,
++ RWSEM_UNLOCKED_VALUE,
++ RWSEM_ACTIVE_WRITE_BIAS);
+ if (ret == RWSEM_UNLOCKED_VALUE)
+ return 1;
+ return 0;
+@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
+ */
+ static inline void __up_read(struct rw_semaphore *sem)
+ {
+- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
++ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n"
+@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem)
+ */
+ static inline void __up_write(struct rw_semaphore *sem)
+ {
++ rwsem_count_t tmp;
+ asm volatile("# beginning __up_write\n\t"
+- " movl %2,%%edx\n\t"
+- LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+ " jz 1f\n"
+ " call call_rwsem_wake\n"
+ "1:\n\t"
+ "# ending __up_write\n"
+- : "+m" (sem->count)
+- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+- : "memory", "cc", "edx");
++ : "+m" (sem->count), "=d" (tmp)
++ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
++ : "memory", "cc");
+ }
+
+ /*
+@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
+ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+- LOCK_PREFIX " addl %2,(%%eax)\n\t"
+- /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
++ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++ /*
++ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
++ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
++ */
+ " jns 1f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+ "1:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
++ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : "memory", "cc");
+ }
+
+ /*
+ * implement atomic add functionality
+ */
+-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
++static inline void rwsem_atomic_add(rwsem_count_t delta,
++ struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+ : "+m" (sem->count)
+- : "ir" (delta));
++ : "er" (delta));
+ }
+
+ /*
+ * implement exchange and add functionality
+ */
+-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
++static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
++ struct rw_semaphore *sem)
+ {
+- int tmp = delta;
++ rwsem_count_t tmp = delta;
+
+ asm volatile(LOCK_PREFIX "xadd %0,%1"
+ : "+r" (tmp), "+m" (sem->count)
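
The widened rwsem_count_t is what raises the reader limit from 32767 to 2147483647 on x86-64; the bias arithmetic itself is unchanged. A small sketch of how the counter encodes reader/writer state, using the 64-bit constants defined above (assumes an LP64 build):

    #include <stdio.h>

    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_UNLOCKED_VALUE    0x00000000L
    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_BIAS;                  /* down_read: one reader */
        count += RWSEM_ACTIVE_BIAS;                  /* a second reader */
        printf("readers: %ld\n", count & RWSEM_ACTIVE_MASK);     /* 2 */

        count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;  /* down_write */
        printf("writer: count=%ld active=%ld\n",
               count, count & RWSEM_ACTIVE_MASK);    /* negative count, active=1 */
        return 0;
    }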
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 1e79678..4cfc908 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -135,6 +135,8 @@ int native_cpu_disable(void);
+ void native_cpu_die(unsigned int cpu);
+ void native_play_dead(void);
+ void play_dead_common(void);
++void wbinvd_on_cpu(int cpu);
++int wbinvd_on_all_cpus(void);
+
+ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
+ {
+ return cpumask_weight(cpu_callout_mask);
+ }
++#else /* !CONFIG_SMP */
++#define wbinvd_on_cpu(cpu) wbinvd()
++static inline int wbinvd_on_all_cpus(void)
++{
++ wbinvd();
++ return 0;
++}
+ #endif /* CONFIG_SMP */
+
+ extern unsigned disabled_cpus __cpuinitdata;
+diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
+index adb0ba0..2e77516 100644
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
+ list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+ struct device *dev = dev_data->dev;
+
+- do_detach(dev);
++ __detach_device(dev);
+ atomic_set(&dev_data->bind, 0);
+ }
+
+@@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+
+ free_pagetable(domain);
+
+- domain_id_free(domain->id);
+-
+- kfree(domain);
++ protection_domain_free(domain);
+
+ dom->priv = NULL;
+ }
+diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
+index 9dc91b4..883d619 100644
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
+ if (ret)
+ goto free;
+
++ enable_iommus();
++
+ if (iommu_pass_through)
+ ret = amd_iommu_init_passthrough();
+ else
+@@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
+
+ amd_iommu_init_notifier();
+
+- enable_iommus();
+-
+ if (iommu_pass_through)
+ goto out;
+
+@@ -1315,6 +1315,7 @@ out:
+ return ret;
+
+ free:
++ disable_iommus();
+
+ amd_iommu_uninit_devices();
+
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index f147a95..19f2c70 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
+ for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ int bus;
+ int dev_base, dev_limit;
++ u32 ctl;
+
+ bus = bus_dev_ranges[i].bus;
+ dev_base = bus_dev_ranges[i].dev_base;
+@@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
+ gart_iommu_aperture = 1;
+ x86_init.iommu.iommu_init = gart_iommu_init;
+
+- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
++ ctl = read_pci_config(bus, slot, 3,
++ AMD64_GARTAPERTURECTL);
++
++ /*
++ * Before we do anything else disable the GART. It may
++ * still be enabled if we boot into a crash-kernel here.
++ * Reconfiguring the GART while it is enabled could have
++ * unknown side-effects.
++ */
++ ctl &= ~GARTEN;
++ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
++
++ aper_order = (ctl >> 1) & 7;
+ aper_size = (32 * 1024 * 1024) << aper_order;
+ aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
+ aper_base <<= 25;
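
For reference, bits 3:1 of AMD64_GARTAPERTURECTL select a power-of-two aperture starting at 32 MB, which is why the hunk can clear GARTEN first and still recover the size from the saved ctl value. A worked decode (the register value here is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int ctl = 0x05;                      /* illustrative value */
        unsigned int garten = ctl & 1;                /* GARTEN, cleared by the fix */
        unsigned int aper_order = (ctl >> 1) & 7;
        unsigned long aper_size = (32UL * 1024 * 1024) << aper_order;

        printf("GARTEN=%u order=%u size=%lu MB\n",
               garten, aper_order, aper_size >> 20);  /* GARTEN=1 order=2 size=128 MB */
        return 0;
    }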
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index dfca210..d4df517 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
+ }
+ #endif
+
++#ifndef CONFIG_SMP
+ enable_IR_x2apic();
+ default_setup_apic_routing();
++#endif
+
+ verify_local_APIC();
+ connect_bsp_APIC();
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index fc6c8ef..d440123 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -18,6 +18,7 @@
+ #include <asm/processor.h>
+ #include <linux/smp.h>
+ #include <asm/k8.h>
++#include <asm/smp.h>
+
+ #define LVL_1_INST 1
+ #define LVL_1_DATA 2
+@@ -150,7 +151,8 @@ struct _cpuid4_info {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+
+@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ };
+
+ unsigned short num_cache_leaves;
+@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+ (ebx->split.ways_of_associativity + 1) - 1;
+ }
+
++struct _cache_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct _cpuid4_info *, char *);
++ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
++};
++
++#ifdef CONFIG_CPU_SUP_AMD
++static unsigned int __cpuinit amd_calc_l3_indices(void)
++{
++ /*
++ * We're called over smp_call_function_single() and therefore
++ * are on the correct cpu.
++ */
++ int cpu = smp_processor_id();
++ int node = cpu_to_node(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int sc0, sc1, sc2, sc3;
++ u32 val = 0;
++
++ pci_read_config_dword(dev, 0x1C4, &val);
++
++ /* calculate subcache sizes */
++ sc0 = !(val & BIT(0));
++ sc1 = !(val & BIT(4));
++ sc2 = !(val & BIT(8)) + !(val & BIT(9));
++ sc3 = !(val & BIT(12)) + !(val & BIT(13));
++
++ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
++}
++
+ static void __cpuinit
+ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ {
+@@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ if (boot_cpu_data.x86 == 0x11)
+ return;
+
+- /* see erratum #382 */
+- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
++ /* see errata #382 and #388 */
++ if ((boot_cpu_data.x86 == 0x10) &&
++ ((boot_cpu_data.x86_model < 0x8) ||
++ (boot_cpu_data.x86_mask < 0x1)))
+ return;
+
+- this_leaf->can_disable = 1;
++ this_leaf->can_disable = true;
++ this_leaf->l3_indices = amd_calc_l3_indices();
++}
++
++static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
++ unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int reg = 0;
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!dev)
++ return -EINVAL;
++
++ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
++ return sprintf(buf, "0x%08x\n", reg);
++}
++
++#define SHOW_CACHE_DISABLE(index) \
++static ssize_t \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
++{ \
++ return show_cache_disable(this_leaf, buf, index); \
++}
++SHOW_CACHE_DISABLE(0)
++SHOW_CACHE_DISABLE(1)
++
++static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
++ const char *buf, size_t count, unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned long val = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX 0xfff
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (!dev)
++ return -EINVAL;
++
++ if (strict_strtoul(buf, 10, &val) < 0)
++ return -EINVAL;
++
++ /* do not allow writes outside of allowed bits */
++ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
++ return -EINVAL;
++
++ val |= BIT(30);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val);
++ /*
++ * We need to WBINVD on a core on the node containing the L3 cache which
++ * indices we disable therefore a simple wbinvd() is not sufficient.
++ */
++ wbinvd_on_cpu(cpu);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
++ return count;
++}
++
++#define STORE_CACHE_DISABLE(index) \
++static ssize_t \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
++ const char *buf, size_t count) \
++{ \
++ return store_cache_disable(this_leaf, buf, count, index); \
+ }
++STORE_CACHE_DISABLE(0)
++STORE_CACHE_DISABLE(1)
++
++static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
++ show_cache_disable_0, store_cache_disable_0);
++static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
++ show_cache_disable_1, store_cache_disable_1);
++
++#else /* CONFIG_CPU_SUP_AMD */
++static void __cpuinit
++amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
++{
++};
++#endif /* CONFIG_CPU_SUP_AMD */
+
+ static int
+ __cpuinit cpuid4_cache_lookup_regs(int index,
+@@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+ #define to_object(k) container_of(k, struct _index_kobject, kobj)
+ #define to_attr(a) container_of(a, struct _cache_attr, attr)
+
+-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+- unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned int reg = 0;
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+- return sprintf(buf, "%x\n", reg);
+-}
+-
+-#define SHOW_CACHE_DISABLE(index) \
+-static ssize_t \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+-{ \
+- return show_cache_disable(this_leaf, buf, index); \
+-}
+-SHOW_CACHE_DISABLE(0)
+-SHOW_CACHE_DISABLE(1)
+-
+-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+- const char *buf, size_t count, unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned long val = 0;
+- unsigned int scrubber = 0;
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!capable(CAP_SYS_ADMIN))
+- return -EPERM;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- if (strict_strtoul(buf, 10, &val) < 0)
+- return -EINVAL;
+-
+- val |= 0xc0000000;
+-
+- pci_read_config_dword(dev, 0x58, &scrubber);
+- scrubber &= ~0x1f000000;
+- pci_write_config_dword(dev, 0x58, scrubber);
+-
+- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+- wbinvd();
+- pci_write_config_dword(dev, 0x1BC + index * 4, val);
+- return count;
+-}
+-
+-#define STORE_CACHE_DISABLE(index) \
+-static ssize_t \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+- const char *buf, size_t count) \
+-{ \
+- return store_cache_disable(this_leaf, buf, count, index); \
+-}
+-STORE_CACHE_DISABLE(0)
+-STORE_CACHE_DISABLE(1)
+-
+-struct _cache_attr {
+- struct attribute attr;
+- ssize_t (*show)(struct _cpuid4_info *, char *);
+- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+-};
+-
+ #define define_one_ro(_name) \
+ static struct _cache_attr _name = \
+ __ATTR(_name, 0444, show_##_name, NULL)
+@@ -801,23 +849,28 @@ define_one_ro(size);
+ define_one_ro(shared_cpu_map);
+ define_one_ro(shared_cpu_list);
+
+-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+- show_cache_disable_0, store_cache_disable_0);
+-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+- show_cache_disable_1, store_cache_disable_1);
++#define DEFAULT_SYSFS_CACHE_ATTRS \
++ &type.attr, \
++ &level.attr, \
++ &coherency_line_size.attr, \
++ &physical_line_partition.attr, \
++ &ways_of_associativity.attr, \
++ &number_of_sets.attr, \
++ &size.attr, \
++ &shared_cpu_map.attr, \
++ &shared_cpu_list.attr
+
+ static struct attribute *default_attrs[] = {
+- &type.attr,
+- &level.attr,
+- &coherency_line_size.attr,
+- &physical_line_partition.attr,
+- &ways_of_associativity.attr,
+- &number_of_sets.attr,
+- &size.attr,
+- &shared_cpu_map.attr,
+- &shared_cpu_list.attr,
++ DEFAULT_SYSFS_CACHE_ATTRS,
++ NULL
++};
++
++static struct attribute *default_l3_attrs[] = {
++ DEFAULT_SYSFS_CACHE_ATTRS,
++#ifdef CONFIG_CPU_SUP_AMD
+ &cache_disable_0.attr,
+ &cache_disable_1.attr,
++#endif
+ NULL
+ };
+
+@@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ unsigned int cpu = sys_dev->id;
+ unsigned long i, j;
+ struct _index_kobject *this_object;
++ struct _cpuid4_info *this_leaf;
+ int retval;
+
+ retval = cpuid4_cache_sysfs_init(cpu);
+@@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
+ this_object->cpu = cpu;
+ this_object->index = i;
++
++ this_leaf = CPUID4_INFO_IDX(cpu, i);
++
++ if (this_leaf->can_disable)
++ ktype_cache.default_attrs = default_l3_attrs;
++ else
++ ktype_cache.default_attrs = default_attrs;
++
+ retval = kobject_init_and_add(&(this_object->kobj),
+ &ktype_cache,
+ per_cpu(ici_cache_kobject, cpu),
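
With this reordering, the cache_disable_[01] attributes are only published for leaves where can_disable is set, and writes are bounds-checked against l3_indices. A hedged user-space sketch of poking one of those files (the sysfs path follows the usual layout; the index value is purely illustrative and the write needs CAP_SYS_ADMIN):

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fprintf(f, "42\n");  /* parsed by strict_strtoul(), rejected if > l3_indices */
        fclose(f);
        return 0;
    }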
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 98819b3..c7ca8e2 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
++static const u64 westmere_hw_cache_event_ids
++ [PERF_COUNT_HW_CACHE_MAX]
++ [PERF_COUNT_HW_CACHE_OP_MAX]
++ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
++{
++ [ C(L1D) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
++ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
++ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
++ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
++ },
++ },
++ [ C(L1I ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
++ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0,
++ [ C(RESULT_MISS) ] = 0x0,
++ },
++ },
++ [ C(LL ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
++ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
++ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
++ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
++ },
++ },
++ [ C(DTLB) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
++ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
++ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0,
++ [ C(RESULT_MISS) ] = 0x0,
++ },
++ },
++ [ C(ITLB) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
++ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ },
++ [ C(BPU ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
++ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ },
++};
++
+ static __initconst u64 nehalem_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
+ * Install the hw-cache-events table:
+ */
+ switch (boot_cpu_data.x86_model) {
++
+ case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+@@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
+ event_constraints = intel_core_event_constraints;
+ break;
+ default:
+- case 26:
++ case 26: /* 45 nm nehalem, "Bloomfield" */
++ case 30: /* 45 nm nehalem, "Lynnfield" */
++ case 46: /* 45 nm nehalem-ex, "Beckton" */
+ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+@@ -2142,6 +2236,14 @@ static __init int intel_pmu_init(void)
+
+ pr_cont("Atom events, ");
+ break;
++
++ case 37: /* 32 nm nehalem, "Clarkdale" */
++ case 44: /* 32 nm nehalem, "Gulftown" */
++ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
++ sizeof(hw_cache_event_ids));
++
++ pr_cont("Westmere events, ");
++ break;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index a4849c1..ebd4c51 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -27,7 +27,6 @@
+ #include <asm/cpu.h>
+ #include <asm/reboot.h>
+ #include <asm/virtext.h>
+-#include <asm/x86_init.h>
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+
+@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+-
+-#ifdef CONFIG_X86_64
+- x86_platform.iommu_shutdown();
+-#endif
+-
+ crash_save_cpu(regs, safe_smp_processor_id());
+ }
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index ad80a1c..773afc9 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
+ * then we might have a real hardware problem. We can not do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
++ * An erratum on some chipsets (ICH9,..), results in comparator read
++ * immediately following a write returning old value. Workaround
++ * for this is to read this value second time, when first
++ * read returns old value.
+ */
+- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
++ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
++ WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+ KERN_WARNING "hpet: compare register read back failed.\n");
++ }
+
+ return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
+ }
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index bfba601..b2258ca 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
+ * portion of kgdb because this operation requires mutexs to
+ * complete.
+ */
++ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+- attr.type = PERF_TYPE_BREAKPOINT;
+ attr.bp_len = HW_BREAKPOINT_LEN_1;
+ attr.bp_type = HW_BREAKPOINT_W;
+ attr.disabled = 1;
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index a2c1edd..e81030f 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
+ {
+ unsigned long size = get_mpc_size(mpf->physptr);
+
+- reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
++ reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
+ }
+
+ static int __init smp_scan_config(unsigned long base, unsigned long length)
+@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
+ mpf, (u64)virt_to_phys(mpf));
+
+ mem = virt_to_phys(mpf);
+- reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
++ reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
+ if (mpf->physptr)
+ smp_reserve_memory(mpf);
+
+diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
+index 34de53b..4f41b29 100644
+--- a/arch/x86/kernel/pci-gart_64.c
++++ b/arch/x86/kernel/pci-gart_64.c
+@@ -564,6 +564,9 @@ static void enable_gart_translations(void)
+
+ enable_gart_translation(dev, __pa(agp_gatt_table));
+ }
++
++ /* Flush the GART-TLB to remove stale entries */
++ k8_flush_garts();
+ }
+
+ /*
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 89a49fb..28c3d81 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
+ for_each_sp(pages, sp, parents, i) {
+ kvm_mmu_zap_page(kvm, sp);
+ mmu_pages_clear_parents(&parents);
++ zapped++;
+ }
+- zapped += pages.nr;
+ kvm_mmu_pages_init(parent, &parents, &pages);
+ }
+
+@@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+ */
+
+ if (used_pages > kvm_nr_mmu_pages) {
+- while (used_pages > kvm_nr_mmu_pages) {
++ while (used_pages > kvm_nr_mmu_pages &&
++ !list_empty(&kvm->arch.active_mmu_pages)) {
+ struct kvm_mmu_page *page;
+
+ page = container_of(kvm->arch.active_mmu_pages.prev,
+ struct kvm_mmu_page, link);
+- kvm_mmu_zap_page(kvm, page);
++ used_pages -= kvm_mmu_zap_page(kvm, page);
+ used_pages--;
+ }
++ kvm_nr_mmu_pages = used_pages;
+ kvm->arch.n_free_mmu_pages = 0;
+ }
+ else
+@@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
+ && !sp->role.invalid) {
+ pgprintk("%s: zap %lx %x\n",
+ __func__, gfn, sp->role.word);
+- kvm_mmu_zap_page(kvm, sp);
++ if (kvm_mmu_zap_page(kvm, sp))
++ nn = bucket->first;
+ }
+ }
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 1d9b338..d42e191 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+ if (err)
+ goto free_svm;
+
++ err = -ENOMEM;
+ page = alloc_page(GFP_KERNEL);
+- if (!page) {
+- err = -ENOMEM;
++ if (!page)
+ goto uninit;
+- }
+
+- err = -ENOMEM;
+ msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!msrpm_pages)
+- goto uninit;
++ goto free_page1;
+
+ nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!nested_msrpm_pages)
+- goto uninit;
+-
+- svm->msrpm = page_address(msrpm_pages);
+- svm_vcpu_init_msrpm(svm->msrpm);
++ goto free_page2;
+
+ hsave_page = alloc_page(GFP_KERNEL);
+ if (!hsave_page)
+- goto uninit;
++ goto free_page3;
++
+ svm->nested.hsave = page_address(hsave_page);
+
++ svm->msrpm = page_address(msrpm_pages);
++ svm_vcpu_init_msrpm(svm->msrpm);
++
+ svm->nested.msrpm = page_address(nested_msrpm_pages);
+
+ svm->vmcb = page_address(page);
+@@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+
+ return &svm->vcpu;
+
++free_page3:
++ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
++free_page2:
++ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
++free_page1:
++ __free_page(page);
+ uninit:
+ kvm_vcpu_uninit(&svm->vcpu);
+ free_svm:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8a8e139..3acbe19 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
+ static int __read_mostly emulate_invalid_guest_state = 0;
+ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+
++#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
++
+ /*
+ * These 2 parameters are used to config the controls for Pause-Loop Exiting:
+ * ple_gap: upper bound on the amount of time between two successive
+@@ -115,7 +117,7 @@ struct vcpu_vmx {
+ } host_state;
+ struct {
+ int vm86_active;
+- u8 save_iopl;
++ ulong save_rflags;
+ struct kvm_save_segment {
+ u16 selector;
+ unsigned long base;
+@@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+
+ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+ {
+- unsigned long rflags;
++ unsigned long rflags, save_rflags;
+
+ rflags = vmcs_readl(GUEST_RFLAGS);
+- if (to_vmx(vcpu)->rmode.vm86_active)
+- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
++ if (to_vmx(vcpu)->rmode.vm86_active) {
++ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
++ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
++ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
++ }
+ return rflags;
+ }
+
+ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ {
+- if (to_vmx(vcpu)->rmode.vm86_active)
++ if (to_vmx(vcpu)->rmode.vm86_active) {
++ to_vmx(vcpu)->rmode.save_rflags = rflags;
+ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
++ }
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
+@@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
+ vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
++ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
++ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ vmcs_writel(GUEST_RFLAGS, flags);
+
+ vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+@@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
+ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+- vmx->rmode.save_iopl
+- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
++ vmx->rmode.save_rflags = flags;
+
+ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+
+@@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+ kvm_queue_exception(vcpu, vec);
+ return 1;
+ case BP_VECTOR:
++ /*
++ * Update instruction length as we may reinject the exception
++ * from user space while in guest debugging mode.
++ */
++ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ return 0;
+ /* fall through */
+@@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
+ /* fall through */
+ case BP_VECTOR:
++ /*
++ * Update instruction length as we may reinject #BP from
++ * user space while in guest debugging mode. Reading it for
++ * #DB as well causes no harm, it is not used in that case.
++ */
++ vmx->vcpu.arch.event_exit_inst_len =
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.exception = ex_no;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e900908..dd78927 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -384,21 +384,16 @@ out:
+ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ if (cr0 & CR0_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
+- cr0, vcpu->arch.cr0);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
+- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
+- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
+- "and a clear PE flag\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ int cs_db, cs_l;
+
+ if (!is_pae(vcpu)) {
+- printk(KERN_DEBUG "set_cr0: #GP, start paging "
+- "in long mode while PAE is disabled\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ if (cs_l) {
+- printk(KERN_DEBUG "set_cr0: #GP, start paging "
+- "in long mode while CS.L == 1\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+
+@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ } else
+ #endif
+ if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
+- "reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
+ if (cr4 & CR4_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (is_long_mode(vcpu)) {
+ if (!(cr4 & X86_CR4_PAE)) {
+- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
+- "in long mode\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+ && ((cr4 ^ old_cr4) & pdptr_bits)
+ && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (cr4 & X86_CR4_VMXE) {
+- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+
+ if (is_long_mode(vcpu)) {
+ if (cr3 & CR3_L_MODE_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ } else {
+ if (is_pae(vcpu)) {
+ if (cr3 & CR3_PAE_RESERVED_BITS) {
+- printk(KERN_DEBUG
+- "set_cr3: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
+- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
+- "reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
+ void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+ {
+ if (cr8 & CR8_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
+ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+ if (efer & efer_reserved_bits) {
+- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
+- efer);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (is_paging(vcpu)
+ && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
+- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
+- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (msr >= MSR_IA32_MC0_CTL &&
+ msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+ u32 offset = msr - MSR_IA32_MC0_CTL;
+- /* only 0 or all 1s can be written to IA32_MCi_CTL */
++ /* only 0 or all 1s can be written to IA32_MCi_CTL
++ * some Linux kernels though clear bit 10 in bank 4 to
++ * workaround a BIOS/GART TBL issue on AMD K8s, ignore
++ * this to avoid an uncatched #GP in the guest
++ */
+ if ((offset & 0x3) == 0 &&
+- data != 0 && data != ~(u64)0)
++ data != 0 && (data | (1 << 10)) != ~(u64)0)
+ return -1;
+ vcpu->arch.mce_banks[offset] = data;
+ break;
+@@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+ {
+ int r;
+- int n;
++ unsigned long n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+@@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
+ memslot = &kvm->memslots[log->slot];
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+@@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+ int ret = 0;
+ u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+ u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
++ u32 desc_limit;
+
+ old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+
+@@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+ }
+ }
+
+- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
++ desc_limit = get_desc_limit(&nseg_desc);
++ if (!nseg_desc.p ||
++ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
++ desc_limit < 0x2b)) {
+ kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+ return 1;
+ }
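
The MCE hunk above loosens the IA32_MCi_CTL check because Linux guests clear bit 10 in bank 4 to mask the K8 GART TLB walk error; treating that pattern as all-ones avoids injecting a spurious #GP. The accepted values, as a sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* valid writes: 0, all ones, or all ones with bit 10 cleared */
    static int mci_ctl_write_ok(uint64_t data)
    {
        return data == 0 || (data | (1 << 10)) == ~(uint64_t)0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               mci_ctl_write_ok(~(uint64_t)0),              /* 1 */
               mci_ctl_write_ok(~(uint64_t)0 ^ (1 << 10)),  /* 1: the K8 pattern */
               mci_ctl_write_ok(0x1234));                   /* 0 */
        return 0;
    }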
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index cffd754..ddef409 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
+
+ clean-files := inat-tables.c
+
+-obj-$(CONFIG_SMP) += msr-smp.o
++obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
+
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+@@ -39,4 +39,5 @@ else
+ lib-y += thunk_64.o clear_page_64.o copy_page_64.o
+ lib-y += memmove_64.o memset_64.o
+ lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
++ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+ endif
+diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
+new file mode 100644
+index 0000000..a3c6688
+--- /dev/null
++++ b/arch/x86/lib/cache-smp.c
+@@ -0,0 +1,19 @@
++#include <linux/smp.h>
++#include <linux/module.h>
++
++static void __wbinvd(void *dummy)
++{
++ wbinvd();
++}
++
++void wbinvd_on_cpu(int cpu)
++{
++ smp_call_function_single(cpu, __wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_cpu);
++
++int wbinvd_on_all_cpus(void)
++{
++ return on_each_cpu(__wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_all_cpus);
+diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
+new file mode 100644
+index 0000000..15acecf
+--- /dev/null
++++ b/arch/x86/lib/rwsem_64.S
+@@ -0,0 +1,81 @@
++/*
++ * x86-64 rwsem wrappers
++ *
++ * This interfaces the inline asm code to the slow-path
++ * C routines. We need to save the call-clobbered regs
++ * that the asm does not mark as clobbered, and move the
++ * argument from %rax to %rdi.
++ *
++ * NOTE! We don't need to save %rax, because the functions
++ * will always return the semaphore pointer in %rax (which
++ * is also the input argument to these helpers)
++ *
++ * The following can clobber %rdx because the asm clobbers it:
++ * call_rwsem_down_write_failed
++ * call_rwsem_wake
++ * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
++ */
++
++#include <linux/linkage.h>
++#include <asm/rwlock.h>
++#include <asm/alternative-asm.h>
++#include <asm/frame.h>
++#include <asm/dwarf2.h>
++
++#define save_common_regs \
++ pushq %rdi; \
++ pushq %rsi; \
++ pushq %rcx; \
++ pushq %r8; \
++ pushq %r9; \
++ pushq %r10; \
++ pushq %r11
++
++#define restore_common_regs \
++ popq %r11; \
++ popq %r10; \
++ popq %r9; \
++ popq %r8; \
++ popq %rcx; \
++ popq %rsi; \
++ popq %rdi
++
++/* Fix up special calling conventions */
++ENTRY(call_rwsem_down_read_failed)
++ save_common_regs
++ pushq %rdx
++ movq %rax,%rdi
++ call rwsem_down_read_failed
++ popq %rdx
++ restore_common_regs
++ ret
++ ENDPROC(call_rwsem_down_read_failed)
++
++ENTRY(call_rwsem_down_write_failed)
++ save_common_regs
++ movq %rax,%rdi
++ call rwsem_down_write_failed
++ restore_common_regs
++ ret
++ ENDPROC(call_rwsem_down_write_failed)
++
++ENTRY(call_rwsem_wake)
++ decw %dx /* do nothing if still outstanding active readers */
++ jnz 1f
++ save_common_regs
++ movq %rax,%rdi
++ call rwsem_wake
++ restore_common_regs
++1: ret
++ ENDPROC(call_rwsem_wake)
++
++/* Fix up special calling conventions */
++ENTRY(call_rwsem_downgrade_wake)
++ save_common_regs
++ pushq %rdx
++ movq %rax,%rdi
++ call rwsem_downgrade_wake
++ popq %rdx
++ restore_common_regs
++ ret
++ ENDPROC(call_rwsem_downgrade_wake)
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 0696d50..b02f6d8 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
+ case PCI_DEVICE_ID_INTEL_ICH10_1:
+ case PCI_DEVICE_ID_INTEL_ICH10_2:
+ case PCI_DEVICE_ID_INTEL_ICH10_3:
++ case PCI_DEVICE_ID_INTEL_CPT_LPC1:
++ case PCI_DEVICE_ID_INTEL_CPT_LPC2:
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
+index b641388..ad47dae 100644
+--- a/arch/x86/power/hibernate_asm_32.S
++++ b/arch/x86/power/hibernate_asm_32.S
+@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
+ ret
+
+ ENTRY(restore_image)
++ movl mmu_cr4_features, %ecx
+ movl resume_pg_dir, %eax
+ subl $__PAGE_OFFSET, %eax
+ movl %eax, %cr3
+
++ jecxz 1f # cr4 Pentium and higher, skip if zero
++ andl $~(X86_CR4_PGE), %ecx
++ movl %ecx, %cr4; # turn off PGE
++ movl %cr3, %eax; # flush TLB
++ movl %eax, %cr3
++1:
+ movl restore_pblist, %edx
+ .p2align 4,,7
+
+@@ -54,16 +61,8 @@ done:
+ movl $swapper_pg_dir, %eax
+ subl $__PAGE_OFFSET, %eax
+ movl %eax, %cr3
+- /* Flush TLB, including "global" things (vmalloc) */
+ movl mmu_cr4_features, %ecx
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+- movl %ecx, %edx
+- andl $~(X86_CR4_PGE), %edx
+- movl %edx, %cr4; # turn off PGE
+-1:
+- movl %cr3, %eax; # flush TLB
+- movl %eax, %cr3
+- jecxz 1f # cr4 Pentium and higher, skip if zero
+ movl %ecx, %cr4; # turn PGE back on
+ 1:
+
+diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
+index 52fec07..83b6252 100644
+--- a/drivers/acpi/acpica/exprep.c
++++ b/drivers/acpi/acpica/exprep.c
+@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+
+ acpi_ut_add_reference(obj_desc->field.region_obj);
+
++ /* allow full data read from EC address space */
++ if (obj_desc->field.region_obj->region.space_id ==
++ ACPI_ADR_SPACE_EC) {
++ if (obj_desc->common_field.bit_length > 8) {
++ unsigned width =
++ ACPI_ROUND_BITS_UP_TO_BYTES(
++ obj_desc->common_field.bit_length);
++ // access_bit_width is u8, don't overflow it
++ if (width > 8)
++ width = 8;
++ obj_desc->common_field.access_byte_width =
++ width;
++ obj_desc->common_field.access_bit_width =
++ 8 * width;
++ }
++ }
++
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
+ obj_desc->field.start_field_bit_offset,
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d6471bb..fc67d11 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -589,12 +589,12 @@ static u32 acpi_ec_gpe_handler(void *data)
+
+ static acpi_status
+ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+- u32 bits, acpi_integer *value,
++ u32 bits, acpi_integer *value64,
+ void *handler_context, void *region_context)
+ {
+ struct acpi_ec *ec = handler_context;
+- int result = 0, i;
+- u8 temp = 0;
++ int result = 0, i, bytes = bits / 8;
++ u8 *value = (u8 *)value64;
+
+ if ((address > 0xFF) || !value || !handler_context)
+ return AE_BAD_PARAMETER;
+@@ -602,32 +602,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+- if (bits != 8 && acpi_strict)
+- return AE_BAD_PARAMETER;
+-
+- if (EC_FLAGS_MSI)
++ if (EC_FLAGS_MSI || bits > 8)
+ acpi_ec_burst_enable(ec);
+
+- if (function == ACPI_READ) {
+- result = acpi_ec_read(ec, address, &temp);
+- *value = temp;
+- } else {
+- temp = 0xff & (*value);
+- result = acpi_ec_write(ec, address, temp);
+- }
+-
+- for (i = 8; unlikely(bits - i > 0); i += 8) {
+- ++address;
+- if (function == ACPI_READ) {
+- result = acpi_ec_read(ec, address, &temp);
+- (*value) |= ((acpi_integer)temp) << i;
+- } else {
+- temp = 0xff & ((*value) >> i);
+- result = acpi_ec_write(ec, address, temp);
+- }
+- }
++ for (i = 0; i < bytes; ++i, ++address, ++value)
++ result = (function == ACPI_READ) ?
++ acpi_ec_read(ec, address, value) :
++ acpi_ec_write(ec, address, *value);
+
+- if (EC_FLAGS_MSI)
++ if (EC_FLAGS_MSI || bits > 8)
+ acpi_ec_burst_disable(ec);
+
+ switch (result) {
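
This is the change called out in the changelog: the EC space handler now moves bits/8 bytes through the value buffer in one loop, entering burst mode for any multibyte transfer, which is what lets the wide temperature fields on the affected Dells be read in one go. Reduced to plain C as a sketch (ec_read/ec_write stand in for acpi_ec_read/acpi_ec_write; the byte-pointer cast matches x86's little-endian acpi_integer layout):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t ec_space[0x100];         /* fake EC address space */

    static int ec_read(uint8_t addr, uint8_t *v) { *v = ec_space[addr]; return 0; }
    static int ec_write(uint8_t addr, uint8_t v) { ec_space[addr] = v; return 0; }

    static int ec_space_handler(int is_read, uint8_t address,
                                uint32_t bits, uint64_t *value64)
    {
        uint8_t *value = (uint8_t *)value64;  /* walk the value buffer bytewise */
        int result = 0;

        for (uint32_t i = 0; i < bits / 8; ++i, ++address, ++value)
            result = is_read ? ec_read(address, value)
                             : ec_write(address, *value);
        return result;
    }

    int main(void)
    {
        uint64_t temp = 0;

        ec_space[0x40] = 0x10;                 /* low byte  */
        ec_space[0x41] = 0x27;                 /* high byte */
        ec_space_handler(1, 0x40, 16, &temp);  /* one 16-bit read */
        printf("0x%llx\n", (unsigned long long)temp);  /* 0x2710 on little-endian */
        return 0;
    }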
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 9e2feb6..462200d 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 6f3f225..b5f614b 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
++ /* SATA Controller IDE (CPT) */
++ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
++ /* SATA Controller IDE (CPT) */
++ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
++ /* SATA Controller IDE (CPT) */
++ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (CPT) */
++ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ { } /* terminate list */
+ };
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6728328..2401c9c 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
++ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
++
+ /* devices which puke on READ_NATIVE_MAX */
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
+index be7c395..ad64750 100644
+--- a/drivers/ata/pata_via.c
++++ b/drivers/ata/pata_via.c
+@@ -697,6 +697,7 @@ static const struct pci_device_id via[] = {
+ { PCI_VDEVICE(VIA, 0x3164), },
+ { PCI_VDEVICE(VIA, 0x5324), },
+ { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
++ { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
+
+ { },
+ };
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index 3999a5f..8a713f1 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pagemap.h>
+ #include <linux/agp_backend.h>
++#include <asm/smp.h>
+ #include "agp.h"
+
+ /*
+@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
+ intel_i830_fini_flush();
+ }
+
+-static void
+-do_wbinvd(void *null)
+-{
+- wbinvd();
+-}
+-
+ /* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+
+ memset(pg, 0, 1024);
+
+- if (cpu_has_clflush) {
++ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+- } else {
+- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+- }
++ else if (wbinvd_on_all_cpus() != 0)
++ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
+
+ /* The intel i830 automatically initializes the agp aperture during POST.
+diff --git a/drivers/char/raw.c b/drivers/char/raw.c
+index 64acd05..9abc3a1 100644
+--- a/drivers/char/raw.c
++++ b/drivers/char/raw.c
+@@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
+ .aio_read = generic_file_aio_read,
+ .write = do_sync_write,
+ .aio_write = blkdev_aio_write,
++ .fsync = block_fsync,
+ .open = raw_open,
+ .release= raw_release,
+ .ioctl = raw_ioctl,
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index dcb9083..76253cf 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
+ list_del_init(&tty->tty_files);
+ file_list_unlock();
+
++ put_pid(tty->pgrp);
++ put_pid(tty->session);
+ free_tty_struct(tty);
+ }
+
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 7d0f00a..99907c3 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("%s is disconnected\n",
+ drm_get_connector_name(connector));
++ drm_mode_connector_update_edid_property(connector, NULL);
+ goto prune;
+ }
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index ab6c973..bfd0e4a 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -85,6 +85,8 @@ static struct edid_quirk {
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
++ /* Envision EN2028 */
++ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+@@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+- /* perform the basic check for the detailed timing */
+- if (mode->hsync_end > mode->htotal ||
+- mode->vsync_end > mode->vtotal) {
+- drm_mode_destroy(dev, mode);
+- DRM_DEBUG_KMS("Incorrect detailed timing. "
+- "Sync is beyond the blank.\n");
+- return NULL;
+- }
+-
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 08d14df..4804872 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
+ spin_unlock(&dev->count_lock);
+ }
+ out:
+- mutex_lock(&dev->struct_mutex);
+- if (minor->type == DRM_MINOR_LEGACY) {
+- BUG_ON((dev->dev_mapping != NULL) &&
+- (dev->dev_mapping != inode->i_mapping));
+- if (dev->dev_mapping == NULL)
+- dev->dev_mapping = inode->i_mapping;
++ if (!retcode) {
++ mutex_lock(&dev->struct_mutex);
++ if (minor->type == DRM_MINOR_LEGACY) {
++ if (dev->dev_mapping == NULL)
++ dev->dev_mapping = inode->i_mapping;
++ else if (dev->dev_mapping != inode->i_mapping)
++ retcode = -ENODEV;
++ }
++ mutex_unlock(&dev->struct_mutex);
+ }
+- mutex_unlock(&dev->struct_mutex);
+
+ return retcode;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 93031a7..1238bc9 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Clientron U800",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index d75788f..b1f929d 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ /* op needs the full dst value */
++ dst = saved;
+ shift = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
++ dst &= atom_arg_mask[dst_align];
++ dst >>= atom_arg_shift[dst_align];
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+ }
+@@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ /* op needs the full dst value */
++ dst = saved;
+ shift = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
++ dst &= atom_arg_mask[dst_align];
++ dst >>= atom_arg_shift[dst_align];
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+ }
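What the two atom.c hunks above fix: a shift on a sub-dword destination must see the whole saved dword, so that bits adjacent to the field shift in and out correctly, and the result is then re-extracted with the field's mask and alignment. A worked example with illustrative values (the driver's real atom_arg_mask/atom_arg_shift tables are indexed by dst_align):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t saved = 0x000012FF;    /* full register value */
            uint32_t mask  = 0x00FFFF00;    /* destination field: bits 8..23 */
            int      align = 8;             /* field offset within the dword */
            int      shift = 4;

            /* old behaviour: shift only the extracted field */
            uint32_t field = (saved & mask) >> align;   /* 0x12 */
            uint32_t buggy = (field << shift) & 0xFFFF; /* 0x120 */

            /* new behaviour: shift the full dword, then re-extract */
            uint32_t dst = saved << shift;              /* 0x00012FF0 */
            dst &= mask;                                /* 0x00012F00 */
            dst >>= align;                              /* 0x12F */

            printf("buggy=%#x fixed=%#x\n", buggy, dst);
            return 0;
    }

The low byte's top nibble shifts into the field in the fixed version (0x12f), which the old code lost (0x120).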
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 43b55a0..5bdfaf2 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev)
+
+ r100_hdp_reset(rdev);
+ /* FIXME: rv380 one pipes ? */
+- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
++ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
++ (rdev->family == CHIP_R350)) {
+ /* r300,r350 */
+ rdev->num_gb_pipes = 2;
+ } else {
+- /* rv350,rv370,rv380 */
++ /* rv350,rv370,rv380,r300 AD */
+ rdev->num_gb_pipes = 1;
+ }
+ rdev->num_z_pipes = 1;
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index e7b1944..81b832e 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+- found = 1;
++ /* if the values are all zeros, use the table */
++ if (p_dac->ps2_pdac_adj)
++ found = 1;
+ }
+
+ out:
+@@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ bg = RBIOS8(dac_info + 0x10) & 0xf;
+ dac = RBIOS8(dac_info + 0x11) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+- found = 1;
++ /* if the values are all zeros, use the table */
++ if (tv_dac->ps2_tvdac_adj)
++ found = 1;
+ } else if (rev > 1) {
+ bg = RBIOS8(dac_info + 0xc) & 0xf;
+ dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+@@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ bg = RBIOS8(dac_info + 0xe) & 0xf;
+ dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+- found = 1;
++ /* if the values are all zeros, use the table */
++ if (tv_dac->ps2_tvdac_adj)
++ found = 1;
+ }
+ tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+ }
+@@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+- found = 1;
++ /* if the values are all zeros, use the table */
++ if (tv_dac->ps2_tvdac_adj)
++ found = 1;
+ } else {
+ bg = RBIOS8(dac_info + 0x4) & 0xf;
+ dac = RBIOS8(dac_info + 0x5) & 0xf;
+@@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+- found = 1;
++ /* if the values are all zeros, use the table */
++ if (tv_dac->ps2_tvdac_adj)
++ found = 1;
+ }
+ } else {
+ DRM_INFO("No TV DAC info found in BIOS\n");
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 65f8194..2bdfbcd 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+ {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *conflict;
++ struct radeon_connector *radeon_conflict;
+ int i;
+
+ list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+ if (conflict == connector)
+ continue;
+
++ radeon_conflict = to_radeon_connector(conflict);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (conflict->encoder_ids[i] == 0)
+ break;
+@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+ if (conflict->status != connector_status_connected)
+ continue;
+
++ if (radeon_conflict->use_digital)
++ continue;
++
+ if (priority == true) {
+ DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+ DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+@@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (!radeon_encoder->enc_priv)
+ return 0;
+- if (rdev->is_atom_bios) {
++ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+ struct radeon_encoder_atom_dac *dac_int;
+ dac_int = radeon_encoder->enc_priv;
+ dac_int->tv_std = val;
+diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
+index 06123ba..f129bbb 100644
+--- a/drivers/gpu/drm/radeon/radeon_cp.c
++++ b/drivers/gpu/drm/radeon/radeon_cp.c
+@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
+ return -EBUSY;
+ }
+
+-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
++static void radeon_init_pipes(struct drm_device *dev)
+ {
++ drm_radeon_private_t *dev_priv = dev->dev_private;
+ uint32_t gb_tile_config, gb_pipe_sel = 0;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
+@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
+ dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+ } else {
+ /* R3xx */
+- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
++ dev->pdev->device != 0x4144) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+ dev_priv->num_gb_pipes = 2;
+ } else {
+- /* R3Vxx */
++ /* RV3xx/R300 AD */
+ dev_priv->num_gb_pipes = 1;
+ }
+ }
+@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
+
+ /* setup the raster pipes */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
+- radeon_init_pipes(dev_priv);
++ radeon_init_pipes(dev);
+
+ /* Reset the CP ring */
+ radeon_do_cp_reset(dev_priv);
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index e9d0850..9933c2c 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+ radeon_bo_list_fence(&parser->validated, parser->ib->fence);
+ }
+ radeon_bo_list_unreserve(&parser->validated);
+- for (i = 0; i < parser->nrelocs; i++) {
+- if (parser->relocs[i].gobj) {
+- mutex_lock(&parser->rdev->ddev->struct_mutex);
+- drm_gem_object_unreference(parser->relocs[i].gobj);
+- mutex_unlock(&parser->rdev->ddev->struct_mutex);
++ if (parser->relocs != NULL) {
++ for (i = 0; i < parser->nrelocs; i++) {
++ if (parser->relocs[i].gobj) {
++ mutex_lock(&parser->rdev->ddev->struct_mutex);
++ drm_gem_object_unreference(parser->relocs[i].gobj);
++ mutex_unlock(&parser->rdev->ddev->struct_mutex);
++ }
+ }
+ }
+ kfree(parser->track);
+@@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ }
+ r = radeon_cs_parser_relocs(&parser);
+ if (r) {
+- DRM_ERROR("Failed to parse relocation !\n");
++ if (r != -ERESTARTSYS)
++ DRM_ERROR("Failed to parse relocation %d!\n", r);
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 768b150..509ba3f 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev,
+ return r;
+ radeon_check_arguments(rdev);
+
++ /* All of the newer IGP chips have an internal gart.
++ * However, some rs4xx report as AGP, so remove that here.
++ */
++ if ((rdev->family >= CHIP_RS400) &&
++ (rdev->flags & RADEON_IS_IGP)) {
++ rdev->flags &= ~RADEON_IS_AGP;
++ }
++
+ if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+ radeon_agp_disable(rdev);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 3c91724..7626bd5 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+- atombios_tv_setup(encoder, ATOM_ENABLE);
++ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
++ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
++ atombios_tv_setup(encoder, ATOM_ENABLE);
++ else
++ atombios_tv_setup(encoder, ATOM_DISABLE);
++ }
+ break;
+ }
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+index 417684d..f2ed27c 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+@@ -57,6 +57,10 @@
+ #define NTSC_TV_PLL_N_14 693
+ #define NTSC_TV_PLL_P_14 7
+
++#define PAL_TV_PLL_M_14 19
++#define PAL_TV_PLL_N_14 353
++#define PAL_TV_PLL_P_14 5
++
+ #define VERT_LEAD_IN_LINES 2
+ #define FRAC_BITS 0xe
+ #define FRAC_MASK 0x3fff
+@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
+ 630627, /* defRestart */
+ 347, /* crtcPLL_N */
+ 14, /* crtcPLL_M */
+- 8, /* crtcPLL_postDiv */
++ 8, /* crtcPLL_postDiv */
+ 1022, /* pixToTV */
+ },
++ { /* PAL timing for 14 MHz ref clk */
++ 800, /* horResolution */
++ 600, /* verResolution */
++ TV_STD_PAL, /* standard */
++ 1131, /* horTotal */
++ 742, /* verTotal */
++ 813, /* horStart */
++ 840, /* horSyncStart */
++ 633, /* verSyncStart */
++ 708369, /* defRestart */
++ 211, /* crtcPLL_N */
++ 9, /* crtcPLL_M */
++ 8, /* crtcPLL_postDiv */
++ 759, /* pixToTV */
++ },
+ };
+
+ #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
+@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
+ if (pll->reference_freq == 2700)
+ const_ptr = &available_tv_modes[1];
+ else
+- const_ptr = &available_tv_modes[1]; /* FIX ME */
++ const_ptr = &available_tv_modes[3];
+ }
+ return const_ptr;
+ }
+@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+ n = PAL_TV_PLL_N_27;
+ p = PAL_TV_PLL_P_27;
+ } else {
+- m = PAL_TV_PLL_M_27;
+- n = PAL_TV_PLL_N_27;
+- p = PAL_TV_PLL_P_27;
++ m = PAL_TV_PLL_M_14;
++ n = PAL_TV_PLL_N_14;
++ p = PAL_TV_PLL_P_14;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index c381856..a27c09f 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
++ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
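The one-character rs600.c fix above matters because the two flag macros select disjoint bits: AND-ing them always yields 0, so neither invalidate bit was ever set. A two-line demonstration with illustrative bit positions (the real register layout differs):

    #include <stdio.h>

    #define INVALIDATE_ALL_L1_TLBS(x) ((x) << 0)    /* illustrative bit 0 */
    #define INVALIDATE_L2_CACHE(x)    ((x) << 1)    /* illustrative bit 1 */

    int main(void)
    {
            printf("&: %#x   |: %#x\n",
                   INVALIDATE_ALL_L1_TLBS(1) & INVALIDATE_L2_CACHE(1),   /* 0   */
                   INVALIDATE_ALL_L1_TLBS(1) | INVALIDATE_L2_CACHE(1));  /* 0x3 */
            return 0;
    }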
+diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
+index cab13e8..62416e6 100644
+--- a/drivers/hid/hid-gyration.c
++++ b/drivers/hid/hid-gyration.c
+@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+ {
+- struct input_dev *input = field->hidinput->input;
++
++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
++ return 0;
+
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
+ (usage->hid & 0xff) == 0x82) {
++ struct input_dev *input = field->hidinput->input;
+ input_event(input, usage->type, usage->code, 1);
+ input_sync(input);
+ input_event(input, usage->type, usage->code, 0);
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index 864a371..fbc997e 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -302,13 +302,13 @@ error_ret:
+ **/
+ static inline int sht15_calc_temp(struct sht15_data *data)
+ {
+- int d1 = 0;
++ int d1 = temppoints[0].d1;
+ int i;
+
+- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
++ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
+ /* Find pointer to interpolate */
+ if (data->supply_uV > temppoints[i - 1].vdd) {
+- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
++ d1 = (data->supply_uV - temppoints[i - 1].vdd)
+ * (temppoints[i].d1 - temppoints[i - 1].d1)
+ / (temppoints[i].vdd - temppoints[i - 1].vdd)
+ + temppoints[i - 1].d1;
+@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
+ /* If a regulator is available, query what the supply voltage actually is!*/
+ data->reg = regulator_get(data->dev, "vcc");
+ if (!IS_ERR(data->reg)) {
+- data->supply_uV = regulator_get_voltage(data->reg);
++ int voltage;
++
++ voltage = regulator_get_voltage(data->reg);
++ if (voltage)
++ data->supply_uV = voltage;
++
+ regulator_enable(data->reg);
+ /* setup a notifier block to update this if another device
+ * causes the voltage to change */
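The sht15 hunks above fix the temperature-offset lookup in several ways: d1 now defaults to the first table entry instead of 0, the table is walked from the top so the right interpolation segment is picked, the supply voltage is kept in consistent units, and a zero reading from the regulator no longer clobbers the default. A stand-alone model of the corrected lookup (calibration values are made up, not the driver's table):

    #include <stdio.h>

    struct v_point { int vdd; int d1; };    /* supply voltage, offset */

    static const struct v_point temppoints[] = {
            { 2500, -3940 }, { 3000, -3960 }, { 3500, -3970 },
            { 4000, -3980 }, { 5000, -4010 },
    };

    #define N_POINTS (sizeof(temppoints) / sizeof(temppoints[0]))

    static int calc_d1(int supply)
    {
            int d1 = temppoints[0].d1;      /* sane default below the table */
            int i;

            for (i = N_POINTS - 1; i > 0; i--)
                    if (supply > temppoints[i - 1].vdd) {
                            d1 = (supply - temppoints[i - 1].vdd)
                               * (temppoints[i].d1 - temppoints[i - 1].d1)
                               / (temppoints[i].vdd - temppoints[i - 1].vdd)
                               + temppoints[i - 1].d1;
                            break;
                    }
            return d1;
    }

    int main(void)
    {
            printf("d1(3250) = %d\n", calc_d1(3250));   /* halfway: -3965 */
            return 0;
    }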
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 5f318ce..cb9f95c 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -77,7 +77,7 @@ config I2C_AMD8111
+ will be called i2c-amd8111.
+
+ config I2C_I801
+- tristate "Intel 82801 (ICH)"
++ tristate "Intel 82801 (ICH/PCH)"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the Intel
+@@ -97,7 +97,8 @@ config I2C_I801
+ ICH9
+ Tolapai
+ ICH10
+- PCH
++ 3400/5 Series (PCH)
++ Cougar Point (PCH)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 5574be2..e361da7 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -41,7 +41,8 @@
+ Tolapai 0x5032 32 hard yes yes yes
+ ICH10 0x3a30 32 hard yes yes yes
+ ICH10 0x3a60 32 hard yes yes yes
+- PCH 0x3b30 32 hard yes yes yes
++ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
++ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
+
+ Features supported by this driver:
+ Software PEC no
+@@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
+ { 0, }
+ };
+
+@@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
+ case PCI_DEVICE_ID_INTEL_ICH10_4:
+ case PCI_DEVICE_ID_INTEL_ICH10_5:
+ case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
++ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+ i801_features |= FEATURE_I2C_BLOCK_READ;
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_82801DB_3:
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 30bdf42..f8302c2 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
+ if (++priv->tx_outstanding == ipoib_sendq_size) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
++ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
++ ipoib_warn(priv, "request notify on send CQ failed\n");
+ netif_stop_queue(dev);
+ }
+ }
+diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
+index fbd3987..e8d65b3 100644
+--- a/drivers/input/sparse-keymap.c
++++ b/drivers/input/sparse-keymap.c
+@@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev,
+ return 0;
+
+ err_out:
+- kfree(keymap);
++ kfree(map);
+ return error;
+
+ }
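The one-line sparse-keymap fix above corrects a wrong-pointer kfree(): the error path must release map, the copy the function allocated, rather than keymap, the caller's table. A minimal illustration of that ownership rule; names and the failure condition are made up:

    #include <stdlib.h>

    /* The function owns 'map', its private copy; 'keymap' remains the
     * caller's property and must never be freed here. */
    static int setup_copy(const int *keymap, size_t n, int **out)
    {
            size_t i;
            int *map = malloc(n * sizeof(*map));

            if (!map)
                    return -1;

            for (i = 0; i < n; i++) {
                    if (keymap[i] < 0) {    /* stand-in for a bad entry */
                            free(map);      /* free our copy, not 'keymap' */
                            return -1;
                    }
                    map[i] = keymap[i];
            }

            *out = map;
            return 0;
    }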
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index 072f33b..e53ddc5 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf)
+ int rv;
+
+ mutex_lock(&wacom->lock);
+- if (wacom->open) {
++
++ /* switch to wacom mode first */
++ wacom_query_tablet_data(intf, features);
++
++ if (wacom->open)
+ rv = usb_submit_urb(wacom->irq, GFP_NOIO);
+- /* switch to wacom mode if needed */
+- if (!wacom_retrieve_hid_descriptor(intf, features))
+- wacom_query_tablet_data(intf, features);
+- } else
++ else
+ rv = 0;
++
+ mutex_unlock(&wacom->lock);
+
+ return rv;
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index e3cf568..d7500e1 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -285,7 +285,8 @@ retry:
+ up_write(&_hash_lock);
+ }
+
+-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
++static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
++ const char *new)
+ {
+ char *new_name, *old_name;
+ struct hash_cell *hc;
+@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
+ dm_table_put(table);
+ }
+
+- dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
++ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
++ *flags |= DM_UEVENT_GENERATED_FLAG;
+
+ dm_put(hc->md);
+ up_write(&_hash_lock);
+@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
+ __hash_remove(hc);
+ up_write(&_hash_lock);
+
+- dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
++ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
++ param->flags |= DM_UEVENT_GENERATED_FLAG;
+
+ dm_put(md);
+- param->data_size = 0;
+ return 0;
+ }
+
+@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
+ return r;
+
+ param->data_size = 0;
+- return dm_hash_rename(param->event_nr, param->name, new_name);
++
++ return dm_hash_rename(param->event_nr, &param->flags, param->name,
++ new_name);
+ }
+
+ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
+@@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *param)
+
+ if (dm_suspended_md(md)) {
+ r = dm_resume(md);
+- if (!r)
+- dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
++ if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
++ param->flags |= DM_UEVENT_GENERATED_FLAG;
+ }
+
+ if (old_map)
+@@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+ {
+ /* Always clear this flag */
+ param->flags &= ~DM_BUFFER_FULL_FLAG;
++ param->flags &= ~DM_UEVENT_GENERATED_FLAG;
+
+ /* Ignores parameters */
+ if (cmd == DM_REMOVE_ALL_CMD ||
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index fa786b9..fe8889e 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2618,18 +2618,19 @@ out:
+ /*-----------------------------------------------------------------
+ * Event notification.
+ *---------------------------------------------------------------*/
+-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie)
+ {
+ char udev_cookie[DM_COOKIE_LENGTH];
+ char *envp[] = { udev_cookie, NULL };
+
+ if (!cookie)
+- kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
++ return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ else {
+ snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ DM_COOKIE_ENV_VAR_NAME, cookie);
+- kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
++ return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
++ action, envp);
+ }
+ }
+
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index 8dadaa5..bad1724 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
+ int dm_open_count(struct mapped_device *md);
+ int dm_lock_for_deletion(struct mapped_device *md);
+
+-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+- unsigned cookie);
++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
++ unsigned cookie);
+
+ int dm_io_init(void);
+ void dm_io_exit(void);
+diff --git a/drivers/md/linear.c b/drivers/md/linear.c
+index 00435bd..001317b 100644
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit max_phys_segments to 1 lying within
++ * a single page.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ conf->array_sectors += rdev->sectors;
+ cnt++;
+diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
+index 32a662f..f9ee99f 100644
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
+ rdev->data_offset << 9);
+
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit ->max_phys_segments to one, lying
++ * within a single page.
+ * (Note: it is very unlikely that a device with
+ * merge_bvec_fn will be involved in multipath.)
+ */
+- if (q->merge_bvec_fn &&
+- queue_max_sectors(q) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (q->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ conf->working_disks++;
+ mddev->degraded--;
+@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
+ /* as we don't honour merge_bvec_fn, we must never risk
+ * violating it, not that we ever expect a device with
+ * a merge_bvec_fn to be involved in multipath */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ if (!test_bit(Faulty, &rdev->flags))
+ conf->working_disks++;
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 77605cd..41ee9de 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit ->max_phys_segments to 1, lying within
++ * a single page.
+ */
+
+- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+-
++ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+ if (!smallest || (rdev1->sectors < smallest->sectors))
+ smallest = rdev1;
+ cnt++;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index d119b7b..047c468 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
+
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+- /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ /* as we don't honour merge_bvec_fn, we must
++ * never risk violating it, so limit
++ * ->max_phys_segments to one lying within a single
++ * page, as a one page request is never in
++ * violation.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ p->head_position = 0;
+ rdev->raid_disk = mirror;
+@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit max_phys_segments to 1 lying
++ * within a single page.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ disk->head_position = 0;
+ }
+diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
+index 57f149b..4d353d2 100644
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -660,6 +660,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+ i = 0;
+ }
+
++ if (i == tx_ring->next_to_use)
++ break;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
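The check added above bounds the e1000e TX cleanup loop: once the clean index reaches next_to_use there is nothing more the hardware can have completed, and continuing past it (e.g. on a corrupted next_to_watch chain) would touch descriptors still owned by the producer. A toy ring model of the guard, with illustrative size and indices:

    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
            int next_to_clean = 5;          /* consumer: first entry to clean    */
            int next_to_use   = 2;          /* producer: first entry still owned */
            int i = next_to_clean, cleaned = 0;

            for (;;) {
                    cleaned++;              /* "clean" entry i */
                    i = (i + 1) % RING_SIZE;
                    if (i == next_to_use)   /* caught up with the producer: stop */
                            break;
            }
            printf("cleaned %d of %d entries\n", cleaned, RING_SIZE);
            return 0;
    }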
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 67d414b..3db85da 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -3255,8 +3255,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
+ unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (max_frame != 16383)
+- printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
+- "May lead to frame reception errors!\n");
++ printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
++ "NIC may lead to frame reception errors!\n");
+
+ tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
+ }
+diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
+index e0799d9..0387658 100644
+--- a/drivers/net/wireless/ath/ar9170/usb.c
++++ b/drivers/net/wireless/ath/ar9170/usb.c
+@@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
+ spin_unlock_irqrestore(&aru->common.cmdlock, flags);
+
+ usb_fill_int_urb(urb, aru->udev,
+- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
++ usb_sndintpipe(aru->udev, AR9170_EP_CMD),
+ aru->common.cmdbuf, plen + 4,
+ ar9170_usb_tx_urb_complete, NULL, 1);
+
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 33a1071..7b1eab4 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2721,8 +2721,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ all_wiphys_idle = ath9k_all_wiphys_idle(sc);
+ ath9k_set_wiphy_idle(aphy, idle);
+
+- if (!idle && all_wiphys_idle)
+- enable_radio = true;
++ enable_radio = (!idle && all_wiphys_idle);
+
+ /*
+ * After we unlock here its possible another wiphy
+diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
+index 64c12e1..0a00d42 100644
+--- a/drivers/net/wireless/b43/Kconfig
++++ b/drivers/net/wireless/b43/Kconfig
+@@ -78,11 +78,11 @@ config B43_SDIO
+
+ If unsure, say N.
+
+-# Data transfers to the device via PIO
+-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
++#Data transfers to the device via PIO. We want it as a fallback even
++# if we can do DMA.
+ config B43_PIO
+ bool
+- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
++ depends on B43
+ select SSB_BLOCKIO
+ default y
+
+diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
+index 84772a2..5e83b6f 100644
+--- a/drivers/net/wireless/b43/Makefile
++++ b/drivers/net/wireless/b43/Makefile
+@@ -12,7 +12,7 @@ b43-y += xmit.o
+ b43-y += lo.o
+ b43-y += wa.o
+ b43-y += dma.o
+-b43-$(CONFIG_B43_PIO) += pio.o
++b43-y += pio.o
+ b43-y += rfkill.o
+ b43-$(CONFIG_B43_LEDS) += leds.o
+ b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
+diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
+index c484cc2..7df822e 100644
+--- a/drivers/net/wireless/b43/b43.h
++++ b/drivers/net/wireless/b43/b43.h
+@@ -694,6 +694,7 @@ struct b43_wldev {
+ bool radio_hw_enable; /* saved state of radio hardware enabled state */
+ bool qos_enabled; /* TRUE, if QoS is used. */
+ bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
++ bool use_pio; /* TRUE if next init should use PIO */
+
+ /* PHY/Radio device. */
+ struct b43_phy phy;
+@@ -822,11 +823,9 @@ struct b43_wl {
+ /* The device LEDs. */
+ struct b43_leds leds;
+
+-#ifdef CONFIG_B43_PIO
+ /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
+ u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
+ u8 pio_tailspace[4] __attribute__((__aligned__(8)));
+-#endif /* CONFIG_B43_PIO */
+ };
+
+ static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
+@@ -877,20 +876,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
+
+ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
+ {
+-#ifdef CONFIG_B43_PIO
+ return dev->__using_pio_transfers;
+-#else
+- return 0;
+-#endif
+ }
+
+ #ifdef CONFIG_B43_FORCE_PIO
+-# define B43_FORCE_PIO 1
++# define B43_PIO_DEFAULT 1
+ #else
+-# define B43_FORCE_PIO 0
++# define B43_PIO_DEFAULT 0
+ #endif
+
+-
+ /* Message printing */
+ void b43info(struct b43_wl *wl, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 88d1fd0..615af22 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
+ b43_power_saving_ctl_bits(dev, 0);
+ }
+
+-#ifdef CONFIG_B43_PIO
+ static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
+ u16 mmio_base, bool enable)
+ {
+@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+ mmio_base = b43_dmacontroller_base(type, engine_index);
+ direct_fifo_rx(dev, type, mmio_base, enable);
+ }
+-#endif /* CONFIG_B43_PIO */
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 629c166..9eb4f5e 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
+ module_param_named(verbose, b43_modparam_verbose, int, 0644);
+ MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+
++int b43_modparam_pio = B43_PIO_DEFAULT;
++module_param_named(pio, b43_modparam_pio, int, 0644);
++MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
+
+ static const struct ssb_device_id b43_ssb_tbl[] = {
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
+@@ -1790,8 +1793,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ dma_reason[4], dma_reason[5]);
+ b43err(dev->wl, "This device does not support DMA "
+ "on your system. Please use PIO instead.\n");
+- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
+- "your kernel configuration.\n");
++ /* Fall back to PIO transfers if we get fatal DMA errors! */
++ dev->use_pio = 1;
++ b43_controller_restart(dev, "DMA error");
+ return;
+ }
+ if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+@@ -4358,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
+
+ if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
+ (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
+- B43_FORCE_PIO) {
++ dev->use_pio) {
+ dev->__using_pio_transfers = 1;
+ err = b43_pio_init(dev);
+ } else {
+@@ -4826,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
+ if (!wldev)
+ goto out;
+
++ wldev->use_pio = b43_modparam_pio;
+ wldev->dev = dev;
+ wldev->wl = wl;
+ b43_set_status(wldev, B43_STAT_UNINIT);
+diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
+index 7dd649c..7b3c42f 100644
+--- a/drivers/net/wireless/b43/pio.h
++++ b/drivers/net/wireless/b43/pio.h
+@@ -55,8 +55,6 @@
+ #define B43_PIO_MAX_NR_TXPACKETS 32
+
+
+-#ifdef CONFIG_B43_PIO
+-
+ struct b43_pio_txpacket {
+ /* Pointer to the TX queue we belong to. */
+ struct b43_pio_txqueue *queue;
+@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
+ void b43_pio_tx_suspend(struct b43_wldev *dev);
+ void b43_pio_tx_resume(struct b43_wldev *dev);
+
+-
+-#else /* CONFIG_B43_PIO */
+-
+-
+-static inline int b43_pio_init(struct b43_wldev *dev)
+-{
+- return 0;
+-}
+-static inline void b43_pio_free(struct b43_wldev *dev)
+-{
+-}
+-static inline void b43_pio_stop(struct b43_wldev *dev)
+-{
+-}
+-static inline int b43_pio_tx(struct b43_wldev *dev,
+- struct sk_buff *skb)
+-{
+- return 0;
+-}
+-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
+- const struct b43_txstatus *status)
+-{
+-}
+-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
+- struct ieee80211_tx_queue_stats *stats)
+-{
+-}
+-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
+-{
+-}
+-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
+-{
+-}
+-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
+-{
+-}
+-
+-#endif /* CONFIG_B43_PIO */
+ #endif /* B43_PIO_H_ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
+index 3146281..3b4c5a4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
+@@ -581,6 +581,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
+
+ iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+
++ /* reset to 0 to enable all the queue first */
++ priv->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+@@ -2008,7 +2010,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
+ IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
+ "%d index %d\n", scd_ssn , index);
+ freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
++ if (qc)
++ iwl_free_tfds_in_queue(priv, sta_id,
++ tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+@@ -2035,13 +2039,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
+
+ freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ if (qc && likely(sta_id != IWL_INVALID_STATION))
+- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
++ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
++ else if (sta_id == IWL_INVALID_STATION)
++ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_wake_queue(priv, txq_id);
+ }
+-
+ if (qc && likely(sta_id != IWL_INVALID_STATION))
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
+index cffaae7..c610e5f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -657,6 +657,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
+
+ iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+
++ /* reset to 0 to enable all the queue first */
++ priv->txq_ctx_active_msk = 0;
+ /* map qos queues to fifos one-to-one */
+ for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
+ int ac = iwl5000_default_queue_to_tx_fifo[i];
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 1c9866d..5622a55 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -2461,7 +2461,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
++ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
+index fa1c89b..8f1b850 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
+@@ -404,21 +404,6 @@ EXPORT_SYMBOL(iwl_init_scan_params);
+
+ static int iwl_scan_initiate(struct iwl_priv *priv)
+ {
+- if (!iwl_is_ready_rf(priv)) {
+- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
+- return -EIO;
+- }
+-
+- if (test_bit(STATUS_SCANNING, &priv->status)) {
+- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+- return -EAGAIN;
+- }
+-
+- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+- return -EAGAIN;
+- }
+-
+ IWL_DEBUG_INFO(priv, "Starting scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->scan_start = jiffies;
+@@ -449,6 +434,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+ goto out_unlock;
+ }
+
++ if (test_bit(STATUS_SCANNING, &priv->status)) {
++ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
++ ret = -EAGAIN;
++ goto out_unlock;
++ }
++
++ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
++ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
++ ret = -EAGAIN;
++ goto out_unlock;
++ }
++
+ /* We don't schedule scan within next_scan_jiffies period.
+ * Avoid scanning during possible EAPOL exchange, return
+ * success immediately.
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index f297865..adbb3ea 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -1926,7 +1926,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
+ {
+ int i;
+
+- for (i = 0; i < IWL_RATE_COUNT; i++) {
++ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = iwl3945_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on indexes */
+ rates[i].hw_value_short = i;
+@@ -3903,7 +3903,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
++ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 3245d33..c4fead1 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2612,6 +2612,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+ return 0;
+ }
+
++/* Some architectures require additional programming to enable VGA */
++static arch_set_vga_state_t arch_set_vga_state;
++
++void __init pci_register_set_vga_state(arch_set_vga_state_t func)
++{
++ arch_set_vga_state = func; /* NULL disables */
++}
++
++static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
++ unsigned int command_bits, bool change_bridge)
++{
++ if (arch_set_vga_state)
++ return arch_set_vga_state(dev, decode, command_bits,
++ change_bridge);
++ return 0;
++}
++
+ /**
+ * pci_set_vga_state - set VGA decode state on device and parents if requested
+ * @dev: the PCI device
+@@ -2625,9 +2642,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
++ int rc;
+
+ WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+
++ /* ARCH specific VGA enables */
++ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
++ if (rc)
++ return rc;
++
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (decode == true)
+ cmd |= command_bits;
+@@ -2874,4 +2897,3 @@ EXPORT_SYMBOL(pci_target_state);
+ EXPORT_SYMBOL(pci_prepare_to_sleep);
+ EXPORT_SYMBOL(pci_back_from_sleep);
+ EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+-
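The pci.c hunk above adds a minimal arch-override hook: one function pointer, registered once at init, consulted before the generic VGA-routing code runs. A compact model of the pattern (the signature is simplified; the real hook also takes the pci_dev, the command bits and the bridge flag):

    #include <stdio.h>

    typedef int (*set_vga_state_t)(int decode);

    static set_vga_state_t arch_set_vga_state;      /* NULL: no arch quirk */

    static void register_set_vga_state(set_vga_state_t fn)
    {
            arch_set_vga_state = fn;                /* NULL disables */
    }

    static int set_vga_state(int decode)
    {
            if (arch_set_vga_state) {
                    int rc = arch_set_vga_state(decode);
                    if (rc)
                            return rc;              /* arch failure short-circuits */
            }
            /* ... generic PCI command/bridge programming would follow ... */
            return 0;
    }

    static int example_quirk(int decode)
    {
            printf("arch hook called, decode=%d\n", decode);
            return 0;
    }

    int main(void)
    {
            register_set_vga_state(example_quirk);
            return set_vga_state(1);
    }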
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index c28a712..e6b67f2 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -3027,14 +3027,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ session->state = ISCSI_STATE_TERMINATE;
+ else if (conn->stop_stage != STOP_CONN_RECOVER)
+ session->state = ISCSI_STATE_IN_RECOVERY;
++
++ old_stop_stage = conn->stop_stage;
++ conn->stop_stage = flag;
+ spin_unlock_bh(&session->lock);
+
+ del_timer_sync(&conn->transport_timer);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->lock);
+- old_stop_stage = conn->stop_stage;
+- conn->stop_stage = flag;
+ conn->c_stage = ISCSI_CONN_STOPPED;
+ spin_unlock_bh(&session->lock);
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 34d4eb9..db6b071 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
+ {
+ wb->use = 0;
+ acm->transmitting--;
++ usb_autopm_put_interface_async(acm->control);
+ }
+
+ /*
+@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
+ }
+
+ dbg("%s susp_count: %d", __func__, acm->susp_count);
++ usb_autopm_get_interface_async(acm->control);
+ if (acm->susp_count) {
+- acm->delayed_wb = wb;
+- schedule_work(&acm->waker);
++ if (!acm->delayed_wb)
++ acm->delayed_wb = wb;
++ else
++ usb_autopm_put_interface_async(acm->control);
+ spin_unlock_irqrestore(&acm->write_lock, flags);
+ return 0; /* A white lie */
+ }
+@@ -534,23 +538,6 @@ static void acm_softint(struct work_struct *work)
+ tty_kref_put(tty);
+ }
+
+-static void acm_waker(struct work_struct *waker)
+-{
+- struct acm *acm = container_of(waker, struct acm, waker);
+- int rv;
+-
+- rv = usb_autopm_get_interface(acm->control);
+- if (rv < 0) {
+- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
+- return;
+- }
+- if (acm->delayed_wb) {
+- acm_start_wb(acm, acm->delayed_wb);
+- acm->delayed_wb = NULL;
+- }
+- usb_autopm_put_interface(acm->control);
+-}
+-
+ /*
+ * TTY handlers
+ */
+@@ -1178,7 +1165,6 @@ made_compressed_probe:
+ acm->urb_task.func = acm_rx_tasklet;
+ acm->urb_task.data = (unsigned long) acm;
+ INIT_WORK(&acm->work, acm_softint);
+- INIT_WORK(&acm->waker, acm_waker);
+ init_waitqueue_head(&acm->drain_wait);
+ spin_lock_init(&acm->throttle_lock);
+ spin_lock_init(&acm->write_lock);
+@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
+ tasklet_enable(&acm->urb_task);
+
+ cancel_work_sync(&acm->work);
+- cancel_work_sync(&acm->waker);
+ }
+
+ static void acm_disconnect(struct usb_interface *intf)
+@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ static int acm_resume(struct usb_interface *intf)
+ {
+ struct acm *acm = usb_get_intfdata(intf);
++ struct acm_wb *wb;
+ int rv = 0;
+ int cnt;
+
+@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
+ mutex_lock(&acm->mutex);
+ if (acm->port.count) {
+ rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
++
++ spin_lock_irq(&acm->write_lock);
++ if (acm->delayed_wb) {
++ wb = acm->delayed_wb;
++ acm->delayed_wb = NULL;
++ spin_unlock_irq(&acm->write_lock);
++ acm_start_wb(acm, wb);
++ } else {
++ spin_unlock_irq(&acm->write_lock);
++ }
++
++ /*
++ * delayed error checking because we must
++ * do the write path at all costs
++ */
+ if (rv < 0)
+ goto err_out;
+
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index c4a0ee8..519eb63 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -112,7 +112,6 @@ struct acm {
+ struct mutex mutex;
+ struct usb_cdc_line_coding line; /* bits, stop, parity */
+ struct work_struct work; /* work queue entry for line discipline waking up */
+- struct work_struct waker;
+ wait_queue_head_t drain_wait; /* close processing */
+ struct tasklet_struct urb_task; /* rx processing */
+ spinlock_t throttle_lock; /* synchronize throtteling and read callback */
+diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
+index 2e78b07..9804ee9 100644
+--- a/drivers/video/backlight/mbp_nvidia_bl.c
++++ b/drivers/video/backlight/mbp_nvidia_bl.c
+@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
+ static const struct dmi_system_id __initdata mbp_device_table[] = {
+ {
+ .callback = mbp_dmi_match,
++ .ident = "MacBook 1,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
++ },
++ .driver_data = (void *)&intel_chipset_data,
++ },
++ {
++ .callback = mbp_dmi_match,
++ .ident = "MacBook 2,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
++ },
++ .driver_data = (void *)&intel_chipset_data,
++ },
++ {
++ .callback = mbp_dmi_match,
++ .ident = "MacBook 3,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
++ },
++ .driver_data = (void *)&intel_chipset_data,
++ },
++ {
++ .callback = mbp_dmi_match,
++ .ident = "MacBook 4,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
++ },
++ .driver_data = (void *)&intel_chipset_data,
++ },
++ {
++ .callback = mbp_dmi_match,
++ .ident = "MacBook 4,2",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
++ },
++ .driver_data = (void *)&intel_chipset_data,
++ },
++ {
++ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 3,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
+index 4cd5049..3803745 100644
+--- a/drivers/video/sunxvr500.c
++++ b/drivers/video/sunxvr500.c
+@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
+ static int __devinit e3d_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
++ struct device_node *of_node;
++ const char *device_type;
+ struct fb_info *info;
+ struct e3d_info *ep;
+ unsigned int line_length;
+ int err;
+
++ of_node = pci_device_to_OF_node(pdev);
++ if (!of_node) {
++ printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
++ pci_name(pdev));
++ return -ENODEV;
++ }
++
++ device_type = of_get_property(of_node, "device_type", NULL);
++ if (!device_type) {
++ printk(KERN_INFO "e3d: Ignoring secondary output device "
++ "at %s\n", pci_name(pdev));
++ return -ENODEV;
++ }
++
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
+@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
+ ep->info = info;
+ ep->pdev = pdev;
+ spin_lock_init(&ep->lock);
+- ep->of_node = pci_device_to_OF_node(pdev);
+- if (!ep->of_node) {
+- printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+- pci_name(pdev));
+- err = -ENODEV;
+- goto err_release_fb;
+- }
++ ep->of_node = of_node;
+
+ /* Read the PCI base register of the frame buffer, which we
+ * need in order to interpret the RAMDAC_VID_*FB* values in
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index a6c5674..0b91907 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -443,7 +443,7 @@ static void hpwdt_ping(void)
+ static int hpwdt_change_timer(int new_margin)
+ {
+ /* Arbitrary, can't find the card's limits */
+- if (new_margin < 30 || new_margin > 600) {
++ if (new_margin < 5 || new_margin > 600) {
+ printk(KERN_WARNING
+ "hpwdt: New value passed in is invalid: %d seconds.\n",
+ new_margin);
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 4bdb7f1..e2ebe08 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -115,8 +115,37 @@ enum iTCO_chipsets {
+ TCO_3420, /* 3420 */
+ TCO_3450, /* 3450 */
+ TCO_EP80579, /* EP80579 */
+- TCO_CPTD, /* CPT Desktop */
+- TCO_CPTM, /* CPT Mobile */
++ TCO_CPT1, /* Cougar Point */
++ TCO_CPT2, /* Cougar Point Desktop */
++ TCO_CPT3, /* Cougar Point Mobile */
++ TCO_CPT4, /* Cougar Point */
++ TCO_CPT5, /* Cougar Point */
++ TCO_CPT6, /* Cougar Point */
++ TCO_CPT7, /* Cougar Point */
++ TCO_CPT8, /* Cougar Point */
++ TCO_CPT9, /* Cougar Point */
++ TCO_CPT10, /* Cougar Point */
++ TCO_CPT11, /* Cougar Point */
++ TCO_CPT12, /* Cougar Point */
++ TCO_CPT13, /* Cougar Point */
++ TCO_CPT14, /* Cougar Point */
++ TCO_CPT15, /* Cougar Point */
++ TCO_CPT16, /* Cougar Point */
++ TCO_CPT17, /* Cougar Point */
++ TCO_CPT18, /* Cougar Point */
++ TCO_CPT19, /* Cougar Point */
++ TCO_CPT20, /* Cougar Point */
++ TCO_CPT21, /* Cougar Point */
++ TCO_CPT22, /* Cougar Point */
++ TCO_CPT23, /* Cougar Point */
++ TCO_CPT24, /* Cougar Point */
++ TCO_CPT25, /* Cougar Point */
++ TCO_CPT26, /* Cougar Point */
++ TCO_CPT27, /* Cougar Point */
++ TCO_CPT28, /* Cougar Point */
++ TCO_CPT29, /* Cougar Point */
++ TCO_CPT30, /* Cougar Point */
++ TCO_CPT31, /* Cougar Point */
+ };
+
+ static struct {
+@@ -173,8 +202,37 @@ static struct {
+ {"3420", 2},
+ {"3450", 2},
+ {"EP80579", 2},
+- {"CPT Desktop", 2},
+- {"CPT Mobile", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
++ {"Cougar Point", 2},
+ {NULL, 0}
+ };
+
+@@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+ { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
+ { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
+ { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
+- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
+- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
++ { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
++ { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
++ { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
++ { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
++ { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
++ { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
++ { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
++ { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
++ { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
++ { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
++ { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
++ { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
++ { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
++ { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
++ { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
++ { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
++ { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
++ { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
++ { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
++ { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
++ { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
++ { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
++ { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
++ { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
++ { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
++ { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
++ { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
++ { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
++ { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
++ { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
++ { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 74a0461..92f9590 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
+ P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
+
+ /* No mandatory locks */
+- if (__mandatory_lock(inode))
++ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+ return -ENOLCK;
+
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index d11d028..8db62b2 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
+ * NULL first argument is nfsd_sync_dir() and that's not a directory.
+ */
+
+-static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
++int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+ {
+ struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+ int error;
+@@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+ error = 0;
+ return error;
+ }
++EXPORT_SYMBOL(block_fsync);
+
+ /*
+ * pseudo-fs
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 941441d..4e6dbab 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+ __u32 bytes_sent;
+ __u16 byte_count;
+
++ *nbytes = 0;
++
+ /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
+ if (tcon->ses == NULL)
+ return -ECONNABORTED;
+@@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+ cifs_stats_inc(&tcon->num_writes);
+ if (rc) {
+ cFYI(1, ("Send error in write = %d", rc));
+- *nbytes = 0;
+ } else {
+ *nbytes = le16_to_cpu(pSMBr->CountHigh);
+ *nbytes = (*nbytes) << 16;
+ *nbytes += le16_to_cpu(pSMBr->Count);
++
++ /*
++ * Mask off the high 16 bits when the byte count returned by the
++ * server is greater than the byte count requested by the client. Some
++ * OS/2 servers are known to set incorrect CountHigh values.
++ */
++ if (*nbytes > count)
++ *nbytes &= 0xFFFF;
+ }
+
+ cifs_buf_release(pSMB);
+@@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
+ *nbytes = le16_to_cpu(pSMBr->CountHigh);
+ *nbytes = (*nbytes) << 16;
+ *nbytes += le16_to_cpu(pSMBr->Count);
++
++ /*
++ * Mask off the high 16 bits when the byte count returned by the
++ * server is greater than the byte count requested by the client. OS/2
++ * servers are known to set incorrect CountHigh values.
++ */
++ if (*nbytes > count)
++ *nbytes &= 0xFFFF;
+ }
+
+ /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
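The sanity check added in both CIFS write paths is easy to verify with a
worked example. Suppose the client asked to write count = 0x1000 bytes and a
buggy server replies with Count = 0x0800 but a stale CountHigh = 0x0001: the
assembled value 0x10800 exceeds the request, so only the low word can be
trusted. A standalone illustration (all values hypothetical):

        #include <stdio.h>

        int main(void)
        {
                unsigned int count = 0x1000;       /* bytes the client asked to write */
                unsigned int count_high = 0x0001;  /* bogus CountHigh from the server */
                unsigned int count_low = 0x0800;   /* Count field (low 16 bits) */
                unsigned int nbytes = (count_high << 16) + count_low;

                if (nbytes > count)             /* 0x10800 > 0x1000: impossible reply */
                        nbytes &= 0xFFFF;       /* keep only the low word: 0x0800 */

                printf("nbytes = %#x\n", nbytes);
                return 0;
        }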
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 4a430ab..23dc2af 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -647,38 +647,17 @@ out_lock:
+ return rc;
+ }
+
+-static int
+-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
++ size_t *bufsiz)
+ {
++ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ char *lower_buf;
+- size_t lower_bufsiz;
+- struct dentry *lower_dentry;
+- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+- char *plaintext_name;
+- size_t plaintext_name_size;
++ size_t lower_bufsiz = PATH_MAX;
+ mm_segment_t old_fs;
+ int rc;
+
+- lower_dentry = ecryptfs_dentry_to_lower(dentry);
+- if (!lower_dentry->d_inode->i_op->readlink) {
+- rc = -EINVAL;
+- goto out;
+- }
+- mount_crypt_stat = &ecryptfs_superblock_to_private(
+- dentry->d_sb)->mount_crypt_stat;
+- /*
+- * If the lower filename is encrypted, it will result in a significantly
+- * longer name. If needed, truncate the name after decode and decrypt.
+- */
+- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
+- lower_bufsiz = PATH_MAX;
+- else
+- lower_bufsiz = bufsiz;
+- /* Released in this function */
+ lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
+- if (lower_buf == NULL) {
+- printk(KERN_ERR "%s: Out of memory whilst attempting to "
+- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
++ if (!lower_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+@@ -688,29 +667,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+ (char __user *)lower_buf,
+ lower_bufsiz);
+ set_fs(old_fs);
+- if (rc >= 0) {
+- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
+- &plaintext_name_size,
+- dentry, lower_buf,
+- rc);
+- if (rc) {
+- printk(KERN_ERR "%s: Error attempting to decode and "
+- "decrypt filename; rc = [%d]\n", __func__,
+- rc);
+- goto out_free_lower_buf;
+- }
+- /* Check for bufsiz <= 0 done in sys_readlinkat() */
+- rc = copy_to_user(buf, plaintext_name,
+- min((size_t) bufsiz, plaintext_name_size));
+- if (rc)
+- rc = -EFAULT;
+- else
+- rc = plaintext_name_size;
+- kfree(plaintext_name);
+- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
+- }
+-out_free_lower_buf:
++ if (rc < 0)
++ goto out;
++ lower_bufsiz = rc;
++ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
++ lower_buf, lower_bufsiz);
++out:
+ kfree(lower_buf);
++ return rc;
++}
++
++static int
++ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++{
++ char *kbuf;
++ size_t kbufsiz, copied;
++ int rc;
++
++ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
++ if (rc)
++ goto out;
++ copied = min_t(size_t, bufsiz, kbufsiz);
++ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
++ kfree(kbuf);
++ fsstack_copy_attr_atime(dentry->d_inode,
++ ecryptfs_dentry_to_lower(dentry)->d_inode);
+ out:
+ return rc;
+ }
+@@ -1015,6 +996,28 @@ out:
+ return rc;
+ }
+
++int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
++ int rc = 0;
++
++ mount_crypt_stat = &ecryptfs_superblock_to_private(
++ dentry->d_sb)->mount_crypt_stat;
++ generic_fillattr(dentry->d_inode, stat);
++ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
++ char *target;
++ size_t targetsiz;
++
++ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
++ if (!rc) {
++ kfree(target);
++ stat->size = targetsiz;
++ }
++ }
++ return rc;
++}
++
+ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+ {
+@@ -1039,7 +1042,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ if (!lower_dentry->d_inode->i_op->setxattr) {
+- rc = -ENOSYS;
++ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+@@ -1057,7 +1060,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
+ int rc = 0;
+
+ if (!lower_dentry->d_inode->i_op->getxattr) {
+- rc = -ENOSYS;
++ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+@@ -1084,7 +1087,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ if (!lower_dentry->d_inode->i_op->listxattr) {
+- rc = -ENOSYS;
++ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+@@ -1101,7 +1104,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ if (!lower_dentry->d_inode->i_op->removexattr) {
+- rc = -ENOSYS;
++ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+@@ -1132,6 +1135,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
+ .put_link = ecryptfs_put_link,
+ .permission = ecryptfs_permission,
+ .setattr = ecryptfs_setattr,
++ .getattr = ecryptfs_getattr_link,
+ .setxattr = ecryptfs_setxattr,
+ .getxattr = ecryptfs_getxattr,
+ .listxattr = ecryptfs_listxattr,
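The ENOSYS to EOPNOTSUPP change in the xattr operations above is visible to
userspace: ENOSYS claims the system call itself does not exist, which can make
tools give up on xattrs everywhere, while EOPNOTSUPP (reported as ENOTSUP)
correctly says only that this filesystem lacks the operation. A small probe
showing how userspace distinguishes the case (a sketch, not from this patch):

        #include <errno.h>
        #include <stdio.h>
        #include <sys/xattr.h>  /* <attr/xattr.h> on older systems */

        int main(int argc, char **argv)
        {
                if (argc < 2) {
                        fprintf(stderr, "usage: %s <path>\n", argv[0]);
                        return 2;
                }
                /* size query: returns the needed buffer size, or -1 with errno set */
                if (listxattr(argv[1], NULL, 0) < 0) {
                        if (errno == ENOTSUP)   /* EOPNOTSUPP: fs lacks xattr support */
                                fprintf(stderr, "xattrs unsupported on this fs\n");
                        else
                                perror("listxattr");
                        return 1;
                }
                return 0;
        }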
+diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
+index b15a43a..1a037f7 100644
+--- a/fs/ecryptfs/super.c
++++ b/fs/ecryptfs/super.c
+@@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
+ if (lower_dentry->d_inode) {
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+- d_drop(lower_dentry);
+ }
+ }
+ ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 874d169..602d5ad 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -139,8 +139,8 @@ typedef struct ext4_io_end {
+ struct inode *inode; /* file being written to */
+ unsigned int flag; /* unwritten or not */
+ int error; /* I/O error code */
+- ext4_lblk_t offset; /* offset in the file */
+- size_t size; /* size of the extent */
++ loff_t offset; /* offset in the file */
++ ssize_t size; /* size of the extent */
+ struct work_struct work; /* data work queue */
+ } ext4_io_end_t;
+
+@@ -1744,7 +1744,7 @@ extern void ext4_ext_release(struct super_block *);
+ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+ loff_t len);
+ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+- loff_t len);
++ ssize_t len);
+ extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
+ sector_t block, unsigned int max_blocks,
+ struct buffer_head *bh, int flags);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 765a482..c568779 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3603,7 +3603,7 @@ retry:
+ * Returns 0 on success.
+ */
+ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+- loff_t len)
++ ssize_t len)
+ {
+ handle_t *handle;
+ ext4_lblk_t block;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index e119524..2059c34 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3551,7 +3551,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+ {
+ struct inode *inode = io->inode;
+ loff_t offset = io->offset;
+- size_t size = io->size;
++ ssize_t size = io->size;
+ int ret = 0;
+
+ ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
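The ext4 type changes above matter because ext4_lblk_t is ext4's 32-bit
logical-block type, yet io->offset holds a byte offset: anything at or beyond
4 GiB is silently truncated, while loff_t and ssize_t are 64-bit. A worked
illustration of the truncation:

        #include <stdio.h>

        int main(void)
        {
                long long offset = 5368709120LL;   /* 5 GiB: a legal file offset */
                unsigned int as_u32 = (unsigned int)offset;

                /* 5 GiB mod 2^32 = 1 GiB: the narrowed value aliases a
                 * completely different position in the file. */
                printf("64-bit: %lld, truncated to 32 bits: %u\n", offset, as_u32);
                return 0;
        }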
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index f565f24..72646e2 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
+ {
+ struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
+ wchar_t *ip, *ext_start, *end, *name_start;
+- unsigned char base[9], ext[4], buf[8], *p;
++ unsigned char base[9], ext[4], buf[5], *p;
+ unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
+ int chl, chi;
+ int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
+@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
+ return 0;
+ }
+
+- i = jiffies & 0xffff;
++ i = jiffies;
+ sz = (jiffies >> 16) & 0x7;
+ if (baselen > 2) {
+ baselen = numtail2_baselen;
+@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
+ name_res[baselen + 4] = '~';
+ name_res[baselen + 5] = '1' + sz;
+ while (1) {
+- sprintf(buf, "%04X", i);
++ snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
+ memcpy(&name_res[baselen], buf, 4);
+ if (vfat_find_form(dir, name_res) < 0)
+ break;
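"%04X" only guarantees four digits while the value fits in 16 bits. The
numeric-tail counter is adjusted further down in this loop (outside the hunk),
so it does not stay within that range; formatted as a full 32-bit value it can
take eight digits plus the NUL, nine bytes against the old eight-byte buf. The
fix masks at format time and sizes buf for exactly four digits plus NUL. A
demonstration (the value of i is hypothetical):

        #include <stdio.h>

        int main(void)
        {
                char buf[16];
                int i = -11;    /* a value the counter could reach outside 0..0xffff */

                /* Unmasked, "%04X" may emit 8 digits + NUL = 9 bytes, one more
                 * than the old buf[8]. */
                printf("unmasked: %d chars\n",
                       snprintf(buf, sizeof(buf), "%04X", i));

                /* Masked, the output is always 4 digits + NUL = 5 bytes,
                 * matching the new buf[5]. */
                printf("masked:   %d chars\n",
                       snprintf(buf, sizeof(buf), "%04X", i & 0xffff));
                return 0;
        }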
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index ee77713..bd39abc 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -1293,7 +1293,8 @@ static int nfs4_init_server(struct nfs_server *server,
+
+ /* Initialise the client representation from the mount data */
+ server->flags = data->flags;
+- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
++ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
++ NFS_CAP_POSIX_LOCK;
+ server->options = data->options;
+
+ /* Get a client record */
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 8b5382e..af6948d 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
+ res = NULL;
+ goto out;
+ /* This turned out not to be a regular file */
++ case -EISDIR:
+ case -ENOTDIR:
+ goto no_open;
+ case -ELOOP:
+ if (!(nd->intent.open.flags & O_NOFOLLOW))
+ goto no_open;
+- /* case -EISDIR: */
+ /* case -EINVAL: */
+ default:
+ goto out;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 375f0fa..ecf6602 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1520,6 +1520,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
+ nfs_post_op_update_inode(dir, o_res->dir_attr);
+ } else
+ nfs_refresh_inode(dir, o_res->dir_attr);
++ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
++ server->caps &= ~NFS_CAP_POSIX_LOCK;
+ if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+ status = _nfs4_proc_open_confirm(data);
+ if (status != 0)
+@@ -1660,7 +1662,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
+ status = PTR_ERR(state);
+ if (IS_ERR(state))
+ goto err_opendata_put;
+- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
++ if (server->caps & NFS_CAP_POSIX_LOCK)
+ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+ nfs4_opendata_put(opendata);
+ nfs4_put_state_owner(sp);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index a8587e9..bbf72d8 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2121,9 +2121,15 @@ out_acl:
+ * and this is the root of a cross-mounted filesystem.
+ */
+ if (ignore_crossmnt == 0 &&
+- exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
+- err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
+- exp->ex_path.mnt->mnt_mountpoint, &stat);
++ dentry == exp->ex_path.mnt->mnt_root) {
++ struct path path = exp->ex_path;
++ path_get(&path);
++ while (follow_up(&path)) {
++ if (path.dentry != path.mnt->mnt_root)
++ break;
++ }
++ err = vfs_getattr(path.mnt, path.dentry, &stat);
++ path_put(&path);
+ if (err)
+ goto out_nfserr;
+ }
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index 0501974..8ccf0f8 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -30,6 +30,8 @@
+ #include "alloc.h"
+ #include "dlmglue.h"
+ #include "file.h"
++#include "inode.h"
++#include "journal.h"
+ #include "ocfs2_fs.h"
+
+ #include "xattr.h"
+@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
+ }
+
+ /*
++ * Helper function to set i_mode in memory and on disk. Some call paths
++ * will not have di_bh or a journal handle to pass, in which case it
++ * will create its own.
++ */
++static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
++ handle_t *handle, umode_t new_mode)
++{
++ int ret, commit_handle = 0;
++ struct ocfs2_dinode *di;
++
++ if (di_bh == NULL) {
++ ret = ocfs2_read_inode_block(inode, &di_bh);
++ if (ret) {
++ mlog_errno(ret);
++ goto out;
++ }
++ } else
++ get_bh(di_bh);
++
++ if (handle == NULL) {
++ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
++ OCFS2_INODE_UPDATE_CREDITS);
++ if (IS_ERR(handle)) {
++ ret = PTR_ERR(handle);
++ mlog_errno(ret);
++ goto out_brelse;
++ }
++
++ commit_handle = 1;
++ }
++
++ di = (struct ocfs2_dinode *)di_bh->b_data;
++ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
++ OCFS2_JOURNAL_ACCESS_WRITE);
++ if (ret) {
++ mlog_errno(ret);
++ goto out_commit;
++ }
++
++ inode->i_mode = new_mode;
++ di->i_mode = cpu_to_le16(inode->i_mode);
++
++ ocfs2_journal_dirty(handle, di_bh);
++
++out_commit:
++ if (commit_handle)
++ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
++out_brelse:
++ brelse(di_bh);
++out:
++ return ret;
++}
++
++/*
+ * Set the access or default ACL of an inode.
+ */
+ static int ocfs2_set_acl(handle_t *handle,
+@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle,
+ if (ret < 0)
+ return ret;
+ else {
+- inode->i_mode = mode;
+ if (ret == 0)
+ acl = NULL;
++
++ ret = ocfs2_acl_set_mode(inode, di_bh,
++ handle, mode);
++ if (ret)
++ return ret;
++
+ }
+ }
+ break;
+@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl = NULL;
+ int ret = 0;
++ mode_t mode;
+
+ if (!S_ISLNK(inode->i_mode)) {
+ if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle,
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ }
+- if (!acl)
+- inode->i_mode &= ~current_umask();
++ if (!acl) {
++ mode = inode->i_mode & ~current_umask();
++ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ if (ret) {
++ mlog_errno(ret);
++ goto cleanup;
++ }
++ }
+ }
+ if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
+ struct posix_acl *clone;
+- mode_t mode;
+
+ if (S_ISDIR(inode->i_mode)) {
+ ret = ocfs2_set_acl(handle, inode, di_bh,
+@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle,
+ mode = inode->i_mode;
+ ret = posix_acl_create_masq(clone, &mode);
+ if (ret >= 0) {
+- inode->i_mode = mode;
++ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret > 0) {
+ ret = ocfs2_set_acl(handle, inode,
+ di_bh, ACL_TYPE_ACCESS,
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index c30b644..79b5dac 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
+
+ #define do_error(fmt, ...) \
+ do{ \
+- if (clean_error) \
++ if (resize) \
+ mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
+ else \
+ ocfs2_error(sb, fmt, ##__VA_ARGS__); \
+@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
+
+ static int ocfs2_validate_gd_self(struct super_block *sb,
+ struct buffer_head *bh,
+- int clean_error)
++ int resize)
+ {
+ struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
+
+@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
+ static int ocfs2_validate_gd_parent(struct super_block *sb,
+ struct ocfs2_dinode *di,
+ struct buffer_head *bh,
+- int clean_error)
++ int resize)
+ {
+ unsigned int max_bits;
+ struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
+@@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
+ return -EINVAL;
+ }
+
+- if (le16_to_cpu(gd->bg_chain) >=
+- le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
++ /* In resize, we may encounter the case bg_chain == cl_next_free_rec. */
++ if ((le16_to_cpu(gd->bg_chain) >
++ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
++ ((le16_to_cpu(gd->bg_chain) ==
++ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
+ do_error("Group descriptor #%llu has bad chain %u",
+ (unsigned long long)bh->b_blocknr,
+ le16_to_cpu(gd->bg_chain));
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 58324c2..3cd449d 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = {
+ unsigned long badness(struct task_struct *p, unsigned long uptime);
+ static int proc_oom_score(struct task_struct *task, char *buffer)
+ {
+- unsigned long points;
++ unsigned long points = 0;
+ struct timespec uptime;
+
+ do_posix_clock_monotonic_gettime(&uptime);
+ read_lock(&tasklist_lock);
+- points = badness(task->group_leader, uptime.tv_sec);
++ if (pid_alive(task))
++ points = badness(task, uptime.tv_sec);
+ read_unlock(&tasklist_lock);
+ return sprintf(buffer, "%lu\n", points);
+ }
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 6e722c1..6c9da00 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2321,34 +2321,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+ if (di->dqb_valid & QIF_SPACE) {
+ dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
+ check_blim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+ }
+ if (di->dqb_valid & QIF_BLIMITS) {
+ dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
+ dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
+ check_blim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+ }
+ if (di->dqb_valid & QIF_INODES) {
+ dm->dqb_curinodes = di->dqb_curinodes;
+ check_ilim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+ }
+ if (di->dqb_valid & QIF_ILIMITS) {
+ dm->dqb_isoftlimit = di->dqb_isoftlimit;
+ dm->dqb_ihardlimit = di->dqb_ihardlimit;
+ check_ilim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+ }
+ if (di->dqb_valid & QIF_BTIME) {
+ dm->dqb_btime = di->dqb_btime;
+ check_blim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+ }
+ if (di->dqb_valid & QIF_ITIME) {
+ dm->dqb_itime = di->dqb_itime;
+ check_ilim = 1;
+- __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
++ set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+ }
+
+ if (check_blim) {
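Every __set_bit() above becomes set_bit(): the underscored form is the
non-atomic variant, safe only when one lock covers every writer of the word,
whereas set_bit() is an atomic read-modify-write. A sketch contrasting the two
(names hypothetical):

        #include <linux/bitops.h>
        #include <linux/spinlock.h>

        static unsigned long flags_word;
        static DEFINE_SPINLOCK(flags_lock);
        #define MY_FLAG 0       /* hypothetical bit number */

        static void set_flag_nonatomic(void)
        {
                /* __set_bit() is a plain RMW: updates racing with any other
                 * writer of flags_word can be lost, so one lock must cover
                 * every writer of the word. */
                spin_lock(&flags_lock);
                __set_bit(MY_FLAG, &flags_word);
                spin_unlock(&flags_lock);
        }

        static void set_flag_atomic(void)
        {
                /* set_bit() is an atomic RMW; the bit update itself needs no
                 * external lock. */
                set_bit(MY_FLAG, &flags_word);
        }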
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index b4a7dd0..33bc410 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ save_mount_options(s, data);
+
+ sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
+- if (!sbi) {
+- errval = -ENOMEM;
+- goto error_alloc;
+- }
++ if (!sbi)
++ return -ENOMEM;
+ s->s_fs_info = sbi;
+ /* Set default values for options: non-aggressive tails, RO on errors */
+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ return (0);
+
+ error:
+- reiserfs_write_unlock(s);
+-error_alloc:
+ if (jinit_done) { /* kill the commit thread, free journal ram */
+ journal_release_error(NULL, s);
+ }
+
++ reiserfs_write_unlock(s);
++
+ reiserfs_free_bitmap_cache(s);
+ if (SB_BUFFER_WITH_SB(s))
+ brelse(SB_BUFFER_WITH_SB(s));
+diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
+index 66abe36..1c65a2b 100644
+--- a/fs/xfs/linux-2.6/xfs_aops.c
++++ b/fs/xfs/linux-2.6/xfs_aops.c
+@@ -163,14 +163,17 @@ xfs_ioend_new_eof(
+ }
+
+ /*
+- * Update on-disk file size now that data has been written to disk.
+- * The current in-memory file size is i_size. If a write is beyond
+- * eof i_new_size will be the intended file size until i_size is
+- * updated. If this write does not extend all the way to the valid
+- * file size then restrict this update to the end of the write.
++ * Update on-disk file size now that data has been written to disk. The
++ * current in-memory file size is i_size. If a write is beyond eof i_new_size
++ * will be the intended file size until i_size is updated. If this write does
++ * not extend all the way to the valid file size then restrict this update to
++ * the end of the write.
++ *
++ * This function does not block, as blocking on the inode lock in IO completion
++ * can lead to IO completion order dependency deadlocks. If it can't get the
++ * inode ilock it will return EAGAIN. Callers must handle this.
+ */
+-
+-STATIC void
++STATIC int
+ xfs_setfilesize(
+ xfs_ioend_t *ioend)
+ {
+@@ -181,9 +184,11 @@ xfs_setfilesize(
+ ASSERT(ioend->io_type != IOMAP_READ);
+
+ if (unlikely(ioend->io_error))
+- return;
++ return 0;
++
++ if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
++ return EAGAIN;
+
+- xfs_ilock(ip, XFS_ILOCK_EXCL);
+ isize = xfs_ioend_new_eof(ioend);
+ if (isize) {
+ ip->i_d.di_size = isize;
+@@ -191,6 +196,28 @@ xfs_setfilesize(
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ return 0;
++}
++
++/*
++ * Schedule IO completion handling on an xfsdatad if this was
++ * the final hold on this ioend. If we are asked to wait,
++ * flush the workqueue.
++ */
++STATIC void
++xfs_finish_ioend(
++ xfs_ioend_t *ioend,
++ int wait)
++{
++ if (atomic_dec_and_test(&ioend->io_remaining)) {
++ struct workqueue_struct *wq;
++
++ wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
++ xfsconvertd_workqueue : xfsdatad_workqueue;
++ queue_work(wq, &ioend->io_work);
++ if (wait)
++ flush_workqueue(wq);
++ }
+ }
+
+ /*
+@@ -198,11 +225,11 @@ xfs_setfilesize(
+ */
+ STATIC void
+ xfs_end_io(
+- struct work_struct *work)
++ struct work_struct *work)
+ {
+- xfs_ioend_t *ioend =
+- container_of(work, xfs_ioend_t, io_work);
+- struct xfs_inode *ip = XFS_I(ioend->io_inode);
++ xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
++ struct xfs_inode *ip = XFS_I(ioend->io_inode);
++ int error;
+
+ /*
+ * For unwritten extents we need to issue transactions to convert a
+@@ -210,7 +237,6 @@ xfs_end_io(
+ */
+ if (ioend->io_type == IOMAP_UNWRITTEN &&
+ likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+- int error;
+
+ error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+ ioend->io_size);
+@@ -222,30 +248,23 @@ xfs_end_io(
+ * We might have to update the on-disk file size after extending
+ * writes.
+ */
+- if (ioend->io_type != IOMAP_READ)
+- xfs_setfilesize(ioend);
+- xfs_destroy_ioend(ioend);
+-}
+-
+-/*
+- * Schedule IO completion handling on a xfsdatad if this was
+- * the final hold on this ioend. If we are asked to wait,
+- * flush the workqueue.
+- */
+-STATIC void
+-xfs_finish_ioend(
+- xfs_ioend_t *ioend,
+- int wait)
+-{
+- if (atomic_dec_and_test(&ioend->io_remaining)) {
+- struct workqueue_struct *wq;
+-
+- wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+- xfsconvertd_workqueue : xfsdatad_workqueue;
+- queue_work(wq, &ioend->io_work);
+- if (wait)
+- flush_workqueue(wq);
++ if (ioend->io_type != IOMAP_READ) {
++ error = xfs_setfilesize(ioend);
++ ASSERT(!error || error == EAGAIN);
+ }
++
++ /*
++ * If we didn't complete processing of the ioend, requeue it to the
++ * tail of the workqueue for another attempt later. Otherwise destroy
++ * it.
++ */
++ if (error == EAGAIN) {
++ atomic_inc(&ioend->io_remaining);
++ xfs_finish_ioend(ioend, 0);
++ /* ensure we don't spin on blocked ioends */
++ delay(1);
++ } else
++ xfs_destroy_ioend(ioend);
+ }
+
+ /*
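xfs_setfilesize() now returns EAGAIN instead of sleeping on the ilock in the
completion path, and xfs_end_io() requeues the ioend for another attempt. The
general trylock-and-requeue pattern, with all names hypothetical:

        #include <linux/mutex.h>
        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct my_ioend {                       /* hypothetical completion record */
                struct work_struct work;
                struct mutex *inode_lock;       /* lock guarding the on-disk size */
        };

        static struct workqueue_struct *my_wq;  /* hypothetical queue */

        static void my_completion_work(struct work_struct *work)
        {
                struct my_ioend *ioend =
                        container_of(work, struct my_ioend, work);

                if (!mutex_trylock(ioend->inode_lock)) {
                        /* Contended: requeue rather than sleep; sleeping in a
                         * completion worker can stall every completion queued
                         * behind this one. */
                        queue_work(my_wq, &ioend->work);
                        return;
                }
                /* ... update the on-disk file size under the lock ... */
                mutex_unlock(ioend->inode_lock);
                kfree(ioend);
        }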
+diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
+index 1f5e4bb..6b6b394 100644
+--- a/fs/xfs/linux-2.6/xfs_sync.c
++++ b/fs/xfs/linux-2.6/xfs_sync.c
+@@ -613,7 +613,8 @@ xfssyncd(
+ set_freezable();
+ timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
+ for (;;) {
+- timeleft = schedule_timeout_interruptible(timeleft);
++ if (list_empty(&mp->m_sync_list))
++ timeleft = schedule_timeout_interruptible(timeleft);
+ /* swsusp */
+ try_to_freeze();
+ if (kthread_should_stop() && list_empty(&mp->m_sync_list))
+@@ -633,8 +634,7 @@ xfssyncd(
+ list_add_tail(&mp->m_sync_work.w_list,
+ &mp->m_sync_list);
+ }
+- list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
+- list_move(&work->w_list, &tmp);
++ list_splice_init(&mp->m_sync_list, &tmp);
+ spin_unlock(&mp->m_sync_lock);
+
+ list_for_each_entry_safe(work, n, &tmp, w_list) {
+@@ -693,12 +693,12 @@ xfs_inode_set_reclaim_tag(
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
+
+- read_lock(&pag->pag_ici_lock);
++ write_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+ __xfs_inode_set_reclaim_tag(pag, ip);
+ __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+ spin_unlock(&ip->i_flags_lock);
+- read_unlock(&pag->pag_ici_lock);
++ write_unlock(&pag->pag_ici_lock);
+ xfs_put_perag(mp, pag);
+ }
+
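The xfssyncd hunk replaces a per-entry list_move() loop held under
m_sync_lock with list_splice_init(), which detaches the whole list in constant
time and leaves the source reinitialized (empty), shrinking the locked
section. The idiom, mirroring the patched code:

        LIST_HEAD(tmp);

        spin_lock(&mp->m_sync_lock);
        list_splice_init(&mp->m_sync_list, &tmp);  /* O(1); source left empty */
        spin_unlock(&mp->m_sync_lock);

        list_for_each_entry_safe(work, n, &tmp, w_list) {
                /* process 'work' with the lock dropped */
        }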
+diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
+index 155e798..fd21160 100644
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -190,13 +190,12 @@ xfs_iget_cache_hit(
+ trace_xfs_iget_reclaim(ip);
+
+ /*
+- * We need to set XFS_INEW atomically with clearing the
+- * reclaimable tag so that we do have an indicator of the
+- * inode still being initialized.
++ * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
++ * from stomping over us while we recycle the inode. We can't
++ * clear the radix tree reclaimable tag yet as it requires
++ * pag_ici_lock to be held exclusive.
+ */
+- ip->i_flags |= XFS_INEW;
+- ip->i_flags &= ~XFS_IRECLAIMABLE;
+- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
++ ip->i_flags |= XFS_IRECLAIM;
+
+ spin_unlock(&ip->i_flags_lock);
+ read_unlock(&pag->pag_ici_lock);
+@@ -216,7 +215,15 @@ xfs_iget_cache_hit(
+ trace_xfs_iget_reclaim(ip);
+ goto out_error;
+ }
++
++ write_lock(&pag->pag_ici_lock);
++ spin_lock(&ip->i_flags_lock);
++ ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
++ ip->i_flags |= XFS_INEW;
++ __xfs_inode_clear_reclaim_tag(mp, pag, ip);
+ inode->i_state = I_NEW;
++ spin_unlock(&ip->i_flags_lock);
++ write_unlock(&pag->pag_ici_lock);
+ } else {
+ /* If the VFS inode is being torn down, pause and try again. */
+ if (!igrab(inode)) {
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index e6f3b12..0cbdccc 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -6,6 +6,7 @@
+ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
+@@ -375,6 +376,7 @@
+ {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0, 0, 0}
+
+ #define r128_PCI_IDS \
+diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
+index aa95508..2c445e1 100644
+--- a/include/linux/dm-ioctl.h
++++ b/include/linux/dm-ioctl.h
+@@ -266,9 +266,9 @@ enum {
+ #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
+
+ #define DM_VERSION_MAJOR 4
+-#define DM_VERSION_MINOR 16
++#define DM_VERSION_MINOR 17
+ #define DM_VERSION_PATCHLEVEL 0
+-#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
++#define DM_VERSION_EXTRA "-ioctl (2010-03-05)"
+
+ /* Status bits */
+ #define DM_READONLY_FLAG (1 << 0) /* In/Out */
+@@ -316,4 +316,9 @@ enum {
+ */
+ #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
++/*
++ * If set, a uevent was generated for which the caller may need to wait.
++ */
++#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */
++
+ #endif /* _LINUX_DM_IOCTL_H */
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 5a361f8..da7e52b 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
+ extern void cancel_freezing(struct task_struct *p);
+
+ #ifdef CONFIG_CGROUP_FREEZER
+-extern int cgroup_frozen(struct task_struct *task);
++extern int cgroup_freezing_or_frozen(struct task_struct *task);
+ #else /* !CONFIG_CGROUP_FREEZER */
+-static inline int cgroup_frozen(struct task_struct *task) { return 0; }
++static inline int cgroup_freezing_or_frozen(struct task_struct *task)
++{
++ return 0;
++}
+ #endif /* !CONFIG_CGROUP_FREEZER */
+
+ /*
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index f2f68ce..66b0705 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2214,6 +2214,7 @@ extern int generic_segment_checks(const struct iovec *iov,
+ /* fs/block_dev.c */
+ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
++extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync);
+
+ /* fs/splice.c */
+ extern ssize_t generic_file_splice_read(struct file *, loff_t *,
+diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
+index ece0b1c..e117b1a 100644
+--- a/include/linux/kfifo.h
++++ b/include/linux/kfifo.h
+@@ -86,7 +86,8 @@ union { \
+ */
+ #define INIT_KFIFO(name) \
+ name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
+- sizeof(struct kfifo), name##kfifo_buffer)
++ sizeof(struct kfifo), \
++ name##kfifo_buffer + sizeof(struct kfifo))
+
+ /**
+ * DEFINE_KFIFO - macro to define and initialize a kfifo
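In 2.6.33, DECLARE_KFIFO overlays a struct kfifo on the front of a single byte
array, so the data region must begin sizeof(struct kfifo) bytes into that
array; the old initializer pointed the buffer at offset 0, letting FIFO data
clobber the bookkeeping fields. A userspace model of the layout (the struct
shape below is a stand-in, not the real struct kfifo):

        #include <stdio.h>

        struct kfifo_like {             /* stand-in for struct kfifo */
                unsigned char *buffer;
                unsigned int size, in, out;
        };

        union fifo_storage {
                struct kfifo_like fifo;
                unsigned char bytes[64 + sizeof(struct kfifo_like)];
        };

        int main(void)
        {
                union fifo_storage s;

                /* The struct occupies the first bytes of the array, so the
                 * data area must start just past it -- the exact offset the
                 * INIT_KFIFO fix above adds. */
                s.fifo.buffer = s.bytes + sizeof(struct kfifo_like);
                printf("data offset = %zu bytes\n", sizeof(struct kfifo_like));
                return 0;
        }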
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index bd5a616..1fe293e 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
+ */
+ struct kvm_io_bus {
+ int dev_count;
+-#define NR_IOBUS_DEVS 6
++#define NR_IOBUS_DEVS 200
+ struct kvm_io_device *devs[NR_IOBUS_DEVS];
+ };
+
+@@ -116,6 +116,11 @@ struct kvm_memory_slot {
+ int user_alloc;
+ };
+
++static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
++{
++ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++}
++
+ struct kvm_kernel_irq_routing_entry {
+ u32 gsi;
+ u32 type;
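kvm_dirty_bitmap_bytes() rounds the slot's page count up to a whole number of
longs and converts bits to bytes, so the calculation lives in one place
instead of being open-coded (and sometimes stored in a plain int) at each call
site. Worked example: on a 64-bit host with npages = 100, ALIGN(100, 64) = 128
bits = 16 bytes. A simplified standalone check:

        #include <stdio.h>

        #define BITS_PER_LONG   (8 * sizeof(long))
        /* simplified round-up; the kernel's ALIGN() is mask-based but
         * equivalent for power-of-two alignments */
        #define ALIGN_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

        int main(void)
        {
                unsigned long npages = 100;
                unsigned long bytes = ALIGN_UP(npages, BITS_PER_LONG) / 8;

                printf("%lu pages -> %lu bitmap bytes\n", npages, bytes);
                return 0;
        }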
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 6cb1a3c..bd465d4 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -457,7 +457,7 @@ void symbol_put_addr(void *addr);
+ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+ {
+ #ifdef CONFIG_SMP
+- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
++ return (local_t *) per_cpu_ptr(mod->refptr, cpu);
+ #else
+ return &mod->ref;
+ #endif
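mod->refptr comes from the dynamic per-cpu allocator, and raw pointer
arithmetic with per_cpu_offset() is only valid for the static per-cpu area;
per_cpu_ptr() performs the correct translation for dynamically allocated
per-cpu objects. The lockdep and module.c hunks later in this patch make the
same substitution. A sketch with a hypothetical alloc_percpu() counter:

        #include <linux/percpu.h>
        #include <asm/local.h>

        /* counters: hypothetical object obtained from alloc_percpu(local_t) */
        static long sum_counters(local_t *counters)
        {
                long total = 0;
                int cpu;

                for_each_possible_cpu(cpu)
                        total += local_read(per_cpu_ptr(counters, cpu));
                return total;
        }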
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 34fc6be..ebc4809 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -176,6 +176,7 @@ struct nfs_server {
+ #define NFS_CAP_ATIME (1U << 11)
+ #define NFS_CAP_CTIME (1U << 12)
+ #define NFS_CAP_MTIME (1U << 13)
++#define NFS_CAP_POSIX_LOCK (1U << 14)
+
+
+ /* maximum number of slots to use */
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c1968f4..0afb527 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -959,6 +959,11 @@ static inline int pci_proc_domain(struct pci_bus *bus)
+ }
+ #endif /* CONFIG_PCI_DOMAINS */
+
++/* some architectures require additional setup to direct VGA traffic */
++typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
++ unsigned int command_bits, bool change_bridge);
++extern void pci_register_set_vga_state(arch_set_vga_state_t func);
++
+ #else /* CONFIG_PCI is not enabled */
+
+ /*
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index cca8a04..0be8243 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2417,6 +2417,9 @@
+ #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
+ #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
+ #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
++#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
++#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
++#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
+ #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+ #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
+ #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index 59e9ef6..eb3f34d 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
+ struct freezer, css);
+ }
+
+-int cgroup_frozen(struct task_struct *task)
++int cgroup_freezing_or_frozen(struct task_struct *task)
+ {
+ struct freezer *freezer;
+ enum freezer_state state;
+
+ task_lock(task);
+ freezer = task_freezer(task);
+- state = freezer->state;
++ if (!freezer->css.cgroup->parent)
++ state = CGROUP_THAWED; /* root cgroup can't be frozen */
++ else
++ state = freezer->state;
+ task_unlock(task);
+
+- return state == CGROUP_FROZEN;
++ return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+ }
+
+ /*
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 69a3d7b..0b23ff7 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -753,6 +753,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ if (new->flags & IRQF_ONESHOT)
+ desc->status |= IRQ_ONESHOT;
+
++ /*
++ * Force MSI interrupts to run with interrupts
++ * disabled. Multi-vector cards can cause stack
++ * overflows due to nested interrupts when enough of
++ * them are directed to a core and fire at the same
++ * time.
++ */
++ if (desc->msi_desc)
++ new->flags |= IRQF_DISABLED;
++
+ if (!(desc->status & IRQ_NOAUTOEN)) {
+ desc->depth = 0;
+ desc->status &= ~IRQ_DISABLED;
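With this hunk the core forces handlers for MSI-backed interrupts to run with
local interrupts disabled; previously a driver had to opt in per request_irq()
call. For reference, the opt-in form looks like this (names hypothetical):

        ret = request_irq(irq, my_handler, IRQF_DISABLED, "mydev", dev);
        if (ret)
                goto fail;      /* hypothetical error path */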
+diff --git a/kernel/lockdep.c b/kernel/lockdep.c
+index c62ec14..493a0ef 100644
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -600,9 +600,9 @@ static int static_obj(void *obj)
+ * percpu var?
+ */
+ for_each_possible_cpu(i) {
+- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
+- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+- + per_cpu_offset(i);
++ start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i);
++ end = (unsigned long) per_cpu_ptr(&__per_cpu_start, i)
++ + PERCPU_ENOUGH_ROOM;
+
+ if ((addr >= start) && (addr < end))
+ return 1;
+diff --git a/kernel/module.c b/kernel/module.c
+index f82386b..5b6ce39 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -405,7 +405,7 @@ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+- memcpy(pcpudest + per_cpu_offset(cpu), from, size);
++ memcpy(per_cpu_ptr(pcpudest, cpu), from, size);
+ }
+
+ #else /* ... !CONFIG_SMP */
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 5ade1bd..de53015 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -145,7 +145,7 @@ static void thaw_tasks(bool nosig_only)
+ if (nosig_only && should_send_signal(p))
+ continue;
+
+- if (cgroup_frozen(p))
++ if (cgroup_freezing_or_frozen(p))
+ continue;
+
+ thaw_process(p);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 7ca9345..da19c1e 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6717,7 +6717,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ int ret;
+ cpumask_var_t mask;
+
+- if (len < cpumask_size())
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+@@ -6725,10 +6727,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+
+ ret = sched_getaffinity(pid, mask);
+ if (ret == 0) {
+- if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
++ size_t retlen = min_t(size_t, len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
+ ret = -EFAULT;
+ else
+- ret = cpumask_size();
++ ret = retlen;
+ }
+ free_cpumask_var(mask);
+
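Two checks replace the old `len < cpumask_size()`: the user buffer must hold
at least nr_cpu_ids bits and be a multiple of sizeof(unsigned long), and the
syscall now copies and returns min(len, cpumask_size()), so userspace built
with a larger cpu_set_t than the kernel's cpumask keeps working. Through the
glibc wrapper:

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>

        int main(void)
        {
                cpu_set_t mask; /* glibc: 1024 bits = 128 bytes */

                /* 128 is a multiple of sizeof(unsigned long) and covers
                 * nr_cpu_ids on any machine with <= 1024 CPUs, so both new
                 * kernel-side checks pass. */
                if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
                        perror("sched_getaffinity");
                        return 1;
                }
                printf("CPU0 allowed: %d\n", CPU_ISSET(0, &mask));
                return 0;
        }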
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 337b20e..fe1a069 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -502,7 +502,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
+ return;
+
+ /* be dumb */
+- if (filp->f_mode & FMODE_RANDOM) {
++ if (filp && (filp->f_mode & FMODE_RANDOM)) {
+ force_page_cache_readahead(mapping, filp, offset, req_size);
+ return;
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 6a43314..ba1fadb 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_ACTION:
+- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+- return RX_DROP_MONITOR;
+- /* fall through */
+ case IEEE80211_STYPE_PROBE_RESP:
+ case IEEE80211_STYPE_BEACON:
+ skb_queue_tail(&ifmsh->skb_queue, skb);
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index d28acb6..4eed81b 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
+ if (SN_GT(mpath->sn, orig_sn) ||
+ (mpath->sn == orig_sn &&
+ action == MPATH_PREQ &&
+- new_metric > mpath->metric)) {
++ new_metric >= mpath->metric)) {
+ process = false;
+ fresh_info = false;
+ }
+@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
+
+ mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
+ cpu_to_le32(orig_sn), 0, target_addr,
+- cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
++ cpu_to_le32(target_sn), next_hop, hopcount,
+ ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
+ 0, sdata);
+ rcu_read_unlock();
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index da92cde..edfa036 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2355,6 +2355,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
+ /* should never get here */
+ WARN_ON(1);
+ break;
++ case MESH_PLINK_CATEGORY:
++ case MESH_PATH_SEL_CATEGORY:
++ if (ieee80211_vif_is_mesh(&sdata->vif))
++ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
++ break;
+ }
+
+ return 1;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 70c79c3..1fdc0a5 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1945,6 +1945,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+ void ieee80211_tx_pending(unsigned long data)
+ {
+ struct ieee80211_local *local = (struct ieee80211_local *)data;
++ struct ieee80211_sub_if_data *sdata;
+ unsigned long flags;
+ int i;
+ bool txok;
+@@ -1983,6 +1984,11 @@ void ieee80211_tx_pending(unsigned long data)
+ if (!txok)
+ break;
+ }
++
++ if (skb_queue_empty(&local->pending[i]))
++ list_for_each_entry_rcu(sdata, &local->interfaces, list)
++ netif_tx_wake_queue(
++ netdev_get_tx_queue(sdata->dev, i));
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 3848140..27212e8 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
+ /* someone still has this queue stopped */
+ return;
+
+- if (!skb_queue_empty(&local->pending[queue]))
++ if (skb_queue_empty(&local->pending[queue])) {
++ rcu_read_lock();
++ list_for_each_entry_rcu(sdata, &local->interfaces, list)
++ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
++ rcu_read_unlock();
++ } else
+ tasklet_schedule(&local->tx_pending_tasklet);
+-
+- rcu_read_lock();
+- list_for_each_entry_rcu(sdata, &local->interfaces, list)
+- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+- rcu_read_unlock();
+ }
+
+ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
+@@ -1145,6 +1145,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ }
+ }
+
++ rcu_read_lock();
++ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
++ list_for_each_entry_rcu(sta, &local->sta_list, list) {
++ ieee80211_sta_tear_down_BA_sessions(sta);
++ }
++ }
++ rcu_read_unlock();
++
+ /* add back keys */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (netif_running(sdata->dev))
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 9ace8eb..062a8b0 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -125,6 +125,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ "{Intel, ICH9},"
+ "{Intel, ICH10},"
+ "{Intel, PCH},"
++ "{Intel, CPT},"
+ "{Intel, SCH},"
+ "{ATI, SB450},"
+ "{ATI, SB600},"
+@@ -449,6 +450,7 @@ struct azx {
+ /* driver types */
+ enum {
+ AZX_DRIVER_ICH,
++ AZX_DRIVER_PCH,
+ AZX_DRIVER_SCH,
+ AZX_DRIVER_ATI,
+ AZX_DRIVER_ATIHDMI,
+@@ -463,6 +465,7 @@ enum {
+
+ static char *driver_short_names[] __devinitdata = {
+ [AZX_DRIVER_ICH] = "HDA Intel",
++ [AZX_DRIVER_PCH] = "HDA Intel PCH",
+ [AZX_DRIVER_SCH] = "HDA Intel MID",
+ [AZX_DRIVER_ATI] = "HDA ATI SB",
+ [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
+@@ -1065,6 +1068,7 @@ static void azx_init_pci(struct azx *chip)
+ 0x01, NVIDIA_HDA_ENABLE_COHBIT);
+ break;
+ case AZX_DRIVER_SCH:
++ case AZX_DRIVER_PCH:
+ pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
+ if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
+ pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
+@@ -2268,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
++ SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
+ {}
+ };
+@@ -2357,6 +2362,8 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
++ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
++ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
+ {}
+ };
+
+@@ -2431,6 +2438,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ if (bdl_pos_adj[dev] < 0) {
+ switch (chip->driver_type) {
+ case AZX_DRIVER_ICH:
++ case AZX_DRIVER_PCH:
+ bdl_pos_adj[dev] = 1;
+ break;
+ default:
+@@ -2709,6 +2717,8 @@ static struct pci_device_id azx_ids[] = {
+ { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
+ /* PCH */
+ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
++ /* CPT */
++ { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+ /* SCH */
+ { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
+ /* ATI SB 450/600 */
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 7069441..263bf3b 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -1805,6 +1805,14 @@ static int patch_ad1981(struct hda_codec *codec)
+ case AD1981_THINKPAD:
+ spec->mixers[0] = ad1981_thinkpad_mixers;
+ spec->input_mux = &ad1981_thinkpad_capture_source;
++ /* set the upper-limit for mixer amp to 0dB for avoiding the
++ * possible damage by overloading
++ */
++ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
++ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
++ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
++ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
++ (1 << AC_AMPCAP_MUTE_SHIFT));
+ break;
+ case AD1981_TOSHIBA:
+ spec->mixers[0] = ad1981_hp_mixers;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a79f841..bd8a567 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9074,6 +9074,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
+
+ SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
++ SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
+ SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
+ SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
+ SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
+diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
+index a83d196..32f9853 100644
+--- a/sound/pci/mixart/mixart.c
++++ b/sound/pci/mixart/mixart.c
+@@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
+ unsigned long count, unsigned long pos)
+ {
+ struct mixart_mgr *mgr = entry->private_data;
++ unsigned long maxsize;
+
+- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+- if(count <= 0)
++ if (pos >= MIXART_BA0_SIZE)
+ return 0;
+- if(pos + count > MIXART_BA0_SIZE)
+- count = (long)(MIXART_BA0_SIZE - pos);
+- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
++ maxsize = MIXART_BA0_SIZE - pos;
++ if (count > maxsize)
++ count = maxsize;
++ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
++ if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
+ return -EFAULT;
+ return count;
+ }
+@@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
+ unsigned long count, unsigned long pos)
+ {
+ struct mixart_mgr *mgr = entry->private_data;
++ unsigned long maxsize;
+
+- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+- if(count <= 0)
++ if (pos > MIXART_BA1_SIZE)
+ return 0;
+- if(pos + count > MIXART_BA1_SIZE)
+- count = (long)(MIXART_BA1_SIZE - pos);
+- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
++ maxsize = MIXART_BA1_SIZE - pos;
++ if (count > maxsize)
++ count = maxsize;
++ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
++ if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
+ return -EFAULT;
+ return count;
+ }
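Both mixart proc reads had the same flaw: count was aligned and compared while
pos could already lie past the region, so `pos + count` could overshoot before
the clamp. The fixed ordering is: reject an out-of-range pos, clamp count to
the bytes remaining, then align. As a generic sketch (REGION_SIZE and io_base
are hypothetical):

        static long bounded_read(char __user *buf, unsigned long count,
                                 unsigned long pos)
        {
                unsigned long maxsize;

                if (pos >= REGION_SIZE)                 /* 1: nothing left */
                        return 0;
                maxsize = REGION_SIZE - pos;            /* 2: clamp first... */
                if (count > maxsize)
                        count = maxsize;
                count &= ~3UL;                          /* 3: ...then align */
                if (copy_to_user_fromio(buf, io_base + pos, count))
                        return -EFAULT;
                return count;
        }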
+diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
+index b2da478..c7cb207 100644
+--- a/sound/usb/usbmidi.c
++++ b/sound/usb/usbmidi.c
+@@ -984,6 +984,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+ DEFINE_WAIT(wait);
+ long timeout = msecs_to_jiffies(50);
+
++ if (ep->umidi->disconnected)
++ return;
+ /*
+ * The substream buffer is empty, but some data might still be in the
+ * currently active URBs, so we have to wait for those to complete.
+@@ -1121,14 +1123,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
+ * Frees an output endpoint.
+ * May be called when ep hasn't been initialized completely.
+ */
+-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
++static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
+ {
+ unsigned int i;
+
+ for (i = 0; i < OUTPUT_URBS; ++i)
+- if (ep->urbs[i].urb)
++ if (ep->urbs[i].urb) {
+ free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
+ ep->max_transfer);
++ ep->urbs[i].urb = NULL;
++ }
++}
++
++static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
++{
++ snd_usbmidi_out_endpoint_clear(ep);
+ kfree(ep);
+ }
+
+@@ -1260,15 +1269,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
+ usb_kill_urb(ep->out->urbs[j].urb);
+ if (umidi->usb_protocol_ops->finish_out_endpoint)
+ umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
++ ep->out->active_urbs = 0;
++ if (ep->out->drain_urbs) {
++ ep->out->drain_urbs = 0;
++ wake_up(&ep->out->drain_wait);
++ }
+ }
+ if (ep->in)
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
+ /* free endpoints here; later call can result in Oops */
+- if (ep->out) {
+- snd_usbmidi_out_endpoint_delete(ep->out);
+- ep->out = NULL;
+- }
++ if (ep->out)
++ snd_usbmidi_out_endpoint_clear(ep->out);
+ if (ep->in) {
+ snd_usbmidi_in_endpoint_delete(ep->in);
+ ep->in = NULL;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index a944be3..9dd98cb 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -636,7 +636,7 @@ skip_lpage:
+
+ /* Allocate page dirty bitmap if needed */
+ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
+- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
++ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
+
+ new.dirty_bitmap = vmalloc(dirty_bytes);
+ if (!new.dirty_bitmap)
+@@ -719,7 +719,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
+ {
+ struct kvm_memory_slot *memslot;
+ int r, i;
+- int n;
++ unsigned long n;
+ unsigned long any = 0;
+
+ r = -EINVAL;
+@@ -731,7 +731,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+
+ for (i = 0; !any && i < n/sizeof(long); ++i)
+ any = memslot->dirty_bitmap[i];
+@@ -1073,10 +1073,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+ memslot = gfn_to_memslot_unaliased(kvm, gfn);
+ if (memslot && memslot->dirty_bitmap) {
+ unsigned long rel_gfn = gfn - memslot->base_gfn;
++ unsigned long *p = memslot->dirty_bitmap +
++ rel_gfn / BITS_PER_LONG;
++ int offset = rel_gfn % BITS_PER_LONG;
+
+ /* avoid RMW */
+- if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
+- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
++ if (!generic_test_le_bit(offset, p))
++ generic___set_le_bit(offset, p);
+ }
+ }
+
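mark_page_dirty() now splits the relative gfn into a word pointer plus an
in-word bit offset instead of handing the whole (potentially huge) bit number
to the generic little-endian bit helpers, matching the unsigned-long widening
of the bitmap size elsewhere in this patch. A worked example of the split:

        #include <stdio.h>

        #define BITS_PER_LONG   (8 * sizeof(long))

        int main(void)
        {
                unsigned long rel_gfn = 130;    /* page 130 within the memslot */

                /* 64-bit host: bit 130 lives in word 130/64 = 2 at offset
                 * 130%64 = 2 -- the two values the fixed code computes. */
                printf("word %lu, bit %lu\n",
                       rel_gfn / BITS_PER_LONG, rel_gfn % BITS_PER_LONG);
                return 0;
        }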
Modified: dists/trunk/linux-2.6/debian/patches/series/base
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/base Tue May 4 02:12:52 2010 (r15618)
+++ dists/trunk/linux-2.6/debian/patches/series/base Tue May 4 02:16:05 2010 (r15619)
@@ -48,10 +48,6 @@
+ bugfix/ia64/ia64-Include-linux-personality.h-header-in-asm-fcntl.patch
-+ bugfix/x86/kvm-fix-memory-access-during-x86-emulation.patch
-+ bugfix/x86/kvm-Check-IOPL-level-during-io-instruction-emulation.patch
-+ bugfix/x86/kvm-Fix-popf-emulation.patch
-
+ debian/sysrq-mask.patch
+ features/arm/dns323-rev-a1-powerled.patch
@@ -62,3 +58,4 @@
+ bugfix/all/stable/2.6.33.2.patch
+ debian/arch-sh4-fix-uimage-build.patch
++ bugfix/all/stable/2.6.33.3.patch