[kernel] r18686 - in people/ukleinek/3.0-rt/linux-2.6/debian: . patches/bugfix/all/stable patches/series

Uwe Kleine-König ukleinek-guest at alioth.debian.org
Sun Feb 12 11:58:21 UTC 2012


Author: ukleinek-guest
Date: Sun Feb 12 11:58:18 2012
New Revision: 18686

Log:
add 3.0.14 up to 3.0.19

Added:
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.15.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.16.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.17.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.18.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.19.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7
Modified:
   people/ukleinek/3.0-rt/linux-2.6/debian/changelog

Modified: people/ukleinek/3.0-rt/linux-2.6/debian/changelog
==============================================================================
--- people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sat Feb 11 20:23:34 2012	(r18685)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sun Feb 12 11:58:18 2012	(r18686)
@@ -1,3 +1,10 @@
+linux-2.6 (3.0.0-6ptx7) UNRELEASED; urgency=low
+
+  * Add stable releases 3.0.15 up to 3.0.19
+    3.0.16 includes "oom: fix integer overflow of points in oom_badness"
+
+ -- Uwe Kleine-König <u.kleine-koenig@pengutronix.de>  Sun, 05 Feb 2012 11:03:12 +0100
+
 linux-2.6 (3.0.0-6ptx6) unstable; urgency=low
 
   * Add stable release 3.0.13

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.15.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.15.patch	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,14 @@
+diff --git a/Makefile b/Makefile
+index f4f577b..5b8c185 100644
+diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+index 13dfaab..e4c699d 100644
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -286,7 +286,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
+ 	 * released list and do a notify add later.
+ 	 */
+ 	if (old) {
+-		old->event_handler = clockevents_handle_noop;
+ 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
+ 		list_del(&old->list);
+ 		list_add(&old->list, &clockevents_released);

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.16.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.16.patch	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,3880 @@
+diff --git a/Makefile b/Makefile
+index 5b8c185..7f0d8e2 100644
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 88bd6f7..c565971 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -133,7 +133,7 @@ static struct platform_device rx51_charger_device = {
+ static void __init rx51_charger_init(void)
+ {
+ 	WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
+-		GPIOF_OUT_INIT_LOW, "isp1704_reset"));
++		GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
+ 
+ 	platform_device_register(&rx51_charger_device);
+ }
+diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
+index c074e66..4e0a371 100644
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ 	return oprofile_perf_init(ops);
+ }
+ 
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ 	oprofile_perf_exit();
+ }
+diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
+index 7a61ef8..f4b68be 100644
+--- a/arch/arm/plat-mxc/pwm.c
++++ b/arch/arm/plat-mxc/pwm.c
+@@ -32,6 +32,9 @@
+ #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
+ #define MX3_PWMPR                 0x10    /* PWM Period Register */
+ #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
++#define MX3_PWMCR_DOZEEN                (1 << 24)
++#define MX3_PWMCR_WAITEN                (1 << 23)
++#define MX3_PWMCR_DBGEN			(1 << 22)
+ #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
+ #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
+ #define MX3_PWMCR_EN              (1 << 0)
+@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+ 		do_div(c, period_ns);
+ 		duty_cycles = c;
+ 
++		/*
++		 * according to imx pwm RM, the real period value should be
++		 * PERIOD value in PWMPR plus 2.
++		 */
++		if (period_cycles > 2)
++			period_cycles -= 2;
++		else
++			period_cycles = 0;
++
+ 		writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
+ 		writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
+ 
+-		cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
++		cr = MX3_PWMCR_PRESCALER(prescale) |
++			MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
++			MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
+ 
+ 		if (cpu_is_mx25())
+ 			cr |= MX3_PWMCR_CLKSRC_IPG;
+diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
+index 0e358c2..422110a 100644
+--- a/arch/s390/oprofile/init.c
++++ b/arch/s390/oprofile/init.c
+@@ -90,7 +90,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
+ 		return -EINVAL;
+ 
+ 	retval = oprofilefs_ulong_from_user(&val, buf, count);
+-	if (retval)
++	if (retval <= 0)
+ 		return retval;
+ 
+ 	if (oprofile_started)
+diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
+index b4c2d2b..e4dd5d5 100644
+--- a/arch/sh/oprofile/common.c
++++ b/arch/sh/oprofile/common.c
+@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ 	return oprofile_perf_init(ops);
+ }
+ 
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ 	oprofile_perf_exit();
+ 	kfree(sh_pmu_op_name);
+@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ 	ops->backtrace = sh_backtrace;
+ 	return -ENODEV;
+ }
+-void __exit oprofile_arch_exit(void) {}
++void oprofile_arch_exit(void) {}
+ #endif /* CONFIG_HW_PERF_EVENTS */
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index 5b31a8e..a790cc6 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
+ #define kern_addr_valid(addr) \
+ 	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
+ 
+-extern int io_remap_pfn_range(struct vm_area_struct *vma,
+-			      unsigned long from, unsigned long pfn,
+-			      unsigned long size, pgprot_t prot);
+-
+ /*
+  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+  * its high 4 bits.  These macros/functions put it there or get it from there.
+@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
+ #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
+ #define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
+ 
++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
++			   unsigned long, pgprot_t);
++
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++				     unsigned long from, unsigned long pfn,
++				     unsigned long size, pgprot_t prot)
++{
++	unsigned long long offset, space, phys_base;
++
++	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
++	space = GET_IOSPACE(pfn);
++	phys_base = offset | (space << 32ULL);
++
++	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
++}
++
+ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+ #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ ({									  \
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 1e03c5a..9822628 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -750,10 +750,6 @@ static inline bool kern_addr_valid(unsigned long addr)
+ 
+ extern int page_in_phys_avail(unsigned long paddr);
+ 
+-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+-			       unsigned long pfn,
+-			       unsigned long size, pgprot_t prot);
+-
+ /*
+  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+  * its high 4 bits.  These macros/functions put it there or get it from there.
+@@ -762,6 +758,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
+ #define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
+ 
++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
++			   unsigned long, pgprot_t);
++
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++				     unsigned long from, unsigned long pfn,
++				     unsigned long size, pgprot_t prot)
++{
++	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
++	int space = GET_IOSPACE(pfn);
++	unsigned long phys_base;
++
++	phys_base = offset | (((unsigned long) space) << 32UL);
++
++	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
++}
++
+ #include <asm-generic/pgtable.h>
+ 
+ /* We provide our own get_unmapped_area to cope with VA holes and
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index e27f8ea..0c218e4 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+ 
+ #else /* CONFIG_SPARC32 */
++
++#include <asm/trap_block.h>
++
+ struct popc_3insn_patch_entry {
+ 	unsigned int	addr;
+ 	unsigned int	insns[3];
+@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
+ 	__popc_6insn_patch_end;
+ 
+ extern void __init per_cpu_patch(void);
++extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
++				    struct sun4v_1insn_patch_entry *);
++extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
++				    struct sun4v_2insn_patch_entry *);
+ extern void __init sun4v_patch(void);
+ extern void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
+index 99ba5ba..8172c18 100644
+--- a/arch/sparc/kernel/module.c
++++ b/arch/sparc/kernel/module.c
+@@ -17,6 +17,8 @@
+ #include <asm/processor.h>
+ #include <asm/spitfire.h>
+ 
++#include "entry.h"
++
+ #ifdef CONFIG_SPARC64
+ 
+ #include <linux/jump_label.h>
+@@ -220,6 +222,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
+ }
+ 
+ #ifdef CONFIG_SPARC64
++static void do_patch_sections(const Elf_Ehdr *hdr,
++			      const Elf_Shdr *sechdrs)
++{
++	const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
++	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
++
++	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
++		if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
++			sun4v_1insn = s;
++		if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
++			sun4v_2insn = s;
++	}
++
++	if (sun4v_1insn && tlb_type == hypervisor) {
++		void *p = (void *) sun4v_1insn->sh_addr;
++		sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
++	}
++	if (sun4v_2insn && tlb_type == hypervisor) {
++		void *p = (void *) sun4v_2insn->sh_addr;
++		sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
++	}
++}
++
+ int module_finalize(const Elf_Ehdr *hdr,
+ 		    const Elf_Shdr *sechdrs,
+ 		    struct module *me)
+@@ -227,6 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 	/* make jump label nops */
+ 	jump_label_apply_nops(me);
+ 
++	do_patch_sections(hdr, sechdrs);
++
+ 	/* Cheetah's I-cache is fully coherent.  */
+ 	if (tlb_type == spitfire) {
+ 		unsigned long va;
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index b01a06e..9e73c4a 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+ 	if (!irq)
+ 		return -ENOMEM;
+ 
+-	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+-		return -EINVAL;
+ 	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+ 		return -EINVAL;
++	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
++		return -EINVAL;
+ 
+ 	return irq;
+ }
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3c5bb7842..4e7d3ff 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
+ 	}
+ }
+ 
+-void __init sun4v_patch(void)
++void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
++			     struct sun4v_1insn_patch_entry *end)
+ {
+-	extern void sun4v_hvapi_init(void);
+-	struct sun4v_1insn_patch_entry *p1;
+-	struct sun4v_2insn_patch_entry *p2;
+-
+-	if (tlb_type != hypervisor)
+-		return;
++	while (start < end) {
++		unsigned long addr = start->addr;
+ 
+-	p1 = &__sun4v_1insn_patch;
+-	while (p1 < &__sun4v_1insn_patch_end) {
+-		unsigned long addr = p1->addr;
+-
+-		*(unsigned int *) (addr +  0) = p1->insn;
++		*(unsigned int *) (addr +  0) = start->insn;
+ 		wmb();
+ 		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
+ 
+-		p1++;
++		start++;
+ 	}
++}
+ 
+-	p2 = &__sun4v_2insn_patch;
+-	while (p2 < &__sun4v_2insn_patch_end) {
+-		unsigned long addr = p2->addr;
++void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
++			     struct sun4v_2insn_patch_entry *end)
++{
++	while (start < end) {
++		unsigned long addr = start->addr;
+ 
+-		*(unsigned int *) (addr +  0) = p2->insns[0];
++		*(unsigned int *) (addr +  0) = start->insns[0];
+ 		wmb();
+ 		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
+ 
+-		*(unsigned int *) (addr +  4) = p2->insns[1];
++		*(unsigned int *) (addr +  4) = start->insns[1];
+ 		wmb();
+ 		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));
+ 
+-		p2++;
++		start++;
+ 	}
++}
++
++void __init sun4v_patch(void)
++{
++	extern void sun4v_hvapi_init(void);
++
++	if (tlb_type != hypervisor)
++		return;
++
++	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
++				&__sun4v_1insn_patch_end);
++
++	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
++				&__sun4v_2insn_patch_end);
+ 
+ 	sun4v_hvapi_init();
+ }
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 5d92488..2e58328 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -829,21 +829,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
+  * want to handle. Thus you cannot kill init even with a SIGKILL even by
+  * mistake.
+  */
+-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
+-		 int restart_syscall, unsigned long orig_i0)
++void do_signal32(sigset_t *oldset, struct pt_regs * regs)
+ {
+ 	struct k_sigaction ka;
++	unsigned long orig_i0;
++	int restart_syscall;
+ 	siginfo_t info;
+ 	int signr;
+ 	
+ 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ 
+-	/* If the debugger messes with the program counter, it clears
+-	 * the "in syscall" bit, directing us to not perform a syscall
+-	 * restart.
+-	 */
+-	if (restart_syscall && !pt_regs_is_syscall(regs))
+-		restart_syscall = 0;
++	restart_syscall = 0;
++	orig_i0 = 0;
++	if (pt_regs_is_syscall(regs) &&
++	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
++		restart_syscall = 1;
++		orig_i0 = regs->u_regs[UREG_G6];
++	}
+ 
+ 	if (signr > 0) {
+ 		if (restart_syscall)
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 04ede8f..2302567 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -525,10 +525,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ 	siginfo_t info;
+ 	int signr;
+ 
++	/* It's a lot of work and synchronization to add a new ptrace
++	 * register for GDB to save and restore in order to get
++	 * orig_i0 correct for syscall restarts when debugging.
++	 *
++	 * Although it should be the case that most of the global
++	 * registers are volatile across a system call, glibc already
++	 * depends upon that fact that we preserve them.  So we can't
++	 * just use any global register to save away the orig_i0 value.
++	 *
++	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
++	 * preserved across a system call trap by various pieces of
++	 * code in glibc.
++	 *
++	 * %g7 is used as the "thread register".   %g6 is not used in
++	 * any fixed manner.  %g6 is used as a scratch register and
++	 * a compiler temporary, but it's value is never used across
++	 * a system call.  Therefore %g6 is usable for orig_i0 storage.
++	 */
+ 	if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
+-		restart_syscall = 1;
+-	else
+-		restart_syscall = 0;
++		regs->u_regs[UREG_G6] = orig_i0;
+ 
+ 	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ 		oldset = &current->saved_sigmask;
+@@ -541,8 +557,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ 	 * the software "in syscall" bit, directing us to not perform
+ 	 * a syscall restart.
+ 	 */
+-	if (restart_syscall && !pt_regs_is_syscall(regs))
+-		restart_syscall = 0;
++	restart_syscall = 0;
++	if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
++		restart_syscall = 1;
++		orig_i0 = regs->u_regs[UREG_G6];
++	}
++
+ 
+ 	if (signr > 0) {
+ 		if (restart_syscall)
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index 47509df..d58260b 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -535,11 +535,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ 	siginfo_t info;
+ 	int signr;
+ 	
++	/* It's a lot of work and synchronization to add a new ptrace
++	 * register for GDB to save and restore in order to get
++	 * orig_i0 correct for syscall restarts when debugging.
++	 *
++	 * Although it should be the case that most of the global
++	 * registers are volatile across a system call, glibc already
++	 * depends upon that fact that we preserve them.  So we can't
++	 * just use any global register to save away the orig_i0 value.
++	 *
++	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
++	 * preserved across a system call trap by various pieces of
++	 * code in glibc.
++	 *
++	 * %g7 is used as the "thread register".   %g6 is not used in
++	 * any fixed manner.  %g6 is used as a scratch register and
++	 * a compiler temporary, but it's value is never used across
++	 * a system call.  Therefore %g6 is usable for orig_i0 storage.
++	 */
+ 	if (pt_regs_is_syscall(regs) &&
+-	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+-		restart_syscall = 1;
+-	} else
+-		restart_syscall = 0;
++	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
++		regs->u_regs[UREG_G6] = orig_i0;
+ 
+ 	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ 		oldset = &current->saved_sigmask;
+@@ -548,22 +564,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ 
+ #ifdef CONFIG_COMPAT
+ 	if (test_thread_flag(TIF_32BIT)) {
+-		extern void do_signal32(sigset_t *, struct pt_regs *,
+-					int restart_syscall,
+-					unsigned long orig_i0);
+-		do_signal32(oldset, regs, restart_syscall, orig_i0);
++		extern void do_signal32(sigset_t *, struct pt_regs *);
++		do_signal32(oldset, regs);
+ 		return;
+ 	}
+ #endif	
+ 
+ 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ 
+-	/* If the debugger messes with the program counter, it clears
+-	 * the software "in syscall" bit, directing us to not perform
+-	 * a syscall restart.
+-	 */
+-	if (restart_syscall && !pt_regs_is_syscall(regs))
+-		restart_syscall = 0;
++	restart_syscall = 0;
++	if (pt_regs_is_syscall(regs) &&
++	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
++		restart_syscall = 1;
++		orig_i0 = regs->u_regs[UREG_G6];
++	}
+ 
+ 	if (signr > 0) {
+ 		if (restart_syscall)
+diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
+index 3635771..9384a0c 100644
+--- a/arch/sparc/kernel/visemul.c
++++ b/arch/sparc/kernel/visemul.c
+@@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ 			s16 b = (rs2 >> (i * 16)) & 0xffff;
+ 
+ 			if (a > b)
+-				rd_val |= 1 << i;
++				rd_val |= 8 >> i;
+ 		}
+ 		break;
+ 
+ 	case FCMPGT32_OPF:
+ 		for (i = 0; i < 2; i++) {
+-			s32 a = (rs1 >> (i * 32)) & 0xffff;
+-			s32 b = (rs2 >> (i * 32)) & 0xffff;
++			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++			s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+ 
+ 			if (a > b)
+-				rd_val |= 1 << i;
++				rd_val |= 2 >> i;
+ 		}
+ 		break;
+ 
+@@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ 			s16 b = (rs2 >> (i * 16)) & 0xffff;
+ 
+ 			if (a <= b)
+-				rd_val |= 1 << i;
++				rd_val |= 8 >> i;
+ 		}
+ 		break;
+ 
+ 	case FCMPLE32_OPF:
+ 		for (i = 0; i < 2; i++) {
+-			s32 a = (rs1 >> (i * 32)) & 0xffff;
+-			s32 b = (rs2 >> (i * 32)) & 0xffff;
++			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++			s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+ 
+ 			if (a <= b)
+-				rd_val |= 1 << i;
++				rd_val |= 2 >> i;
+ 		}
+ 		break;
+ 
+@@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ 			s16 b = (rs2 >> (i * 16)) & 0xffff;
+ 
+ 			if (a != b)
+-				rd_val |= 1 << i;
++				rd_val |= 8 >> i;
+ 		}
+ 		break;
+ 
+ 	case FCMPNE32_OPF:
+ 		for (i = 0; i < 2; i++) {
+-			s32 a = (rs1 >> (i * 32)) & 0xffff;
+-			s32 b = (rs2 >> (i * 32)) & 0xffff;
++			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++			s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+ 
+ 			if (a != b)
+-				rd_val |= 1 << i;
++				rd_val |= 2 >> i;
+ 		}
+ 		break;
+ 
+@@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ 			s16 b = (rs2 >> (i * 16)) & 0xffff;
+ 
+ 			if (a == b)
+-				rd_val |= 1 << i;
++				rd_val |= 8 >> i;
+ 		}
+ 		break;
+ 
+ 	case FCMPEQ32_OPF:
+ 		for (i = 0; i < 2; i++) {
+-			s32 a = (rs1 >> (i * 32)) & 0xffff;
+-			s32 b = (rs2 >> (i * 32)) & 0xffff;
++			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++			s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+ 
+ 			if (a == b)
+-				rd_val |= 1 << i;
++				rd_val |= 2 >> i;
+ 		}
+ 		break;
+ 	}
+diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
+index 34fe657..4d8c497 100644
+--- a/arch/sparc/lib/memcpy.S
++++ b/arch/sparc/lib/memcpy.S
+@@ -7,40 +7,12 @@
+  * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+  */
+ 
+-#ifdef __KERNEL__
+-
+-#define FUNC(x) 											\
++#define FUNC(x) 		\
+ 	.globl	x;		\
+ 	.type	x,@function;	\
+-	.align	4;											\
++	.align	4;		\
+ x:
+ 
+-#undef FASTER_REVERSE
+-#undef FASTER_NONALIGNED
+-#define FASTER_ALIGNED
+-
+-/* In kernel these functions don't return a value.
+- * One should use macros in asm/string.h for that purpose.
+- * We return 0, so that bugs are more apparent.
+- */
+-#define SETUP_RETL
+-#define RETL_INSN	clr	%o0
+-
+-#else
+-
+-/* libc */
+-
+-#include "DEFS.h"
+-
+-#define FASTER_REVERSE
+-#define FASTER_NONALIGNED
+-#define FASTER_ALIGNED
+-
+-#define SETUP_RETL	mov	%o0, %g6
+-#define RETL_INSN	mov	%g6, %o0
+-
+-#endif
+-
+ /* Both these macros have to start with exactly the same insn */
+ #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ 	ldd	[%src + (offset) + 0x00], %t0; \
+@@ -164,30 +136,6 @@ x:
+ 	.text
+ 	.align	4
+ 
+-#ifdef FASTER_REVERSE
+-
+-70:	/* rdword_align */
+-
+-	andcc		%o1, 1, %g0
+-	be		4f
+-	 andcc		%o1, 2, %g0
+-
+-	ldub		[%o1 - 1], %g2
+-	sub		%o1, 1, %o1
+-	stb		%g2, [%o0 - 1]
+-	sub		%o2, 1, %o2
+-	be		3f
+-	 sub		%o0, 1, %o0
+-4:
+-	lduh		[%o1 - 2], %g2
+-	sub		%o1, 2, %o1
+-	sth		%g2, [%o0 - 2]
+-	sub		%o2, 2, %o2
+-	b		3f
+-	 sub		%o0, 2, %o0
+-
+-#endif /* FASTER_REVERSE */
+-
+ 0:
+ 	retl
+ 	 nop		! Only bcopy returns here and it retuns void...
+@@ -198,7 +146,7 @@ FUNC(__memmove)
+ #endif
+ FUNC(memmove)
+ 	cmp		%o0, %o1
+-	SETUP_RETL
++	mov		%o0, %g7
+ 	bleu		9f
+ 	 sub		%o0, %o1, %o4
+ 
+@@ -207,8 +155,6 @@ FUNC(memmove)
+ 	bleu		0f
+ 	 andcc		%o4, 3, %o5
+ 
+-#ifndef FASTER_REVERSE
+-
+ 	add		%o1, %o2, %o1
+ 	add		%o0, %o2, %o0
+ 	sub		%o1, 1, %o1
+@@ -224,295 +170,7 @@ FUNC(memmove)
+ 	 sub		%o0, 1, %o0
+ 
+ 	retl
+-	 RETL_INSN
+-
+-#else /* FASTER_REVERSE */
+-
+-	add		%o1, %o2, %o1
+-	add		%o0, %o2, %o0
+-	bne		77f
+-	 cmp		%o2, 15
+-	bleu		91f
+-	 andcc		%o1, 3, %g0
+-	bne		70b
+-3:
+-	 andcc		%o1, 4, %g0
+-
+-	be		2f
+-	 mov		%o2, %g1
+-
+-	ld		[%o1 - 4], %o4
+-	sub		%g1, 4, %g1
+-	st		%o4, [%o0 - 4]
+-	sub		%o1, 4, %o1
+-	sub		%o0, 4, %o0
+-2:
+-	andcc		%g1, 0xffffff80, %g7
+-	be		3f
+-	 andcc		%o0, 4, %g0
+-
+-	be		74f + 4
+-5:
+-	RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+-	subcc		%g7, 128, %g7
+-	sub		%o1, 128, %o1
+-	bne		5b
+-	 sub		%o0, 128, %o0
+-3:
+-	andcc		%g1, 0x70, %g7
+-	be		72f
+-	 andcc		%g1, 8, %g0
+-
+-	sethi		%hi(72f), %o5
+-	srl		%g7, 1, %o4
+-	add		%g7, %o4, %o4
+-	sub		%o1, %g7, %o1
+-	sub		%o5, %o4, %o5
+-	jmpl		%o5 + %lo(72f), %g0
+-	 sub		%o0, %g7, %o0
+-
+-71:	/* rmemcpy_table */
+-	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+-	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+-
+-72:	/* rmemcpy_table_end */
+-
+-	be		73f
+-	 andcc		%g1, 4, %g0
+-
+-	ldd		[%o1 - 0x08], %g2
+-	sub		%o0, 8, %o0
+-	sub		%o1, 8, %o1
+-	st		%g2, [%o0]
+-	st		%g3, [%o0 + 0x04]
+-
+-73:	/* rmemcpy_last7 */
+-
+-	be		1f
+-	 andcc		%g1, 2, %g0
+-
+-	ld		[%o1 - 4], %g2
+-	sub		%o1, 4, %o1
+-	st		%g2, [%o0 - 4]
+-	sub		%o0, 4, %o0
+-1:
+-	be		1f
+-	 andcc		%g1, 1, %g0
+-
+-	lduh		[%o1 - 2], %g2
+-	sub		%o1, 2, %o1
+-	sth		%g2, [%o0 - 2]
+-	sub		%o0, 2, %o0
+-1:
+-	be		1f
+-	 nop
+-
+-	ldub		[%o1 - 1], %g2
+-	stb		%g2, [%o0 - 1]
+-1:
+-	retl
+- 	 RETL_INSN
+-
+-74:	/* rldd_std */
+-	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+-	RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+-	subcc		%g7, 128, %g7
+-	sub		%o1, 128, %o1
+-	bne		74b
+-	 sub		%o0, 128, %o0
+-
+-	andcc		%g1, 0x70, %g7
+-	be		72b
+-	 andcc		%g1, 8, %g0
+-
+-	sethi		%hi(72b), %o5
+-	srl		%g7, 1, %o4
+-	add		%g7, %o4, %o4
+-	sub		%o1, %g7, %o1
+-	sub		%o5, %o4, %o5
+-	jmpl		%o5 + %lo(72b), %g0
+-	 sub		%o0, %g7, %o0
+-
+-75:	/* rshort_end */
+-
+-	and		%o2, 0xe, %o3
+-2:
+-	sethi		%hi(76f), %o5
+-	sll		%o3, 3, %o4
+-	sub		%o0, %o3, %o0
+-	sub		%o5, %o4, %o5
+-	sub		%o1, %o3, %o1
+-	jmpl		%o5 + %lo(76f), %g0
+-	 andcc		%o2, 1, %g0
+-
+-	RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+-	RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+-
+-76:	/* rshort_table_end */
+-
+-	be		1f
+-	 nop
+-	ldub		[%o1 - 1], %g2
+-	stb		%g2, [%o0 - 1]
+-1:
+-	retl
+- 	 RETL_INSN
+-
+-91:	/* rshort_aligned_end */
+-
+-	bne		75b
+-	 andcc		%o2, 8, %g0
+-
+-	be		1f
+-	 andcc		%o2, 4, %g0
+-
+-	ld		[%o1 - 0x08], %g2
+-	ld		[%o1 - 0x04], %g3
+-	sub		%o1, 8, %o1
+-	st		%g2, [%o0 - 0x08]
+-	st		%g3, [%o0 - 0x04]
+-	sub		%o0, 8, %o0
+-1:
+-	b		73b
+-	 mov		%o2, %g1
+-
+-77:	/* rnon_aligned */
+-	cmp		%o2, 15
+-	bleu		75b
+-	 andcc		%o0, 3, %g0
+-	be		64f
+-	 andcc		%o0, 1, %g0
+-	be		63f
+-	 andcc		%o0, 2, %g0
+-	ldub		[%o1 - 1], %g5
+-	sub		%o1, 1, %o1
+-	stb		%g5, [%o0 - 1]
+-	sub		%o0, 1, %o0
+-	be		64f
+-	 sub		%o2, 1, %o2
+-63:
+-	ldub		[%o1 - 1], %g5
+-	sub		%o1, 2, %o1
+-	stb		%g5, [%o0 - 1]
+-	sub		%o0, 2, %o0
+-	ldub		[%o1], %g5
+-	sub		%o2, 2, %o2
+-	stb		%g5, [%o0]
+-64:	
+-	and		%o1, 3, %g2
+-	and		%o1, -4, %o1
+-	and		%o2, 0xc, %g3
+-	add		%o1, 4, %o1
+-	cmp		%g3, 4
+-	sll		%g2, 3, %g4
+-	mov		32, %g2
+-	be		4f
+-	 sub		%g2, %g4, %g7
+-
+-	blu		3f
+-	 cmp		%g3, 8
+-
+-	be		2f
+-	 srl		%o2, 2, %g3
+-
+-	ld		[%o1 - 4], %o3
+-	add		%o0, -8, %o0
+-	ld		[%o1 - 8], %o4
+-	add		%o1, -16, %o1
+-	b		7f
+-	 add		%g3, 1, %g3
+-2:
+-	ld		[%o1 - 4], %o4
+-	add		%o0, -4, %o0
+-	ld		[%o1 - 8], %g1
+-	add		%o1, -12, %o1
+-	b		8f
+-	 add		%g3, 2, %g3
+-3:
+-	ld		[%o1 - 4], %o5
+-	add		%o0, -12, %o0
+-	ld		[%o1 - 8], %o3
+-	add		%o1, -20, %o1
+-	b		6f
+-	 srl		%o2, 2, %g3
+-4:
+-	ld		[%o1 - 4], %g1
+-	srl		%o2, 2, %g3
+-	ld		[%o1 - 8], %o5
+-	add		%o1, -24, %o1
+-	add		%o0, -16, %o0
+-	add		%g3, -1, %g3
+-
+-	ld		[%o1 + 12], %o3
+-5:
+-	sll		%o5, %g4, %g2
+-	srl		%g1, %g7, %g5
+-	or		%g2, %g5, %g2
+-	st		%g2, [%o0 + 12]
+-6:
+-	ld		[%o1 + 8], %o4
+-	sll		%o3, %g4, %g2
+-	srl		%o5, %g7, %g5
+-	or		%g2, %g5, %g2
+-	st		%g2, [%o0 + 8]
+-7:
+-	ld		[%o1 + 4], %g1
+-	sll		%o4, %g4, %g2
+-	srl		%o3, %g7, %g5
+-	or		%g2, %g5, %g2
+-	st		%g2, [%o0 + 4]
+-8:
+-	ld		[%o1], %o5
+-	sll		%g1, %g4, %g2
+-	srl		%o4, %g7, %g5
+-	addcc		%g3, -4, %g3
+-	or		%g2, %g5, %g2
+-	add		%o1, -16, %o1
+-	st		%g2, [%o0]
+-	add		%o0, -16, %o0
+-	bne,a		5b	
+-	 ld		[%o1 + 12], %o3
+-	sll		%o5, %g4, %g2
+-	srl		%g1, %g7, %g5
+-	srl		%g4, 3, %g3
+-	or		%g2, %g5, %g2
+-	add		%o1, %g3, %o1
+-	andcc		%o2, 2, %g0
+-	st		%g2, [%o0 + 12]
+-	be		1f
+-	 andcc		%o2, 1, %g0
+-	
+-	ldub		[%o1 + 15], %g5
+-	add		%o1, -2, %o1
+-	stb		%g5, [%o0 + 11]
+-	add		%o0, -2, %o0
+-	ldub		[%o1 + 16], %g5
+-	stb		%g5, [%o0 + 12]
+-1:
+-	be		1f
+-	 nop
+-	ldub		[%o1 + 15], %g5
+-	stb		%g5, [%o0 + 11]
+-1:
+-	retl
+-	 RETL_INSN
+-
+-#endif /* FASTER_REVERSE */
++	 mov		%g7, %o0
+ 
+ /* NOTE: This code is executed just for the cases,
+          where %src (=%o1) & 3 is != 0.
+@@ -546,7 +204,7 @@ FUNC(memmove)
+ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 
+ 	sub		%o0, %o1, %o4
+-	SETUP_RETL
++	mov		%o0, %g7
+ 9:
+ 	andcc		%o4, 3, %o5
+ 0:
+@@ -569,7 +227,7 @@ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 	add		%o1, 4, %o1
+ 	add		%o0, 4, %o0
+ 2:
+-	andcc		%g1, 0xffffff80, %g7
++	andcc		%g1, 0xffffff80, %g0
+ 	be		3f
+ 	 andcc		%o0, 4, %g0
+ 
+@@ -579,22 +237,23 @@ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ 	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ 	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+-	subcc		%g7, 128, %g7
++	sub		%g1, 128, %g1
+ 	add		%o1, 128, %o1
+-	bne		5b
++	cmp		%g1, 128
++	bge		5b
+ 	 add		%o0, 128, %o0
+ 3:
+-	andcc		%g1, 0x70, %g7
++	andcc		%g1, 0x70, %g4
+ 	be		80f
+ 	 andcc		%g1, 8, %g0
+ 
+ 	sethi		%hi(80f), %o5
+-	srl		%g7, 1, %o4
+-	add		%g7, %o4, %o4
+-	add		%o1, %g7, %o1
++	srl		%g4, 1, %o4
++	add		%g4, %o4, %o4
++	add		%o1, %g4, %o1
+ 	sub		%o5, %o4, %o5
+ 	jmpl		%o5 + %lo(80f), %g0
+-	 add		%o0, %g7, %o0
++	 add		%o0, %g4, %o0
+ 
+ 79:	/* memcpy_table */
+ 
+@@ -641,43 +300,28 @@ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 	stb		%g2, [%o0]
+ 1:
+ 	retl
+- 	 RETL_INSN
++	 mov		%g7, %o0
+ 
+ 82:	/* ldd_std */
+ 	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ 	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ 	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ 	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+-	subcc		%g7, 128, %g7
++	subcc		%g1, 128, %g1
+ 	add		%o1, 128, %o1
+-	bne		82b
++	cmp		%g1, 128
++	bge		82b
+ 	 add		%o0, 128, %o0
+ 
+-#ifndef FASTER_ALIGNED
+-
+-	andcc		%g1, 0x70, %g7
+-	be		80b
+-	 andcc		%g1, 8, %g0
+-
+-	sethi		%hi(80b), %o5
+-	srl		%g7, 1, %o4
+-	add		%g7, %o4, %o4
+-	add		%o1, %g7, %o1
+-	sub		%o5, %o4, %o5
+-	jmpl		%o5 + %lo(80b), %g0
+-	 add		%o0, %g7, %o0
+-
+-#else /* FASTER_ALIGNED */
+-
+-	andcc		%g1, 0x70, %g7
++	andcc		%g1, 0x70, %g4
+ 	be		84f
+ 	 andcc		%g1, 8, %g0
+ 
+ 	sethi		%hi(84f), %o5
+-	add		%o1, %g7, %o1
+-	sub		%o5, %g7, %o5
++	add		%o1, %g4, %o1
++	sub		%o5, %g4, %o5
+ 	jmpl		%o5 + %lo(84f), %g0
+-	 add		%o0, %g7, %o0
++	 add		%o0, %g4, %o0
+ 
+ 83:	/* amemcpy_table */
+ 
+@@ -721,382 +365,132 @@ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 	stb		%g2, [%o0]
+ 1:
+ 	retl
+- 	 RETL_INSN
+-
+-#endif /* FASTER_ALIGNED */
++	 mov		%g7, %o0
+ 
+ 86:	/* non_aligned */
+ 	cmp		%o2, 6
+ 	bleu		88f
++	 nop
+ 
+-#ifdef FASTER_NONALIGNED
+-
+-	 cmp		%o2, 256
+-	bcc		87f
+-
+-#endif /* FASTER_NONALIGNED */
+-
+-	 andcc		%o0, 3, %g0
++	save		%sp, -96, %sp
++	andcc		%i0, 3, %g0
+ 	be		61f
+-	 andcc		%o0, 1, %g0
++	 andcc		%i0, 1, %g0
+ 	be		60f
+-	 andcc		%o0, 2, %g0
++	 andcc		%i0, 2, %g0
+ 
+-	ldub		[%o1], %g5
+-	add		%o1, 1, %o1
+-	stb		%g5, [%o0]
+-	sub		%o2, 1, %o2
++	ldub		[%i1], %g5
++	add		%i1, 1, %i1
++	stb		%g5, [%i0]
++	sub		%i2, 1, %i2
+ 	bne		61f
+-	 add		%o0, 1, %o0
++	 add		%i0, 1, %i0
+ 60:
+-	ldub		[%o1], %g3
+-	add		%o1, 2, %o1
+-	stb		%g3, [%o0]
+-	sub		%o2, 2, %o2
+-	ldub		[%o1 - 1], %g3
+-	add		%o0, 2, %o0
+-	stb		%g3, [%o0 - 1]
++	ldub		[%i1], %g3
++	add		%i1, 2, %i1
++	stb		%g3, [%i0]
++	sub		%i2, 2, %i2
++	ldub		[%i1 - 1], %g3
++	add		%i0, 2, %i0
++	stb		%g3, [%i0 - 1]
+ 61:
+-	and		%o1, 3, %g2
+-	and		%o2, 0xc, %g3
+-	and		%o1, -4, %o1
++	and		%i1, 3, %g2
++	and		%i2, 0xc, %g3
++	and		%i1, -4, %i1
+ 	cmp		%g3, 4
+ 	sll		%g2, 3, %g4
+ 	mov		32, %g2
+ 	be		4f
+-	 sub		%g2, %g4, %g7
++	 sub		%g2, %g4, %l0
+ 	
+ 	blu		3f
+ 	 cmp		%g3, 0x8
+ 
+ 	be		2f
+-	 srl		%o2, 2, %g3
++	 srl		%i2, 2, %g3
+ 
+-	ld		[%o1], %o3
+-	add		%o0, -8, %o0
+-	ld		[%o1 + 4], %o4
++	ld		[%i1], %i3
++	add		%i0, -8, %i0
++	ld		[%i1 + 4], %i4
+ 	b		8f
+ 	 add		%g3, 1, %g3
+ 2:
+-	ld		[%o1], %o4
+-	add		%o0, -12, %o0
+-	ld		[%o1 + 4], %o5
++	ld		[%i1], %i4
++	add		%i0, -12, %i0
++	ld		[%i1 + 4], %i5
+ 	add		%g3, 2, %g3
+ 	b		9f
+-	 add		%o1, -4, %o1
++	 add		%i1, -4, %i1
+ 3:
+-	ld		[%o1], %g1
+-	add		%o0, -4, %o0
+-	ld		[%o1 + 4], %o3
+-	srl		%o2, 2, %g3
++	ld		[%i1], %g1
++	add		%i0, -4, %i0
++	ld		[%i1 + 4], %i3
++	srl		%i2, 2, %g3
+ 	b		7f
+-	 add		%o1, 4, %o1
++	 add		%i1, 4, %i1
+ 4:
+-	ld		[%o1], %o5
+-	cmp		%o2, 7
+-	ld		[%o1 + 4], %g1
+-	srl		%o2, 2, %g3
++	ld		[%i1], %i5
++	cmp		%i2, 7
++	ld		[%i1 + 4], %g1
++	srl		%i2, 2, %g3
+ 	bleu		10f
+-	 add		%o1, 8, %o1
++	 add		%i1, 8, %i1
+ 
+-	ld		[%o1], %o3
++	ld		[%i1], %i3
+ 	add		%g3, -1, %g3
+ 5:
+-	sll		%o5, %g4, %g2
+-	srl		%g1, %g7, %g5
++	sll		%i5, %g4, %g2
++	srl		%g1, %l0, %g5
+ 	or		%g2, %g5, %g2
+-	st		%g2, [%o0]
++	st		%g2, [%i0]
+ 7:
+-	ld		[%o1 + 4], %o4
++	ld		[%i1 + 4], %i4
+ 	sll		%g1, %g4, %g2
+-	srl		%o3, %g7, %g5
++	srl		%i3, %l0, %g5
+ 	or		%g2, %g5, %g2
+-	st		%g2, [%o0 + 4]
++	st		%g2, [%i0 + 4]
+ 8:
+-	ld		[%o1 + 8], %o5
+-	sll		%o3, %g4, %g2
+-	srl		%o4, %g7, %g5
++	ld		[%i1 + 8], %i5
++	sll		%i3, %g4, %g2
++	srl		%i4, %l0, %g5
+ 	or		%g2, %g5, %g2
+-	st		%g2, [%o0 + 8]
++	st		%g2, [%i0 + 8]
+ 9:
+-	ld		[%o1 + 12], %g1
+-	sll		%o4, %g4, %g2
+-	srl		%o5, %g7, %g5
++	ld		[%i1 + 12], %g1
++	sll		%i4, %g4, %g2
++	srl		%i5, %l0, %g5
+ 	addcc		%g3, -4, %g3
+ 	or		%g2, %g5, %g2
+-	add		%o1, 16, %o1
+-	st		%g2, [%o0 + 12]
+-	add		%o0, 16, %o0
++	add		%i1, 16, %i1
++	st		%g2, [%i0 + 12]
++	add		%i0, 16, %i0
+ 	bne,a		5b
+-	 ld		[%o1], %o3
++	 ld		[%i1], %i3
+ 10:
+-	sll		%o5, %g4, %g2
+-	srl		%g1, %g7, %g5
+-	srl		%g7, 3, %g3
++	sll		%i5, %g4, %g2
++	srl		%g1, %l0, %g5
++	srl		%l0, 3, %g3
+ 	or		%g2, %g5, %g2
+-	sub		%o1, %g3, %o1
+-	andcc		%o2, 2, %g0
+-	st		%g2, [%o0]
++	sub		%i1, %g3, %i1
++	andcc		%i2, 2, %g0
++	st		%g2, [%i0]
+ 	be		1f
+-	 andcc		%o2, 1, %g0
+-
+-	ldub		[%o1], %g2
+-	add		%o1, 2, %o1
+-	stb		%g2, [%o0 + 4]
+-	add		%o0, 2, %o0
+-	ldub		[%o1 - 1], %g2
+-	stb		%g2, [%o0 + 3]
++	 andcc		%i2, 1, %g0
++
++	ldub		[%i1], %g2
++	add		%i1, 2, %i1
++	stb		%g2, [%i0 + 4]
++	add		%i0, 2, %i0
++	ldub		[%i1 - 1], %g2
++	stb		%g2, [%i0 + 3]
+ 1:
+ 	be		1f
+ 	 nop
+-	ldub		[%o1], %g2
+-	stb		%g2, [%o0 + 4]
+-1:
+-	retl
+-	 RETL_INSN
+-
+-#ifdef FASTER_NONALIGNED
+-
+-87:	/* faster_nonaligned */
+-
+-	andcc		%o1, 3, %g0
+-	be		3f
+-	 andcc		%o1, 1, %g0
+-
+-	be		4f
+-	 andcc		%o1, 2, %g0
+-
+-	ldub		[%o1], %g2
+-	add		%o1, 1, %o1
+-	stb		%g2, [%o0]
+-	sub		%o2, 1, %o2
+-	bne		3f
+-	 add		%o0, 1, %o0
+-4:
+-	lduh		[%o1], %g2
+-	add		%o1, 2, %o1
+-	srl		%g2, 8, %g3
+-	sub		%o2, 2, %o2
+-	stb		%g3, [%o0]
+-	add		%o0, 2, %o0
+-	stb		%g2, [%o0 - 1]
+-3:
+-	 andcc		%o1, 4, %g0
+-
+-	bne		2f
+-	 cmp		%o5, 1
+-
+-	ld		[%o1], %o4
+-	srl		%o4, 24, %g2
+-	stb		%g2, [%o0]
+-	srl		%o4, 16, %g3
+-	stb		%g3, [%o0 + 1]
+-	srl		%o4, 8, %g2
+-	stb		%g2, [%o0 + 2]
+-	sub		%o2, 4, %o2
+-	stb		%o4, [%o0 + 3]
+-	add		%o1, 4, %o1
+-	add		%o0, 4, %o0
+-2:
+-	be		33f
+-	 cmp		%o5, 2
+-	be		32f
+-	 sub		%o2, 4, %o2
+-31:
+-	ld		[%o1], %g2
+-	add		%o1, 4, %o1
+-	srl		%g2, 24, %g3
+-	and		%o0, 7, %g5
+-	stb		%g3, [%o0]
+-	cmp		%g5, 7
+-	sll		%g2, 8, %g1
+-	add		%o0, 4, %o0
+-	be		41f
+-	 and		%o2, 0xffffffc0, %o3
+-	ld		[%o0 - 7], %o4
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		4b
+-	 add		%o0, 64, %o0
+-
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 16, %g2
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 16, %g2
+-1:
+-	st		%o4, [%o0 - 7]
+-	sth		%g2, [%o0 - 3]
+-	srl		%g1, 8, %g4
+-	b		88f
+-	 stb		%g4, [%o0 - 1]
+-32:
+-	ld		[%o1], %g2
+-	add		%o1, 4, %o1
+-	srl		%g2, 16, %g3
+-	and		%o0, 7, %g5
+-	sth		%g3, [%o0]
+-	cmp		%g5, 6
+-	sll		%g2, 16, %g1
+-	add		%o0, 4, %o0
+-	be		42f
+-	 and		%o2, 0xffffffc0, %o3
+-	ld		[%o0 - 6], %o4
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		4b
+-	 add		%o0, 64, %o0
+-
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 16, %g2
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 16, %g2
+-1:
+-	st		%o4, [%o0 - 6]
+-	b		88f
+-	 sth		%g2, [%o0 - 2]
+-33:
+-	ld		[%o1], %g2
+-	sub		%o2, 4, %o2
+-	srl		%g2, 24, %g3
+-	and		%o0, 7, %g5
+-	stb		%g3, [%o0]
+-	cmp		%g5, 5
+-	srl		%g2, 8, %g4
+-	sll		%g2, 24, %g1
+-	sth		%g4, [%o0 + 1]
+-	add		%o1, 4, %o1
+-	be		43f
+-	 and		%o2, 0xffffffc0, %o3
+-
+-	ld		[%o0 - 1], %o4
+-	add		%o0, 4, %o0
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+-	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+-	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+-	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		4b
+-	 add		%o0, 64, %o0
+-
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 24, %g2
+-4:
+-	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 24, %g2
+-1:
+-	st		%o4, [%o0 - 5]
+-	b		88f
+-	 stb		%g2, [%o0 - 1]
+-41:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		41b
+-	 add		%o0, 64, %o0
+-	 
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 16, %g2
+-4:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 16, %g2
++	ldub		[%i1], %g2
++	stb		%g2, [%i0 + 4]
+ 1:
+-	sth		%g2, [%o0 - 3]
+-	srl		%g1, 8, %g4
+-	b		88f
+-	 stb		%g4, [%o0 - 1]
+-43:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		43b
+-	 add		%o0, 64, %o0
+-
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 24, %g2
+-4:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 24, %g2
+-1:
+-	stb		%g2, [%o0 + 3]
+-	b		88f
+-	 add		%o0, 4, %o0
+-42:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	subcc		%o3, 64, %o3
+-	add		%o1, 64, %o1
+-	bne		42b
+-	 add		%o0, 64, %o0
+-	 
+-	andcc		%o2, 0x30, %o3
+-	be,a		1f
+-	 srl		%g1, 16, %g2
+-4:
+-	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+-	subcc		%o3, 16, %o3
+-	add		%o1, 16, %o1
+-	bne		4b
+-	 add		%o0, 16, %o0
+-
+-	srl		%g1, 16, %g2
+-1:
+-	sth		%g2, [%o0 - 2]
+-
+-	/* Fall through */
+-	 
+-#endif /* FASTER_NONALIGNED */
++	ret
++	 restore	%g7, %g0, %o0
+ 
+ 88:	/* short_end */
+ 
+@@ -1127,7 +521,7 @@ FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+ 	stb		%g2, [%o0]
+ 1:
+ 	retl
+- 	 RETL_INSN
++	 mov		%g7, %o0
+ 
+ 90:	/* short_aligned_end */
+ 	bne		88b
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 79836a7..3b6e248 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
+ obj-y                   += fault_$(BITS).o
+ obj-y                   += init_$(BITS).o
+ obj-$(CONFIG_SPARC32)   += loadmmu.o
+-obj-y                   += generic_$(BITS).o
+ obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
+ obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
+ obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
+index 5175ac2..8a7f817 100644
+--- a/arch/sparc/mm/btfixup.c
++++ b/arch/sparc/mm/btfixup.c
+@@ -302,8 +302,7 @@ void __init btfixup(void)
+ 				case 'i':	/* INT */
+ 					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
+ 						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+-					else if ((insn & 0x80002000) == 0x80002000 &&
+-					         (insn & 0x01800000) != 0x01800000) /* %LO */
++					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
+ 						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
+ 					else {
+ 						prom_printf(insn_i, p, addr, insn);
+diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
+deleted file mode 100644
+index e6067b7..0000000
+--- a/arch/sparc/mm/generic_32.c
++++ /dev/null
+@@ -1,98 +0,0 @@
+-/*
+- * generic.c: Generic Sparc mm routines that are not dependent upon
+- *            MMU type but are Sparc specific.
+- *
+- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/pagemap.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-
+-/* Remap IO memory, the same way as remap_pfn_range(), but use
+- * the obio memory space.
+- *
+- * They use a pgprot that sets PAGE_IO and does not check the
+- * mem_map table as this is independent of normal memory.
+- */
+-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
+-	unsigned long offset, pgprot_t prot, int space)
+-{
+-	unsigned long end;
+-
+-	address &= ~PMD_MASK;
+-	end = address + size;
+-	if (end > PMD_SIZE)
+-		end = PMD_SIZE;
+-	do {
+-		set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
+-		address += PAGE_SIZE;
+-		offset += PAGE_SIZE;
+-		pte++;
+-	} while (address < end);
+-}
+-
+-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+-	unsigned long offset, pgprot_t prot, int space)
+-{
+-	unsigned long end;
+-
+-	address &= ~PGDIR_MASK;
+-	end = address + size;
+-	if (end > PGDIR_SIZE)
+-		end = PGDIR_SIZE;
+-	offset -= address;
+-	do {
+-		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
+-		if (!pte)
+-			return -ENOMEM;
+-		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+-		address = (address + PMD_SIZE) & PMD_MASK;
+-		pmd++;
+-	} while (address < end);
+-	return 0;
+-}
+-
+-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+-		       unsigned long pfn, unsigned long size, pgprot_t prot)
+-{
+-	int error = 0;
+-	pgd_t * dir;
+-	unsigned long beg = from;
+-	unsigned long end = from + size;
+-	struct mm_struct *mm = vma->vm_mm;
+-	int space = GET_IOSPACE(pfn);
+-	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+-
+-	/* See comment in mm/memory.c remap_pfn_range */
+-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+-	vma->vm_pgoff = (offset >> PAGE_SHIFT) |
+-		((unsigned long)space << 28UL);
+-
+-	offset -= from;
+-	dir = pgd_offset(mm, from);
+-	flush_cache_range(vma, beg, end);
+-
+-	while (from < end) {
+-		pmd_t *pmd = pmd_alloc(mm, dir, from);
+-		error = -ENOMEM;
+-		if (!pmd)
+-			break;
+-		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
+-		if (error)
+-			break;
+-		from = (from + PGDIR_SIZE) & PGDIR_MASK;
+-		dir++;
+-	}
+-
+-	flush_tlb_range(vma, beg, end);
+-	return error;
+-}
+-EXPORT_SYMBOL(io_remap_pfn_range);
+diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
+deleted file mode 100644
+index 3cb00df..0000000
+--- a/arch/sparc/mm/generic_64.c
++++ /dev/null
+@@ -1,164 +0,0 @@
+-/*
+- * generic.c: Generic Sparc mm routines that are not dependent upon
+- *            MMU type but are Sparc specific.
+- *
+- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/pagemap.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/tlbflush.h>
+-
+-/* Remap IO memory, the same way as remap_pfn_range(), but use
+- * the obio memory space.
+- *
+- * They use a pgprot that sets PAGE_IO and does not check the
+- * mem_map table as this is independent of normal memory.
+- */
+-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
+-				      unsigned long address,
+-				      unsigned long size,
+-				      unsigned long offset, pgprot_t prot,
+-				      int space)
+-{
+-	unsigned long end;
+-
+-	/* clear hack bit that was used as a write_combine side-effect flag */
+-	offset &= ~0x1UL;
+-	address &= ~PMD_MASK;
+-	end = address + size;
+-	if (end > PMD_SIZE)
+-		end = PMD_SIZE;
+-	do {
+-		pte_t entry;
+-		unsigned long curend = address + PAGE_SIZE;
+-		
+-		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
+-		if (!(address & 0xffff)) {
+-			if (PAGE_SIZE < (4 * 1024 * 1024) &&
+-			    !(address & 0x3fffff) &&
+-			    !(offset & 0x3ffffe) &&
+-			    end >= address + 0x400000) {
+-				entry = mk_pte_io(offset, prot, space,
+-						  4 * 1024 * 1024);
+-				curend = address + 0x400000;
+-				offset += 0x400000;
+-			} else if (PAGE_SIZE < (512 * 1024) &&
+-				   !(address & 0x7ffff) &&
+-				   !(offset & 0x7fffe) &&
+-				   end >= address + 0x80000) {
+-				entry = mk_pte_io(offset, prot, space,
+-						  512 * 1024 * 1024);
+-				curend = address + 0x80000;
+-				offset += 0x80000;
+-			} else if (PAGE_SIZE < (64 * 1024) &&
+-				   !(offset & 0xfffe) &&
+-				   end >= address + 0x10000) {
+-				entry = mk_pte_io(offset, prot, space,
+-						  64 * 1024);
+-				curend = address + 0x10000;
+-				offset += 0x10000;
+-			} else
+-				offset += PAGE_SIZE;
+-		} else
+-			offset += PAGE_SIZE;
+-
+-		if (pte_write(entry))
+-			entry = pte_mkdirty(entry);
+-		do {
+-			BUG_ON(!pte_none(*pte));
+-			set_pte_at(mm, address, pte, entry);
+-			address += PAGE_SIZE;
+-			pte_val(entry) += PAGE_SIZE;
+-			pte++;
+-		} while (address < curend);
+-	} while (address < end);
+-}
+-
+-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+-	unsigned long offset, pgprot_t prot, int space)
+-{
+-	unsigned long end;
+-
+-	address &= ~PGDIR_MASK;
+-	end = address + size;
+-	if (end > PGDIR_SIZE)
+-		end = PGDIR_SIZE;
+-	offset -= address;
+-	do {
+-		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
+-		if (!pte)
+-			return -ENOMEM;
+-		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+-		pte_unmap(pte);
+-		address = (address + PMD_SIZE) & PMD_MASK;
+-		pmd++;
+-	} while (address < end);
+-	return 0;
+-}
+-
+-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
+-	unsigned long offset, pgprot_t prot, int space)
+-{
+-	unsigned long end;
+-
+-	address &= ~PUD_MASK;
+-	end = address + size;
+-	if (end > PUD_SIZE)
+-		end = PUD_SIZE;
+-	offset -= address;
+-	do {
+-		pmd_t *pmd = pmd_alloc(mm, pud, address);
+-		if (!pud)
+-			return -ENOMEM;
+-		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
+-		address = (address + PUD_SIZE) & PUD_MASK;
+-		pud++;
+-	} while (address < end);
+-	return 0;
+-}
+-
+-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+-		unsigned long pfn, unsigned long size, pgprot_t prot)
+-{
+-	int error = 0;
+-	pgd_t * dir;
+-	unsigned long beg = from;
+-	unsigned long end = from + size;
+-	struct mm_struct *mm = vma->vm_mm;
+-	int space = GET_IOSPACE(pfn);
+-	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+-	unsigned long phys_base;
+-
+-	phys_base = offset | (((unsigned long) space) << 32UL);
+-
+-	/* See comment in mm/memory.c remap_pfn_range */
+-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+-	vma->vm_pgoff = phys_base >> PAGE_SHIFT;
+-
+-	offset -= from;
+-	dir = pgd_offset(mm, from);
+-	flush_cache_range(vma, beg, end);
+-
+-	while (from < end) {
+-		pud_t *pud = pud_alloc(mm, dir, from);
+-		error = -ENOMEM;
+-		if (!pud)
+-			break;
+-		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+-		if (error)
+-			break;
+-		from = (from + PGDIR_SIZE) & PGDIR_MASK;
+-		dir++;
+-	}
+-
+-	flush_tlb_range(vma, beg, end);
+-	return error;
+-}
+-EXPORT_SYMBOL(io_remap_pfn_range);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index bfab3fa..7b65f75 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -568,8 +568,8 @@ cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
+ 					break;
+ 				}
+ 				if (filter[i].jt != 0) {
+-					if (filter[i].jf)
+-						t_offset += is_near(f_offset) ? 2 : 6;
++					if (filter[i].jf && f_offset)
++						t_offset += is_near(f_offset) ? 2 : 5;
+ 					EMIT_COND_JMP(t_op, t_offset);
+ 					if (filter[i].jf)
+ 						EMIT_JMP(f_offset);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 847d04e..35ae52d 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -418,6 +418,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ 	q->backing_dev_info.state = 0;
+ 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ 	q->backing_dev_info.name = "block";
++	q->node = node_id;
+ 
+ 	err = bdi_init(&q->backing_dev_info);
+ 	if (err) {
+@@ -502,7 +503,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+ 	if (!uninit_q)
+ 		return NULL;
+ 
+-	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
++	q = blk_init_allocated_queue(uninit_q, rfn, lock);
+ 	if (!q)
+ 		blk_cleanup_queue(uninit_q);
+ 
+@@ -514,18 +515,9 @@ struct request_queue *
+ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ 			 spinlock_t *lock)
+ {
+-	return blk_init_allocated_queue_node(q, rfn, lock, -1);
+-}
+-EXPORT_SYMBOL(blk_init_allocated_queue);
+-
+-struct request_queue *
+-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+-			      spinlock_t *lock, int node_id)
+-{
+ 	if (!q)
+ 		return NULL;
+ 
+-	q->node = node_id;
+ 	if (blk_init_free_list(q))
+ 		return NULL;
+ 
+@@ -555,7 +547,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(blk_init_allocated_queue_node);
++EXPORT_SYMBOL(blk_init_allocated_queue);
+ 
+ int blk_get_queue(struct request_queue *q)
+ {
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index ae21919..23500ac 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3169,7 +3169,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+ 		}
+ 	}
+ 
+-	if (ret)
++	if (ret && ret != -EEXIST)
+ 		printk(KERN_ERR "cfq: cic link failed!\n");
+ 
+ 	return ret;
+@@ -3185,6 +3185,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ {
+ 	struct io_context *ioc = NULL;
+ 	struct cfq_io_context *cic;
++	int ret;
+ 
+ 	might_sleep_if(gfp_mask & __GFP_WAIT);
+ 
+@@ -3192,6 +3193,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ 	if (!ioc)
+ 		return NULL;
+ 
++retry:
+ 	cic = cfq_cic_lookup(cfqd, ioc);
+ 	if (cic)
+ 		goto out;
+@@ -3200,7 +3202,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ 	if (cic == NULL)
+ 		goto err;
+ 
+-	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
++	ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
++	if (ret == -EEXIST) {
++		/* someone has linked cic to ioc already */
++		cfq_cic_free(cic);
++		goto retry;
++	} else if (ret)
+ 		goto err_free;
+ 
+ out:
+@@ -4015,6 +4022,11 @@ static void *cfq_init_queue(struct request_queue *q)
+ 
+ 	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
+ 		kfree(cfqg);
++
++		spin_lock(&cic_index_lock);
++		ida_remove(&cic_index_ida, cfqd->cic_index);
++		spin_unlock(&cic_index_lock);
++
+ 		kfree(cfqd);
+ 		return NULL;
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 7eef6e1..ef16443 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1451,6 +1451,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+ 
+ 	diff1 = now - dev_priv->last_time1;
+ 
++	/* Prevent division-by-zero if we are asking too fast.
++	 * Also, we don't get interesting results if we are polling
++	 * faster than once in 10ms, so just return the saved value
++	 * in such cases.
++	 */
++	if (diff1 <= 10)
++		return dev_priv->chipset_power;
++
+ 	count1 = I915_READ(DMIEC);
+ 	count2 = I915_READ(DDREC);
+ 	count3 = I915_READ(CSIEC);
+@@ -1481,6 +1489,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+ 	dev_priv->last_count1 = total_count;
+ 	dev_priv->last_time1 = now;
+ 
++	dev_priv->chipset_power = ret;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index e0d0e27..335564e 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -702,6 +702,7 @@ typedef struct drm_i915_private {
+ 
+ 	u64 last_count1;
+ 	unsigned long last_time1;
++	unsigned long chipset_power;
+ 	u64 last_count2;
+ 	struct timespec last_time2;
+ 	unsigned long gfx_power;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 21c5aa0..fe052c6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3257,6 +3257,18 @@ int evergreen_init(struct radeon_device *rdev)
+ 			rdev->accel_working = false;
+ 		}
+ 	}
++
++	/* Don't start up if the MC ucode is missing on BTC parts.
++	 * The default clocks and voltages before the MC ucode
++	 * is loaded are not suffient for advanced operations.
++	 */
++	if (ASIC_IS_DCE5(rdev)) {
++		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
++			DRM_ERROR("radeon: MC ucode required for NI+.\n");
++			return -EINVAL;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 285acc4..a098edc 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2568,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+ 
+ 	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ 	rdev->pm.current_clock_mode_index = 0;
+-	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++	if (rdev->pm.default_power_state_index >= 0)
++		rdev->pm.current_vddc =
++			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++	else
++		rdev->pm.current_vddc = 0;
+ }
+ 
+ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index e06e045..6ad728f 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -24,6 +24,7 @@
+  */
+ 
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/input/mt.h>
+ #include <linux/serio.h>
+@@ -760,6 +761,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
+ 
+ 	do {
+ 		psmouse_reset(psmouse);
++		if (retry) {
++			/*
++			 * On some boxes, right after resuming, the touchpad
++			 * needs some time to finish initializing (I assume
++			 * it needs time to calibrate) and start responding
++			 * to Synaptics-specific queries, so let's wait a
++			 * bit.
++			 */
++			ssleep(1);
++		}
+ 		error = synaptics_detect(psmouse, 0);
+ 	} while (error && ++retry < 3);
+ 
+diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
+index bdf19ad..e9babcb 100644
+--- a/drivers/media/video/s5p-fimc/fimc-core.c
++++ b/drivers/media/video/s5p-fimc/fimc-core.c
+@@ -36,7 +36,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+ static struct fimc_fmt fimc_formats[] = {
+ 	{
+ 		.name		= "RGB565",
+-		.fourcc		= V4L2_PIX_FMT_RGB565X,
++		.fourcc		= V4L2_PIX_FMT_RGB565,
+ 		.depth		= { 16 },
+ 		.color		= S5P_FIMC_RGB565,
+ 		.memplanes	= 1,
+diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
+index b8f2a4e..f82413a 100644
+--- a/drivers/mfd/twl-core.c
++++ b/drivers/mfd/twl-core.c
+@@ -362,13 +362,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+ 		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ 		return -EPERM;
+ 	}
+-	sid = twl_map[mod_no].sid;
+-	twl = &twl_modules[sid];
+-
+ 	if (unlikely(!inuse)) {
+-		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
++		pr_err("%s: not initialized\n", DRIVER_NAME);
+ 		return -EPERM;
+ 	}
++	sid = twl_map[mod_no].sid;
++	twl = &twl_modules[sid];
++
+ 	mutex_lock(&twl->xfer_lock);
+ 	/*
+ 	 * [MSG1]: fill the register address data
+@@ -419,13 +419,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+ 		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ 		return -EPERM;
+ 	}
+-	sid = twl_map[mod_no].sid;
+-	twl = &twl_modules[sid];
+-
+ 	if (unlikely(!inuse)) {
+-		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
++		pr_err("%s: not initialized\n", DRIVER_NAME);
+ 		return -EPERM;
+ 	}
++	sid = twl_map[mod_no].sid;
++	twl = &twl_modules[sid];
++
+ 	mutex_lock(&twl->xfer_lock);
+ 	/* [MSG1] fill the register address data */
+ 	msg = &twl->xfer_msg[0];
+diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
+index 3941ddc..834f824 100644
+--- a/drivers/mfd/twl4030-madc.c
++++ b/drivers/mfd/twl4030-madc.c
+@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
+ 	u8 ch_msb, ch_lsb;
+ 	int ret;
+ 
+-	if (!req)
++	if (!req || !twl4030_madc)
+ 		return -EINVAL;
++
+ 	mutex_lock(&twl4030_madc->lock);
+ 	if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
+ 		ret = -EINVAL;
+@@ -530,13 +531,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
+ 	if (ret) {
+ 		dev_err(twl4030_madc->dev,
+ 			"unable to write sel register 0x%X\n", method->sel + 1);
+-		return ret;
++		goto out;
+ 	}
+ 	ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
+ 	if (ret) {
+ 		dev_err(twl4030_madc->dev,
+ 			"unable to write sel register 0x%X\n", method->sel + 1);
+-		return ret;
++		goto out;
+ 	}
+ 	/* Select averaging for all channels if do_avg is set */
+ 	if (req->do_avg) {
+@@ -546,7 +547,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
+ 			dev_err(twl4030_madc->dev,
+ 				"unable to write avg register 0x%X\n",
+ 				method->avg + 1);
+-			return ret;
++			goto out;
+ 		}
+ 		ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ 				       ch_lsb, method->avg);
+@@ -554,7 +555,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
+ 			dev_err(twl4030_madc->dev,
+ 				"unable to write sel reg 0x%X\n",
+ 				method->sel + 1);
+-			return ret;
++			goto out;
+ 		}
+ 	}
+ 	if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
+@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+ 	if (!madc)
+ 		return -ENOMEM;
+ 
++	madc->dev = &pdev->dev;
++
+ 	/*
+ 	 * Phoenix provides 2 interrupt lines. The first one is connected to
+ 	 * the OMAP. The other one can be connected to the other processor such
+@@ -737,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+ 			TWL4030_BCI_BCICTL1);
+ 		goto err_i2c;
+ 	}
++
++	/* Check that MADC clock is on */
++	ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
++	if (ret) {
++		dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
++				TWL4030_REG_GPBR1);
++		goto err_i2c;
++	}
++
++	/* If MADC clk is not on, turn it on */
++	if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
++		dev_info(&pdev->dev, "clk disabled, enabling\n");
++		regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
++		ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
++				       TWL4030_REG_GPBR1);
++		if (ret) {
++			dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
++					TWL4030_REG_GPBR1);
++			goto err_i2c;
++		}
++	}
++
+ 	platform_set_drvdata(pdev, madc);
+ 	mutex_init(&madc->lock);
+ 	ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index fe14072..9394d0b 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -557,7 +557,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ 	      unsigned int status)
+ {
+ 	/* First check for errors */
+-	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
++	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ 		u32 remain, success;
+ 
+ 		/* Terminate the DMA transfer */
+@@ -636,8 +637,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ 	}
+ 
+ 	if (!cmd->data || cmd->error) {
+-		if (host->data)
++		if (host->data) {
++			/* Terminate the DMA transfer */
++			if (dma_inprogress(host))
++				mmci_dma_data_error(host);
+ 			mmci_stop_data(host);
++		}
+ 		mmci_request_end(host, cmd->mrq);
+ 	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ 		mmci_start_data(host, cmd->data);
+@@ -837,8 +842,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
+ 		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+ 
+ 		data = host->data;
+-		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
+-			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
++		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
++			      MCI_DATABLOCKEND) && data)
+ 			mmci_data_irq(host, data, status);
+ 
+ 		cmd = host->cmd;
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index d4455ff..52f4b64 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
+ static int firmware_rom_wait_states = 0x1C;
+ #endif
+ 
+-module_param(firmware_rom_wait_states, bool, 0644);
++module_param(firmware_rom_wait_states, int, 0644);
+ MODULE_PARM_DESC(firmware_rom_wait_states,
+ 		 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
+ 
+diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
+index 1286fe2..4b3a68b 100644
+--- a/drivers/net/pptp.c
++++ b/drivers/net/pptp.c
+@@ -418,10 +418,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	opt->src_addr = sp->sa_addr.pptp;
+-	if (add_chan(po)) {
+-		release_sock(sk);
++	if (add_chan(po))
+ 		error = -EBUSY;
+-	}
+ 
+ 	release_sock(sk);
+ 	return error;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 5362306..a126a3e 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1828,6 +1828,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
+ 	struct ath_softc *sc = hw->priv;
+ 	struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ 
++	if (!(sc->sc_flags & SC_OP_TXAGGR))
++		return;
++
+ 	switch (cmd) {
+ 	case STA_NOTIFY_SLEEP:
+ 		an->sleeping = true;
+diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
+index ba7f36a..ea35843 100644
+--- a/drivers/net/wireless/ath/ath9k/rc.c
++++ b/drivers/net/wireless/ath/ath9k/rc.c
+@@ -1252,7 +1252,9 @@ static void ath_rc_init(struct ath_softc *sc,
+ 
+ 	ath_rc_priv->max_valid_rate = k;
+ 	ath_rc_sort_validrates(rate_table, ath_rc_priv);
+-	ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
++	ath_rc_priv->rate_max_phy = (k > 4) ?
++					ath_rc_priv->valid_rate_index[k-4] :
++					ath_rc_priv->valid_rate_index[k-1];
+ 	ath_rc_priv->rate_table = rate_table;
+ 
+ 	ath_dbg(common, ATH_DBG_CONFIG,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+index b849ad7..39a3c9c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -490,8 +490,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+ 			if (ctx->ht.enabled) {
+ 				/* if HT40 is used, it should not change
+ 				 * after associated except channel switch */
+-				if (iwl_is_associated_ctx(ctx) &&
+-				     !ctx->ht.is_40mhz)
++				if (!ctx->ht.is_40mhz ||
++						!iwl_is_associated_ctx(ctx))
+ 					iwlagn_config_ht40(conf, ctx);
+ 			} else
+ 				ctx->ht.is_40mhz = false;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+index 4974cd7..67cd2e3 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+@@ -385,7 +385,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ 		tx_cmd->tid_tspec = qc[0] & 0xf;
+ 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ 	} else {
+-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
++		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
++			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
++		else
++			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ 	}
+ 
+ 	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
+@@ -775,10 +778,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+ 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+ 
+-	/* Set up entry for this TFD in Tx byte-count array */
+-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+-		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
+-					       le16_to_cpu(tx_cmd->len));
++	iwlagn_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len));
+ 
+ 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ 				       firstlen, PCI_DMA_BIDIRECTIONAL);
+diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
+index 89f6345..84a208d 100644
+--- a/drivers/oprofile/oprofile_files.c
++++ b/drivers/oprofile/oprofile_files.c
+@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
+ 		return -EINVAL;
+ 
+ 	retval = oprofilefs_ulong_from_user(&val, buf, count);
+-	if (retval)
++	if (retval <= 0)
+ 		return retval;
+ 
+ 	retval = oprofile_set_timeout(val);
+@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
+ 		return -EINVAL;
+ 
+ 	retval = oprofilefs_ulong_from_user(&val, buf, count);
+-	if (retval)
++	if (retval <= 0)
+ 		return retval;
+ 
+ 	retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
+@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
+ 		return -EINVAL;
+ 
+ 	retval = oprofilefs_ulong_from_user(&val, buf, count);
+-	if (retval)
++	if (retval <= 0)
+ 		return retval;
+ 
++	retval = 0;
+ 	if (val)
+ 		retval = oprofile_start();
+ 	else
+diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
+index e9ff6f7..1c0b799 100644
+--- a/drivers/oprofile/oprofilefs.c
++++ b/drivers/oprofile/oprofilefs.c
+@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
+ }
+ 
+ 
++/*
++ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
++ * unchanged and might be uninitialized. This follows write syscall
++ * implementation when count is zero: "If count is zero ... [and if]
++ * no errors are detected, 0 will be returned without causing any
++ * other effect." (man 2 write)
++ */
+ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
+ {
+ 	char tmpbuf[TMPBUFSIZE];
+@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
+ 	spin_lock_irqsave(&oprofilefs_lock, flags);
+ 	*val = simple_strtoul(tmpbuf, NULL, 0);
+ 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
+-	return 0;
++	return count;
+ }
+ 
+ 
+@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
+ 		return -EINVAL;
+ 
+ 	retval = oprofilefs_ulong_from_user(&value, buf, count);
+-	if (retval)
++	if (retval <= 0)
+ 		return retval;
+ 
+ 	retval = oprofile_set_ulong(file->private_data, value);
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index bbb6f85..eb4c883 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -318,20 +318,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ }
+ EXPORT_SYMBOL_GPL(rtc_read_alarm);
+ 
+-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+-{
+-	int err;
+-
+-	if (!rtc->ops)
+-		err = -ENODEV;
+-	else if (!rtc->ops->set_alarm)
+-		err = -EINVAL;
+-	else
+-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+-
+-	return err;
+-}
+-
+ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ {
+ 	struct rtc_time tm;
+@@ -355,7 +341,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 	 * over right here, before we set the alarm.
+ 	 */
+ 
+-	return ___rtc_set_alarm(rtc, alarm);
++	if (!rtc->ops)
++		err = -ENODEV;
++	else if (!rtc->ops->set_alarm)
++		err = -EINVAL;
++	else
++		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
++
++	return err;
+ }
+ 
+ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+@@ -769,20 +762,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ 	return 0;
+ }
+ 
+-static void rtc_alarm_disable(struct rtc_device *rtc)
+-{
+-	struct rtc_wkalrm alarm;
+-	struct rtc_time tm;
+-
+-	__rtc_read_time(rtc, &tm);
+-
+-	alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+-				     ktime_set(300, 0)));
+-	alarm.enabled = 0;
+-
+-	___rtc_set_alarm(rtc, &alarm);
+-}
+-
+ /**
+  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+  * @rtc rtc device
+@@ -804,10 +783,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+ 		struct rtc_wkalrm alarm;
+ 		int err;
+ 		next = timerqueue_getnext(&rtc->timerqueue);
+-		if (!next) {
+-			rtc_alarm_disable(rtc);
++		if (!next)
+ 			return;
+-		}
+ 		alarm.time = rtc_ktime_to_tm(next->expires);
+ 		alarm.enabled = 1;
+ 		err = __rtc_set_alarm(rtc, &alarm);
+@@ -869,8 +846,7 @@ again:
+ 		err = __rtc_set_alarm(rtc, &alarm);
+ 		if (err == -ETIME)
+ 			goto again;
+-	} else
+-		rtc_alarm_disable(rtc);
++	}
+ 
+ 	mutex_unlock(&rtc->ops_lock);
+ }
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index eda128f..64aedd8 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ static struct rtc_class_ops m41t80_rtc_ops = {
+ 	.read_time = m41t80_rtc_read_time,
+ 	.set_time = m41t80_rtc_set_time,
++	/*
++	 * XXX - m41t80 alarm functionality is reported broken.
++	 * until it is fixed, don't register alarm functions.
++	 *
+ 	.read_alarm = m41t80_rtc_read_alarm,
+ 	.set_alarm = m41t80_rtc_set_alarm,
++	*/
+ 	.proc = m41t80_rtc_proc,
++	/*
++	 * See above comment on broken alarm
++	 *
+ 	.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
++	*/
+ };
+ 
+ #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 2a4991d..3a417df 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+ {
+ 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ 
++	/* if previous slave_alloc returned early, there is nothing to do */
++	if (!zfcp_sdev->port)
++		return;
++
+ 	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+ 	put_device(&zfcp_sdev->port->dev);
+ }
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 8885b3e..f829adc 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1561,6 +1561,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
+ 	stats->InvalidCRCCount++;
+ 	if (stats->InvalidCRCCount < 5)
+ 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
++	put_cpu();
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 83035bd..39e81cd 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1082,41 +1082,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ }
+ 
+ /**
+- * _base_save_msix_table - backup msix vector table
+- * @ioc: per adapter object
+- *
+- * This address an errata where diag reset clears out the table
+- */
+-static void
+-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+-{
+-	int i;
+-
+-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+-		return;
+-
+-	for (i = 0; i < ioc->msix_vector_count; i++)
+-		ioc->msix_table_backup[i] = ioc->msix_table[i];
+-}
+-
+-/**
+- * _base_restore_msix_table - this restores the msix vector table
+- * @ioc: per adapter object
+- *
+- */
+-static void
+-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+-{
+-	int i;
+-
+-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+-		return;
+-
+-	for (i = 0; i < ioc->msix_vector_count; i++)
+-		ioc->msix_table[i] = ioc->msix_table_backup[i];
+-}
+-
+-/**
+  * _base_check_enable_msix - checks MSIX capabable.
+  * @ioc: per adapter object
+  *
+@@ -1128,7 +1093,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ {
+ 	int base;
+ 	u16 message_control;
+-	u32 msix_table_offset;
++
+ 
+ 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ 	if (!base) {
+@@ -1141,14 +1106,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ 	pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ 	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ 
+-	/* get msix table  */
+-	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
+-	msix_table_offset &= 0xFFFFFFF8;
+-	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+-
+ 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+-	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
+-	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
++	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+ 	return 0;
+ }
+ 
+@@ -1162,8 +1121,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
+ {
+ 	if (ioc->msix_enable) {
+ 		pci_disable_msix(ioc->pdev);
+-		kfree(ioc->msix_table_backup);
+-		ioc->msix_table_backup = NULL;
+ 		ioc->msix_enable = 0;
+ 	}
+ }
+@@ -1189,14 +1146,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ 	if (_base_check_enable_msix(ioc) != 0)
+ 		goto try_ioapic;
+ 
+-	ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
+-	    sizeof(u32), GFP_KERNEL);
+-	if (!ioc->msix_table_backup) {
+-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+-		    "msix_table_backup failed!!!\n", ioc->name));
+-		goto try_ioapic;
+-	}
+-
+ 	memset(&entries, 0, sizeof(struct msix_entry));
+ 	r = pci_enable_msix(ioc->pdev, &entries, 1);
+ 	if (r) {
+@@ -3513,9 +3462,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ 	u32 hcb_size;
+ 
+ 	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
+-
+-	_base_save_msix_table(ioc);
+-
+ 	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
+ 	    ioc->name));
+ 
+@@ -3611,7 +3557,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ 		goto out;
+ 	}
+ 
+-	_base_restore_msix_table(ioc);
+ 	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
+ 	return 0;
+ 
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 41a57a7..e1735f9 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -626,8 +626,6 @@ struct mpt2sas_port_facts {
+  * @wait_for_port_enable_to_complete:
+  * @msix_enable: flag indicating msix is enabled
+  * @msix_vector_count: number msix vectors
+- * @msix_table: virt address to the msix table
+- * @msix_table_backup: backup msix table
+  * @scsi_io_cb_idx: shost generated commands
+  * @tm_cb_idx: task management commands
+  * @scsih_cb_idx: scsih internal commands
+@@ -768,8 +766,6 @@ struct MPT2SAS_ADAPTER {
+ 
+ 	u8		msix_enable;
+ 	u16		msix_vector_count;
+-	u32		*msix_table;
+-	u32		*msix_table_backup;
+ 	u32		ioc_reset_count;
+ 
+ 	/* internal commands, callback index */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 5690f09..f88e52a 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -4145,7 +4145,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ 	/* insert into event log */
+ 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+-	event_reply = kzalloc(sz, GFP_KERNEL);
++	event_reply = kzalloc(sz, GFP_ATOMIC);
+ 	if (!event_reply) {
+ 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ 		    ioc->name, __FILE__, __LINE__, __func__);
+diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
+index d6620ad..c828151 100644
+--- a/drivers/ssb/driver_pcicore.c
++++ b/drivers/ssb/driver_pcicore.c
+@@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
+ 
+ static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
+ {
+-	ssb_pcicore_fix_sprom_core_index(pc);
++	struct ssb_device *pdev = pc->dev;
++	struct ssb_bus *bus = pdev->bus;
++
++	if (bus->bustype == SSB_BUSTYPE_PCI)
++		ssb_pcicore_fix_sprom_core_index(pc);
+ 
+ 	/* Disable PCI interrupts. */
+-	ssb_write32(pc->dev, SSB_INTVEC, 0);
++	ssb_write32(pdev, SSB_INTVEC, 0);
+ 
+ 	/* Additional PCIe always once-executed workarounds */
+ 	if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 8cb2685..9cb60df 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -216,6 +216,7 @@ static int __devinit cru_detect(unsigned long map_entry,
+ 
+ 	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+ 
++	set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+ 	asminline_call(&cmn_regs, bios32_entrypoint);
+ 
+ 	if (cmn_regs.u1.ral != 0) {
+@@ -233,8 +234,10 @@ static int __devinit cru_detect(unsigned long map_entry,
+ 		if ((physical_bios_base + physical_bios_offset)) {
+ 			cru_rom_addr =
+ 				ioremap(cru_physical_address, cru_length);
+-			if (cru_rom_addr)
++			if (cru_rom_addr) {
++				set_memory_x((unsigned long)cru_rom_addr, cru_length);
+ 				retval = 0;
++			}
+ 		}
+ 
+ 		printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 84f317e..fd60dff 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -162,7 +162,7 @@ void __init xen_swiotlb_init(int verbose)
+ 	/*
+ 	 * Get IO TLB memory from any location.
+ 	 */
+-	xen_io_tlb_start = alloc_bootmem(bytes);
++	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+ 	if (!xen_io_tlb_start)
+ 		panic("Cannot allocate SWIOTLB buffer");
+ 
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index e97dd21..87822a3 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1519,16 +1519,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
+ {
+ 	if (!flags)
+ 		return;
+-	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
++	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ 		nfs41_handle_server_reboot(clp);
+-	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
++	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ 			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ 			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
+ 			    SEQ4_STATUS_LEASE_MOVED))
+ 		nfs41_handle_state_revoked(clp);
+-	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
++	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ 		nfs41_handle_recallable_state_revoked(clp);
+-	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
++	if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ 			    SEQ4_STATUS_BACKCHANNEL_FAULT |
+ 			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ 		nfs41_handle_cb_path_down(clp);
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index 41d6743..3e65427 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	case FS_IOC32_GETVERSION:
+ 		cmd = FS_IOC_GETVERSION;
+ 		break;
++	case NILFS_IOCTL_CHANGE_CPMODE:
++	case NILFS_IOCTL_DELETE_CHECKPOINT:
++	case NILFS_IOCTL_GET_CPINFO:
++	case NILFS_IOCTL_GET_CPSTAT:
++	case NILFS_IOCTL_GET_SUINFO:
++	case NILFS_IOCTL_GET_SUSTAT:
++	case NILFS_IOCTL_GET_VINFO:
++	case NILFS_IOCTL_GET_BDESCS:
++	case NILFS_IOCTL_CLEAN_SEGMENTS:
++	case NILFS_IOCTL_SYNC:
++	case NILFS_IOCTL_RESIZE:
++	case NILFS_IOCTL_SET_ALLOC_RANGE:
++		break;
+ 	default:
+ 		return -ENOIOCTLCMD;
+ 	}
+diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
+index 28de70b..e6ac98c 100644
+--- a/fs/xfs/linux-2.6/xfs_super.c
++++ b/fs/xfs/linux-2.6/xfs_super.c
+@@ -871,27 +871,6 @@ xfs_fs_dirty_inode(
+ }
+ 
+ STATIC int
+-xfs_log_inode(
+-	struct xfs_inode	*ip)
+-{
+-	struct xfs_mount	*mp = ip->i_mount;
+-	struct xfs_trans	*tp;
+-	int			error;
+-
+-	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+-	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+-	if (error) {
+-		xfs_trans_cancel(tp, 0);
+-		return error;
+-	}
+-
+-	xfs_ilock(ip, XFS_ILOCK_EXCL);
+-	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
+-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+-	return xfs_trans_commit(tp, 0);
+-}
+-
+-STATIC int
+ xfs_fs_write_inode(
+ 	struct inode		*inode,
+ 	struct writeback_control *wbc)
+@@ -904,10 +883,8 @@ xfs_fs_write_inode(
+ 
+ 	if (XFS_FORCED_SHUTDOWN(mp))
+ 		return -XFS_ERROR(EIO);
+-	if (!ip->i_update_core)
+-		return 0;
+ 
+-	if (wbc->sync_mode == WB_SYNC_ALL) {
++	if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
+ 		/*
+ 		 * Make sure the inode has made it it into the log.  Instead
+ 		 * of forcing it all the way to stable storage using a
+@@ -916,11 +893,14 @@ xfs_fs_write_inode(
+ 		 * of synchronous log foces dramatically.
+ 		 */
+ 		xfs_ioend_wait(ip);
+-		error = xfs_log_inode(ip);
++		error = xfs_log_dirty_inode(ip, NULL, 0);
+ 		if (error)
+ 			goto out;
+ 		return 0;
+ 	} else {
++		if (!ip->i_update_core)
++			return 0;
++
+ 		/*
+ 		 * We make this non-blocking if the inode is contended, return
+ 		 * EAGAIN to indicate to the caller that they did not succeed.
+diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
+index b69688d..2f277a0 100644
+--- a/fs/xfs/linux-2.6/xfs_sync.c
++++ b/fs/xfs/linux-2.6/xfs_sync.c
+@@ -336,6 +336,32 @@ xfs_sync_fsdata(
+ 	return xfs_bwrite(mp, bp);
+ }
+ 
++int
++xfs_log_dirty_inode(
++	struct xfs_inode	*ip,
++	struct xfs_perag	*pag,
++	int			flags)
++{
++	struct xfs_mount	*mp = ip->i_mount;
++	struct xfs_trans	*tp;
++	int			error;
++
++	if (!ip->i_update_core)
++		return 0;
++
++	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
++	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
++	if (error) {
++		xfs_trans_cancel(tp, 0);
++		return error;
++	}
++
++	xfs_ilock(ip, XFS_ILOCK_EXCL);
++	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
++	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
++	return xfs_trans_commit(tp, 0);
++}
++
+ /*
+  * When remounting a filesystem read-only or freezing the filesystem, we have
+  * two phases to execute. This first phase is syncing the data before we
+@@ -365,6 +391,17 @@ xfs_quiesce_data(
+ 
+ 	/* push and block till complete */
+ 	xfs_sync_data(mp, SYNC_WAIT);
++
++	/*
++	 * Log all pending size and timestamp updates.  The vfs writeback
++	 * code is supposed to do this, but due to its overaggressive
++	 * livelock detection it will skip inodes where appending writes
++	 * were written out in the first non-blocking sync phase if their
++	 * completion took long enough that it happened after taking the
++	 * timestamp for the cut-off in the blocking phase.
++	 */
++	xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
++
+ 	xfs_qm_sync(mp, SYNC_WAIT);
+ 
+ 	/* write superblock and hoover up shutdown errors */
+diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
+index e3a6ad2..ef5b2ce 100644
+--- a/fs/xfs/linux-2.6/xfs_sync.h
++++ b/fs/xfs/linux-2.6/xfs_sync.h
+@@ -42,6 +42,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
+ 
+ void xfs_flush_inodes(struct xfs_inode *ip);
+ 
++int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
++
+ int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
+ 
+ void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 1a23722..cd93f99 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -798,9 +798,6 @@ extern void blk_unprep_request(struct request *);
+  */
+ extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
+ 					spinlock_t *lock, int node_id);
+-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
+-							   request_fn_proc *,
+-							   spinlock_t *, int node_id);
+ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+ 						      request_fn_proc *, spinlock_t *);
+diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
+index 6427d29..530e11b 100644
+--- a/include/linux/i2c/twl4030-madc.h
++++ b/include/linux/i2c/twl4030-madc.h
+@@ -129,6 +129,10 @@ enum sample_type {
+ #define REG_BCICTL2             0x024
+ #define TWL4030_BCI_ITHSENS	0x007
+ 
++/* Register and bits for GPBR1 register */
++#define TWL4030_REG_GPBR1		0x0c
++#define TWL4030_GPBR1_MADC_HFCLK_EN	(1 << 7)
++
+ struct twl4030_madc_user_parms {
+ 	int channel;
+ 	int average;
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index f549056..87f402c 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
+ #include <linux/percpu.h>
++#include <linux/cpu.h>
+ 
+ /* can make br locks by using local lock for read side, global lock for write */
+ #define br_lock_init(name)	name##_lock_init()
+@@ -72,9 +73,31 @@
+ 
+ #define DEFINE_LGLOCK(name)						\
+ 									\
++ DEFINE_SPINLOCK(name##_cpu_lock);					\
++ cpumask_t name##_cpus __read_mostly;					\
+  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
+  DEFINE_LGLOCK_LOCKDEP(name);						\
+ 									\
++ static int								\
++ name##_lg_cpu_callback(struct notifier_block *nb,			\
++				unsigned long action, void *hcpu)	\
++ {									\
++	switch (action & ~CPU_TASKS_FROZEN) {				\
++	case CPU_UP_PREPARE:						\
++		spin_lock(&name##_cpu_lock);				\
++		cpu_set((unsigned long)hcpu, name##_cpus);		\
++		spin_unlock(&name##_cpu_lock);				\
++		break;							\
++	case CPU_UP_CANCELED: case CPU_DEAD:				\
++		spin_lock(&name##_cpu_lock);				\
++		cpu_clear((unsigned long)hcpu, name##_cpus);		\
++		spin_unlock(&name##_cpu_lock);				\
++	}								\
++	return NOTIFY_OK;						\
++ }									\
++ static struct notifier_block name##_lg_cpu_notifier = {		\
++	.notifier_call = name##_lg_cpu_callback,			\
++ };									\
+  void name##_lock_init(void) {						\
+ 	int i;								\
+ 	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -83,6 +106,11 @@
+ 		lock = &per_cpu(name##_lock, i);			\
+ 		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
+ 	}								\
++	register_hotcpu_notifier(&name##_lg_cpu_notifier);		\
++	get_online_cpus();						\
++	for_each_online_cpu(i)						\
++		cpu_set(i, name##_cpus);				\
++	put_online_cpus();						\
+  }									\
+  EXPORT_SYMBOL(name##_lock_init);					\
+ 									\
+@@ -124,9 +152,9 @@
+ 									\
+  void name##_global_lock_online(void) {					\
+ 	int i;								\
+-	preempt_disable();						\
++	spin_lock(&name##_cpu_lock);					\
+ 	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
+-	for_each_online_cpu(i) {					\
++	for_each_cpu(i, &name##_cpus) {					\
+ 		arch_spinlock_t *lock;					\
+ 		lock = &per_cpu(name##_lock, i);			\
+ 		arch_spin_lock(lock);					\
+@@ -137,12 +165,12 @@
+  void name##_global_unlock_online(void) {				\
+ 	int i;								\
+ 	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
+-	for_each_online_cpu(i) {					\
++	for_each_cpu(i, &name##_cpus) {					\
+ 		arch_spinlock_t *lock;					\
+ 		lock = &per_cpu(name##_lock, i);			\
+ 		arch_spin_unlock(lock);					\
+ 	}								\
+-	preempt_enable();						\
++	spin_unlock(&name##_cpu_lock);					\
+  }									\
+  EXPORT_SYMBOL(name##_global_unlock_online);				\
+ 									\
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 7df327a..c388421 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -236,6 +236,9 @@ extern struct sctp_globals {
+ 	 * bits is an indicator of when to send and window update SACK.
+ 	 */
+ 	int rwnd_update_shift;
++
++	/* Threshold for autoclose timeout, in seconds. */
++	unsigned long max_autoclose;
+ } sctp_globals;
+ 
+ #define sctp_rto_initial		(sctp_globals.rto_initial)
+@@ -271,6 +274,7 @@ extern struct sctp_globals {
+ #define sctp_auth_enable		(sctp_globals.auth_enable)
+ #define sctp_checksum_disable		(sctp_globals.checksum_disable)
+ #define sctp_rwnd_upd_shift		(sctp_globals.rwnd_update_shift)
++#define sctp_max_autoclose		(sctp_globals.max_autoclose)
+ 
+ /* SCTP Socket type: UDP or TCP style. */
+ typedef enum {
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 2731d11..575a5e7 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2095,11 +2095,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+ 			continue;
+ 		/* get old css_set pointer */
+ 		task_lock(tsk);
+-		if (tsk->flags & PF_EXITING) {
+-			/* ignore this task if it's going away */
+-			task_unlock(tsk);
+-			continue;
+-		}
+ 		oldcg = tsk->cgroups;
+ 		get_css_set(oldcg);
+ 		task_unlock(tsk);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index f2b321b..303bed2 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1553,8 +1553,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
+ 	}
+ 
+ 	/* dead body doesn't have much to contribute */
+-	if (p->exit_state == EXIT_DEAD)
++	if (unlikely(p->exit_state == EXIT_DEAD)) {
++		/*
++		 * But do not ignore this task until the tracer does
++		 * wait_task_zombie()->do_notify_parent().
++		 */
++		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
++			wo->notask_error = 0;
+ 		return 0;
++	}
+ 
+ 	/* slay zombie? */
+ 	if (p->exit_state == EXIT_ZOMBIE) {
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 8b6da25..6487e4c 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -314,17 +314,29 @@ again:
+ #endif
+ 
+ 	lock_page(page_head);
++
++	/*
++	 * If page_head->mapping is NULL, then it cannot be a PageAnon
++	 * page; but it might be the ZERO_PAGE or in the gate area or
++	 * in a special mapping (all cases which we are happy to fail);
++	 * or it may have been a good file page when get_user_pages_fast
++	 * found it, but truncated or holepunched or subjected to
++	 * invalidate_complete_page2 before we got the page lock (also
++	 * cases which we are happy to fail).  And we hold a reference,
++	 * so refcount care in invalidate_complete_page's remove_mapping
++	 * prevents drop_caches from setting mapping to NULL beneath us.
++	 *
++	 * The case we do have to guard against is when memory pressure made
++	 * shmem_writepage move it from filecache to swapcache beneath us:
++	 * an unlikely race, but we do need to retry for page_head->mapping.
++	 */
+ 	if (!page_head->mapping) {
++		int shmem_swizzled = PageSwapCache(page_head);
+ 		unlock_page(page_head);
+ 		put_page(page_head);
+-		/*
+-		* ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+-		* trying to find one. RW mapping would have COW'd (and thus
+-		* have a mapping) so this page is RO and won't ever change.
+-		*/
+-		if ((page_head == ZERO_PAGE(address)))
+-			return -EFAULT;
+-		goto again;
++		if (shmem_swizzled)
++			goto again;
++		return -EFAULT;
+ 	}
+ 
+ 	/*
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index ea64012..e972276 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
+ 
+ 	/*
+ 	 * Ensure the task is not frozen.
+-	 * Also, when a freshly created task is scheduled once, changes
+-	 * its state to TASK_UNINTERRUPTIBLE without having ever been
+-	 * switched out once, it musn't be checked.
++	 * Also, skip vfork and any other user process that freezer should skip.
+ 	 */
+-	if (unlikely(t->flags & PF_FROZEN || !switch_count))
++	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
++	    return;
++
++	/*
++	 * When a freshly created task is scheduled once, changes its state to
++	 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
++	 * mustn't be checked.
++	 */
++	if (unlikely(!switch_count))
+ 		return;
+ 
+ 	if (switch_count != t->last_switch_count) {
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 3b8e028..e055e8b 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+ 
+ 	fput(file);
+ out_putname:
+-	putname(pathname);
++	__putname(pathname);
+ out:
+ 	return result;
+ }
+diff --git a/mm/filemap.c b/mm/filemap.c
+index a8251a8..dd828ea 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1807,7 +1807,7 @@ repeat:
+ 		page = __page_cache_alloc(gfp | __GFP_COLD);
+ 		if (!page)
+ 			return ERR_PTR(-ENOMEM);
+-		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
++		err = add_to_page_cache_lru(page, mapping, index, gfp);
+ 		if (unlikely(err)) {
+ 			page_cache_release(page);
+ 			if (err == -EEXIST)
+@@ -1904,10 +1904,7 @@ static struct page *wait_on_page_read(struct page *page)
+  * @gfp:	the page allocator flags to use if allocating
+  *
+  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+- * any new page allocations done using the specified allocation flags. Note
+- * that the Radix tree operations will still use GFP_KERNEL, so you can't
+- * expect to do this atomically or anything like that - but you can pass in
+- * other page requirements.
++ * any new page allocations done using the specified allocation flags.
+  *
+  * If the page does not get brought uptodate, return -EIO.
+  */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 80936a1..f9c5849 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -901,7 +901,6 @@ retry:
+ 	h->resv_huge_pages += delta;
+ 	ret = 0;
+ 
+-	spin_unlock(&hugetlb_lock);
+ 	/* Free the needed pages to the hugetlb pool */
+ 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ 		if ((--needed) < 0)
+@@ -915,6 +914,7 @@ retry:
+ 		VM_BUG_ON(page_count(page));
+ 		enqueue_huge_page(h, page);
+ 	}
++	spin_unlock(&hugetlb_lock);
+ 
+ 	/* Free unnecessary surplus pages to the buddy allocator */
+ free:
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 59ac5d6..d99217b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4963,9 +4963,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+ 		int cpu;
+ 		enable_swap_cgroup();
+ 		parent = NULL;
+-		root_mem_cgroup = mem;
+ 		if (mem_cgroup_soft_limit_tree_init())
+ 			goto free_out;
++		root_mem_cgroup = mem;
+ 		for_each_possible_cpu(cpu) {
+ 			struct memcg_stock_pcp *stock =
+ 						&per_cpu(memcg_stock, cpu);
+@@ -5004,7 +5004,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+ 	return &mem->css;
+ free_out:
+ 	__mem_cgroup_free(mem);
+-	root_mem_cgroup = NULL;
+ 	return ERR_PTR(error);
+ }
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 8093fc7..7c72487 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
+ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+ 		      const nodemask_t *nodemask, unsigned long totalpages)
+ {
+-	int points;
++	long points;
+ 
+ 	if (oom_unkillable_task(p, mem, nodemask))
+ 		return 0;
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 93b5a7c..0ae7a09 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
+ 		if (!is_vmalloc_addr(addr))
+ 			return __pa(addr);
+ 		else
+-			return page_to_phys(vmalloc_to_page(addr));
++			return page_to_phys(vmalloc_to_page(addr)) +
++			       offset_in_page(addr);
+ 	} else
+-		return page_to_phys(pcpu_addr_to_page(addr));
++		return page_to_phys(pcpu_addr_to_page(addr)) +
++		       offset_in_page(addr);
+ }
+ 
+ /**
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 4155abc..7d7fb20 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
+ 			     void __user *buffer,
+ 			     size_t *lenp, loff_t *ppos)
+ {
++	int old_value = *(int *)ctl->data;
+ 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
++	int new_value = *(int *)ctl->data;
+ 
+ 	if (write) {
+ 		struct ipv4_devconf *cnf = ctl->extra1;
+@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
+ 
+ 		if (cnf == net->ipv4.devconf_dflt)
+ 			devinet_copy_dflt_conf(net, i);
++		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
++			if ((new_value == 0) && (old_value != 0))
++				rt_cache_flush(net, 0);
+ 	}
+ 
+ 	return ret;
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index ab7e554..7fbcaba 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
+ 		}
+ 	}
+ 
++	/* no point in waiting if we could not bring up at least one device */
++	if (!ic_first_dev)
++		goto have_carrier;
++
+ 	/* wait for a carrier on at least one device */
+ 	start = jiffies;
+ 	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 378b20b..6f06f7f 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+ 	if (register_netdevice(dev) < 0)
+ 		goto failed_free;
+ 
++	strcpy(nt->parms.name, dev->name);
++
+ 	dev_hold(dev);
+ 	ipip_tunnel_link(ipn, nt);
+ 	return nt;
+@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 
+ 	tunnel->dev = dev;
+-	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
+ static int __net_init ipip_init_net(struct net *net)
+ {
+ 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
++	struct ip_tunnel *t;
+ 	int err;
+ 
+ 	ipn->tunnels[0] = ipn->tunnels_wc;
+@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
+ 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
+ 		goto err_reg_dev;
+ 
++	t = netdev_priv(ipn->fb_tunnel_dev);
++
++	strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
+ 	return 0;
+ 
+ err_reg_dev:
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 75ef66f..4845bfe 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -91,6 +91,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/prefetch.h>
+ #include <net/dst.h>
+ #include <net/net_namespace.h>
+ #include <net/protocol.h>
+@@ -132,6 +133,9 @@ static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ static int rt_chain_length_max __read_mostly	= 20;
+ 
++static struct delayed_work expires_work;
++static unsigned long expires_ljiffies;
++
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -821,6 +825,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
+ 	return ONE;
+ }
+ 
++static void rt_check_expire(void)
++{
++	static unsigned int rover;
++	unsigned int i = rover, goal;
++	struct rtable *rth;
++	struct rtable __rcu **rthp;
++	unsigned long samples = 0;
++	unsigned long sum = 0, sum2 = 0;
++	unsigned long delta;
++	u64 mult;
++
++	delta = jiffies - expires_ljiffies;
++	expires_ljiffies = jiffies;
++	mult = ((u64)delta) << rt_hash_log;
++	if (ip_rt_gc_timeout > 1)
++		do_div(mult, ip_rt_gc_timeout);
++	goal = (unsigned int)mult;
++	if (goal > rt_hash_mask)
++		goal = rt_hash_mask + 1;
++	for (; goal > 0; goal--) {
++		unsigned long tmo = ip_rt_gc_timeout;
++		unsigned long length;
++
++		i = (i + 1) & rt_hash_mask;
++		rthp = &rt_hash_table[i].chain;
++
++		if (need_resched())
++			cond_resched();
++
++		samples++;
++
++		if (rcu_dereference_raw(*rthp) == NULL)
++			continue;
++		length = 0;
++		spin_lock_bh(rt_hash_lock_addr(i));
++		while ((rth = rcu_dereference_protected(*rthp,
++					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
++			prefetch(rth->dst.rt_next);
++			if (rt_is_expired(rth)) {
++				*rthp = rth->dst.rt_next;
++				rt_free(rth);
++				continue;
++			}
++			if (rth->dst.expires) {
++				/* Entry is expired even if it is in use */
++				if (time_before_eq(jiffies, rth->dst.expires)) {
++nofree:
++					tmo >>= 1;
++					rthp = &rth->dst.rt_next;
++					/*
++					 * We only count entries on
++					 * a chain with equal hash inputs once
++					 * so that entries for different QOS
++					 * levels, and other non-hash input
++					 * attributes don't unfairly skew
++					 * the length computation
++					 */
++					length += has_noalias(rt_hash_table[i].chain, rth);
++					continue;
++				}
++			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
++				goto nofree;
++
++			/* Cleanup aged off entries. */
++			*rthp = rth->dst.rt_next;
++			rt_free(rth);
++		}
++		spin_unlock_bh(rt_hash_lock_addr(i));
++		sum += length;
++		sum2 += length*length;
++	}
++	if (samples) {
++		unsigned long avg = sum / samples;
++		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
++		rt_chain_length_max = max_t(unsigned long,
++					ip_rt_gc_elasticity,
++					(avg + 4*sd) >> FRACT_BITS);
++	}
++	rover = i;
++}
++
++/*
++ * rt_worker_func() is run in process context.
++ * we call rt_check_expire() to scan part of the hash table
++ */
++static void rt_worker_func(struct work_struct *work)
++{
++	rt_check_expire();
++	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
++}
++
+ /*
+  * Perturbation of rt_genid by a small quantity [1..256]
+  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
+@@ -3088,6 +3183,13 @@ static ctl_table ipv4_route_table[] = {
+ 		.proc_handler	= proc_dointvec_jiffies,
+ 	},
+ 	{
++		.procname	= "gc_interval",
++		.data		= &ip_rt_gc_interval,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_jiffies,
++	},
++	{
+ 		.procname	= "redirect_load",
+ 		.data		= &ip_rt_redirect_load,
+ 		.maxlen		= sizeof(int),
+@@ -3297,6 +3399,11 @@ int __init ip_rt_init(void)
+ 	devinet_init();
+ 	ip_fib_init();
+ 
++	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
++	expires_ljiffies = jiffies;
++	schedule_delayed_work(&expires_work,
++		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
++
+ 	if (ip_rt_proc_init())
+ 		printk(KERN_ERR "Unable to create route proc files\n");
+ #ifdef CONFIG_XFRM
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 1cca576..38490d5 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ 	if (register_netdevice(dev) < 0)
+ 		goto failed_free;
+ 
++	strcpy(nt->parms.name, dev->name);
++
+ 	dev_hold(dev);
+ 
+ 	ipip6_tunnel_link(sitn, nt);
+@@ -1141,7 +1143,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 
+ 	tunnel->dev = dev;
+-	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+@@ -1204,6 +1205,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
+ static int __net_init sit_init_net(struct net *net)
+ {
+ 	struct sit_net *sitn = net_generic(net, sit_net_id);
++	struct ip_tunnel *t;
+ 	int err;
+ 
+ 	sitn->tunnels[0] = sitn->tunnels_wc;
+@@ -1228,6 +1230,9 @@ static int __net_init sit_init_net(struct net *net)
+ 	if ((err = register_netdev(sitn->fb_tunnel_dev)))
+ 		goto err_reg_dev;
+ 
++	t = netdev_priv(sitn->fb_tunnel_dev);
++
++	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
+ 	return 0;
+ 
+ err_reg_dev:
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index dfd3a64..a18e6c3 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		copied += used;
+ 		len -= used;
+ 
++		/* For non stream protocols we get one packet per recvmsg call */
++		if (sk->sk_type != SOCK_STREAM)
++			goto copy_uaddr;
++
+ 		if (!(flags & MSG_PEEK)) {
+ 			sk_eat_skb(sk, skb, 0);
+ 			*seq = 0;
+ 		}
+ 
+-		/* For non stream protcols we get one packet per recvmsg call */
+-		if (sk->sk_type != SOCK_STREAM)
+-			goto copy_uaddr;
+-
+ 		/* Partial read */
+ 		if (used + offset < skb->len)
+ 			continue;
+@@ -857,6 +857,12 @@ copy_uaddr:
+ 	}
+ 	if (llc_sk(sk)->cmsg_flags)
+ 		llc_cmsg_rcv(msg, skb);
++
++	if (!(flags & MSG_PEEK)) {
++			sk_eat_skb(sk, skb, 0);
++			*seq = 0;
++	}
++
+ 	goto out;
+ }
+ 
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index db7db43..b7f4f5c 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -304,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+ 	__release(agg_queue);
+ }
+ 
++/*
++ * splice packets from the STA's pending to the local pending,
++ * requires a call to ieee80211_agg_splice_finish later
++ */
++static void __acquires(agg_queue)
++ieee80211_agg_splice_packets(struct ieee80211_local *local,
++			     struct tid_ampdu_tx *tid_tx, u16 tid)
++{
++	int queue = ieee80211_ac_from_tid(tid);
++	unsigned long flags;
++
++	ieee80211_stop_queue_agg(local, tid);
++
++	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
++			  " from the pending queue\n", tid))
++		return;
++
++	if (!skb_queue_empty(&tid_tx->pending)) {
++		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
++		/* copy over remaining packets */
++		skb_queue_splice_tail_init(&tid_tx->pending,
++					   &local->pending[queue]);
++		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
++	}
++}
++
++static void __releases(agg_queue)
++ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
++{
++	ieee80211_wake_queue_agg(local, tid);
++}
++
+ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ {
+ 	struct tid_ampdu_tx *tid_tx;
+@@ -315,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+ 
+ 	/*
+-	 * While we're asking the driver about the aggregation,
+-	 * stop the AC queue so that we don't have to worry
+-	 * about frames that came in while we were doing that,
+-	 * which would require us to put them to the AC pending
+-	 * afterwards which just makes the code more complex.
++	 * Start queuing up packets for this aggregation session.
++	 * We're going to release them once the driver is OK with
++	 * that.
+ 	 */
+-	ieee80211_stop_queue_agg(local, tid);
+-
+ 	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+ 
+ 	/*
+-	 * make sure no packets are being processed to get
+-	 * valid starting sequence number
++	 * Make sure no packets are being processed. This ensures that
++	 * we have a valid starting sequence number and that in-flight
++	 * packets have been flushed out and no packets for this TID
++	 * will go into the driver during the ampdu_action call.
+ 	 */
+ 	synchronize_net();
+ 
+@@ -341,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 					" tid %d\n", tid);
+ #endif
+ 		spin_lock_bh(&sta->lock);
++		ieee80211_agg_splice_packets(local, tid_tx, tid);
+ 		ieee80211_assign_tid_tx(sta, tid, NULL);
++		ieee80211_agg_splice_finish(local, tid);
+ 		spin_unlock_bh(&sta->lock);
+ 
+-		ieee80211_wake_queue_agg(local, tid);
+ 		kfree_rcu(tid_tx, rcu_head);
+ 		return;
+ 	}
+ 
+-	/* we can take packets again now */
+-	ieee80211_wake_queue_agg(local, tid);
+-
+ 	/* activate the timer for the recipient's addBA response */
+ 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+ #ifdef CONFIG_MAC80211_HT_DEBUG
+@@ -471,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
+ }
+ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
+ 
+-/*
+- * splice packets from the STA's pending to the local pending,
+- * requires a call to ieee80211_agg_splice_finish later
+- */
+-static void __acquires(agg_queue)
+-ieee80211_agg_splice_packets(struct ieee80211_local *local,
+-			     struct tid_ampdu_tx *tid_tx, u16 tid)
+-{
+-	int queue = ieee80211_ac_from_tid(tid);
+-	unsigned long flags;
+-
+-	ieee80211_stop_queue_agg(local, tid);
+-
+-	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+-			  " from the pending queue\n", tid))
+-		return;
+-
+-	if (!skb_queue_empty(&tid_tx->pending)) {
+-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+-		/* copy over remaining packets */
+-		skb_queue_splice_tail_init(&tid_tx->pending,
+-					   &local->pending[queue]);
+-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+-	}
+-}
+-
+-static void __releases(agg_queue)
+-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+-{
+-	ieee80211_wake_queue_agg(local, tid);
+-}
+-
+ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
+ 					 struct sta_info *sta, u16 tid)
+ {
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index b9493a0..6cd8ddf 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
+ 	struct gred_sched_data *q;
+ 
+ 	if (table->tab[dp] == NULL) {
+-		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
++		table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
+ 		if (table->tab[dp] == NULL)
+ 			return -ENOMEM;
+ 	}
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index ea17cbe..59b26b8 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ 	if (!netif_is_multiqueue(dev))
+ 		return -EOPNOTSUPP;
+ 
+-	if (nla_len(opt) < sizeof(*qopt))
++	if (!opt || nla_len(opt) < sizeof(*qopt))
+ 		return -EINVAL;
+ 
+ 	qopt = nla_data(opt);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 4a62888..17a6e65 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
+ 	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
+ 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+ 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+-		(unsigned long)sp->autoclose * HZ;
++		min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+ 
+ 	/* Initializes the timers */
+ 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 08b3cea..817174e 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
+ 	/* Keep track of how many bytes are in flight to the receiver. */
+ 	asoc->outqueue.outstanding_bytes += datasize;
+ 
+-	/* Update our view of the receiver's rwnd. Include sk_buff overhead
+-	 * while updating peer.rwnd so that it reduces the chances of a
+-	 * receiver running out of receive buffer space even when receive
+-	 * window is still open. This can happen when a sender is sending
+-	 * sending small messages.
+-	 */
+-	datasize += sizeof(struct sk_buff);
++	/* Update our view of the receiver's rwnd. */
+ 	if (datasize < rwnd)
+ 		rwnd -= datasize;
+ 	else
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index d036821..1f2938f 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
+ 					chunk->transport->flight_size -=
+ 							sctp_data_size(chunk);
+ 				q->outstanding_bytes -= sctp_data_size(chunk);
+-				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+-							sizeof(struct sk_buff));
++				q->asoc->peer.rwnd += sctp_data_size(chunk);
+ 			}
+ 			continue;
+ 		}
+@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
+ 			 * (Section 7.2.4)), add the data size of those
+ 			 * chunks to the rwnd.
+ 			 */
+-			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+-						sizeof(struct sk_buff));
++			q->asoc->peer.rwnd += sctp_data_size(chunk);
+ 			q->outstanding_bytes -= sctp_data_size(chunk);
+ 			if (chunk->transport)
+ 				transport->flight_size -= sctp_data_size(chunk);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 207175b..946afd6 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1144,6 +1144,9 @@ SCTP_STATIC __init int sctp_init(void)
+ 	sctp_max_instreams    		= SCTP_DEFAULT_INSTREAMS;
+ 	sctp_max_outstreams   		= SCTP_DEFAULT_OUTSTREAMS;
+ 
++	/* Initialize maximum autoclose timeout. */
++	sctp_max_autoclose		= INT_MAX / HZ;
++
+ 	/* Initialize handle used for association ids. */
+ 	idr_init(&sctp_assocs_id);
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index d3ccf79..fa9b5c7 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2129,8 +2129,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
+ 		return -EINVAL;
+ 	if (copy_from_user(&sp->autoclose, optval, optlen))
+ 		return -EFAULT;
+-	/* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
+-	sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
+ 
+ 	return 0;
+ }
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 50cb57f..6752f48 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
+ static int sack_timer_max = 500;
+ static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
+ static int rwnd_scale_max = 16;
++static unsigned long max_autoclose_min = 0;
++static unsigned long max_autoclose_max =
++	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
++	? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
+ 
+ extern long sysctl_sctp_mem[3];
+ extern int sysctl_sctp_rmem[3];
+@@ -251,6 +255,15 @@ static ctl_table sctp_table[] = {
+ 		.extra1		= &one,
+ 		.extra2		= &rwnd_scale_max,
+ 	},
++	{
++		.procname	= "max_autoclose",
++		.data		= &sctp_max_autoclose,
++		.maxlen		= sizeof(unsigned long),
++		.mode		= 0644,
++		.proc_handler	= &proc_doulongvec_minmax,
++		.extra1		= &max_autoclose_min,
++		.extra2		= &max_autoclose_max,
++	},
+ 
+ 	{ /* sentinel */ }
+ };
+diff --git a/security/selinux/netport.c b/security/selinux/netport.c
+index cfe2d72..e2b74eb 100644
+--- a/security/selinux/netport.c
++++ b/security/selinux/netport.c
+@@ -139,7 +139,9 @@ static void sel_netport_insert(struct sel_netport *port)
+ 	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
+ 		struct sel_netport *tail;
+ 		tail = list_entry(
+-			rcu_dereference(sel_netport_hash[idx].list.prev),
++			rcu_dereference_protected(
++				sel_netport_hash[idx].list.prev,
++				lockdep_is_held(&sel_netport_lock)),
+ 			struct sel_netport, list);
+ 		list_del_rcu(&tail->list);
+ 		call_rcu(&tail->rcu, sel_netport_free);

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.17.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.17.patch	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,1403 @@
+diff --git a/Documentation/HOWTO b/Documentation/HOWTO
+index 81bc1a9..f7ade3b 100644
+--- a/Documentation/HOWTO
++++ b/Documentation/HOWTO
+@@ -275,8 +275,8 @@ versions.
+ If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+ kernel is the current stable kernel.
+ 
+-2.6.x.y are maintained by the "stable" team <stable at kernel.org>, and are
+-released as needs dictate.  The normal release period is approximately 
++2.6.x.y are maintained by the "stable" team <stable at vger.kernel.org>, and
++are released as needs dictate.  The normal release period is approximately
+ two weeks, but it can be longer if there are no pressing problems.  A
+ security-related problem, instead, can cause a release to happen almost
+ instantly.
+diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
+index 903a254..8a48c9b 100644
+--- a/Documentation/development-process/5.Posting
++++ b/Documentation/development-process/5.Posting
+@@ -271,10 +271,10 @@ copies should go to:
+    the linux-kernel list.
+ 
+  - If you are fixing a bug, think about whether the fix should go into the
+-   next stable update.  If so, stable at kernel.org should get a copy of the
+-   patch.  Also add a "Cc: stable at kernel.org" to the tags within the patch
+-   itself; that will cause the stable team to get a notification when your
+-   fix goes into the mainline.
++   next stable update.  If so, stable at vger.kernel.org should get a copy of
++   the patch.  Also add a "Cc: stable at vger.kernel.org" to the tags within
++   the patch itself; that will cause the stable team to get a notification
++   when your fix goes into the mainline.
+ 
+ When selecting recipients for a patch, it is good to have an idea of who
+ you think will eventually accept the patch and get it merged.  While it
+diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
+index a4efa04..5335fa8 100644
+--- a/Documentation/usb/usbmon.txt
++++ b/Documentation/usb/usbmon.txt
+@@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously.
+ 
+ 2. Find which bus connects to the desired device
+ 
+-Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to
+-the device. Usually you do it by looking for the vendor string. If you have
+-many similar devices, unplug one and compare two /proc/bus/usb/devices outputs.
+-The T-line will have a bus number. Example:
++Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
++to the device. Usually you do it by looking for the vendor string. If you have
++many similar devices, unplug one and compare the two
++/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
++Example:
+ 
+ T:  Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  2 Spd=12  MxCh= 0
+ D:  Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs=  1
+@@ -58,7 +59,10 @@ P:  Vendor=0557 ProdID=2004 Rev= 1.00
+ S:  Manufacturer=ATEN
+ S:  Product=UC100KM V2.00
+ 
+-Bus=03 means it's bus 3.
++"Bus=03" means it's bus 3. Alternatively, you can look at the output from
++"lsusb" and get the bus number from the appropriate line. Example:
++
++Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
+ 
+ 3. Start 'cat'
+ 
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 34e2418..de85391 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -6039,7 +6039,7 @@ F:	arch/alpha/kernel/srm_env.c
+ 
+ STABLE BRANCH
+ M:	Greg Kroah-Hartman <greg at kroah.com>
+-L:	stable at kernel.org
++L:	stable at vger.kernel.org
+ S:	Maintained
+ 
+ STAGING SUBSYSTEM
+diff --git a/Makefile b/Makefile
+index 7f0d8e2..295fbda 100644
+diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
+index fe6f7c2..bc3c745 100644
+--- a/arch/powerpc/include/asm/time.h
++++ b/arch/powerpc/include/asm/time.h
+@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+ extern void secondary_cpu_time_init(void);
+ extern void iSeries_time_init_early(void);
+ 
++extern void decrementer_check_overflow(void);
++
+ #endif /* __KERNEL__ */
+ #endif /* __POWERPC_TIME_H */
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 5b428e3..ca2987d 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -170,16 +170,13 @@ notrace void arch_local_irq_restore(unsigned long en)
+ 	 */
+ 	local_paca->hard_enabled = en;
+ 
+-#ifndef CONFIG_BOOKE
+-	/* On server, re-trigger the decrementer if it went negative since
+-	 * some processors only trigger on edge transitions of the sign bit.
+-	 *
+-	 * BookE has a level sensitive decrementer (latches in TSR) so we
+-	 * don't need that
++	/*
++	 * Trigger the decrementer if we have a pending event. Some processors
++	 * only trigger on edge transitions of the sign bit. We might also
++	 * have disabled interrupts long enough that the decrementer wrapped
++	 * to positive.
+ 	 */
+-	if ((int)mfspr(SPRN_DEC) < 0)
+-		mtspr(SPRN_DEC, 1);
+-#endif /* CONFIG_BOOKE */
++	decrementer_check_overflow();
+ 
+ 	/*
+ 	 * Force the delivery of pending soft-disabled interrupts on PS3.
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 03b29a6..2de304a 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -889,6 +889,15 @@ static void __init clocksource_init(void)
+ 	       clock->name, clock->mult, clock->shift);
+ }
+ 
++void decrementer_check_overflow(void)
++{
++	u64 now = get_tb_or_rtc();
++	struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
++
++	if (now >= decrementer->next_tb)
++		set_dec(1);
++}
++
+ static int decrementer_set_next_event(unsigned long evt,
+ 				      struct clock_event_device *dev)
+ {
+diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
+index f106662..c9311cf 100644
+--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
++++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
+@@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
+ 	if (opcode > MAX_HCALL_OPCODE)
+ 		return;
+ 
+-	h = &get_cpu_var(hcall_stats)[opcode / 4];
++	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+ 	h->tb_start = mftb();
+ 	h->purr_start = mfspr(SPRN_PURR);
+ }
+@@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
+ 	h->num_calls++;
+ 	h->tb_total += mftb() - h->tb_start;
+ 	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
+-
+-	put_cpu_var(hcall_stats);
+ }
+ 
+ static int __init hcall_inst_init(void)
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index ed96b37..81e30d9 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -745,6 +745,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+ 		goto out;
+ 
+ 	(*depth)++;
++	preempt_disable();
+ 	trace_hcall_entry(opcode, args);
+ 	(*depth)--;
+ 
+@@ -767,6 +768,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
+ 
+ 	(*depth)++;
+ 	trace_hcall_exit(opcode, retval, retbuf);
++	preempt_enable();
+ 	(*depth)--;
+ 
+ out:
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 06ed6b4..3719c94 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
+ 	int loading = simple_strtol(buf, NULL, 10);
+ 	int i;
+ 
++	mutex_lock(&fw_lock);
++
++	if (!fw_priv->fw)
++		goto out;
++
+ 	switch (loading) {
+ 	case 1:
+-		mutex_lock(&fw_lock);
+-		if (!fw_priv->fw) {
+-			mutex_unlock(&fw_lock);
+-			break;
+-		}
+ 		firmware_free_data(fw_priv->fw);
+ 		memset(fw_priv->fw, 0, sizeof(struct firmware));
+ 		/* If the pages are not owned by 'struct firmware' */
+@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
+ 		fw_priv->page_array_size = 0;
+ 		fw_priv->nr_pages = 0;
+ 		set_bit(FW_STATUS_LOADING, &fw_priv->status);
+-		mutex_unlock(&fw_lock);
+ 		break;
+ 	case 0:
+ 		if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
+@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
+ 		fw_load_abort(fw_priv);
+ 		break;
+ 	}
+-
++out:
++	mutex_unlock(&fw_lock);
+ 	return count;
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
+index d8ca0a0..65df26c 100644
+--- a/drivers/infiniband/hw/qib/qib_iba6120.c
++++ b/drivers/infiniband/hw/qib/qib_iba6120.c
+@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
+ static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ 				    u32 updegr, u32 egrhd, u32 npkts)
+ {
+-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ 	if (updegr)
+ 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++	mmiowb();
++	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++	mmiowb();
+ }
+ 
+ static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
+index c765a2e..759bb63 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7220.c
++++ b/drivers/infiniband/hw/qib/qib_iba7220.c
+@@ -2704,9 +2704,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+ static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ 				    u32 updegr, u32 egrhd, u32 npkts)
+ {
+-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ 	if (updegr)
+ 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++	mmiowb();
++	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++	mmiowb();
+ }
+ 
+ static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 8ec5237..49e4a58 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -4060,10 +4060,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ 	 */
+ 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+ 		adjust_rcv_timeout(rcd, npkts);
+-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ 	if (updegr)
+ 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++	mmiowb();
++	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++	mmiowb();
+ }
+ 
+ static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 2065cb4..0b65c5f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1905,7 +1905,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 				 "but new slave device does not support netpoll.\n",
+ 				 bond_dev->name);
+ 			res = -EBUSY;
+-			goto err_close;
++			goto err_detach;
+ 		}
+ 	}
+ #endif
+@@ -1914,7 +1914,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 
+ 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
+ 	if (res)
+-		goto err_close;
++		goto err_detach;
+ 
+ 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+ 					 new_slave);
+@@ -1935,6 +1935,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ err_dest_symlinks:
+ 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
+ 
++err_detach:
++	write_lock_bh(&bond->lock);
++	bond_detach_slave(bond, new_slave);
++	write_unlock_bh(&bond->lock);
++
+ err_close:
+ 	dev_close(slave_dev);
+ 
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index c5c4b4d..7105577 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -371,7 +371,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 
+ 		skb_pull(skb, (size + 1) & 0xfffe);
+ 
+-		if (skb->len == 0)
++		if (skb->len < sizeof(header))
+ 			break;
+ 
+ 		head = (u8 *) skb->data;
+@@ -1560,6 +1560,10 @@ static const struct usb_device_id	products [] = {
+ 	// ASIX 88772a
+ 	USB_DEVICE(0x0db0, 0xa877),
+ 	.driver_info = (unsigned long) &ax88772_info,
++}, {
++	// Asus USB Ethernet Adapter
++	USB_DEVICE (0x0b95, 0x7e2b),
++	.driver_info = (unsigned long) &ax88772_info,
+ },
+ 	{ },		// END
+ };
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 6e7fe94..d4e9eac 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -878,6 +878,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x13b1, 0x0031) },
+ 	{ USB_DEVICE(0x1737, 0x0070) },
+ 	{ USB_DEVICE(0x1737, 0x0071) },
++	{ USB_DEVICE(0x1737, 0x0077) },
+ 	/* Logitec */
+ 	{ USB_DEVICE(0x0789, 0x0162) },
+ 	{ USB_DEVICE(0x0789, 0x0163) },
+@@ -1069,7 +1070,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x1740, 0x0605) },
+ 	{ USB_DEVICE(0x1740, 0x0615) },
+ 	/* Linksys */
+-	{ USB_DEVICE(0x1737, 0x0077) },
+ 	{ USB_DEVICE(0x1737, 0x0078) },
+ 	/* Logitec */
+ 	{ USB_DEVICE(0x0789, 0x0168) },
+diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
+index b07f8b7..e0e1688 100644
+--- a/drivers/net/wireless/wl12xx/boot.c
++++ b/drivers/net/wireless/wl12xx/boot.c
+@@ -328,6 +328,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ 		nvs_ptr += 3;
+ 
+ 		for (i = 0; i < burst_len; i++) {
++			if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
++				goto out_badnvs;
++
+ 			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
+ 			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
+ 
+@@ -339,6 +342,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ 			nvs_ptr += 4;
+ 			dest_addr += 4;
+ 		}
++
++		if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
++			goto out_badnvs;
+ 	}
+ 
+ 	/*
+@@ -350,6 +356,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ 	 */
+ 	nvs_ptr = (u8 *)wl->nvs +
+ 			ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
++
++	if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
++		goto out_badnvs;
++
+ 	nvs_len -= nvs_ptr - (u8 *)wl->nvs;
+ 
+ 	/* Now we must set the partition correctly */
+@@ -365,6 +375,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ 
+ 	kfree(nvs_aligned);
+ 	return 0;
++
++out_badnvs:
++	wl1271_error("nvs data is malformed");
++	return -EILSEQ;
+ }
+ 
+ static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
+diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
+index 42935ac..b8ec8cd 100644
+--- a/drivers/net/wireless/wl12xx/cmd.c
++++ b/drivers/net/wireless/wl12xx/cmd.c
+@@ -121,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
+ 	if (!wl->nvs)
+ 		return -ENODEV;
+ 
++	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++		wl1271_warning("FEM index from INI out of bounds");
++		return -EINVAL;
++	}
++
+ 	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+ 	if (!gen_parms)
+ 		return -ENOMEM;
+@@ -144,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
+ 	gp->tx_bip_fem_manufacturer =
+ 		gen_parms->general_params.tx_bip_fem_manufacturer;
+ 
++	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++		wl1271_warning("FEM index from FW out of bounds");
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+ 		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ 
+@@ -163,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
+ 	if (!wl->nvs)
+ 		return -ENODEV;
+ 
++	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++		wl1271_warning("FEM index from ini out of bounds");
++		return -EINVAL;
++	}
++
+ 	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+ 	if (!gen_parms)
+ 		return -ENOMEM;
+@@ -187,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
+ 	gp->tx_bip_fem_manufacturer =
+ 		gen_parms->general_params.tx_bip_fem_manufacturer;
+ 
++	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++		wl1271_warning("FEM index from FW out of bounds");
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+ 		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ 
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index f88e52a..c79857e 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -7211,6 +7211,7 @@ _scsih_remove(struct pci_dev *pdev)
+ 	}
+ 
+ 	sas_remove_host(shost);
++	mpt2sas_base_detach(ioc);
+ 	list_del(&ioc->list);
+ 	scsi_remove_host(shost);
+ 	scsi_host_put(shost);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index af9b781..b989495 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -199,8 +199,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 	unsigned int mode;
++	unsigned long flags;
+ 
+-	spin_lock(&port->lock);
++	spin_lock_irqsave(&port->lock, flags);
+ 
+ 	/* Disable interrupts */
+ 	UART_PUT_IDR(port, atmel_port->tx_done_mask);
+@@ -231,7 +232,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ 	/* Enable interrupts */
+ 	UART_PUT_IER(port, atmel_port->tx_done_mask);
+ 
+-	spin_unlock(&port->lock);
++	spin_unlock_irqrestore(&port->lock, flags);
+ 
+ }
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 8faa23c..158f631 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -554,10 +554,18 @@ static void acm_port_down(struct acm *acm)
+ 
+ static void acm_tty_hangup(struct tty_struct *tty)
+ {
+-	struct acm *acm = tty->driver_data;
+-	tty_port_hangup(&acm->port);
++	struct acm *acm;
++
+ 	mutex_lock(&open_mutex);
++	acm = tty->driver_data;
++
++	if (!acm)
++		goto out;
++
++	tty_port_hangup(&acm->port);
+ 	acm_port_down(acm);
++
++out:
+ 	mutex_unlock(&open_mutex);
+ }
+ 
+@@ -1183,6 +1191,8 @@ made_compressed_probe:
+ 		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
+ 		if (i < 0) {
+ 			kfree(acm->country_codes);
++			acm->country_codes = NULL;
++			acm->country_code_size = 0;
+ 			goto skip_countries;
+ 		}
+ 
+@@ -1191,6 +1201,8 @@ made_compressed_probe:
+ 		if (i < 0) {
+ 			device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
+ 			kfree(acm->country_codes);
++			acm->country_codes = NULL;
++			acm->country_code_size = 0;
+ 			goto skip_countries;
+ 		}
+ 	}
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 39ea00b..691d212 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1387,11 +1387,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ 					ret = -EAGAIN;
+ 				else
+ 					urb->transfer_flags |= URB_DMA_MAP_SG;
+-				if (n != urb->num_sgs) {
+-					urb->num_sgs = n;
++				urb->num_mapped_sgs = n;
++				if (n != urb->num_sgs)
+ 					urb->transfer_flags |=
+ 							URB_DMA_SG_COMBINED;
+-				}
+ 			} else if (urb->sg) {
+ 				struct scatterlist *sg = urb->sg;
+ 				urb->transfer_dma = dma_map_page(
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index ecf12e1..4c65eb6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
+-	/* Guillemot Webcam Hercules Dualpix Exchange*/
++	/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
+ 	{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Guillemot Webcam Hercules Dualpix Exchange*/
++	{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* M-Systems Flash Disk Pioneers */
+ 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 0917e3a..2499b3b 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -649,7 +649,7 @@ qh_urb_transaction (
+ 	/*
+ 	 * data transfer stage:  buffer setup
+ 	 */
+-	i = urb->num_sgs;
++	i = urb->num_mapped_sgs;
+ 	if (len > 0 && i > 0) {
+ 		sg = urb->sg;
+ 		buf = sg_dma_address(sg);
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index f9cf3f0..23107e2 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
+ 	struct ohci_hcd *ohci;
+ 
+ 	ohci = hcd_to_ohci (hcd);
+-	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+-	ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
++	ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
+ 
+-	/* If the SHUTDOWN quirk is set, don't put the controller in RESET */
+-	ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
+-			OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
+-			OHCI_CTRL_RWC);
+-	ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
++	/* Software reset, after which the controller goes into SUSPEND */
++	ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
++	ohci_readl(ohci, &ohci->regs->cmdstatus);	/* flush the writes */
++	udelay(10);
+ 
+-	/* flush the writes */
+-	(void) ohci_readl (ohci, &ohci->regs->control);
++	ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
+ }
+ 
+ static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index ad8166c..bc01b06 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
+-/* nVidia controllers continue to drive Reset signalling on the bus
+- * even after system shutdown, wasting power.  This flag tells the
+- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
+- */
+-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
+-{
+-	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+-	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+-
+-	/* Evidently nVidia fixed their later hardware; this is a guess at
+-	 * the changeover point.
+-	 */
+-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB		0x026d
+-
+-	if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
+-		ohci->flags |= OHCI_QUIRK_SHUTDOWN;
+-		ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
+-	}
+-
+-	return 0;
+-}
+-
+ static void sb800_prefetch(struct ohci_hcd *ohci, int on)
+ {
+ 	struct pci_dev *pdev;
+@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+ 		.driver_data = (unsigned long)ohci_quirk_amd700,
+ 	},
+-	{
+-		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+-		.driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
+-	},
+ 
+ 	/* FIXME for some of the early AMD 760 southbridges, OHCI
+ 	 * won't work at all.  blacklist them.
+diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
+index 35e5fd6..0795b93 100644
+--- a/drivers/usb/host/ohci.h
++++ b/drivers/usb/host/ohci.h
+@@ -403,7 +403,6 @@ struct ohci_hcd {
+ #define	OHCI_QUIRK_HUB_POWER	0x100			/* distrust firmware power/oc setup */
+ #define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
+ #define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
+-#define	OHCI_QUIRK_SHUTDOWN	0x800			/* nVidia power bug */
+ 	// there are also chip quirks/bugs in init logic
+ 
+ 	struct work_struct	nec_work;	/* Worker for NEC quirk */
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index a495d48..23e04fb 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -36,6 +36,7 @@
+ #define OHCI_INTRENABLE		0x10
+ #define OHCI_INTRDISABLE	0x14
+ #define OHCI_FMINTERVAL		0x34
++#define OHCI_HCFS		(3 << 6)	/* hc functional state */
+ #define OHCI_HCR		(1 << 0)	/* host controller reset */
+ #define OHCI_OCR		(1 << 3)	/* ownership change request */
+ #define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
+@@ -465,6 +466,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ {
+ 	void __iomem *base;
+ 	u32 control;
++	u32 fminterval;
++	int cnt;
+ 
+ 	if (!mmio_resource_enabled(pdev, 0))
+ 		return;
+@@ -497,41 +500,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ 	}
+ #endif
+ 
+-	/* reset controller, preserving RWC (and possibly IR) */
+-	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+-	readl(base + OHCI_CONTROL);
++	/* disable interrupts */
++	writel((u32) ~0, base + OHCI_INTRDISABLE);
+ 
+-	/* Some NVIDIA controllers stop working if kept in RESET for too long */
+-	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+-		u32 fminterval;
+-		int cnt;
++	/* Reset the USB bus, if the controller isn't already in RESET */
++	if (control & OHCI_HCFS) {
++		/* Go into RESET, preserving RWC (and possibly IR) */
++		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
++		readl(base + OHCI_CONTROL);
+ 
+-		/* drive reset for at least 50 ms (7.1.7.5) */
++		/* drive bus reset for at least 50 ms (7.1.7.5) */
+ 		msleep(50);
++	}
+ 
+-		/* software reset of the controller, preserving HcFmInterval */
+-		fminterval = readl(base + OHCI_FMINTERVAL);
+-		writel(OHCI_HCR, base + OHCI_CMDSTATUS);
++	/* software reset of the controller, preserving HcFmInterval */
++	fminterval = readl(base + OHCI_FMINTERVAL);
++	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+ 
+-		/* reset requires max 10 us delay */
+-		for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
+-			if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+-				break;
+-			udelay(1);
+-		}
+-		writel(fminterval, base + OHCI_FMINTERVAL);
+-
+-		/* Now we're in the SUSPEND state with all devices reset
+-		 * and wakeups and interrupts disabled
+-		 */
++	/* reset requires max 10 us delay */
++	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
++		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
++			break;
++		udelay(1);
+ 	}
++	writel(fminterval, base + OHCI_FMINTERVAL);
+ 
+-	/*
+-	 * disable interrupts
+-	 */
+-	writel(~(u32)0, base + OHCI_INTRDISABLE);
+-	writel(~(u32)0, base + OHCI_INTRSTATUS);
+-
++	/* Now the controller is safely in SUSPEND and nothing can wake it up */
+ 	iounmap(base);
+ }
+ 
+diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
+index 84ed28b..8253991 100644
+--- a/drivers/usb/host/uhci-q.c
++++ b/drivers/usb/host/uhci-q.c
+@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
+ 	if (usb_pipein(urb->pipe))
+ 		status |= TD_CTRL_SPD;
+ 
+-	i = urb->num_sgs;
++	i = urb->num_mapped_sgs;
+ 	if (len > 0 && i > 0) {
+ 		sg = urb->sg;
+ 		data = sg_dma_address(sg);
+diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
+index a403b53..76083ae 100644
+--- a/drivers/usb/host/whci/qset.c
++++ b/drivers/usb/host/whci/qset.c
+@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
+ 
+ 	remaining = urb->transfer_buffer_length;
+ 
+-	for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ 		dma_addr_t dma_addr;
+ 		size_t dma_remaining;
+ 		dma_addr_t sp, ep;
+@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+ 
+ 	remaining = urb->transfer_buffer_length;
+ 
+-	for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ 		size_t len;
+ 		size_t sg_remaining;
+ 		void *orig;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index b4b0691..c0c5d6c 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2570,7 +2570,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+ 	struct scatterlist *sg;
+ 
+ 	sg = NULL;
+-	num_sgs = urb->num_sgs;
++	num_sgs = urb->num_mapped_sgs;
+ 	temp = urb->transfer_buffer_length;
+ 
+ 	xhci_dbg(xhci, "count sg list trbs: \n");
+@@ -2754,7 +2754,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		return -EINVAL;
+ 
+ 	num_trbs = count_sg_trbs_needed(xhci, urb);
+-	num_sgs = urb->num_sgs;
++	num_sgs = urb->num_mapped_sgs;
+ 	total_packet_count = roundup(urb->transfer_buffer_length,
+ 			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 221f14e..107438e 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1568,6 +1568,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
+ 		/* FIXME: can we allocate more resources for the HC? */
+ 		break;
+ 	case COMP_BW_ERR:
++	case COMP_2ND_BW_ERR:
+ 		dev_warn(&udev->dev, "Not enough bandwidth "
+ 				"for new device state.\n");
+ 		ret = -ENOSPC;
+@@ -2183,8 +2184,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		max_streams = USB_SS_MAX_STREAMS(
+-				eps[i]->ss_ep_comp.bmAttributes);
++		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
+ 		if (max_streams < (*num_streams - 1)) {
+ 			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+ 					eps[i]->desc.bEndpointAddress,
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 49ce76c..3e7c3a6 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -900,7 +900,6 @@ struct xhci_transfer_event {
+ /* Invalid Stream ID Error */
+ #define COMP_STRID_ERR	34
+ /* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+-/* FIXME - check for this */
+ #define COMP_2ND_BW_ERR	35
+ /* Split Transaction Error */
+ #define	COMP_SPLIT_ERR	36
+diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
+index fe1d443..8f725f6 100644
+--- a/drivers/usb/misc/isight_firmware.c
++++ b/drivers/usb/misc/isight_firmware.c
+@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
+ 
+ 	ptr = firmware->data;
+ 
++	buf[0] = 0x01;
+ 	if (usb_control_msg
+-	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
++	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
+ 	     300) != 1) {
+ 		printk(KERN_ERR
+ 		       "Failed to initialise isight firmware loader\n");
+@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
+ 		}
+ 	}
+ 
++	buf[0] = 0x00;
+ 	if (usb_control_msg
+-	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
++	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
+ 	     300) != 1) {
+ 		printk(KERN_ERR "isight firmware loading completion failed\n");
+ 		ret = -ENODEV;
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index dce7182..a0232a7 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2078,8 +2078,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ 	if (status < 0)
+ 		goto fail3;
+ 
+-	pm_runtime_put(musb->controller);
+-
+ 	status = musb_init_debugfs(musb);
+ 	if (status < 0)
+ 		goto fail4;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index fd67cc5..a1a324b 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
+ 	{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+ 	{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
++	{ USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
+ 	{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ 	{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
+ 	{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index 60f38d5..0a8c1e6 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -315,7 +315,7 @@ static int omninet_write_room(struct tty_struct *tty)
+ 	int room = 0; /* Default: no room */
+ 
+ 	/* FIXME: no consistent locking for write_urb_busy */
+-	if (wport->write_urb_busy)
++	if (!wport->write_urb_busy)
+ 		room = wport->bulk_out_size - OMNINET_HEADERLEN;
+ 
+ 	dbg("%s - returns %d", __func__, room);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d2becb9..c96b6b6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -472,6 +472,14 @@ static void option_instat_callback(struct urb *urb);
+ #define YUGA_PRODUCT_CLU528			0x260D
+ #define YUGA_PRODUCT_CLU526			0x260F
+ 
++/* Viettel products */
++#define VIETTEL_VENDOR_ID			0x2262
++#define VIETTEL_PRODUCT_VT1000			0x0002
++
++/* ZD Incorporated */
++#define ZD_VENDOR_ID				0x0685
++#define ZD_PRODUCT_7000				0x7000
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -1173,6 +1181,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index c325e69..9e069ef 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -1073,6 +1073,7 @@ static struct usb_driver usb_storage_driver = {
+ 	.id_table =	usb_storage_usb_ids,
+ 	.supports_autosuspend = 1,
+ 	.soft_unbind =	1,
++	.no_dynamic_id = 1,
+ };
+ 
+ static int __init usb_stor_init(void)
+diff --git a/drivers/video/offb.c b/drivers/video/offb.c
+index cb163a5..3251a02 100644
+--- a/drivers/video/offb.c
++++ b/drivers/video/offb.c
+@@ -100,36 +100,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ 			  u_int transp, struct fb_info *info)
+ {
+ 	struct offb_par *par = (struct offb_par *) info->par;
+-	int i, depth;
+-	u32 *pal = info->pseudo_palette;
+-
+-	depth = info->var.bits_per_pixel;
+-	if (depth == 16)
+-		depth = (info->var.green.length == 5) ? 15 : 16;
+-
+-	if (regno > 255 ||
+-	    (depth == 16 && regno > 63) ||
+-	    (depth == 15 && regno > 31))
+-		return 1;
+-
+-	if (regno < 16) {
+-		switch (depth) {
+-		case 15:
+-			pal[regno] = (regno << 10) | (regno << 5) | regno;
+-			break;
+-		case 16:
+-			pal[regno] = (regno << 11) | (regno << 5) | regno;
+-			break;
+-		case 24:
+-			pal[regno] = (regno << 16) | (regno << 8) | regno;
+-			break;
+-		case 32:
+-			i = (regno << 8) | regno;
+-			pal[regno] = (i << 16) | i;
+-			break;
++
++	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
++		u32 *pal = info->pseudo_palette;
++		u32 cr = red >> (16 - info->var.red.length);
++		u32 cg = green >> (16 - info->var.green.length);
++		u32 cb = blue >> (16 - info->var.blue.length);
++		u32 value;
++
++		if (regno >= 16)
++			return -EINVAL;
++
++		value = (cr << info->var.red.offset) |
++			(cg << info->var.green.offset) |
++			(cb << info->var.blue.offset);
++		if (info->var.transp.length > 0) {
++			u32 mask = (1 << info->var.transp.length) - 1;
++			mask <<= info->var.transp.offset;
++			value |= mask;
+ 		}
++		pal[regno] = value;
++		return 0;
+ 	}
+ 
++	if (regno > 255)
++		return -EINVAL;
++
+ 	red >>= 8;
+ 	green >>= 8;
+ 	blue >>= 8;
+@@ -381,7 +377,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
+ 				int pitch, unsigned long address,
+ 				int foreign_endian, struct device_node *dp)
+ {
+-	unsigned long res_size = pitch * height * (depth + 7) / 8;
++	unsigned long res_size = pitch * height;
+ 	struct offb_par *par = &default_par;
+ 	unsigned long res_start = address;
+ 	struct fb_fix_screeninfo *fix;
+diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
+index 3451d23..db9ba1a 100644
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -1568,7 +1568,13 @@ static int ext3_ordered_writepage(struct page *page,
+ 	int err;
+ 
+ 	J_ASSERT(PageLocked(page));
+-	WARN_ON_ONCE(IS_RDONLY(inode));
++	/*
++	 * We don't want to warn for emergency remount. The condition is
++	 * ordered to avoid dereferencing inode->i_sb in non-error case to
++	 * avoid slow-downs.
++	 */
++	WARN_ON_ONCE(IS_RDONLY(inode) &&
++		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+ 
+ 	/*
+ 	 * We give up here if we're reentered, because it might be for a
+@@ -1642,7 +1648,13 @@ static int ext3_writeback_writepage(struct page *page,
+ 	int err;
+ 
+ 	J_ASSERT(PageLocked(page));
+-	WARN_ON_ONCE(IS_RDONLY(inode));
++	/*
++	 * We don't want to warn for emergency remount. The condition is
++	 * ordered to avoid dereferencing inode->i_sb in non-error case to
++	 * avoid slow-downs.
++	 */
++	WARN_ON_ONCE(IS_RDONLY(inode) &&
++		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+ 
+ 	if (ext3_journal_current_handle())
+ 		goto out_fail;
+@@ -1684,7 +1696,13 @@ static int ext3_journalled_writepage(struct page *page,
+ 	int err;
+ 
+ 	J_ASSERT(PageLocked(page));
+-	WARN_ON_ONCE(IS_RDONLY(inode));
++	/*
++	 * We don't want to warn for emergency remount. The condition is
++	 * ordered to avoid dereferencing inode->i_sb in non-error case to
++	 * avoid slow-downs.
++	 */
++	WARN_ON_ONCE(IS_RDONLY(inode) &&
++		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+ 
+ 	if (ext3_journal_current_handle())
+ 		goto no_write;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index aa91089..f19dfbf 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode, int truncate)
+ static void reiserfs_kill_sb(struct super_block *s)
+ {
+ 	if (REISERFS_SB(s)) {
+-		if (REISERFS_SB(s)->xattr_root) {
+-			d_invalidate(REISERFS_SB(s)->xattr_root);
+-			dput(REISERFS_SB(s)->xattr_root);
+-			REISERFS_SB(s)->xattr_root = NULL;
+-		}
+-		if (REISERFS_SB(s)->priv_root) {
+-			d_invalidate(REISERFS_SB(s)->priv_root);
+-			dput(REISERFS_SB(s)->priv_root);
+-			REISERFS_SB(s)->priv_root = NULL;
+-		}
++		/*
++		 * Force any pending inode evictions to occur now. Any
++		 * inodes to be removed that have extended attributes
++		 * associated with them need to clean them up before
++		 * we can release the extended attribute root dentries.
++		 * shrink_dcache_for_umount will BUG if we don't release
++		 * those before it's called so ->put_super is too late.
++		 */
++		shrink_dcache_sb(s);
++
++		dput(REISERFS_SB(s)->xattr_root);
++		REISERFS_SB(s)->xattr_root = NULL;
++		dput(REISERFS_SB(s)->priv_root);
++		REISERFS_SB(s)->priv_root = NULL;
+ 	}
+ 
+ 	kill_block_super(s);
+@@ -1164,7 +1168,8 @@ static void handle_quota_files(struct super_block *s, char **qf_names,
+ 			kfree(REISERFS_SB(s)->s_qf_names[i]);
+ 		REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
+ 	}
+-	REISERFS_SB(s)->s_jquota_fmt = *qfmt;
++	if (*qfmt)
++		REISERFS_SB(s)->s_jquota_fmt = *qfmt;
+ }
+ #endif
+ 
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index 2a346bb..0c0c9d3 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -125,7 +125,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ 			err = udf_expand_file_adinicb(inode);
+ 			if (err) {
+ 				udf_debug("udf_expand_adinicb: err=%d\n", err);
+-				up_write(&iinfo->i_data_sem);
+ 				return err;
+ 			}
+ 		} else {
+@@ -133,9 +132,10 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ 				iinfo->i_lenAlloc = pos + count;
+ 			else
+ 				iinfo->i_lenAlloc = inode->i_size;
++			up_write(&iinfo->i_data_sem);
+ 		}
+-	}
+-	up_write(&iinfo->i_data_sem);
++	} else
++		up_write(&iinfo->i_data_sem);
+ 
+ 	retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+ 	if (retval > 0)
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 1d1358e..262050f 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -145,6 +145,12 @@ const struct address_space_operations udf_aops = {
+ 	.bmap		= udf_bmap,
+ };
+ 
++/*
++ * Expand file stored in ICB to a normal one-block-file
++ *
++ * This function requires i_data_sem for writing and releases it.
++ * This function requires i_mutex held
++ */
+ int udf_expand_file_adinicb(struct inode *inode)
+ {
+ 	struct page *page;
+@@ -163,9 +169,15 @@ int udf_expand_file_adinicb(struct inode *inode)
+ 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ 		/* from now on we have normal address_space methods */
+ 		inode->i_data.a_ops = &udf_aops;
++		up_write(&iinfo->i_data_sem);
+ 		mark_inode_dirty(inode);
+ 		return 0;
+ 	}
++	/*
++	 * Release i_data_sem so that we can lock a page - page lock ranks
++	 * above i_data_sem. i_mutex still protects us against file changes.
++	 */
++	up_write(&iinfo->i_data_sem);
+ 
+ 	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ 	if (!page)
+@@ -181,6 +193,7 @@ int udf_expand_file_adinicb(struct inode *inode)
+ 		SetPageUptodate(page);
+ 		kunmap(page);
+ 	}
++	down_write(&iinfo->i_data_sem);
+ 	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
+ 	       iinfo->i_lenAlloc);
+ 	iinfo->i_lenAlloc = 0;
+@@ -190,17 +203,20 @@ int udf_expand_file_adinicb(struct inode *inode)
+ 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ 	/* from now on we have normal address_space methods */
+ 	inode->i_data.a_ops = &udf_aops;
++	up_write(&iinfo->i_data_sem);
+ 	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+ 	if (err) {
+ 		/* Restore everything back so that we don't lose data... */
+ 		lock_page(page);
+ 		kaddr = kmap(page);
++		down_write(&iinfo->i_data_sem);
+ 		memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
+ 		       inode->i_size);
+ 		kunmap(page);
+ 		unlock_page(page);
+ 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ 		inode->i_data.a_ops = &udf_adinicb_aops;
++		up_write(&iinfo->i_data_sem);
+ 	}
+ 	page_cache_release(page);
+ 	mark_inode_dirty(inode);
+@@ -1105,10 +1121,9 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ 			if (bsize <
+ 			    (udf_file_entry_alloc_offset(inode) + newsize)) {
+ 				err = udf_expand_file_adinicb(inode);
+-				if (err) {
+-					up_write(&iinfo->i_data_sem);
++				if (err)
+ 					return err;
+-				}
++				down_write(&iinfo->i_data_sem);
+ 			} else
+ 				iinfo->i_lenAlloc = newsize;
+ 		}
+diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
+index 4b9fb91..f86e034 100644
+--- a/fs/xfs/linux-2.6/xfs_acl.c
++++ b/fs/xfs/linux-2.6/xfs_acl.c
+@@ -39,7 +39,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
+ 	struct posix_acl_entry *acl_e;
+ 	struct posix_acl *acl;
+ 	struct xfs_acl_entry *ace;
+-	int count, i;
++	unsigned int count, i;
+ 
+ 	count = be32_to_cpu(aclp->acl_cnt);
+ 	if (count > XFS_ACL_MAX_ENTRIES)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 73c7df4..b08e04c 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1202,6 +1202,7 @@ struct urb {
+ 	void *transfer_buffer;		/* (in) associated data buffer */
+ 	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
+ 	struct scatterlist *sg;		/* (in) scatter gather buffer list */
++	int num_mapped_sgs;		/* (internal) mapped sg entries */
+ 	int num_sgs;			/* (in) number of entries in the sg list */
+ 	u32 transfer_buffer_length;	/* (in) data buffer length */
+ 	u32 actual_length;		/* (return) actual transfer length */
+diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
+index 0fd3fbd..cf65b5c 100644
+--- a/include/linux/usb/ch9.h
++++ b/include/linux/usb/ch9.h
+@@ -583,8 +583,26 @@ struct usb_ss_ep_comp_descriptor {
+ } __attribute__ ((packed));
+ 
+ #define USB_DT_SS_EP_COMP_SIZE		6
++
+ /* Bits 4:0 of bmAttributes if this is a bulk endpoint */
+-#define USB_SS_MAX_STREAMS(p)		(1 << ((p) & 0x1f))
++static inline int
++usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
++{
++	int		max_streams;
++
++	if (!comp)
++		return 0;
++
++	max_streams = comp->bmAttributes & 0x1f;
++
++	if (!max_streams)
++		return 0;
++
++	max_streams = 1 << max_streams;
++
++	return max_streams;
++}
++
+ /* Bits 1:0 of bmAttributes if this is an isoc endpoint */
+ #define USB_SS_MULT(p)			(1 + ((p) & 0x3))
+ 
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 575a5e7..2efce77 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1173,10 +1173,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
+ 
+ 	/*
+ 	 * If the 'all' option was specified select all the subsystems,
+-	 * otherwise 'all, 'none' and a subsystem name options were not
+-	 * specified, let's default to 'all'
++	 * otherwise if 'none', 'name=' and a subsystem name options
++	 * were not specified, let's default to 'all'
+ 	 */
+-	if (all_ss || (!all_ss && !one_ss && !opts->none)) {
++	if (all_ss || (!one_ss && !opts->none && !opts->name)) {
+ 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ 			struct cgroup_subsys *ss = subsys[i];
+ 			if (ss == NULL)
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 12b7458..aa39dd7 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -15,6 +15,7 @@
+ #include <linux/stop_machine.h>
+ #include <linux/mutex.h>
+ #include <linux/gfp.h>
++#include <linux/suspend.h>
+ 
+ #ifdef CONFIG_SMP
+ /* Serializes the updates to cpu_online_mask, cpu_present_mask */
+@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
+ 	return 0;
+ }
+ core_initcall(alloc_frozen_cpus);
++
++/*
++ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
++ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
++ * to continue until any currently running CPU hotplug operation gets
++ * completed.
++ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
++ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
++ * CPU hotplug path and released only after it is complete. Thus, we
++ * (and hence the freezer) will block here until any currently running CPU
++ * hotplug operation gets completed.
++ */
++void cpu_hotplug_disable_before_freeze(void)
++{
++	cpu_maps_update_begin();
++	cpu_hotplug_disabled = 1;
++	cpu_maps_update_done();
++}
++
++
++/*
++ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
++ * disabled while beginning to freeze tasks).
++ */
++void cpu_hotplug_enable_after_thaw(void)
++{
++	cpu_maps_update_begin();
++	cpu_hotplug_disabled = 0;
++	cpu_maps_update_done();
++}
++
++/*
++ * When callbacks for CPU hotplug notifications are being executed, we must
++ * ensure that the state of the system with respect to the tasks being frozen
++ * or not, as reported by the notification, remains unchanged *throughout the
++ * duration* of the execution of the callbacks.
++ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
++ *
++ * This synchronization is implemented by mutually excluding regular CPU
++ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
++ * Hibernate notifications.
++ */
++static int
++cpu_hotplug_pm_callback(struct notifier_block *nb,
++			unsigned long action, void *ptr)
++{
++	switch (action) {
++
++	case PM_SUSPEND_PREPARE:
++	case PM_HIBERNATION_PREPARE:
++		cpu_hotplug_disable_before_freeze();
++		break;
++
++	case PM_POST_SUSPEND:
++	case PM_POST_HIBERNATION:
++		cpu_hotplug_enable_after_thaw();
++		break;
++
++	default:
++		return NOTIFY_DONE;
++	}
++
++	return NOTIFY_OK;
++}
++
++
++int cpu_hotplug_pm_sync_init(void)
++{
++	pm_notifier(cpu_hotplug_pm_callback, 0);
++	return 0;
++}
++core_initcall(cpu_hotplug_pm_sync_init);
++
+ #endif /* CONFIG_PM_SLEEP_SMP */
+ 
+ /**
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index d577199..e0d42db 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -875,6 +875,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
+ 		 * to be intended in a v3 query.
+ 		 */
+ 		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
++		if (!max_delay)
++			max_delay = 1;	/* can't mod w/ 0 */
+ 	} else { /* v3 */
+ 		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
+ 			return;
+diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
+index 6c164dc..bf54c48 100644
+--- a/tools/perf/util/trace-event-parse.c
++++ b/tools/perf/util/trace-event-parse.c
+@@ -1582,6 +1582,8 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
+ 	field = malloc_or_die(sizeof(*field));
+ 
+ 	type = process_arg(event, field, &token);
++	while (type == EVENT_OP)
++		type = process_op(event, field, &token);
+ 	if (test_type_token(type, token, EVENT_DELIM, ","))
+ 		goto out_free;
+ 

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.18.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.18.patch	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,3379 @@
+diff --git a/Makefile b/Makefile
+index 295fbda..581b8e9 100644
+diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
+index 3be485a..f19de9f 100644
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
+ static struct acpi_table_slit __initdata *slit_table;
+ cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
+ 
+-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
++static int __init
++get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+ {
+ 	int pxm;
+ 
+ 	pxm = pa->proximity_domain_lo;
+-	if (ia64_platform_is("sn2"))
++	if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
+ 		pxm += pa->proximity_domain_hi[0] << 8;
+ 	return pxm;
+ }
+ 
+-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
++static int __init
++get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+ {
+ 	int pxm;
+ 
+ 	pxm = ma->proximity_domain;
+-	if (!ia64_platform_is("sn2"))
++	if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
+ 		pxm &= 0xff;
+ 
+ 	return pxm;
+diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
+index 577abba..83bb960 100644
+--- a/arch/score/kernel/entry.S
++++ b/arch/score/kernel/entry.S
+@@ -408,7 +408,7 @@ ENTRY(handle_sys)
+ 	sw	r9, [r0, PT_EPC]
+ 
+ 	cmpi.c	r27, __NR_syscalls 	# check syscall number
+-	bgtu	illegal_syscall
++	bgeu	illegal_syscall
+ 
+ 	slli	r8, r27, 2		# get syscall routine
+ 	la	r11, sys_call_table
+diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
+index 67f87f2..78a1eff 100644
+--- a/arch/x86/include/asm/amd_nb.h
++++ b/arch/x86/include/asm/amd_nb.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_X86_AMD_NB_H
+ #define _ASM_X86_AMD_NB_H
+ 
++#include <linux/ioport.h>
+ #include <linux/pci.h>
+ 
+ struct amd_nb_bus_dev_range {
+@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
+ extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
+ 
+ extern bool early_is_amd_nb(u32 value);
++extern struct resource *amd_get_mmconfig_range(struct resource *res);
+ extern int amd_cache_northbridges(void);
+ extern void amd_flush_garts(void);
+ extern int amd_numa_init(void);
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4c39baa..bae1efe 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
+ 	return false;
+ }
+ 
++struct resource *amd_get_mmconfig_range(struct resource *res)
++{
++	u32 address;
++	u64 base, msr;
++	unsigned segn_busn_bits;
++
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++		return NULL;
++
++	/* assume all cpus from fam10h have mmconfig */
++        if (boot_cpu_data.x86 < 0x10)
++		return NULL;
++
++	address = MSR_FAM10H_MMIO_CONF_BASE;
++	rdmsrl(address, msr);
++
++	/* mmconfig is not enabled */
++	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
++		return NULL;
++
++	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
++
++	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
++			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
++
++	res->flags = IORESOURCE_MEM;
++	res->start = base;
++	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
++	return res;
++}
++
+ int amd_get_subcaches(int cpu)
+ {
+ 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index cfeb978..874c208 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -779,7 +779,12 @@ void __init uv_system_init(void)
+ 	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+ 		uv_possible_blades +=
+ 		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
+-	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
++
++	/* uv_num_possible_blades() is really the hub count */
++	printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
++			is_uv1_hub() ? uv_num_possible_blades() :
++			(uv_num_possible_blades() + 1) / 2,
++			uv_num_possible_blades());
+ 
+ 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
+ 	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 1dab519..f927429 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
+ 	*/
+ 	if (current->flags & PF_RANDOMIZE) {
+ 		if (mmap_is_ia32())
+-			rnd = (long)get_random_int() % (1<<8);
++			rnd = get_random_int() % (1<<8);
+ 		else
+-			rnd = (long)(get_random_int() % (1<<28));
++			rnd = get_random_int() % (1<<28);
+ 	}
+ 	return rnd << PAGE_SHIFT;
+ }
+diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
+index 81dbfde..7efd0c6 100644
+--- a/arch/x86/mm/srat.c
++++ b/arch/x86/mm/srat.c
+@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+ 	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ 		return;
+ 	pxm = pa->proximity_domain_lo;
++	if (acpi_srat_revision >= 2)
++		pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
+ 	node = setup_node(pxm);
+ 	if (node < 0) {
+ 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
+@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+ 	start = ma->base_address;
+ 	end = start + ma->length;
+ 	pxm = ma->proximity_domain;
++	if (acpi_srat_revision <= 1)
++		pxm &= 0xff;
+ 	node = setup_node(pxm);
+ 	if (node < 0) {
+ 		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
+diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
+index 6b8759f..d24d3da 100644
+--- a/arch/x86/pci/Makefile
++++ b/arch/x86/pci/Makefile
+@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
+ obj-$(CONFIG_X86_MRST)		+= mrst.o
+ 
+ obj-y				+= common.o early.o
+-obj-y				+= amd_bus.o bus_numa.o
++obj-y				+= bus_numa.o
+ 
++obj-$(CONFIG_AMD_NB)		+= amd_bus.o
+ obj-$(CONFIG_PCI_CNB20LE_QUIRK)	+= broadcom_bus.o
+ 
+ ifeq ($(CONFIG_PCI_DEBUG),y)
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 50b3f14..53f9e68 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ 	struct acpi_resource_address64 addr;
+ 	acpi_status status;
+ 	unsigned long flags;
+-	u64 start, end;
++	u64 start, orig_end, end;
+ 
+ 	status = resource_to_addr(acpi_res, &addr);
+ 	if (!ACPI_SUCCESS(status))
+@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ 		return AE_OK;
+ 
+ 	start = addr.minimum + addr.translation_offset;
+-	end = addr.maximum + addr.translation_offset;
++	orig_end = end = addr.maximum + addr.translation_offset;
++
++	/* Exclude non-addressable range or non-addressable portion of range */
++	end = min(end, (u64)iomem_resource.end);
++	if (end <= start) {
++		dev_info(&info->bridge->dev,
++			"host bridge window [%#llx-%#llx] "
++			"(ignored, not CPU addressable)\n", start, orig_end);
++		return AE_OK;
++	} else if (orig_end != end) {
++		dev_info(&info->bridge->dev,
++			"host bridge window [%#llx-%#llx] "
++			"([%#llx-%#llx] ignored, not CPU addressable)\n",
++			start, orig_end, end + 1, orig_end);
++	}
+ 
+ 	res = &info->res[info->res_num];
+ 	res->name = info->name;
+diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
+index 026e493..385a940 100644
+--- a/arch/x86/pci/amd_bus.c
++++ b/arch/x86/pci/amd_bus.c
+@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
+ 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
+ };
+ 
+-static u64 __initdata fam10h_mmconf_start;
+-static u64 __initdata fam10h_mmconf_end;
+-static void __init get_pci_mmcfg_amd_fam10h_range(void)
+-{
+-	u32 address;
+-	u64 base, msr;
+-	unsigned segn_busn_bits;
+-
+-	/* assume all cpus from fam10h have mmconf */
+-        if (boot_cpu_data.x86 < 0x10)
+-		return;
+-
+-	address = MSR_FAM10H_MMIO_CONF_BASE;
+-	rdmsrl(address, msr);
+-
+-	/* mmconfig is not enable */
+-	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+-		return;
+-
+-	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+-
+-	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+-			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
+-
+-	fam10h_mmconf_start = base;
+-	fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+-}
+-
+ #define RANGE_NUM 16
+ 
+ /**
+@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
+ 	u64 val;
+ 	u32 address;
+ 	bool found;
++	struct resource fam10h_mmconf_res, *fam10h_mmconf;
++	u64 fam10h_mmconf_start;
++	u64 fam10h_mmconf_end;
+ 
+ 	if (!early_pci_allowed())
+ 		return -1;
+@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
+ 		subtract_range(range, RANGE_NUM, 0, end);
+ 
+ 	/* get mmconfig */
+-	get_pci_mmcfg_amd_fam10h_range();
++	fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
+ 	/* need to take out mmconf range */
+-	if (fam10h_mmconf_end) {
+-		printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
++	if (fam10h_mmconf) {
++		printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
++		fam10h_mmconf_start = fam10h_mmconf->start;
++		fam10h_mmconf_end = fam10h_mmconf->end;
+ 		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+ 				 fam10h_mmconf_end + 1);
++	} else {
++		fam10h_mmconf_start = 0;
++		fam10h_mmconf_end = 0;
+ 	}
+ 
+ 	/* mmio resource */
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 82cff4a..edf435b 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1575,14 +1575,14 @@ static int calculate_destination_timeout(void)
+ 		ts_ns = base * mult1 * mult2;
+ 		ret = ts_ns / 1000;
+ 	} else {
+-		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
+-		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
++		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
++		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ 		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ 		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+-			mult1 = 80;
++			base = 80;
+ 		else
+-			mult1 = 10;
+-		base = mmr_image & UV2_ACK_MASK;
++			base = 10;
++		mult1 = mmr_image & UV2_ACK_MASK;
+ 		ret = mult1 * base;
+ 	}
+ 	return ret;
+@@ -1820,6 +1820,8 @@ static int __init uv_bau_init(void)
+ 			uv_base_pnode = uv_blade_to_pnode(uvhub);
+ 	}
+ 
++	enable_timeouts();
++
+ 	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
+ 		nobau = 1;
+ 		return 0;
+@@ -1830,7 +1832,6 @@ static int __init uv_bau_init(void)
+ 		if (uv_blade_nr_possible_cpus(uvhub))
+ 			init_uvhub(uvhub, vector, uv_base_pnode);
+ 
+-	enable_timeouts();
+ 	alloc_intr_gate(vector, uv_bau_message_intr1);
+ 
+ 	for_each_possible_blade(uvhub) {
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 4f4230b..5ef1f4c 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -24,6 +24,7 @@
+ #include <linux/capability.h>
+ #include <linux/completion.h>
+ #include <linux/cdrom.h>
++#include <linux/ratelimit.h>
+ #include <linux/slab.h>
+ #include <linux/times.h>
+ #include <asm/uaccess.h>
+@@ -691,6 +692,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
+ }
+ EXPORT_SYMBOL(scsi_cmd_ioctl);
+ 
++int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
++{
++	if (bd && bd == bd->bd_contains)
++		return 0;
++
++	/* Actually none of these is particularly useful on a partition,
++	 * but they are safe.
++	 */
++	switch (cmd) {
++	case SCSI_IOCTL_GET_IDLUN:
++	case SCSI_IOCTL_GET_BUS_NUMBER:
++	case SCSI_IOCTL_GET_PCI:
++	case SCSI_IOCTL_PROBE_HOST:
++	case SG_GET_VERSION_NUM:
++	case SG_SET_TIMEOUT:
++	case SG_GET_TIMEOUT:
++	case SG_GET_RESERVED_SIZE:
++	case SG_SET_RESERVED_SIZE:
++	case SG_EMULATED_HOST:
++		return 0;
++	case CDROM_GET_CAPABILITY:
++		/* Keep this until we remove the printk below.  udev sends it
++		 * and we do not want to spam dmesg about it.   CD-ROMs do
++		 * not have partitions, so we get here only for disks.
++		 */
++		return -ENOTTY;
++	default:
++		break;
++	}
++
++	/* In particular, rule out all resets and host-specific ioctls.  */
++	printk_ratelimited(KERN_WARNING
++			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
++
++	return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++}
++EXPORT_SYMBOL(scsi_verify_blk_ioctl);
++
++int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
++		       unsigned int cmd, void __user *arg)
++{
++	int ret;
++
++	ret = scsi_verify_blk_ioctl(bd, cmd);
++	if (ret < 0)
++		return ret;
++
++	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
++}
++EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
++
+ static int __init blk_scsi_ioctl_init(void)
+ {
+ 	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
+index 8c7b997..42163d8 100644
+--- a/drivers/acpi/acpica/dsargs.c
++++ b/drivers/acpi/acpica/dsargs.c
+@@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+ 	status = acpi_ds_execute_arguments(node, node->parent,
+ 					   extra_desc->extra.aml_length,
+ 					   extra_desc->extra.aml_start);
++	if (ACPI_FAILURE(status)) {
++		return_ACPI_STATUS(status);
++	}
++
++	/* Validate the region address/length via the host OS */
++
++	status = acpi_os_validate_address(obj_desc->region.space_id,
++					  obj_desc->region.address,
++					  (acpi_size) obj_desc->region.length,
++					  acpi_ut_get_node_name(node));
++
++	if (ACPI_FAILURE(status)) {
++		/*
++		 * Invalid address/length. We will emit an error message and mark
++		 * the region as invalid, so that it will cause an additional error if
++		 * it is ever used. Then return AE_OK.
++		 */
++		ACPI_EXCEPTION((AE_INFO, status,
++				"During address validation of OpRegion [%4.4s]",
++				node->name.ascii));
++		obj_desc->common.flags |= AOPOBJ_INVALID;
++		status = AE_OK;
++	}
++
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 3b5c318..e56f3be 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
+ static int node_to_pxm_map[MAX_NUMNODES]
+ 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ 
++unsigned char acpi_srat_revision __initdata;
++
+ int pxm_to_node(int pxm)
+ {
+ 	if (pxm < 0)
+@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+ 
+ static int __init acpi_parse_srat(struct acpi_table_header *table)
+ {
++	struct acpi_table_srat *srat;
+ 	if (!table)
+ 		return -EINVAL;
+ 
++	srat = (struct acpi_table_srat *)table;
++	acpi_srat_revision = srat->header.revision;
++
+ 	/* Real work done in acpi_table_parse_srat below. */
+ 
+ 	return 0;
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index 02d2a4c..0c0669f 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -172,8 +172,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ 	apic_id = map_mat_entry(handle, type, acpi_id);
+ 	if (apic_id == -1)
+ 		apic_id = map_madt_entry(type, acpi_id);
+-	if (apic_id == -1)
+-		return apic_id;
++	if (apic_id == -1) {
++		/*
++		 * On UP processor, there is no _MAT or MADT table.
++		 * So above apic_id is always set to -1.
++		 *
++		 * BIOS may define multiple CPU handles even for UP processor.
++		 * For example,
++		 *
++		 * Scope (_PR)
++                 * {
++		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
++		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
++		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
++		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
++		 * }
++		 *
++		 * Ignores apic_id and always return 0 for CPU0's handle.
++		 * Return -1 for other CPU's handle.
++		 */
++		if (acpi_id == 0)
++			return acpi_id;
++		else
++			return apic_id;
++	}
+ 
+ #ifdef CONFIG_SMP
+ 	for_each_possible_cpu(i) {
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index c2f9b3e..1dab802 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1716,7 +1716,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 	case CCISS_BIG_PASSTHRU:
+ 		return cciss_bigpassthru(h, argp);
+ 
+-	/* scsi_cmd_ioctl handles these, below, though some are not */
++	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
+ 	/* very meaningful for cciss.  SG_IO is the main one people want. */
+ 
+ 	case SG_GET_VERSION_NUM:
+@@ -1727,9 +1727,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 	case SG_EMULATED_HOST:
+ 	case SG_IO:
+ 	case SCSI_IOCTL_SEND_COMMAND:
+-		return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 
+-	/* scsi_cmd_ioctl would normally handle these, below, but */
++	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
+ 	/* they aren't a good fit for cciss, as CD-ROMs are */
+ 	/* not supported, and we don't have any bus/target/lun */
+ 	/* which we present to the kernel. */
+diff --git a/drivers/block/ub.c b/drivers/block/ub.c
+index 0e376d4..7333b9e 100644
+--- a/drivers/block/ub.c
++++ b/drivers/block/ub.c
+@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
+ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
+     unsigned int cmd, unsigned long arg)
+ {
+-	struct gendisk *disk = bdev->bd_disk;
+ 	void __user *usermem = (void __user *) arg;
+ 	int ret;
+ 
+ 	mutex_lock(&ub_mutex);
+-	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
++	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
+ 	mutex_unlock(&ub_mutex);
+ 
+ 	return ret;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 079c088..5d7a934 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -236,8 +236,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ 	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ 		return -ENOTTY;
+ 
+-	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
+-			      (void __user *)data);
++	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
++				  (void __user *)data);
+ }
+ 
+ /* We provide getgeo only to please some old bootloader/partitioning tools */
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 75fb965..b693cbd 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2741,12 +2741,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ {
+ 	void __user *argp = (void __user *)arg;
+ 	int ret;
+-	struct gendisk *disk = bdev->bd_disk;
+ 
+ 	/*
+ 	 * Try the generic SCSI command ioctl's first.
+ 	 */
+-	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 	if (ret != -ENOTTY)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index b94d871..7642495 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2069,6 +2069,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
+ void r100_bm_disable(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
++	u16 tmp16;
+ 
+ 	/* disable bus mastering */
+ 	tmp = RREG32(R_000030_BUS_CNTL);
+@@ -2079,8 +2080,8 @@ void r100_bm_disable(struct radeon_device *rdev)
+ 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ 	tmp = RREG32(RADEON_BUS_CNTL);
+ 	mdelay(1);
+-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+-	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
++	pci_read_config_word(rdev->pdev, 0x4, &tmp16);
++	pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
+ 	mdelay(1);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index f5ac7e7..c45d921 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
+ 	frame[0xD] = (right_bar >> 8);
+ 
+ 	r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
++	/* Our header values (type, version, length) should be alright, Intel
++	 * is using the same. Checksum function also seems to be OK, it works
++	 * fine for audio infoframe. However calculated value is always lower
++	 * by 2 in comparison to fglrx. It breaks displaying anything in case
++	 * of TVs that strictly check the checksum. Hack it manually here to
++	 * workaround this issue. */
++	frame[0x0] += 2;
+ 
+ 	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 440e6ec..5d0c123 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -223,8 +223,11 @@ int radeon_wb_init(struct radeon_device *rdev)
+ 	if (radeon_no_wb == 1)
+ 		rdev->wb.enabled = false;
+ 	else {
+-		/* often unreliable on AGP */
+ 		if (rdev->flags & RADEON_IS_AGP) {
++			/* often unreliable on AGP */
++			rdev->wb.enabled = false;
++		} else if (rdev->family < CHIP_R300) {
++			/* often unreliable on pre-r300 */
+ 			rdev->wb.enabled = false;
+ 		} else {
+ 			rdev->wb.enabled = true;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index a37a1ef..21acfb5 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
+ 
+ void rs600_bm_disable(struct radeon_device *rdev)
+ {
+-	u32 tmp;
++	u16 tmp;
+ 
+ 	/* disable bus mastering */
+-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
++	pci_read_config_word(rdev->pdev, 0x4, &tmp);
+ 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ 	mdelay(1);
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 763797d..2f855b1 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+ 
+ 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
+ 		parser->global.report_size = item_udata(item);
+-		if (parser->global.report_size > 32) {
++		if (parser->global.report_size > 96) {
+ 			dbg_hid("invalid report_size %d\n",
+ 					parser->global.report_size);
+ 			return -1;
+@@ -1382,11 +1382,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 206f750..e0a28ad 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -21,6 +21,7 @@
+ #define USB_VENDOR_ID_3M		0x0596
+ #define USB_DEVICE_ID_3M1968		0x0500
+ #define USB_DEVICE_ID_3M2256		0x0502
++#define USB_DEVICE_ID_3M3266		0x0506
+ 
+ #define USB_VENDOR_ID_A4TECH		0x09da
+ #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
+@@ -230,11 +231,14 @@
+ 
+ #define USB_VENDOR_ID_DWAV		0x0eef
+ #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH	0x480d
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1	0x720c
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2	0x72a1
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3	0x480e
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4	0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D	0x480d
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E	0x480e
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C	0x720c
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B	0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1	0x72a1
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA	0x72fa
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302	0x7302
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001	0xa001
+ 
+ #define USB_VENDOR_ID_ELECOM		0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 685d8e4..1308703 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -593,6 +593,9 @@ static const struct hid_device_id mt_devices[] = {
+ 	{ .driver_data = MT_CLS_3M,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ 			USB_DEVICE_ID_3M2256) },
++	{ .driver_data = MT_CLS_3M,
++		HID_USB_DEVICE(USB_VENDOR_ID_3M,
++			USB_DEVICE_ID_3M3266) },
+ 
+ 	/* ActionStar panels */
+ 	{ .driver_data = MT_CLS_DEFAULT,
+@@ -629,23 +632,32 @@ static const struct hid_device_id mt_devices[] = {
+ 			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ 
+ 	/* eGalax devices (resistive) */
+-	{  .driver_data = MT_CLS_EGALAX,
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+ 
+ 	/* eGalax devices (capacitive) */
+-	{  .driver_data = MT_CLS_EGALAX,
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ 
+ 	/* Elo TouchSystems IntelliTouch Plus panel */
+ 	{ .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
+index dd36417..cd7ac5c 100644
+--- a/drivers/i2c/busses/i2c-ali1535.c
++++ b/drivers/i2c/busses/i2c-ali1535.c
+@@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
+    defined to make the transition easier. */
+ static int __devinit ali1535_setup(struct pci_dev *dev)
+ {
+-	int retval = -ENODEV;
++	int retval;
+ 	unsigned char temp;
+ 
+ 	/* Check the following things:
+@@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	if (ali1535_smba == 0) {
+ 		dev_warn(&dev->dev,
+ 			"ALI1535_smb region uninitialized - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+@@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 			    ali1535_driver.name)) {
+ 		dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
+ 			ali1535_smba);
++		retval = -EBUSY;
+ 		goto exit;
+ 	}
+ 
+@@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	pci_read_config_byte(dev, SMBCFG, &temp);
+ 	if ((temp & ALI1535_SMBIO_EN) == 0) {
+ 		dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit_free;
+ 	}
+ 
+@@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	pci_read_config_byte(dev, SMBHSTCFG, &temp);
+ 	if ((temp & 1) == 0) {
+ 		dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit_free;
+ 	}
+ 
+@@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
+ 	dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
+ 
+-	retval = 0;
+-exit:
+-	return retval;
++	return 0;
+ 
+ exit_free:
+ 	release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
++exit:
+ 	return retval;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 8abfa4a..656b028 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -242,7 +242,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+ 	if (pch_clk > PCH_MAX_CLK)
+ 		pch_clk = 62500;
+ 
+-	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
++	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
+ 	/* Set transfer speed in I2CBC */
+ 	iowrite32(pch_i2cbc, p + PCH_I2CBC);
+ 
+diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
+index ff1e127..4853b52 100644
+--- a/drivers/i2c/busses/i2c-nforce2.c
++++ b/drivers/i2c/busses/i2c-nforce2.c
+@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
+ 	error = acpi_check_region(smbus->base, smbus->size,
+ 				  nforce2_driver.name);
+ 	if (error)
+-		return -1;
++		return error;
+ 
+ 	if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
+ 		dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 58a58c7..137e1a3 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -235,7 +235,7 @@ const static u8 omap4_reg_map[] = {
+ 	[OMAP_I2C_BUF_REG] = 0x94,
+ 	[OMAP_I2C_CNT_REG] = 0x98,
+ 	[OMAP_I2C_DATA_REG] = 0x9c,
+-	[OMAP_I2C_SYSC_REG] = 0x20,
++	[OMAP_I2C_SYSC_REG] = 0x10,
+ 	[OMAP_I2C_CON_REG] = 0xa4,
+ 	[OMAP_I2C_OA_REG] = 0xa8,
+ 	[OMAP_I2C_SA_REG] = 0xac,
+diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
+index 4375866..6d60284 100644
+--- a/drivers/i2c/busses/i2c-sis5595.c
++++ b/drivers/i2c/busses/i2c-sis5595.c
+@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+ 	u16 a;
+ 	u8 val;
+ 	int *i;
+-	int retval = -ENODEV;
++	int retval;
+ 
+ 	/* Look for imposters */
+ 	for (i = blacklist; *i != 0; i++) {
+@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+ 
+ error:
+ 	release_region(sis5595_base + SMB_INDEX, 2);
+-	return retval;
++	return -ENODEV;
+ }
+ 
+ static int sis5595_transaction(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
+index e6f539e..b617fd0 100644
+--- a/drivers/i2c/busses/i2c-sis630.c
++++ b/drivers/i2c/busses/i2c-sis630.c
+@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ {
+ 	unsigned char b;
+ 	struct pci_dev *dummy = NULL;
+-	int retval = -ENODEV, i;
++	int retval, i;
+ 
+ 	/* check for supported SiS devices */
+ 	for (i=0; supported[i] > 0 ; i++) {
+@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ 	*/
+ 	if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 	/* if ACPI already enabled , do nothing */
+ 	if (!(b & 0x80) &&
+ 	    pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+ 	/* Determine the ACPI base address */
+ 	if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ 			    sis630_driver.name)) {
+ 		dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
+ 			"in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
++		retval = -EBUSY;
+ 		goto exit;
+ 	}
+ 
+diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
+index 0b012f1..58261d4 100644
+--- a/drivers/i2c/busses/i2c-viapro.c
++++ b/drivers/i2c/busses/i2c-viapro.c
+@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
+ 				 const struct pci_device_id *id)
+ {
+ 	unsigned char temp;
+-	int error = -ENODEV;
++	int error;
+ 
+ 	/* Determine the address of the SMBus areas */
+ 	if (force_addr) {
+@@ -390,6 +390,7 @@ found:
+ 			dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
+ 				"controller not enabled! - upgrade BIOS or "
+ 				"use force=1\n");
++			error = -ENODEV;
+ 			goto release_region;
+ 		}
+ 	}
+@@ -422,9 +423,11 @@ found:
+ 		 "SMBus Via Pro adapter at %04x", vt596_smba);
+ 
+ 	vt596_pdev = pci_dev_get(pdev);
+-	if (i2c_add_adapter(&vt596_adapter)) {
++	error = i2c_add_adapter(&vt596_adapter);
++	if (error) {
+ 		pci_dev_put(vt596_pdev);
+ 		vt596_pdev = NULL;
++		goto release_region;
+ 	}
+ 
+ 	/* Always return failure here.  This is to allow other drivers to bind
+diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
+index d267b7a..a22ca84 100644
+--- a/drivers/ide/ide-floppy_ioctl.c
++++ b/drivers/ide/ide-floppy_ioctl.c
+@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ 	 * and CDROM_SEND_PACKET (legacy) ioctls
+ 	 */
+ 	if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+-		err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
+-				mode, cmd, argp);
++		err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 
+ 	if (err == -ENOTTY)
+ 		err = generic_ide_ioctl(drive, bdev, cmd, arg);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index a46dddf..026f9aa 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -321,7 +321,8 @@ static int intel_idle_probe(void)
+ 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ 
+ 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+-		!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
++	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
++	    !mwait_substates)
+ 			return -ENODEV;
+ 
+ 	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+@@ -367,7 +368,7 @@ static int intel_idle_probe(void)
+ 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
+ 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ 	else {
+-		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
++		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+ 		register_cpu_notifier(&setup_broadcast_notifier);
+ 	}
+ 
+@@ -459,7 +460,7 @@ static int intel_idle_cpuidle_devices_init(void)
+ 		}
+ 	}
+ 	if (auto_demotion_disable_flags)
+-		smp_call_function(auto_demotion_disable, NULL, 1);
++		on_each_cpu(auto_demotion_disable, NULL, 1);
+ 
+ 	return 0;
+ }
+@@ -499,7 +500,7 @@ static void __exit intel_idle_exit(void)
+ 	cpuidle_unregister_driver(&intel_idle_driver);
+ 
+ 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+-		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
++		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
+ 		unregister_cpu_notifier(&setup_broadcast_notifier);
+ 	}
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index ea79062..3e90b80 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -149,8 +149,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+ {
+ 	struct flakey_c *fc = ti->private;
++	struct dm_dev *dev = fc->dev;
++	int r = 0;
+ 
+-	return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (fc->start ||
++	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
++	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+ 
+ static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 3921e3b..9728839 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+ 			unsigned long arg)
+ {
+ 	struct linear_c *lc = (struct linear_c *) ti->private;
+-	return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
++	struct dm_dev *dev = lc->dev;
++	int r = 0;
++
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (lc->start ||
++	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
++	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+ 
+ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 209991b..70373bf 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1584,6 +1584,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
+ 
+ 	spin_unlock_irqrestore(&m->lock, flags);
+ 
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
+ 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+ 
+diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
+index 543a803..dbefdb0 100644
+--- a/drivers/media/video/uvc/uvc_v4l2.c
++++ b/drivers/media/video/uvc/uvc_v4l2.c
+@@ -65,6 +65,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ 			goto done;
+ 		}
+ 
++		/* Prevent excessive memory consumption, as well as integer
++		 * overflows.
++		 */
++		if (xmap->menu_count == 0 ||
++		    xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
++			ret = -EINVAL;
++			goto done;
++		}
++
+ 		size = xmap->menu_count * sizeof(*map->menu_info);
+ 		map->menu_info = kmalloc(size, GFP_KERNEL);
+ 		if (map->menu_info == NULL) {
+diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
+index 2a38d5e..cf2401a 100644
+--- a/drivers/media/video/uvc/uvcvideo.h
++++ b/drivers/media/video/uvc/uvcvideo.h
+@@ -200,6 +200,7 @@ struct uvc_xu_control {
+ 
+ /* Maximum allowed number of control mappings per device */
+ #define UVC_MAX_CONTROL_MAPPINGS	1024
++#define UVC_MAX_CONTROL_MENU_ENTRIES	32
+ 
+ /* Devices quirks */
+ #define UVC_QUIRK_STATUS_INTERVAL	0x00000001
+diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
+index 69e8c6f..bda252f 100644
+--- a/drivers/media/video/v4l2-ioctl.c
++++ b/drivers/media/video/v4l2-ioctl.c
+@@ -2289,6 +2289,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ 		struct v4l2_ext_controls *ctrls = parg;
+ 
+ 		if (ctrls->count != 0) {
++			if (ctrls->count > V4L2_CID_MAX_CTRLS) {
++				ret = -EINVAL;
++				break;
++			}
+ 			*user_ptr = (void __user *)ctrls->controls;
+ 			*kernel_ptr = (void **)&ctrls->controls;
+ 			*array_size = sizeof(struct v4l2_ext_control)
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 20b42c8..f601180 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -830,7 +830,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ 			 *
+ 			 * WARNING: eMMC rules are NOT the same as SD DDR
+ 			 */
+-			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
++			if (ddr == MMC_1_2V_DDR_MODE) {
+ 				err = mmc_set_signal_voltage(host,
+ 					MMC_SIGNAL_VOLTAGE_120, 0);
+ 				if (err)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 6d3de08..153008f 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1340,8 +1340,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 		if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+ 		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ 		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
+-		    (ios->timing == MMC_TIMING_UHS_SDR25) ||
+-		    (ios->timing == MMC_TIMING_UHS_SDR12))
++		    (ios->timing == MMC_TIMING_UHS_SDR25))
+ 			ctrl |= SDHCI_CTRL_HISPD;
+ 
+ 		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+@@ -2227,9 +2226,8 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+ 	/* Disable tuning since we are suspending */
+ 	if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
+ 	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
++		del_timer_sync(&host->tuning_timer);
+ 		host->flags &= ~SDHCI_NEEDS_RETUNING;
+-		mod_timer(&host->tuning_timer, jiffies +
+-			host->tuning_count * HZ);
+ 	}
+ 
+ 	ret = mmc_suspend_host(host->mmc);
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index ca38569..bff8d46 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ 	mutex_lock(&dev->lock);
+ 
+-	if (dev->open++)
++	if (dev->open)
+ 		goto unlock;
+ 
+ 	kref_get(&dev->ref);
+@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 		goto error_release;
+ 
+ unlock:
++	dev->open++;
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index e3e40f4..43130e8 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
+ 	size_t retlen;
+ 
+ 	for (page = 0; page < cxt->oops_pages; page++) {
++		if (mtd->block_isbad &&
++		    mtd->block_isbad(mtd, page * record_size))
++			continue;
+ 		/* Assume the page is used */
+ 		mark_page_used(cxt, page);
+ 		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
+ 
+ 	/* oops_page_used is a bit field */
+ 	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+-			BITS_PER_LONG));
++			BITS_PER_LONG) * sizeof(unsigned long));
+ 	if (!cxt->oops_page_used) {
+ 		printk(KERN_ERR "mtdoops: could not allocate page array\n");
+ 		return;
+diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
+index 531625f..129bad2 100644
+--- a/drivers/mtd/tests/mtd_stresstest.c
++++ b/drivers/mtd/tests/mtd_stresstest.c
+@@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void)
+ 	       (unsigned long long)mtd->size, mtd->erasesize,
+ 	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+ 
++	if (ebcnt < 2) {
++		printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
++		err = -ENOSPC;
++		goto out_put_mtd;
++	}
++
+ 	/* Read or write up 2 eraseblocks at a time */
+ 	bufsize = mtd->erasesize * 2;
+ 
+@@ -315,6 +321,7 @@ out:
+ 	kfree(bbt);
+ 	vfree(writebuf);
+ 	vfree(readbuf);
++out_put_mtd:
+ 	put_mtd_device(mtd);
+ 	if (err)
+ 		printk(PRINT_PREF "error %d occurred\n", err);
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 191f3bb..cdea669 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -628,6 +628,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
+ 	if (req->alignment != 1 && n)
+ 		goto bad;
+ 
++	if (!req->name[0] || !req->name_len)
++		goto bad;
++
+ 	if (req->name_len > UBI_VOL_NAME_MAX) {
+ 		err = -ENAMETOOLONG;
+ 		goto bad;
+diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
+index 3f1a09c..5f0e4c2 100644
+--- a/drivers/mtd/ubi/debug.h
++++ b/drivers/mtd/ubi/debug.h
+@@ -51,7 +51,10 @@ struct ubi_mkvol_req;
+ 	pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ 
+ /* Just a debugging messages not related to any specific UBI subsystem */
+-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...)                                    \
++	printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
++	       current->pid, __func__, ##__VA_ARGS__)
++
+ /* General debugging messages */
+ #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Messages from the eraseblock association sub-system */
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 4be6718..c696c94 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ 	 * LEB is already locked, we just do not move it and return
+-	 * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
++	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
++	 * we do not know the reasons of the contention - it may be just a
++	 * normal I/O on this LEB, so we want to re-try.
+ 	 */
+ 	err = leb_write_trylock(ubi, vol_id, lnum);
+ 	if (err) {
+ 		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+-		return MOVE_CANCEL_RACE;
++		return MOVE_RETRY;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index c6c2229..bbfa88d 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -121,6 +121,7 @@ enum {
+  *                     PEB
+  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+  *                       target PEB
++ * MOVE_RETRY: retry scrubbing the PEB
+  */
+ enum {
+ 	MOVE_CANCEL_RACE = 1,
+@@ -128,6 +129,7 @@ enum {
+ 	MOVE_TARGET_RD_ERR,
+ 	MOVE_TARGET_WR_ERR,
+ 	MOVE_CANCEL_BITFLIPS,
++	MOVE_RETRY,
+ };
+ 
+ /**
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index ff2c495..12e44c9 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -792,7 +792,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 			protect = 1;
+ 			goto out_not_moved;
+ 		}
+-
++		if (err == MOVE_RETRY) {
++			scrubbing = 1;
++			goto out_not_moved;
++		}
+ 		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ 		    err == MOVE_TARGET_RD_ERR) {
+ 			/*
+@@ -1046,7 +1049,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ 
+ 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
+ 	kfree(wl_wrk);
+-	kmem_cache_free(ubi_wl_entry_slab, e);
+ 
+ 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ 	    err == -EBUSY) {
+@@ -1059,14 +1061,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ 			goto out_ro;
+ 		}
+ 		return err;
+-	} else if (err != -EIO) {
++	}
++
++	kmem_cache_free(ubi_wl_entry_slab, e);
++	if (err != -EIO)
+ 		/*
+ 		 * If this is not %-EIO, we have no idea what to do. Scheduling
+ 		 * this physical eraseblock for erasure again would cause
+ 		 * errors again and again. Well, lets switch to R/O mode.
+ 		 */
+ 		goto out_ro;
+-	}
+ 
+ 	/* It is %-EIO, the PEB went bad */
+ 
+diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
+index 47c8339a..2843c90 100644
+--- a/drivers/net/phy/mdio-gpio.c
++++ b/drivers/net/phy/mdio-gpio.c
+@@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
+ 
+ static struct platform_driver mdio_ofgpio_driver = {
+ 	.driver = {
+-		.name = "mdio-gpio",
++		.name = "mdio-ofgpio",
+ 		.owner = THIS_MODULE,
+ 		.of_match_table = mdio_ofgpio_match,
+ 	},
+diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+index 421d5c8..a935585 100644
+--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+@@ -2910,14 +2910,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+ 		IWL_WARN(priv, "Invalid scan band\n");
+ 		return -EIO;
+ 	}
+-
+ 	/*
+-	 * If active scaning is requested but a certain channel
+-	 * is marked passive, we can do active scanning if we
+-	 * detect transmissions.
++	 * If active scaning is requested but a certain channel is marked
++	 * passive, we can do active scanning if we detect transmissions. For
++	 * passive only scanning disable switching to active on any channel.
+ 	 */
+ 	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+-					IWL_GOOD_CRC_TH_DISABLED;
++					IWL_GOOD_CRC_TH_NEVER;
+ 
+ 	if (!priv->is_internal_short_scan) {
+ 		scan->tx_cmd.len = cpu_to_le16(
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+index f803fb6..857cf61 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+@@ -2023,6 +2023,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+ 	case IEEE80211_SMPS_STATIC:
+ 	case IEEE80211_SMPS_DYNAMIC:
+ 		return IWL_NUM_IDLE_CHAINS_SINGLE;
++	case IEEE80211_SMPS_AUTOMATIC:
+ 	case IEEE80211_SMPS_OFF:
+ 		return active_cnt;
+ 	default:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+index 39a3c9c..272bcdf 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -442,6 +442,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+ 
+ 	mutex_lock(&priv->mutex);
+ 
++	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
++		goto out;
++
+ 	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+ 		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ 		goto out;
+diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
+index 55cd3e1..dab7dc1 100644
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -426,7 +426,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
+ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ 				 enum dev_state state)
+ {
+-	int mask = (state == STATE_RADIO_IRQ_ON);
+ 	u32 reg;
+ 	unsigned long flags;
+ 
+@@ -448,25 +447,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ 	}
+ 
+ 	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+-	rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
++	reg = 0;
++	if (state == STATE_RADIO_IRQ_ON) {
++		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
++	}
+ 	rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ 	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+ 
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+index 3b5af01..0c77a14 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
+ 		/* Allocate skb buffer to contain firmware */
+ 		/* info and tx descriptor info. */
+ 		skb = dev_alloc_skb(frag_length);
++		if (!skb)
++			return false;
+ 		skb_reserve(skb, extra_descoffset);
+ 		seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
+ 					extra_descoffset));
+@@ -575,6 +577,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
+ 
+ 	len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
+ 	skb = dev_alloc_skb(len);
++	if (!skb)
++		return false;
+ 	cb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ 	cb_desc->queue_index = TXCMD_QUEUE;
+ 	cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 2f10328..e174982 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
+ 
+ void pci_msi_init_pci_dev(struct pci_dev *dev)
+ {
++	int pos;
+ 	INIT_LIST_HEAD(&dev->msi_list);
++
++	/* Disable the msi hardware to avoid screaming interrupts
++	 * during boot.  This is the power on reset default so
++	 * usually this should be a noop.
++	 */
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (pos)
++		msi_set_enable(dev, pos, 0);
++	msix_set_enable(dev, 0);
+ }
+diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
+index dfbd5a6..258fef2 100644
+--- a/drivers/pnp/quirks.c
++++ b/drivers/pnp/quirks.c
+@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ 	}
+ }
+ 
++#ifdef CONFIG_AMD_NB
++
++#include <asm/amd_nb.h>
++
++static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
++{
++	resource_size_t start, end;
++	struct pnp_resource *pnp_res;
++	struct resource *res;
++	struct resource mmconfig_res, *mmconfig;
++
++	mmconfig = amd_get_mmconfig_range(&mmconfig_res);
++	if (!mmconfig)
++		return;
++
++	list_for_each_entry(pnp_res, &dev->resources, list) {
++		res = &pnp_res->res;
++		if (res->end < mmconfig->start || res->start > mmconfig->end ||
++		    (res->start == mmconfig->start && res->end == mmconfig->end))
++			continue;
++
++		dev_info(&dev->dev, FW_BUG
++			 "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
++			 res, mmconfig);
++		if (mmconfig->start < res->start) {
++			start = mmconfig->start;
++			end = res->start - 1;
++			pnp_add_mem_resource(dev, start, end, 0);
++		}
++		if (mmconfig->end > res->end) {
++			start = res->end + 1;
++			end = mmconfig->end;
++			pnp_add_mem_resource(dev, start, end, 0);
++		}
++		break;
++	}
++}
++#endif
++
+ /*
+  *  PnP Quirks
+  *  Cards or devices that need some tweaking due to incomplete resource info
+@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
+ 	/* PnP resources that might overlap PCI BARs */
+ 	{"PNP0c01", quirk_system_pci_resources},
+ 	{"PNP0c02", quirk_system_pci_resources},
++#ifdef CONFIG_AMD_NB
++	{"PNP0c01", quirk_amd_mmconfig_area},
++#endif
+ 	{""}
+ };
+ 
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index eb4c883..38d1dc7 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		alarm->time.tm_hour = now.tm_hour;
+ 
+ 	/* For simplicity, only support date rollover for now */
+-	if (alarm->time.tm_mday == -1) {
++	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
+ 		alarm->time.tm_mday = now.tm_mday;
+ 		missing = day;
+ 	}
+-	if (alarm->time.tm_mon == -1) {
++	if ((unsigned)alarm->time.tm_mon >= 12) {
+ 		alarm->time.tm_mon = now.tm_mon;
+ 		if (missing == none)
+ 			missing = month;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 39e81cd..10f16a3 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -66,6 +66,8 @@ static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
+ 
+ #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+ 
++#define MAX_HBA_QUEUE_DEPTH	30000
++#define MAX_CHAIN_DEPTH		100000
+ static int max_queue_depth = -1;
+ module_param(max_queue_depth, int, 0);
+ MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+@@ -2098,8 +2100,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
+ 		}
+ 		if (ioc->chain_dma_pool)
+ 			pci_pool_destroy(ioc->chain_dma_pool);
+-	}
+-	if (ioc->chain_lookup) {
+ 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ 		ioc->chain_lookup = NULL;
+ 	}
+@@ -2117,9 +2117,7 @@ static int
+ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ {
+ 	struct mpt2sas_facts *facts;
+-	u32 queue_size, queue_diff;
+ 	u16 max_sge_elements;
+-	u16 num_of_reply_frames;
+ 	u16 chains_needed_per_io;
+ 	u32 sz, total_sz;
+ 	u32 retry_sz;
+@@ -2146,7 +2144,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 		max_request_credit = (max_queue_depth < facts->RequestCredit)
+ 		    ? max_queue_depth : facts->RequestCredit;
+ 	else
+-		max_request_credit = facts->RequestCredit;
++		max_request_credit = min_t(u16, facts->RequestCredit,
++		    MAX_HBA_QUEUE_DEPTH);
+ 
+ 	ioc->hba_queue_depth = max_request_credit;
+ 	ioc->hi_priority_depth = facts->HighPriorityCredit;
+@@ -2187,50 +2186,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 	}
+ 	ioc->chains_needed_per_io = chains_needed_per_io;
+ 
+-	/* reply free queue sizing - taking into account for events */
+-	num_of_reply_frames = ioc->hba_queue_depth + 32;
+-
+-	/* number of replies frames can't be a multiple of 16 */
+-	/* decrease number of reply frames by 1 */
+-	if (!(num_of_reply_frames % 16))
+-		num_of_reply_frames--;
+-
+-	/* calculate number of reply free queue entries
+-	 *  (must be multiple of 16)
+-	 */
+-
+-	/* (we know reply_free_queue_depth is not a multiple of 16) */
+-	queue_size = num_of_reply_frames;
+-	queue_size += 16 - (queue_size % 16);
+-	ioc->reply_free_queue_depth = queue_size;
+-
+-	/* reply descriptor post queue sizing */
+-	/* this size should be the number of request frames + number of reply
+-	 * frames
+-	 */
+-
+-	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
+-	/* round up to 16 byte boundary */
+-	if (queue_size % 16)
+-		queue_size += 16 - (queue_size % 16);
+-
+-	/* check against IOC maximum reply post queue depth */
+-	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
+-		queue_diff = queue_size -
+-		    facts->MaxReplyDescriptorPostQueueDepth;
++	/* reply free queue sizing - taking into account for 64 FW events */
++	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ 
+-		/* round queue_diff up to multiple of 16 */
+-		if (queue_diff % 16)
+-			queue_diff += 16 - (queue_diff % 16);
+-
+-		/* adjust hba_queue_depth, reply_free_queue_depth,
+-		 * and queue_size
+-		 */
+-		ioc->hba_queue_depth -= (queue_diff / 2);
+-		ioc->reply_free_queue_depth -= (queue_diff / 2);
+-		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
++	/* align the reply post queue on the next 16 count boundary */
++	if (!ioc->reply_free_queue_depth % 16)
++		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
++	else
++		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
++				32 - (ioc->reply_free_queue_depth % 16);
++	if (ioc->reply_post_queue_depth >
++	    facts->MaxReplyDescriptorPostQueueDepth) {
++		ioc->reply_post_queue_depth = min_t(u16,
++		    (facts->MaxReplyDescriptorPostQueueDepth -
++		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
++		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
++		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
++		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
+ 	}
+-	ioc->reply_post_queue_depth = queue_size;
++
+ 
+ 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
+ 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+@@ -2316,15 +2290,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 	    "depth(%d)\n", ioc->name, ioc->request,
+ 	    ioc->scsiio_depth));
+ 
+-	/* loop till the allocation succeeds */
+-	do {
+-		sz = ioc->chain_depth * sizeof(struct chain_tracker);
+-		ioc->chain_pages = get_order(sz);
+-		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+-		    GFP_KERNEL, ioc->chain_pages);
+-		if (ioc->chain_lookup == NULL)
+-			ioc->chain_depth -= 100;
+-	} while (ioc->chain_lookup == NULL);
++	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
++	sz = ioc->chain_depth * sizeof(struct chain_tracker);
++	ioc->chain_pages = get_order(sz);
++
++	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
++	    GFP_KERNEL, ioc->chain_pages);
+ 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ 	    ioc->request_sz, 16, 0);
+ 	if (!ioc->chain_dma_pool) {
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index c79857e..aa51195 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -974,8 +974,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+ 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ 	if (list_empty(&ioc->free_chain_list)) {
+ 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+-		printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
+-		    ioc->name);
++		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
++			"available\n", ioc->name));
+ 		return NULL;
+ 	}
+ 	chain_req = list_entry(ioc->free_chain_list.next,
+@@ -6425,6 +6425,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ 			} else
+ 				sas_target_priv_data = NULL;
+ 			raid_device->responding = 1;
++			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ 			starget_printk(KERN_INFO, raid_device->starget,
+ 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ 			    (unsigned long long)raid_device->wwid);
+@@ -6435,16 +6436,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ 			 */
+ 			_scsih_init_warpdrive_properties(ioc, raid_device);
+ 			if (raid_device->handle == handle)
+-				goto out;
++				return;
+ 			printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
+ 			    raid_device->handle);
+ 			raid_device->handle = handle;
+ 			if (sas_target_priv_data)
+ 				sas_target_priv_data->handle = handle;
+-			goto out;
++			return;
+ 		}
+ 	}
+- out:
++
+ 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 953773c..7d8b5d8 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1073,6 +1073,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ 	SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
+ 						disk->disk_name, cmd));
+ 
++	error = scsi_verify_blk_ioctl(bdev, cmd);
++	if (error < 0)
++		return error;
++
+ 	/*
+ 	 * If we are in the middle of error recovery, don't let anyone
+ 	 * else try and use this device.  Also, if error recovery fails, it
+@@ -1095,7 +1099,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ 			error = scsi_ioctl(sdp, cmd, p);
+ 			break;
+ 		default:
+-			error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
++			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
+ 			if (error != -ENOTTY)
+ 				break;
+ 			error = scsi_ioctl(sdp, cmd, p);
+@@ -1265,6 +1269,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 			   unsigned int cmd, unsigned long arg)
+ {
+ 	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
++	int ret;
++
++	ret = scsi_verify_blk_ioctl(bdev, cmd);
++	if (ret < 0)
++		return -ENOIOCTLCMD;
+ 
+ 	/*
+ 	 * If we are in the middle of error recovery, don't let anyone
+@@ -1276,8 +1285,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 		return -ENODEV;
+ 	       
+ 	if (sdev->host->hostt->compat_ioctl) {
+-		int ret;
+-
+ 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ 
+ 		return ret;
+diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
+index b4543f5..36d1ed7 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
++++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
+@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+ 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ 	unsigned long flags;
+ 
++	/* if slave_alloc returned before allocating a sym_lcb, return */
++	if (!lp)
++		return;
++
+ 	spin_lock_irqsave(np->s.host->host_lock, flags);
+ 
+ 	if (lp->busy_itlq || lp->busy_itl) {
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 7f19c8b..f044d45 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -84,6 +84,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ 	buf[2] = dev->transport->get_device_rev(dev);
+ 
+ 	/*
++	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
++	 *
++	 * SPC4 says:
++	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
++	 *   standard INQUIRY data is in the format defined in this
++	 *   standard. Response data format values less than 2h are
++	 *   obsolete. Response data format values greater than 2h are
++	 *   reserved.
++	 */
++	buf[3] = 2;
++
++	/*
+ 	 * Enable SCCS and TPGS fields for Emulated ALUA
+ 	 */
+ 	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 1340ffd..bb86655 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -5668,6 +5668,8 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SECTOR_COUNT_TOO_MANY:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* INVALID COMMAND OPERATION CODE */
+@@ -5676,6 +5678,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_UNKNOWN_MODE_PAGE:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* INVALID FIELD IN CDB */
+@@ -5684,6 +5687,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_ABORT_CMD:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* BUS DEVICE RESET FUNCTION OCCURRED */
+@@ -5693,6 +5697,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INCORRECT_AMOUNT_OF_DATA:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* WRITE ERROR */
+@@ -5703,6 +5708,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INVALID_CDB_FIELD:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* INVALID FIELD IN CDB */
+@@ -5711,6 +5717,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INVALID_PARAMETER_LIST:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* INVALID FIELD IN PARAMETER LIST */
+@@ -5719,6 +5726,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* WRITE ERROR */
+@@ -5729,6 +5737,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SERVICE_CRC_ERROR:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* PROTOCOL SERVICE CRC ERROR */
+@@ -5739,6 +5748,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SNACK_REJECTED:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* READ ERROR */
+@@ -5749,6 +5759,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_WRITE_PROTECTED:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* DATA PROTECT */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ 		/* WRITE PROTECTED */
+@@ -5757,6 +5768,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* UNIT ATTENTION */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+@@ -5766,6 +5778,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_NOT_READY:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* Not Ready */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ 		transport_get_sense_codes(cmd, &asc, &ascq);
+@@ -5776,6 +5789,7 @@ int transport_send_check_condition_and_sense(
+ 	default:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* LOGICAL UNIT COMMUNICATION FAILURE */
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index 5534690..daee5db 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -801,6 +801,12 @@ static int process_msg(void)
+ 		goto out;
+ 	}
+ 
++	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
++		kfree(msg);
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
+ 	if (body == NULL) {
+ 		kfree(msg);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index d2f8feb..f598b98 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -241,6 +241,7 @@ static void dentry_lru_add(struct dentry *dentry)
+ static void __dentry_lru_del(struct dentry *dentry)
+ {
+ 	list_del_init(&dentry->d_lru);
++	dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+ 	dentry->d_sb->s_nr_dentry_unused--;
+ 	dentry_stat.nr_unused--;
+ }
+@@ -753,6 +754,7 @@ relock:
+ 			spin_unlock(&dentry->d_lock);
+ 		} else {
+ 			list_move_tail(&dentry->d_lru, &tmp);
++			dentry->d_flags |= DCACHE_SHRINK_LIST;
+ 			spin_unlock(&dentry->d_lock);
+ 			if (!--cnt)
+ 				break;
+@@ -1144,14 +1146,18 @@ resume:
+ 		/* 
+ 		 * move only zero ref count dentries to the end 
+ 		 * of the unused list for prune_dcache
++		 *
++		 * Those which are presently on the shrink list, being processed
++		 * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
++		 * loop in shrink_dcache_parent() might not make any progress
++		 * and loop forever.
+ 		 */
+-		if (!dentry->d_count) {
++		if (dentry->d_count) {
++			dentry_lru_del(dentry);
++		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
+ 			dentry_lru_move_tail(dentry);
+ 			found++;
+-		} else {
+-			dentry_lru_del(dentry);
+ 		}
+-
+ 		/*
+ 		 * We can return to the caller if we have found some (this
+ 		 * ensures forward progress). We'll be coming back to find
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 7aa77f0..df121b2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1957,17 +1957,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 	struct ext4_group_desc *gdp = NULL;
+ 	ext4_group_t flex_group_count;
+ 	ext4_group_t flex_group;
+-	int groups_per_flex = 0;
++	unsigned int groups_per_flex = 0;
+ 	size_t size;
+ 	int i;
+ 
+ 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
+-	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+-
+-	if (groups_per_flex < 2) {
++	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
+ 		sbi->s_log_groups_per_flex = 0;
+ 		return 1;
+ 	}
++	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+ 
+ 	/* We allocate both existing and potentially added groups */
+ 	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index aaa09e9..b5c826e 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -324,7 +324,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
+ 	dprintk("%s enter. slotid %d seqid %d\n",
+ 		__func__, args->csa_slotid, args->csa_sequenceid);
+ 
+-	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
++	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
+ 		return htonl(NFS4ERR_BADSLOT);
+ 
+ 	slot = tbl->slots + args->csa_slotid;
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index 1d1dc1ee..75fe694 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -1006,7 +1006,8 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
+ static struct pnfs_layoutdriver_type objlayout_type = {
+ 	.id = LAYOUT_OSD2_OBJECTS,
+ 	.name = "LAYOUT_OSD2_OBJECTS",
+-	.flags                   = PNFS_LAYOUTRET_ON_SETATTR,
++	.flags                   = PNFS_LAYOUTRET_ON_SETATTR |
++				   PNFS_LAYOUTRET_ON_ERROR,
+ 
+ 	.alloc_layout_hdr        = objlayout_alloc_layout_hdr,
+ 	.free_layout_hdr         = objlayout_free_layout_hdr,
+diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
+index 1d06f8e..fefa122 100644
+--- a/fs/nfs/objlayout/objlayout.c
++++ b/fs/nfs/objlayout/objlayout.c
+@@ -294,9 +294,11 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
+ 	dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
+ 	rdata = state->rpcdata;
+ 	rdata->task.tk_status = status;
+-	if (status >= 0) {
++	if (likely(status >= 0)) {
+ 		rdata->res.count = status;
+ 		rdata->res.eof = eof;
++	} else {
++		rdata->pnfs_error = status;
+ 	}
+ 	objlayout_iodone(state);
+ 	/* must not use state after this point */
+@@ -380,15 +382,17 @@ objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
+ 	wdata = state->rpcdata;
+ 	state->status = status;
+ 	wdata->task.tk_status = status;
+-	if (status >= 0) {
++	if (likely(status >= 0)) {
+ 		wdata->res.count = status;
+ 		wdata->verf.committed = state->committed;
+ 		dprintk("%s: Return status %d committed %d\n",
+ 			__func__, wdata->task.tk_status,
+ 			wdata->verf.committed);
+-	} else
++	} else {
++		wdata->pnfs_error = status;
+ 		dprintk("%s: Return status %d\n",
+ 			__func__, wdata->task.tk_status);
++	}
+ 	objlayout_iodone(state);
+ 	/* must not use state after this point */
+ 
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 36d2a29..9951887 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1119,6 +1119,14 @@ pnfs_ld_write_done(struct nfs_write_data *data)
+ 		data->mds_ops->rpc_release(data);
+ 		return 0;
+ 	}
++	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++					PNFS_LAYOUTRET_ON_ERROR) {
++		/* Don't lo_commit on error, Server will needs to
++		 * preform a file recovery.
++		 */
++		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(data->inode)->flags);
++		pnfs_return_layout(data->inode);
++	}
+ 
+ 	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
+ 		data->pnfs_error);
+@@ -1167,6 +1175,10 @@ pnfs_ld_read_done(struct nfs_read_data *data)
+ 		return 0;
+ 	}
+ 
++	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++						PNFS_LAYOUTRET_ON_ERROR)
++		pnfs_return_layout(data->inode);
++
+ 	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
+ 		data->pnfs_error);
+ 	status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 9d147d9..bb8b324 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -68,6 +68,7 @@ enum {
+ enum layoutdriver_policy_flags {
+ 	/* Should the pNFS client commit and return the layout upon a setattr */
+ 	PNFS_LAYOUTRET_ON_SETATTR	= 1 << 0,
++	PNFS_LAYOUTRET_ON_ERROR		= 1 << 1,
+ };
+ 
+ struct nfs4_deviceid_node;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 858d31b..7e8b07d 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -904,10 +904,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
+ 		data->auth_flavor_len	= 1;
+ 		data->version		= version;
+ 		data->minorversion	= 0;
++		security_init_mnt_opts(&data->lsm_opts);
+ 	}
+ 	return data;
+ }
+ 
++static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
++{
++	if (data) {
++		kfree(data->client_address);
++		kfree(data->mount_server.hostname);
++		kfree(data->nfs_server.export_path);
++		kfree(data->nfs_server.hostname);
++		kfree(data->fscache_uniq);
++		security_free_mnt_opts(&data->lsm_opts);
++		kfree(data);
++	}
++}
++
+ /*
+  * Sanity-check a server address provided by the mount command.
+  *
+@@ -2218,9 +2232,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ 	data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
+ 	mntfh = nfs_alloc_fhandle();
+ 	if (data == NULL || mntfh == NULL)
+-		goto out_free_fh;
+-
+-	security_init_mnt_opts(&data->lsm_opts);
++		goto out;
+ 
+ 	/* Validate the mount data */
+ 	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
+@@ -2232,8 +2244,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ #ifdef CONFIG_NFS_V4
+ 	if (data->version == 4) {
+ 		mntroot = nfs4_try_mount(flags, dev_name, data);
+-		kfree(data->client_address);
+-		kfree(data->nfs_server.export_path);
+ 		goto out;
+ 	}
+ #endif	/* CONFIG_NFS_V4 */
+@@ -2284,13 +2294,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ 	s->s_flags |= MS_ACTIVE;
+ 
+ out:
+-	kfree(data->nfs_server.hostname);
+-	kfree(data->mount_server.hostname);
+-	kfree(data->fscache_uniq);
+-	security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
++	nfs_free_parsed_mount_data(data);
+ 	nfs_free_fhandle(mntfh);
+-	kfree(data);
+ 	return mntroot;
+ 
+ out_err_nosb:
+@@ -2613,9 +2618,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ 
+ 	mntfh = nfs_alloc_fhandle();
+ 	if (data == NULL || mntfh == NULL)
+-		goto out_free_fh;
+-
+-	security_init_mnt_opts(&data->lsm_opts);
++		goto out;
+ 
+ 	/* Get a volume representation */
+ 	server = nfs4_create_server(data, mntfh);
+@@ -2663,13 +2666,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ 
+ 	s->s_flags |= MS_ACTIVE;
+ 
+-	security_free_mnt_opts(&data->lsm_opts);
+ 	nfs_free_fhandle(mntfh);
+ 	return mntroot;
+ 
+ out:
+-	security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
+ 	nfs_free_fhandle(mntfh);
+ 	return ERR_PTR(error);
+ 
+@@ -2855,7 +2855,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ 
+ 	data = nfs_alloc_parsed_mount_data(4);
+ 	if (data == NULL)
+-		goto out_free_data;
++		goto out;
+ 
+ 	/* Validate the mount data */
+ 	error = nfs4_validate_mount_data(raw_data, data, dev_name);
+@@ -2869,12 +2869,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ 		error = PTR_ERR(res);
+ 
+ out:
+-	kfree(data->client_address);
+-	kfree(data->nfs_server.export_path);
+-	kfree(data->nfs_server.hostname);
+-	kfree(data->fscache_uniq);
+-out_free_data:
+-	kfree(data);
++	nfs_free_parsed_mount_data(data);
+ 	dprintk("<-- nfs4_mount() = %d%s\n", error,
+ 			error != 0 ? " [error]" : "");
+ 	return res;
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index b9566e4..4b470f6 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -88,7 +88,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_expkey key;
+ 	struct svc_expkey *ek = NULL;
+ 
+-	if (mesg[mlen-1] != '\n')
++	if (mlen < 1 || mesg[mlen-1] != '\n')
+ 		return -EINVAL;
+ 	mesg[mlen-1] = 0;
+ 
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index 252ab1f..42ed195 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+ 
+ 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+ 
+-	/* 1 from caller and 1 for being on i_list/g_list */
+-	BUG_ON(atomic_read(&mark->refcnt) < 2);
+-
+ 	spin_lock(&group->mark_lock);
+ 
+ 	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+@@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+ 		iput(inode);
+ 
+ 	/*
++	 * We don't necessarily have a ref on mark from caller so the above iput
++	 * may have already destroyed it.  Don't touch from now on.
++	 */
++
++	/*
+ 	 * it's possible that this group tried to destroy itself, but this
+ 	 * this mark was simultaneously being freed by inode.  If that's the
+ 	 * case, we finish freeing the group here.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index f039017..7b28f27 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
+ 	return result;
+ }
+ 
+-static struct mm_struct *__check_mem_permission(struct task_struct *task)
+-{
+-	struct mm_struct *mm;
+-
+-	mm = get_task_mm(task);
+-	if (!mm)
+-		return ERR_PTR(-EINVAL);
+-
+-	/*
+-	 * A task can always look at itself, in case it chooses
+-	 * to use system calls instead of load instructions.
+-	 */
+-	if (task == current)
+-		return mm;
+-
+-	/*
+-	 * If current is actively ptrace'ing, and would also be
+-	 * permitted to freshly attach with ptrace now, permit it.
+-	 */
+-	if (task_is_stopped_or_traced(task)) {
+-		int match;
+-		rcu_read_lock();
+-		match = (tracehook_tracer_task(task) == current);
+-		rcu_read_unlock();
+-		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
+-			return mm;
+-	}
+-
+-	/*
+-	 * No one else is allowed.
+-	 */
+-	mmput(mm);
+-	return ERR_PTR(-EPERM);
+-}
+-
+-/*
+- * If current may access user memory in @task return a reference to the
+- * corresponding mm, otherwise ERR_PTR.
+- */
+-static struct mm_struct *check_mem_permission(struct task_struct *task)
+-{
+-	struct mm_struct *mm;
+-	int err;
+-
+-	/*
+-	 * Avoid racing if task exec's as we might get a new mm but validate
+-	 * against old credentials.
+-	 */
+-	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+-	if (err)
+-		return ERR_PTR(err);
+-
+-	mm = __check_mem_permission(task);
+-	mutex_unlock(&task->signal->cred_guard_mutex);
+-
+-	return mm;
+-}
+-
+-struct mm_struct *mm_for_maps(struct task_struct *task)
++static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+ {
+ 	struct mm_struct *mm;
+ 	int err;
+@@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+ 
+ 	mm = get_task_mm(task);
+ 	if (mm && mm != current->mm &&
+-			!ptrace_may_access(task, PTRACE_MODE_READ)) {
++			!ptrace_may_access(task, mode)) {
+ 		mmput(mm);
+ 		mm = ERR_PTR(-EACCES);
+ 	}
+@@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+ 	return mm;
+ }
+ 
++struct mm_struct *mm_for_maps(struct task_struct *task)
++{
++	return mm_access(task, PTRACE_MODE_READ);
++}
++
+ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ {
+ 	int res = 0;
+@@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
+ 
+ static int mem_open(struct inode* inode, struct file* file)
+ {
+-	file->private_data = (void*)((long)current->self_exec_id);
++	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++	struct mm_struct *mm;
++
++	if (!task)
++		return -ESRCH;
++
++	mm = mm_access(task, PTRACE_MODE_ATTACH);
++	put_task_struct(task);
++
++	if (IS_ERR(mm))
++		return PTR_ERR(mm);
++
+ 	/* OK to pass negative loff_t, we can catch out-of-range */
+ 	file->f_mode |= FMODE_UNSIGNED_OFFSET;
++	file->private_data = mm;
++
+ 	return 0;
+ }
+ 
+ static ssize_t mem_read(struct file * file, char __user * buf,
+ 			size_t count, loff_t *ppos)
+ {
+-	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++	int ret;
+ 	char *page;
+ 	unsigned long src = *ppos;
+-	int ret = -ESRCH;
+-	struct mm_struct *mm;
++	struct mm_struct *mm = file->private_data;
+ 
+-	if (!task)
+-		goto out_no_task;
++	if (!mm)
++		return 0;
+ 
+-	ret = -ENOMEM;
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+ 	if (!page)
+-		goto out;
+-
+-	mm = check_mem_permission(task);
+-	ret = PTR_ERR(mm);
+-	if (IS_ERR(mm))
+-		goto out_free;
+-
+-	ret = -EIO;
+- 
+-	if (file->private_data != (void*)((long)current->self_exec_id))
+-		goto out_put;
++		return -ENOMEM;
+ 
+ 	ret = 0;
+  
+@@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
+ 	}
+ 	*ppos = src;
+ 
+-out_put:
+-	mmput(mm);
+-out_free:
+ 	free_page((unsigned long) page);
+-out:
+-	put_task_struct(task);
+-out_no_task:
+ 	return ret;
+ }
+ 
+@@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ {
+ 	int copied;
+ 	char *page;
+-	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ 	unsigned long dst = *ppos;
+-	struct mm_struct *mm;
++	struct mm_struct *mm = file->private_data;
+ 
+-	copied = -ESRCH;
+-	if (!task)
+-		goto out_no_task;
++	if (!mm)
++		return 0;
+ 
+-	copied = -ENOMEM;
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+ 	if (!page)
+-		goto out_task;
+-
+-	mm = check_mem_permission(task);
+-	copied = PTR_ERR(mm);
+-	if (IS_ERR(mm))
+-		goto out_free;
+-
+-	copied = -EIO;
+-	if (file->private_data != (void *)((long)current->self_exec_id))
+-		goto out_mm;
++		return -ENOMEM;
+ 
+ 	copied = 0;
+ 	while (count > 0) {
+@@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ 	}
+ 	*ppos = dst;
+ 
+-out_mm:
+-	mmput(mm);
+-out_free:
+ 	free_page((unsigned long) page);
+-out_task:
+-	put_task_struct(task);
+-out_no_task:
+ 	return copied;
+ }
+ 
+@@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
+ 	return file->f_pos;
+ }
+ 
++static int mem_release(struct inode *inode, struct file *file)
++{
++	struct mm_struct *mm = file->private_data;
++
++	mmput(mm);
++	return 0;
++}
++
+ static const struct file_operations proc_mem_operations = {
+ 	.llseek		= mem_lseek,
+ 	.read		= mem_read,
+ 	.write		= mem_write,
+ 	.open		= mem_open,
++	.release	= mem_release,
+ };
+ 
+ static ssize_t environ_read(struct file *file, char __user *buf,
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index c7d4ee6..3487b06 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -516,6 +516,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+ 		if (!page)
+ 			continue;
+ 
++		if (PageReserved(page))
++			continue;
++
+ 		/* Clear accessed and referenced bits. */
+ 		ptep_test_and_clear_young(vma, addr, pte);
+ 		ClearPageReferenced(page);
+diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
+index 766b1d4..29166ec 100644
+--- a/fs/proc/uptime.c
++++ b/fs/proc/uptime.c
+@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
+ {
+ 	struct timespec uptime;
+ 	struct timespec idle;
++	cputime64_t idletime;
++	u64 nsec;
++	u32 rem;
+ 	int i;
+-	cputime_t idletime = cputime_zero;
+ 
++	idletime = 0;
+ 	for_each_possible_cpu(i)
+ 		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+ 
+ 	do_posix_clock_monotonic_gettime(&uptime);
+ 	monotonic_to_bootbased(&uptime);
+-	cputime_to_timespec(idletime, &idle);
++	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
++	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
++	idle.tv_nsec = rem;
+ 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
+ 			(unsigned long) uptime.tv_sec,
+ 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
+diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
+index a811ac4..fd75b63 100644
+--- a/fs/ubifs/debug.h
++++ b/fs/ubifs/debug.h
+@@ -121,20 +121,21 @@ const char *dbg_key_str1(const struct ubifs_info *c,
+ 			 const union ubifs_key *key);
+ 
+ /*
+- * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
+- * macros.
++ * TODO: these macros are now broken because there is no locking around them
++ * and we use a global buffer for the key string. This means that in case of
++ * concurrent execution we will end up with incorrect and messy key strings.
+  */
+ #define DBGKEY(key) dbg_key_str0(c, (key))
+ #define DBGKEY1(key) dbg_key_str1(c, (key))
+ 
+-#define ubifs_dbg_msg(type, fmt, ...) do {                        \
+-	spin_lock(&dbg_lock);                                     \
+-	pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
+-	spin_unlock(&dbg_lock);                                   \
+-} while (0)
++#define ubifs_dbg_msg(type, fmt, ...) \
++	pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ 
+ /* Just a debugging messages not related to any specific UBIFS subsystem */
+-#define dbg_msg(fmt, ...)   ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...)                                                     \
++	printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid,  \
++	       __func__, ##__VA_ARGS__)
++
+ /* General messages */
+ #define dbg_gen(fmt, ...)   ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Additional journal messages */
+diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
+index 1739726..451823c 100644
+--- a/include/acpi/acpi_numa.h
++++ b/include/acpi/acpi_numa.h
+@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
+ extern int node_to_pxm(int);
+ extern void __acpi_map_pxm_to_node(int, int);
+ extern int acpi_map_pxm_to_node(int);
++extern unsigned char acpi_srat_revision;
+ 
+ #endif				/* CONFIG_ACPI_NUMA */
+ #endif				/* __ACP_NUMA_H */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index cd93f99..1b13021 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -670,6 +670,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
+ 				     struct request *rq);
+ extern void blk_delay_queue(struct request_queue *, unsigned long);
+ extern void blk_recount_segments(struct request_queue *, struct bio *);
++extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
++extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
++			      unsigned int, void __user *);
+ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ 			  unsigned int, void __user *);
+ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 8f848e4..f13bb6d 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -207,6 +207,7 @@ struct dentry_operations {
+ 
+ #define DCACHE_CANT_MOUNT	0x0100
+ #define DCACHE_GENOCIDE		0x0200
++#define DCACHE_SHRINK_LIST	0x0400
+ 
+ #define DCACHE_OP_HASH		0x1000
+ #define DCACHE_OP_COMPARE	0x2000
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 50940da..313a00e 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
+ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ 					struct task_struct *p);
++extern void mem_cgroup_replace_page_cache(struct page *oldpage,
++					struct page *newpage);
+ 
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ extern int do_swap_account;
+@@ -370,6 +372,10 @@ static inline
+ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+ {
+ }
++static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
++				struct page *newpage)
++{
++}
+ #endif /* CONFIG_CGROUP_MEM_CONT */
+ 
+ #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
+index e884096..dad7d9a 100644
+--- a/include/linux/pci_regs.h
++++ b/include/linux/pci_regs.h
+@@ -392,7 +392,7 @@
+ #define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
+ #define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
+ #define  PCI_EXP_TYPE_RC_END	0x9	/* Root Complex Integrated Endpoint */
+-#define  PCI_EXP_TYPE_RC_EC	0x10	/* Root Complex Event Collector */
++#define  PCI_EXP_TYPE_RC_EC	0xa	/* Root Complex Event Collector */
+ #define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
+ #define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
+ #define PCI_EXP_DEVCAP		4	/* Device capabilities */
+diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
+index 85c50b4..c84e974 100644
+--- a/include/linux/sunrpc/svcsock.h
++++ b/include/linux/sunrpc/svcsock.h
+@@ -34,7 +34,7 @@ struct svc_sock {
+ /*
+  * Function prototypes.
+  */
+-void		svc_close_all(struct list_head *);
++void		svc_close_all(struct svc_serv *);
+ int		svc_recv(struct svc_rqst *, long);
+ int		svc_send(struct svc_rqst *);
+ void		svc_drop(struct svc_rqst *);
+diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
+index 8a4c309..eeeda13 100644
+--- a/include/linux/videodev2.h
++++ b/include/linux/videodev2.h
+@@ -1075,6 +1075,7 @@ struct v4l2_querymenu {
+ #define V4L2_CTRL_FLAG_NEXT_CTRL	0x80000000
+ 
+ /*  User-class control IDs defined by V4L2 */
++#define V4L2_CID_MAX_CTRLS		1024
+ #define V4L2_CID_BASE			(V4L2_CTRL_CLASS_USER | 0x900)
+ #define V4L2_CID_USER_BASE 		V4L2_CID_BASE
+ /*  IDs reserved for driver specific controls */
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 561ac99..0fe6679 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -36,6 +36,7 @@
+ #define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
+ /* Used by transport_send_check_condition_and_sense() */
+ #define SPC_SENSE_KEY_OFFSET			2
++#define SPC_ADD_SENSE_LEN_OFFSET		7
+ #define SPC_ASC_KEY_OFFSET			12
+ #define SPC_ASCQ_KEY_OFFSET			13
+ #define TRANSPORT_IQN_LEN			224
+diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
+index 99fcffb..454ee26 100644
+--- a/include/xen/interface/io/xs_wire.h
++++ b/include/xen/interface/io/xs_wire.h
+@@ -84,4 +84,7 @@ struct xenstore_domain_interface {
+     XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ };
+ 
++/* Violating this is very bad.  See docs/misc/xenstore.txt. */
++#define XENSTORE_PAYLOAD_MAX 4096
++
+ #endif /* _XS_WIRE_H */
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index c0851a8..ef6478f 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -360,15 +360,42 @@ out:
+ }
+  
+ #ifdef CONFIG_ROOT_NFS
++
++#define NFSROOT_TIMEOUT_MIN	5
++#define NFSROOT_TIMEOUT_MAX	30
++#define NFSROOT_RETRY_MAX	5
++
+ static int __init mount_nfs_root(void)
+ {
+ 	char *root_dev, *root_data;
++	unsigned int timeout;
++	int try, err;
+ 
+-	if (nfs_root_data(&root_dev, &root_data) != 0)
+-		return 0;
+-	if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
++	err = nfs_root_data(&root_dev, &root_data);
++	if (err != 0)
+ 		return 0;
+-	return 1;
++
++	/*
++	 * The server or network may not be ready, so try several
++	 * times.  Stop after a few tries in case the client wants
++	 * to fall back to other boot methods.
++	 */
++	timeout = NFSROOT_TIMEOUT_MIN;
++	for (try = 1; ; try++) {
++		err = do_mount_root(root_dev, "nfs",
++					root_mountflags, root_data);
++		if (err == 0)
++			return 1;
++		if (try > NFSROOT_RETRY_MAX)
++			break;
++
++		/* Wait, in case the server refused us immediately */
++		ssleep(timeout);
++		timeout <<= 1;
++		if (timeout > NFSROOT_TIMEOUT_MAX)
++			timeout = NFSROOT_TIMEOUT_MAX;
++	}
++	return 0;
+ }
+ #endif
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 7798181..e0f0bdd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ 		/* Early boot.  kretprobe_table_locks not yet initialized. */
+ 		return;
+ 
++	INIT_HLIST_HEAD(&empty_rp);
+ 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
+ 	head = &kretprobe_inst_table[hash];
+ 	kretprobe_table_lock(hash, &flags);
+@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ 			recycle_rp_inst(ri, &empty_rp);
+ 	}
+ 	kretprobe_table_unlock(hash, &flags);
+-	INIT_HLIST_HEAD(&empty_rp);
+ 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ 		hlist_del(&ri->hlist);
+ 		kfree(ri);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index dd828ea..3c981ba 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -396,24 +396,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
+ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ {
+ 	int error;
+-	struct mem_cgroup *memcg = NULL;
+ 
+ 	VM_BUG_ON(!PageLocked(old));
+ 	VM_BUG_ON(!PageLocked(new));
+ 	VM_BUG_ON(new->mapping);
+ 
+-	/*
+-	 * This is not page migration, but prepare_migration and
+-	 * end_migration does enough work for charge replacement.
+-	 *
+-	 * In the longer term we probably want a specialized function
+-	 * for moving the charge from old to new in a more efficient
+-	 * manner.
+-	 */
+-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+-	if (error)
+-		return error;
+-
+ 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ 	if (!error) {
+ 		struct address_space *mapping = old->mapping;
+@@ -435,13 +422,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ 		if (PageSwapBacked(new))
+ 			__inc_zone_page_state(new, NR_SHMEM);
+ 		spin_unlock_irq(&mapping->tree_lock);
++		/* mem_cgroup codes must not be called under tree_lock */
++		mem_cgroup_replace_page_cache(old, new);
+ 		radix_tree_preload_end();
+ 		if (freepage)
+ 			freepage(old);
+ 		page_cache_release(old);
+-		mem_cgroup_end_migration(memcg, old, new, true);
+-	} else {
+-		mem_cgroup_end_migration(memcg, old, new, false);
+ 	}
+ 
+ 	return error;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d99217b..3791581 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3422,6 +3422,50 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
+ 	return ret;
+ }
+ 
++/*
++ * At replace page cache, newpage is not under any memcg but it's on
++ * LRU. So, this function doesn't touch res_counter but handles LRU
++ * in correct way. Both pages are locked so we cannot race with uncharge.
++ */
++void mem_cgroup_replace_page_cache(struct page *oldpage,
++				  struct page *newpage)
++{
++	struct mem_cgroup *memcg;
++	struct page_cgroup *pc;
++	struct zone *zone;
++	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
++	unsigned long flags;
++
++	if (mem_cgroup_disabled())
++		return;
++
++	pc = lookup_page_cgroup(oldpage);
++	/* fix accounting on old pages */
++	lock_page_cgroup(pc);
++	memcg = pc->mem_cgroup;
++	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
++	ClearPageCgroupUsed(pc);
++	unlock_page_cgroup(pc);
++
++	if (PageSwapBacked(oldpage))
++		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
++
++	zone = page_zone(newpage);
++	pc = lookup_page_cgroup(newpage);
++	/*
++	 * Even if newpage->mapping was NULL before starting replacement,
++	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
++	 * LRU while we overwrite pc->mem_cgroup.
++	 */
++	spin_lock_irqsave(&zone->lru_lock, flags);
++	if (PageLRU(newpage))
++		del_page_from_lru_list(zone, newpage, page_lru(newpage));
++	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
++	if (PageLRU(newpage))
++		add_page_to_lru_list(zone, newpage, page_lru(newpage));
++	spin_unlock_irqrestore(&zone->lru_lock, flags);
++}
++
+ #ifdef CONFIG_DEBUG_VM
+ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8439d2a..947a7e9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5565,6 +5565,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
+ bool is_pageblock_removable_nolock(struct page *page)
+ {
+ 	struct zone *zone = page_zone(page);
++	unsigned long pfn = page_to_pfn(page);
++
++	/*
++	 * We have to be careful here because we are iterating over memory
++	 * sections which are not zone aware so we might end up outside of
++	 * the zone but still within the section.
++	 */
++	if (!zone || zone->zone_start_pfn > pfn ||
++			zone->zone_start_pfn + zone->spanned_pages <= pfn)
++		return false;
++
+ 	return __count_immobile_pages(zone, page, 0);
+ }
+ 
+diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
+index c1f4154..c7056b2 100644
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
+ 		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
+ 	}
+ 
+-	err = ah->nexthdr;
+-
+ 	kfree(AH_SKB_CB(skb)->tmp);
+ 	xfrm_output_resume(skb, err);
+ }
+@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
+ 	if (err)
+ 		goto out;
+ 
++	err = ah->nexthdr;
++
+ 	skb->network_header += ah_hlen;
+ 	memcpy(skb_network_header(skb), work_iph, ihl);
+ 	__skb_pull(skb, ah_hlen + ihl);
+ 	skb_set_transport_header(skb, -ihl);
+-
+-	err = ah->nexthdr;
+ out:
+ 	kfree(AH_SKB_CB(skb)->tmp);
+ 	xfrm_input_resume(skb, err);
+diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
+index 2195ae6..7a33aaa 100644
+--- a/net/ipv6/ah6.c
++++ b/net/ipv6/ah6.c
+@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
+ #endif
+ 	}
+ 
+-	err = ah->nexthdr;
+-
+ 	kfree(AH_SKB_CB(skb)->tmp);
+ 	xfrm_output_resume(skb, err);
+ }
+@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
+ 	if (err)
+ 		goto out;
+ 
++	err = ah->nexthdr;
++
+ 	skb->network_header += ah_hlen;
+ 	memcpy(skb_network_header(skb), work_iph, hdr_len);
+ 	__skb_pull(skb, ah_hlen + hdr_len);
+ 	skb_set_transport_header(skb, -hdr_len);
+-
+-	err = ah->nexthdr;
+ out:
+ 	kfree(AH_SKB_CB(skb)->tmp);
+ 	xfrm_input_resume(skb, err);
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index 8f6a302..aa1c40a 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -109,7 +109,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ 		if (status->flag & RX_FLAG_MMIC_ERROR)
+ 			goto mic_fail;
+ 
+-		if (!(status->flag & RX_FLAG_IV_STRIPPED))
++		if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+ 			goto update_iv;
+ 
+ 		return RX_CONTINUE;
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 2b90292..ce5f111 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
+ 
+ fail_free:
+ 	kfree(m->to_pool);
++	m->to_pool = NULL;
+ fail:
+ 	return -ENOMEM;
+ }
+@@ -287,7 +288,9 @@ svc_pool_map_put(void)
+ 	if (!--m->count) {
+ 		m->mode = SVC_POOL_DEFAULT;
+ 		kfree(m->to_pool);
++		m->to_pool = NULL;
+ 		kfree(m->pool_to);
++		m->pool_to = NULL;
+ 		m->npools = 0;
+ 	}
+ 
+@@ -472,17 +475,20 @@ svc_destroy(struct svc_serv *serv)
+ 		printk("svc_destroy: no threads for serv=%p!\n", serv);
+ 
+ 	del_timer_sync(&serv->sv_temptimer);
+-
+-	svc_close_all(&serv->sv_tempsocks);
++	/*
++	 * The set of xprts (contained in the sv_tempsocks and
++	 * sv_permsocks lists) is now constant, since it is modified
++	 * only by accepting new sockets (done by service threads in
++	 * svc_recv) or aging old ones (done by sv_temptimer), or
++	 * configuration changes (excluded by whatever locking the
++	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
++	 * safe to traverse those lists and shut everything down:
++	 */
++	svc_close_all(serv);
+ 
+ 	if (serv->sv_shutdown)
+ 		serv->sv_shutdown(serv);
+ 
+-	svc_close_all(&serv->sv_permsocks);
+-
+-	BUG_ON(!list_empty(&serv->sv_permsocks));
+-	BUG_ON(!list_empty(&serv->sv_tempsocks));
+-
+ 	cache_clean_deferred(serv);
+ 
+ 	if (svc_serv_is_pooled(serv))
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index bd31208..9d7ed0b 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -901,14 +901,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
+ 	spin_lock_bh(&serv->sv_lock);
+ 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+ 		list_del_init(&xprt->xpt_list);
+-	/*
+-	 * The only time we're called while xpt_ready is still on a list
+-	 * is while the list itself is about to be destroyed (in
+-	 * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
+-	 * to add new entries to the sp_sockets list, so we can't leave
+-	 * a freed xprt on it.
+-	 */
+-	list_del_init(&xprt->xpt_ready);
++	BUG_ON(!list_empty(&xprt->xpt_ready));
+ 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ 		serv->sv_tmpcnt--;
+ 	spin_unlock_bh(&serv->sv_lock);
+@@ -936,22 +929,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+ 
+-void svc_close_all(struct list_head *xprt_list)
++static void svc_close_list(struct list_head *xprt_list)
++{
++	struct svc_xprt *xprt;
++
++	list_for_each_entry(xprt, xprt_list, xpt_list) {
++		set_bit(XPT_CLOSE, &xprt->xpt_flags);
++		set_bit(XPT_BUSY, &xprt->xpt_flags);
++	}
++}
++
++void svc_close_all(struct svc_serv *serv)
+ {
++	struct svc_pool *pool;
+ 	struct svc_xprt *xprt;
+ 	struct svc_xprt *tmp;
++	int i;
++
++	svc_close_list(&serv->sv_tempsocks);
++	svc_close_list(&serv->sv_permsocks);
+ 
++	for (i = 0; i < serv->sv_nrpools; i++) {
++		pool = &serv->sv_pools[i];
++
++		spin_lock_bh(&pool->sp_lock);
++		while (!list_empty(&pool->sp_sockets)) {
++			xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
++			list_del_init(&xprt->xpt_ready);
++		}
++		spin_unlock_bh(&pool->sp_lock);
++	}
+ 	/*
+-	 * The server is shutting down, and no more threads are running.
+-	 * svc_xprt_enqueue() might still be running, but at worst it
+-	 * will re-add the xprt to sp_sockets, which will soon get
+-	 * freed.  So we don't bother with any more locking, and don't
+-	 * leave the close to the (nonexistent) server threads:
++	 * At this point the sp_sockets lists will stay empty, since
++	 * svc_enqueue will not add new entries without taking the
++	 * sp_lock and checking XPT_BUSY.
+ 	 */
+-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+-		set_bit(XPT_CLOSE, &xprt->xpt_flags);
++	list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
+ 		svc_delete_xprt(xprt);
+-	}
++	list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
++		svc_delete_xprt(xprt);
++
++	BUG_ON(!list_empty(&serv->sv_permsocks));
++	BUG_ON(!list_empty(&serv->sv_tempsocks));
+ }
+ 
+ /*
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index a4fe923..25f1e71 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -242,33 +242,61 @@ if ($kconfig) {
+     read_kconfig($kconfig);
+ }
+ 
++sub convert_vars {
++    my ($line, %vars) = @_;
++
++    my $process = "";
++
++    while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
++	my $start = $1;
++	my $variable = $2;
++	my $var = $3;
++
++	if (defined($vars{$var})) {
++	    $process .= $start . $vars{$var};
++	} else {
++	    $process .= $start . $variable;
++	}
++    }
++
++    $process .= $line;
++
++    return $process;
++}
++
+ # Read all Makefiles to map the configs to the objects
+ foreach my $makefile (@makefiles) {
+ 
+-    my $cont = 0;
++    my $line = "";
++    my %make_vars;
+ 
+     open(MIN,$makefile) || die "Can't open $makefile";
+     while (<MIN>) {
++	# if this line ends with a backslash, continue
++	chomp;
++	if (/^(.*)\\$/) {
++	    $line .= $1;
++	    next;
++	}
++
++	$line .= $_;
++	$_ = $line;
++	$line = "";
++
+ 	my $objs;
+ 
+-	# is this a line after a line with a backslash?
+-	if ($cont && /(\S.*)$/) {
+-	    $objs = $1;
+-	}
+-	$cont = 0;
++	$_ = convert_vars($_, %make_vars);
+ 
+ 	# collect objects after obj-$(CONFIG_FOO_BAR)
+ 	if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
+ 	    $var = $1;
+ 	    $objs = $2;
++
++	# check if variables are set
++	} elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
++	    $make_vars{$1} = $2;
+ 	}
+ 	if (defined($objs)) {
+-	    # test if the line ends with a backslash
+-	    if ($objs =~ m,(.*)\\$,) {
+-		$objs = $1;
+-		$cont = 1;
+-	    }
+-
+ 	    foreach my $obj (split /\s+/,$objs) {
+ 		$obj =~ s/-/_/g;
+ 		if ($obj =~ /(.*)\.o$/) {
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index f40a6af6..54e35c1 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
+ 		succeed_file();
+ 	}
+ 	if (w(txthdr->sh_type) != SHT_PROGBITS ||
+-	    !(w(txthdr->sh_flags) & SHF_EXECINSTR))
++	    !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
+ 		return NULL;
+ 	return txtname;
+ }
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index da36d2c..5335605 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -177,8 +177,8 @@ void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+ 	strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
+ 
+ 	result = ima_store_template(entry, violation, inode);
+-	if (!result)
++	if (!result || result == -EEXIST)
+ 		iint->flags |= IMA_MEASURED;
+-	else
++	if (result < 0)
+ 		kfree(entry);
+ }
+diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
+index 8e28f04..55a6271 100644
+--- a/security/integrity/ima/ima_queue.c
++++ b/security/integrity/ima/ima_queue.c
+@@ -23,6 +23,8 @@
+ #include <linux/slab.h>
+ #include "ima.h"
+ 
++#define AUDIT_CAUSE_LEN_MAX 32
++
+ LIST_HEAD(ima_measurements);	/* list of all measurements */
+ 
+ /* key: inode (before secure-hashing a file) */
+@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
+ 
+ 	result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
+ 	if (result != 0)
+-		pr_err("IMA: Error Communicating to TPM chip\n");
++		pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
++		       result);
+ 	return result;
+ }
+ 
+@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ {
+ 	u8 digest[IMA_DIGEST_SIZE];
+ 	const char *audit_cause = "hash_added";
++	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+ 	int audit_info = 1;
+-	int result = 0;
++	int result = 0, tpmresult = 0;
+ 
+ 	mutex_lock(&ima_extend_list_mutex);
+ 	if (!violation) {
+ 		memcpy(digest, entry->digest, sizeof digest);
+ 		if (ima_lookup_digest_entry(digest)) {
+ 			audit_cause = "hash_exists";
++			result = -EEXIST;
+ 			goto out;
+ 		}
+ 	}
+@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ 	if (violation)		/* invalidate pcr */
+ 		memset(digest, 0xff, sizeof digest);
+ 
+-	result = ima_pcr_extend(digest);
+-	if (result != 0) {
+-		audit_cause = "TPM error";
++	tpmresult = ima_pcr_extend(digest);
++	if (tpmresult != 0) {
++		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
++			 tpmresult);
++		audit_cause = tpm_audit_cause;
+ 		audit_info = 0;
+ 	}
+ out:
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 08ec073..e289a13 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -474,7 +474,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
+ }
+ 
+ /* get the widget type from widget capability bits */
+-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
++static inline int get_wcaps_type(unsigned int wcaps)
++{
++	if (!wcaps)
++		return -1; /* invalid type */
++	return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
++}
+ 
+ static inline unsigned int get_wcaps_channels(u32 wcaps)
+ {
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index bfe74c2..6fe944a 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
+ 		[AC_WID_BEEP] = "Beep Generator Widget",
+ 		[AC_WID_VENDOR] = "Vendor Defined Widget",
+ 	};
++	if (wid_value == -1)
++		return "UNKNOWN Widget";
+ 	wid_value &= 0xf;
+ 	if (names[wid_value])
+ 		return names[wid_value];
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 5d2e97a..0d8db75 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1602,7 +1602,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
+ 				"Dell Studio 1557", STAC_DELL_M6_DMIC),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+-				"Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
++				"Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ 				"Dell Studio 1558", STAC_DELL_M6_DMIC),
+ 	{} /* terminator */
+diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
+index e328cfb..e525da2 100644
+--- a/sound/pci/ice1712/amp.c
++++ b/sound/pci/ice1712/amp.c
+@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
+ 
+ static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
+ {
+-	/* we use pins 39 and 41 of the VT1616 for left and right read outputs */
+-	snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
++	if (ice->ac97)
++		/* we use pins 39 and 41 of the VT1616 for left and right
++		read outputs */
++		snd_ac97_write_cache(ice->ac97, 0x5a,
++			snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
+index 42d1ab1..915546a 100644
+--- a/sound/pci/oxygen/xonar_wm87x6.c
++++ b/sound/pci/oxygen/xonar_wm87x6.c
+@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
+ 	struct xonar_wm87x6 *data = chip->model_data;
+ 
+ 	wm8776_write(chip, WM8776_RESET, 0);
++	wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
+ 	wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
+ 		     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
+ 	wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index c400ade..1e7a47a 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -674,7 +674,7 @@ dotry:
+ 		inurb->transfer_buffer_length =
+ 			inurb->number_of_packets *
+ 			inurb->iso_frame_desc[0].length;
+-		preempt_disable();
++
+ 		if (u == 0) {
+ 			int now;
+ 			struct usb_device *dev = inurb->dev;
+@@ -686,19 +686,17 @@ dotry:
+ 		}
+ 		err = usb_submit_urb(inurb, GFP_ATOMIC);
+ 		if (err < 0) {
+-			preempt_enable();
+ 			snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
+ 				   " returned %i\n", u, err);
+ 			return err;
+ 		}
+ 		err = usb_submit_urb(outurb, GFP_ATOMIC);
+ 		if (err < 0) {
+-			preempt_enable();
+ 			snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
+ 				   " returned %i\n", u, err);
+ 			return err;
+ 		}
+-		preempt_enable();
++
+ 		if (inurb->start_frame != outurb->start_frame) {
+ 			snd_printd(KERN_DEBUG
+ 				   "u[%i] start_frames differ in:%u out:%u\n",

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.19.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/bugfix/all/stable/3.0.19.patch	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,2210 @@
+diff --git a/Makefile b/Makefile
+index 581b8e9..1e57901 100644
+diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
+index 9a9706c..6ebdb0d 100644
+--- a/arch/arm/mach-ux500/Kconfig
++++ b/arch/arm/mach-ux500/Kconfig
+@@ -7,6 +7,7 @@ config UX500_SOC_COMMON
+ 	select HAS_MTU
+ 	select ARM_ERRATA_753970
+ 	select ARM_ERRATA_754322
++	select ARM_ERRATA_764369
+ 
+ menu "Ux500 SoC"
+ 
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 089c0b5..b6ba103 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -270,10 +270,6 @@ cpu_resume_l1_flags:
+  *	Initialise TLB, Caches, and MMU state ready to switch the MMU
+  *	on.  Return in r0 the new CP15 C1 control register setting.
+  *
+- *	We automatically detect if we have a Harvard cache, and use the
+- *	Harvard cache control instructions insead of the unified cache
+- *	control instructions.
+- *
+  *	This should be able to cover all ARMv7 cores.
+  *
+  *	It is assumed that:
+@@ -363,9 +359,7 @@ __v7_setup:
+ #endif
+ 
+ 3:	mov	r10, #0
+-#ifdef HARVARD_CACHE
+ 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
+-#endif
+ 	dsb
+ #ifdef CONFIG_MMU
+ 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
+diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
+index 54a13aa..21f7385 100644
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
+ /* UV global physical address --> socket phys RAM */
+ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+ {
+-	unsigned long paddr = gpa & uv_hub_info->gpa_mask;
++	unsigned long paddr;
+ 	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ 	unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
+ 
+ 	gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ 		((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+-	gpa = gpa & uv_hub_info->gpa_mask;
++	paddr = gpa & uv_hub_info->gpa_mask;
+ 	if (paddr >= remap_base && paddr < remap_base + remap_top)
+ 		paddr -= remap_base;
+ 	return paddr;
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index c561038..b727450 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -298,13 +298,33 @@ free_table:
+ 	return state;
+ }
+ 
++/*
++ * AMD microcode firmware naming convention, up to family 15h they are in
++ * the legacy file:
++ *
++ *    amd-ucode/microcode_amd.bin
++ *
++ * This legacy file is always smaller than 2K in size.
++ *
++ * Starting at family 15h they are in family specific firmware files:
++ *
++ *    amd-ucode/microcode_amd_fam15h.bin
++ *    amd-ucode/microcode_amd_fam16h.bin
++ *    ...
++ *
++ * These might be larger than 2K.
++ */
+ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ {
+-	const char *fw_name = "amd-ucode/microcode_amd.bin";
++	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ 	const struct firmware *fw;
+ 	enum ucode_state ret = UCODE_NFOUND;
++	struct cpuinfo_x86 *c = &cpu_data(cpu);
++
++	if (c->x86 >= 0x15)
++		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+ 
+-	if (request_firmware(&fw, fw_name, device)) {
++	if (request_firmware(&fw, (const char *)fw_name, device)) {
+ 		pr_err("failed to load file %s\n", fw_name);
+ 		goto out;
+ 	}
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 7b65f75..7c1b765 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 	cleanup_addr = proglen; /* epilogue address */
+ 
+ 	for (pass = 0; pass < 10; pass++) {
++		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+ 		/* no prologue/epilogue for trivial filters (RET something) */
+ 		proglen = 0;
+ 		prog = temp;
+ 
+-		if (seen) {
++		if (seen_or_pass0) {
+ 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
+ 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
+ 			/* note : must save %rbx in case bpf_error is hit */
+-			if (seen & (SEEN_XREG | SEEN_DATAREF))
++			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
+ 				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
+-			if (seen & SEEN_XREG)
++			if (seen_or_pass0 & SEEN_XREG)
+ 				CLEAR_X(); /* make sure we dont leek kernel memory */
+ 
+ 			/*
+@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 			 *  r9 = skb->len - skb->data_len
+ 			 *  r8 = skb->data
+ 			 */
+-			if (seen & SEEN_DATAREF) {
++			if (seen_or_pass0 & SEEN_DATAREF) {
+ 				if (offsetof(struct sk_buff, len) <= 127)
+ 					/* mov    off8(%rdi),%r9d */
+ 					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
+@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 			case BPF_S_ALU_DIV_X: /* A /= X; */
+ 				seen |= SEEN_XREG;
+ 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
+-				if (pc_ret0 != -1)
+-					EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
+-				else {
++				if (pc_ret0 > 0) {
++					/* addrs[pc_ret0 - 1] is start address of target
++					 * (addrs[i] - 4) is the address following this jmp
++					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
++					 */
++					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
++								(addrs[i] - 4));
++				} else {
+ 					EMIT_COND_JMP(X86_JNE, 2 + 5);
+ 					CLEAR_A();
+ 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
+@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 				}
+ 				/* fallinto */
+ 			case BPF_S_RET_A:
+-				if (seen) {
++				if (seen_or_pass0) {
+ 					if (i != flen - 1) {
+ 						EMIT_JMP(cleanup_addr - addrs[i]);
+ 						break;
+ 					}
+-					if (seen & SEEN_XREG)
++					if (seen_or_pass0 & SEEN_XREG)
+ 						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
+ 					EMIT1(0xc9);		/* leaveq */
+ 				}
+@@ -483,8 +489,9 @@ common_load:			seen |= SEEN_DATAREF;
+ 				goto common_load;
+ 			case BPF_S_LDX_B_MSH:
+ 				if ((int)K < 0) {
+-					if (pc_ret0 != -1) {
+-						EMIT_JMP(addrs[pc_ret0] - addrs[i]);
++					if (pc_ret0 > 0) {
++						/* addrs[pc_ret0 - 1] is the start address */
++						EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
+ 						break;
+ 					}
+ 					CLEAR_A();
+@@ -599,13 +606,14 @@ cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
+ 		 * use it to give the cleanup instruction(s) addr
+ 		 */
+ 		cleanup_addr = proglen - 1; /* ret */
+-		if (seen)
++		if (seen_or_pass0)
+ 			cleanup_addr -= 1; /* leaveq */
+-		if (seen & SEEN_XREG)
++		if (seen_or_pass0 & SEEN_XREG)
+ 			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
+ 
+ 		if (image) {
+-			WARN_ON(proglen != oldproglen);
++			if (proglen != oldproglen)
++				pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+ 			break;
+ 		}
+ 		if (proglen == oldproglen) {
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 9ed9f60..88f160b 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -21,8 +21,6 @@
+ #include <linux/percpu.h>
+ #include <asm/byteorder.h>
+ 
+-static DEFINE_PER_CPU(u64[80], msg_schedule);
+-
+ static inline u64 Ch(u64 x, u64 y, u64 z)
+ {
+         return z ^ (x & (y ^ z));
+@@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
+ 
+ static inline void BLEND_OP(int I, u64 *W)
+ {
+-	W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
++	W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
+ }
+ 
+ static void
+@@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
+ 	u64 a, b, c, d, e, f, g, h, t1, t2;
+ 
+ 	int i;
+-	u64 *W = get_cpu_var(msg_schedule);
++	u64 W[16];
+ 
+ 	/* load the input */
+         for (i = 0; i < 16; i++)
+                 LOAD_OP(i, W, input);
+ 
+-        for (i = 16; i < 80; i++) {
+-                BLEND_OP(i, W);
+-        }
+-
+ 	/* load the state into our registers */
+ 	a=state[0];   b=state[1];   c=state[2];   d=state[3];
+ 	e=state[4];   f=state[5];   g=state[6];   h=state[7];
+ 
+-	/* now iterate */
+-	for (i=0; i<80; i+=8) {
+-		t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[i  ];
+-		t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
+-		t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
+-		t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
+-		t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
+-		t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
+-		t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
+-		t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
+-		t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
+-		t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
+-		t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
+-		t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
+-		t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
+-		t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
+-		t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
+-		t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
++#define SHA512_0_15(i, a, b, c, d, e, f, g, h)			\
++	t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i];	\
++	t2 = e0(a) + Maj(a, b, c);				\
++	d += t1;						\
++	h = t1 + t2
++
++#define SHA512_16_79(i, a, b, c, d, e, f, g, h)			\
++	BLEND_OP(i, W);						\
++	t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16];	\
++	t2 = e0(a) + Maj(a, b, c);				\
++	d += t1;						\
++	h = t1 + t2
++
++	for (i = 0; i < 16; i += 8) {
++		SHA512_0_15(i, a, b, c, d, e, f, g, h);
++		SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
++		SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
++		SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
++		SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
++		SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
++		SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
++		SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
++	}
++	for (i = 16; i < 80; i += 8) {
++		SHA512_16_79(i, a, b, c, d, e, f, g, h);
++		SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
++		SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
++		SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
++		SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
++		SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
++		SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
++		SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
+ 	}
+ 
+ 	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+@@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
+ 
+ 	/* erase our data */
+ 	a = b = c = d = e = f = g = h = t1 = t2 = 0;
+-	memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
+-	put_cpu_var(msg_schedule);
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
+index 3f46772..ba23790 100644
+--- a/drivers/gpu/drm/drm_auth.c
++++ b/drivers/gpu/drm/drm_auth.c
+@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+  * Searches and unlinks the entry in drm_device::magiclist with the magic
+  * number hash key, while holding the drm_device::struct_mutex lock.
+  */
+-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
++int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+ {
+ 	struct drm_magic_entry *pt;
+ 	struct drm_hash_item *hash;
+@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+  * If there is a magic number in drm_file::magic then use it, otherwise
+  * searches an unique non-zero magic number and add it associating it with \p
+  * file_priv.
++ * This ioctl needs protection by the drm_global_mutex, which protects
++ * struct drm_file::magic and struct drm_magic_entry::priv.
+  */
+ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+  * \return zero if authentication successed, or a negative number otherwise.
+  *
+  * Checks if \p file_priv is associated with the magic number passed in \arg.
++ * This ioctl needs protection by the drm_global_mutex, which protects
++ * struct drm_file::magic and struct drm_magic_entry::priv.
+  */
+ int drm_authmagic(struct drm_device *dev, void *data,
+ 		  struct drm_file *file_priv)
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 2ec7d48..c42e12c 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -486,6 +486,11 @@ int drm_release(struct inode *inode, struct file *filp)
+ 		  (long)old_encode_dev(file_priv->minor->device),
+ 		  dev->open_count);
+ 
++	/* Release any auth tokens that might point to this file_priv,
++	   (do that under the drm_global_mutex) */
++	if (file_priv->magic)
++		(void) drm_remove_magic(file_priv->master, file_priv->magic);
++
+ 	/* if the master has gone away we can't do anything with the lock */
+ 	if (file_priv->minor->master)
+ 		drm_master_release(dev, filp);
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 30fe554..bdda08e 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1059,15 +1059,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 
+ 	/* Set the SDVO control regs. */
+ 	if (INTEL_INFO(dev)->gen >= 4) {
+-		sdvox = 0;
++		/* The real mode polarity is set by the SDVO commands, using
++		 * struct intel_sdvo_dtd. */
++		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ 		if (intel_sdvo->is_hdmi)
+ 			sdvox |= intel_sdvo->color_range;
+ 		if (INTEL_INFO(dev)->gen < 5)
+ 			sdvox |= SDVO_BORDER_ENABLE;
+-		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+-			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+-		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+-			sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ 	} else {
+ 		sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ 		switch (intel_sdvo->sdvo_reg) {
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index fecc1aa..5feb6e9 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -134,6 +134,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ 	/* Dell RS690 only seems to work with MSIs. */
+ 	if ((rdev->pdev->device == 0x791f) &&
+ 	    (rdev->pdev->subsystem_vendor == 0x1028) &&
++	    (rdev->pdev->subsystem_device == 0x01fc))
++		return true;
++
++	/* Dell RS690 only seems to work with MSIs. */
++	if ((rdev->pdev->device == 0x791f) &&
++	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+ 	    (rdev->pdev->subsystem_device == 0x01fd))
+ 		return true;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index dfe32e6..8a38c91 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -313,7 +313,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
+ 				  unsigned int *handle)
+ {
+ 	if (handle)
+-		handle = 0;
++		*handle = 0;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
+index 92f9497..6dbfd3e 100644
+--- a/drivers/hwmon/f71805f.c
++++ b/drivers/hwmon/f71805f.c
+@@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg)
+ 
+ static inline u8 temp_to_reg(long val)
+ {
+-	if (val < 0)
+-		val = 0;
+-	else if (val > 1000 * 0xff)
+-		val = 0xff;
+-	return ((val + 500) / 1000);
++	if (val <= 0)
++		return 0;
++	if (val >= 1000 * 0xff)
++		return 0xff;
++	return (val + 500) / 1000;
+ }
+ 
+ /*
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index cf4330b..9594cdb 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
+ 
+ static int __devinit sht15_probe(struct platform_device *pdev)
+ {
+-	int ret = 0;
++	int ret;
+ 	struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	u8 status = 0;
+ 
+@@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
+ 	init_waitqueue_head(&data->wait_queue);
+ 
+ 	if (pdev->dev.platform_data == NULL) {
++		ret = -EINVAL;
+ 		dev_err(&pdev->dev, "no platform data supplied\n");
+ 		goto err_free_data;
+ 	}
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 4b2fc50..6284515 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -1295,6 +1295,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct w83627ehf_data *data = dev_get_drvdata(dev);
+ 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
++	struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ 	int nr = sensor_attr->index;
+ 	unsigned long val;
+ 	int err;
+@@ -1306,6 +1307,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+ 
+ 	if (val > 1)
+ 		return -EINVAL;
++
++	/* On NCT67766F, DC mode is only supported for pwm1 */
++	if (sio_data->kind == nct6776 && nr && val != 1)
++		return -EINVAL;
++
+ 	mutex_lock(&data->update_lock);
+ 	reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
+ 	data->pwm_mode[nr] = val;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 2df9276..5e725e0 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -871,16 +871,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+ 	}
+ }
+ 
+-/* hw is a boolean parameter that determines whether we should try and
+- * set the hw address of the device as well as the hw address of the
+- * net_device
+- */
+-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
++static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
+ {
+ 	struct net_device *dev = slave->dev;
+ 	struct sockaddr s_addr;
+ 
+-	if (!hw) {
++	if (slave->bond->params.mode == BOND_MODE_TLB) {
+ 		memcpy(dev->dev_addr, addr, dev->addr_len);
+ 		return 0;
+ 	}
+@@ -910,8 +906,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
+ 	u8 tmp_mac_addr[ETH_ALEN];
+ 
+ 	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+-	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
+-	alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
++	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
++	alb_set_slave_mac_addr(slave2, tmp_mac_addr);
+ 
+ }
+ 
+@@ -1058,8 +1054,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
+ 
+ 		/* Try setting slave mac to bond address and fall-through
+ 		   to code handling that situation below... */
+-		alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+-				       bond->alb_info.rlb_enabled);
++		alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
+ 	}
+ 
+ 	/* The slave's address is equal to the address of the bond.
+@@ -1095,8 +1090,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
+ 	}
+ 
+ 	if (free_mac_slave) {
+-		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+-				       bond->alb_info.rlb_enabled);
++		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
+ 
+ 		pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ 			   bond->dev->name, slave->dev->name,
+@@ -1452,8 +1446,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
+ {
+ 	int res;
+ 
+-	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+-				     bond->alb_info.rlb_enabled);
++	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
+ 	if (res) {
+ 		return res;
+ 	}
+@@ -1604,8 +1597,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
+ 		alb_swap_mac_addr(bond, swap_slave, new_slave);
+ 	} else {
+ 		/* set the new_slave to the bond mac address */
+-		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+-				       bond->alb_info.rlb_enabled);
++		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
+ 	}
+ 
+ 	if (swap_slave) {
+@@ -1665,8 +1657,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
+ 		alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+ 		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+ 	} else {
+-		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
+-				       bond->alb_info.rlb_enabled);
++		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
+ 
+ 		read_lock(&bond->lock);
+ 		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index f5f6831..4a4733e 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1733,9 +1733,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ 	struct uart_amba_port *uap = amba_ports[co->index];
+ 	unsigned int status, old_cr, new_cr;
++	unsigned long flags;
++	int locked = 1;
+ 
+ 	clk_enable(uap->clk);
+ 
++	local_irq_save(flags);
++	if (uap->port.sysrq)
++		locked = 0;
++	else if (oops_in_progress)
++		locked = spin_trylock(&uap->port.lock);
++	else
++		spin_lock(&uap->port.lock);
++
+ 	/*
+ 	 *	First save the CR then disable the interrupts
+ 	 */
+@@ -1755,6 +1765,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ 	} while (status & UART01x_FR_BUSY);
+ 	writew(old_cr, uap->port.membase + UART011_CR);
+ 
++	if (locked)
++		spin_unlock(&uap->port.lock);
++	local_irq_restore(flags);
++
+ 	clk_disable(uap->clk);
+ }
+ 
+diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
+index 2aaafa9..6c12d94 100644
+--- a/drivers/tty/serial/jsm/jsm_driver.c
++++ b/drivers/tty/serial/jsm/jsm_driver.c
+@@ -269,6 +269,7 @@ static void jsm_io_resume(struct pci_dev *pdev)
+ 	struct jsm_board *brd = pci_get_drvdata(pdev);
+ 
+ 	pci_restore_state(pdev);
++	pci_save_state(pdev);
+ 
+ 	jsm_uart_port_init(brd);
+ }
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index 33d37d2..a4aaca0 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port,
+ 	int do_clocal = 0, retval;
+ 	unsigned long flags;
+ 	DEFINE_WAIT(wait);
+-	int cd;
+ 
+ 	/* block if port is in the process of being closed */
+ 	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
+@@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port,
+ 				retval = -ERESTARTSYS;
+ 			break;
+ 		}
+-		/* Probe the carrier. For devices with no carrier detect this
+-		   will always return true */
+-		cd = tty_port_carrier_raised(port);
++		/*
++		 * Probe the carrier. For devices with no carrier detect
++		 * tty_port_carrier_raised will always return true.
++		 * Never ask drivers if CLOCAL is set, this causes troubles
++		 * on some hardware.
++		 */
+ 		if (!(port->flags & ASYNC_CLOSING) &&
+-				(do_clocal || cd))
++				(do_clocal || tty_port_carrier_raised(port)))
+ 			break;
+ 		if (signal_pending(current)) {
+ 			retval = -ERESTARTSYS;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 2b9ff51..90581a8 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+ 
+ #define WDM_MAX			16
+ 
++/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
++#define WDM_DEFAULT_BUFSIZE	256
+ 
+ static DEFINE_MUTEX(wdm_mutex);
+ 
+@@ -88,7 +90,8 @@ struct wdm_device {
+ 	int			count;
+ 	dma_addr_t		shandle;
+ 	dma_addr_t		ihandle;
+-	struct mutex		lock;
++	struct mutex		wlock;
++	struct mutex		rlock;
+ 	wait_queue_head_t	wait;
+ 	struct work_struct	rxwork;
+ 	int			werr;
+@@ -323,7 +326,7 @@ static ssize_t wdm_write
+ 	}
+ 
+ 	/* concurrent writes and disconnect */
+-	r = mutex_lock_interruptible(&desc->lock);
++	r = mutex_lock_interruptible(&desc->wlock);
+ 	rv = -ERESTARTSYS;
+ 	if (r) {
+ 		kfree(buf);
+@@ -386,7 +389,7 @@ static ssize_t wdm_write
+ out:
+ 	usb_autopm_put_interface(desc->intf);
+ outnp:
+-	mutex_unlock(&desc->lock);
++	mutex_unlock(&desc->wlock);
+ outnl:
+ 	return rv < 0 ? rv : count;
+ }
+@@ -399,7 +402,7 @@ static ssize_t wdm_read
+ 	struct wdm_device *desc = file->private_data;
+ 
+ 
+-	rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
++	rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
+ 	if (rv < 0)
+ 		return -ERESTARTSYS;
+ 
+@@ -467,14 +470,16 @@ retry:
+ 	for (i = 0; i < desc->length - cntr; i++)
+ 		desc->ubuf[i] = desc->ubuf[i + cntr];
+ 
++	spin_lock_irq(&desc->iuspin);
+ 	desc->length -= cntr;
++	spin_unlock_irq(&desc->iuspin);
+ 	/* in case we had outstanding data */
+ 	if (!desc->length)
+ 		clear_bit(WDM_READ, &desc->flags);
+ 	rv = cntr;
+ 
+ err:
+-	mutex_unlock(&desc->lock);
++	mutex_unlock(&desc->rlock);
+ 	return rv;
+ }
+ 
+@@ -540,7 +545,8 @@ static int wdm_open(struct inode *inode, struct file *file)
+ 	}
+ 	intf->needs_remote_wakeup = 1;
+ 
+-	mutex_lock(&desc->lock);
++	/* using write lock to protect desc->count */
++	mutex_lock(&desc->wlock);
+ 	if (!desc->count++) {
+ 		desc->werr = 0;
+ 		desc->rerr = 0;
+@@ -553,7 +559,7 @@ static int wdm_open(struct inode *inode, struct file *file)
+ 	} else {
+ 		rv = 0;
+ 	}
+-	mutex_unlock(&desc->lock);
++	mutex_unlock(&desc->wlock);
+ 	usb_autopm_put_interface(desc->intf);
+ out:
+ 	mutex_unlock(&wdm_mutex);
+@@ -565,9 +571,11 @@ static int wdm_release(struct inode *inode, struct file *file)
+ 	struct wdm_device *desc = file->private_data;
+ 
+ 	mutex_lock(&wdm_mutex);
+-	mutex_lock(&desc->lock);
++
++	/* using write lock to protect desc->count */
++	mutex_lock(&desc->wlock);
+ 	desc->count--;
+-	mutex_unlock(&desc->lock);
++	mutex_unlock(&desc->wlock);
+ 
+ 	if (!desc->count) {
+ 		dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
+@@ -630,7 +638,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	struct usb_cdc_dmm_desc *dmhd;
+ 	u8 *buffer = intf->altsetting->extra;
+ 	int buflen = intf->altsetting->extralen;
+-	u16 maxcom = 0;
++	u16 maxcom = WDM_DEFAULT_BUFSIZE;
+ 
+ 	if (!buffer)
+ 		goto out;
+@@ -665,7 +673,8 @@ next_desc:
+ 	desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
+ 	if (!desc)
+ 		goto out;
+-	mutex_init(&desc->lock);
++	mutex_init(&desc->rlock);
++	mutex_init(&desc->wlock);
+ 	spin_lock_init(&desc->iuspin);
+ 	init_waitqueue_head(&desc->wait);
+ 	desc->wMaxCommand = maxcom;
+@@ -716,7 +725,7 @@ next_desc:
+ 		goto err;
+ 
+ 	desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
+-					 desc->bMaxPacketSize0,
++					 desc->wMaxCommand,
+ 					 GFP_KERNEL,
+ 					 &desc->response->transfer_dma);
+ 	if (!desc->inbuf)
+@@ -779,11 +788,13 @@ static void wdm_disconnect(struct usb_interface *intf)
+ 	/* to terminate pending flushes */
+ 	clear_bit(WDM_IN_USE, &desc->flags);
+ 	spin_unlock_irqrestore(&desc->iuspin, flags);
+-	mutex_lock(&desc->lock);
++	wake_up_all(&desc->wait);
++	mutex_lock(&desc->rlock);
++	mutex_lock(&desc->wlock);
+ 	kill_urbs(desc);
+ 	cancel_work_sync(&desc->rxwork);
+-	mutex_unlock(&desc->lock);
+-	wake_up_all(&desc->wait);
++	mutex_unlock(&desc->wlock);
++	mutex_unlock(&desc->rlock);
+ 	if (!desc->count)
+ 		cleanup(desc);
+ 	mutex_unlock(&wdm_mutex);
+@@ -798,8 +809,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ 	dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
+ 
+ 	/* if this is an autosuspend the caller does the locking */
+-	if (!(message.event & PM_EVENT_AUTO))
+-		mutex_lock(&desc->lock);
++	if (!(message.event & PM_EVENT_AUTO)) {
++		mutex_lock(&desc->rlock);
++		mutex_lock(&desc->wlock);
++	}
+ 	spin_lock_irq(&desc->iuspin);
+ 
+ 	if ((message.event & PM_EVENT_AUTO) &&
+@@ -815,8 +828,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ 		kill_urbs(desc);
+ 		cancel_work_sync(&desc->rxwork);
+ 	}
+-	if (!(message.event & PM_EVENT_AUTO))
+-		mutex_unlock(&desc->lock);
++	if (!(message.event & PM_EVENT_AUTO)) {
++		mutex_unlock(&desc->wlock);
++		mutex_unlock(&desc->rlock);
++	}
+ 
+ 	return rv;
+ }
+@@ -854,7 +869,8 @@ static int wdm_pre_reset(struct usb_interface *intf)
+ {
+ 	struct wdm_device *desc = usb_get_intfdata(intf);
+ 
+-	mutex_lock(&desc->lock);
++	mutex_lock(&desc->rlock);
++	mutex_lock(&desc->wlock);
+ 	kill_urbs(desc);
+ 
+ 	/*
+@@ -876,7 +892,8 @@ static int wdm_post_reset(struct usb_interface *intf)
+ 	int rv;
+ 
+ 	rv = recover_from_urb_loss(desc);
+-	mutex_unlock(&desc->lock);
++	mutex_unlock(&desc->wlock);
++	mutex_unlock(&desc->rlock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index f380bf9..bc7f166 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ 	 */
+ 	if (pdata->init && pdata->init(pdev)) {
+ 		retval = -ENODEV;
+-		goto err3;
++		goto err4;
+ 	}
+ 
+ 	/* Enable USB controller, 83xx or 8536 */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index c0c5d6c..edcedc4 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1218,6 +1218,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
+  *
+  * Returns a zero-based port number, which is suitable for indexing into each of
+  * the split roothubs' port arrays and bus state arrays.
++ * Add one to it in order to call xhci_find_slot_id_by_port.
+  */
+ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
+ 		struct xhci_hcd *xhci, u32 port_id)
+@@ -1340,7 +1341,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 			temp |= PORT_LINK_STROBE | XDEV_U0;
+ 			xhci_writel(xhci, temp, port_array[faked_port_index]);
+ 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+-					faked_port_index);
++					faked_port_index + 1);
+ 			if (!slot_id) {
+ 				xhci_dbg(xhci, "slot_id is zero\n");
+ 				goto cleanup;
+@@ -3381,7 +3382,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		/* Check TD length */
+ 		if (running_total != td_len) {
+ 			xhci_err(xhci, "ISOC TD length unmatch\n");
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto cleanup;
+ 		}
+ 	}
+ 
+diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
+index 417b8f2..59689fa 100644
+--- a/drivers/usb/misc/usbsevseg.c
++++ b/drivers/usb/misc/usbsevseg.c
+@@ -24,7 +24,7 @@
+ 
+ #define VENDOR_ID	0x0fc5
+ #define PRODUCT_ID	0x1227
+-#define MAXLEN		6
++#define MAXLEN		8
+ 
+ /* table of devices that work with this driver */
+ static const struct usb_device_id id_table[] = {
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a1a324b..a515237 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *,
+ 	struct usb_serial_port *port);
+ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ 	unsigned int *cflagp, unsigned int *baudp);
++static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
++							struct ktermios *);
+ static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
+ 							struct ktermios*);
+ static int cp210x_tiocmget(struct tty_struct *);
+@@ -138,6 +140,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ 	{ } /* Terminating Entry */
+ };
+@@ -201,6 +204,8 @@ static struct usb_serial_driver cp210x_device = {
+ #define CP210X_EMBED_EVENTS	0x15
+ #define CP210X_GET_EVENTSTATE	0x16
+ #define CP210X_SET_CHARS	0x19
++#define CP210X_GET_BAUDRATE	0x1D
++#define CP210X_SET_BAUDRATE	0x1E
+ 
+ /* CP210X_IFC_ENABLE */
+ #define UART_ENABLE		0x0001
+@@ -354,8 +359,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
+  * Quantises the baud rate as per AN205 Table 1
+  */
+ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
+-	if      (baud <= 56)       baud = 0;
+-	else if (baud <= 300)      baud = 300;
++	if (baud <= 300)
++		baud = 300;
+ 	else if (baud <= 600)      baud = 600;
+ 	else if (baud <= 1200)     baud = 1200;
+ 	else if (baud <= 1800)     baud = 1800;
+@@ -383,17 +388,15 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
+ 	else if (baud <= 491520)   baud = 460800;
+ 	else if (baud <= 567138)   baud = 500000;
+ 	else if (baud <= 670254)   baud = 576000;
+-	else if (baud <= 1053257)  baud = 921600;
+-	else if (baud <= 1474560)  baud = 1228800;
+-	else if (baud <= 2457600)  baud = 1843200;
+-	else                       baud = 3686400;
++	else if (baud < 1000000)
++		baud = 921600;
++	else if (baud > 2000000)
++		baud = 2000000;
+ 	return baud;
+ }
+ 
+ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+-	int result;
+-
+ 	dbg("%s - port %d", __func__, port->number);
+ 
+ 	if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
+@@ -402,13 +405,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		return -EPROTO;
+ 	}
+ 
+-	result = usb_serial_generic_open(tty, port);
+-	if (result)
+-		return result;
+-
+ 	/* Configure the termios structure */
+ 	cp210x_get_termios(tty, port);
+-	return 0;
++
++	/* The baud rate must be initialised on cp2104 */
++	if (tty)
++		cp210x_change_speed(tty, port, NULL);
++
++	return usb_serial_generic_open(tty, port);
+ }
+ 
+ static void cp210x_close(struct usb_serial_port *port)
+@@ -460,10 +464,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ 
+ 	dbg("%s - port %d", __func__, port->number);
+ 
+-	cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
+-	/* Convert to baudrate */
+-	if (baud)
+-		baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
++	cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
+ 
+ 	dbg("%s - baud rate = %d", __func__, baud);
+ 	*baudp = baud;
+@@ -577,11 +578,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ 	*cflagp = cflag;
+ }
+ 
++/*
++ * CP2101 supports the following baud rates:
++ *
++ *	300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
++ *	38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
++ *
++ * CP2102 and CP2103 support the following additional rates:
++ *
++ *	4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
++ *	576000
++ *
++ * The device will map a requested rate to a supported one, but the result
++ * of requests for rates greater than 1053257 is undefined (see AN205).
++ *
++ * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
++ * respectively, with an error less than 1%. The actual rates are determined
++ * by
++ *
++ *	div = round(freq / (2 x prescale x request))
++ *	actual = freq / (2 x prescale x div)
++ *
++ * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps
++ * or 1 otherwise.
++ * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1
++ * otherwise.
++ */
++static void cp210x_change_speed(struct tty_struct *tty,
++		struct usb_serial_port *port, struct ktermios *old_termios)
++{
++	u32 baud;
++
++	baud = tty->termios->c_ospeed;
++
++	/* This maps the requested rate to a rate valid on cp2102 or cp2103,
++	 * or to an arbitrary rate in [1M,2M].
++	 *
++	 * NOTE: B0 is not implemented.
++	 */
++	baud = cp210x_quantise_baudrate(baud);
++
++	dbg("%s - setting baud rate to %u", __func__, baud);
++	if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
++							sizeof(baud))) {
++		dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
++		if (old_termios)
++			baud = old_termios->c_ospeed;
++		else
++			baud = 9600;
++	}
++
++	tty_encode_baud_rate(tty, baud, baud);
++}
++
+ static void cp210x_set_termios(struct tty_struct *tty,
+ 		struct usb_serial_port *port, struct ktermios *old_termios)
+ {
+ 	unsigned int cflag, old_cflag;
+-	unsigned int baud = 0, bits;
++	unsigned int bits;
+ 	unsigned int modem_ctl[4];
+ 
+ 	dbg("%s - port %d", __func__, port->number);
+@@ -592,20 +646,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 	tty->termios->c_cflag &= ~CMSPAR;
+ 	cflag = tty->termios->c_cflag;
+ 	old_cflag = old_termios->c_cflag;
+-	baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
+-
+-	/* If the baud rate is to be updated*/
+-	if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
+-		dbg("%s - Setting baud rate to %d baud", __func__,
+-				baud);
+-		if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
+-					((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
+-			dbg("Baud rate requested not supported by device");
+-			baud = tty_termios_baud_rate(old_termios);
+-		}
+-	}
+-	/* Report back the resulting baud rate */
+-	tty_encode_baud_rate(tty, baud, baud);
++
++	if (tty->termios->c_ospeed != old_termios->c_ospeed)
++		cp210x_change_speed(tty, port, old_termios);
+ 
+ 	/* If the number of data bits is to be updated */
+ 	if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b02fd50..a872cc2 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -796,6 +796,7 @@ static struct usb_device_id id_table_combined [] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) },
+ 	{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ 	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+@@ -804,6 +805,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+ 	{ USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
+ 	{ USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
+@@ -840,6 +843,7 @@ static struct usb_device_id id_table_combined [] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
++	{ USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+ 	{ },					/* Optional parameter entry */
+ 	{ }					/* Terminating entry */
+ };
+@@ -1326,8 +1330,7 @@ static int set_serial_info(struct tty_struct *tty,
+ 		goto check_and_exit;
+ 	}
+ 
+-	if ((new_serial.baud_base != priv->baud_base) &&
+-	    (new_serial.baud_base < 9600)) {
++	if (new_serial.baud_base != priv->baud_base) {
+ 		mutex_unlock(&priv->cfg_lock);
+ 		return -EINVAL;
+ 	}
+@@ -1816,6 +1819,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
+ 
+ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
++	struct ktermios dummy;
+ 	struct usb_device *dev = port->serial->dev;
+ 	struct ftdi_private *priv = usb_get_serial_port_data(port);
+ 	int result;
+@@ -1834,8 +1838,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	   This is same behaviour as serial.c/rs_open() - Kuba */
+ 
+ 	/* ftdi_set_termios  will send usb control messages */
+-	if (tty)
+-		ftdi_set_termios(tty, port, tty->termios);
++	if (tty) {
++		memset(&dummy, 0, sizeof(dummy));
++		ftdi_set_termios(tty, port, &dummy);
++	}
+ 
+ 	/* Start reading from the device */
+ 	result = usb_serial_generic_open(tty, port);
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 055b64e..76d4f31 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -39,6 +39,13 @@
+ /* www.candapter.com Ewert Energy Systems CANdapter device */
+ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+ 
++/*
++ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
++ * http://processors.wiki.ti.com/index.php/XDS100
++ * http://beagleboard.org/bone
++ */
++#define TI_XDS100V2_PID		0xa6d0
++
+ #define FTDI_NXTCAM_PID		0xABB8 /* NXTCam for Mindstorms NXT */
+ 
+ /* US Interface Navigator (http://www.usinterface.com/) */
+@@ -525,6 +532,12 @@
+ #define ADI_GNICEPLUS_PID	0xF001
+ 
+ /*
++ * Hornby Elite
++ */
++#define HORNBY_VID		0x04D8
++#define HORNBY_ELITE_PID	0x000A
++
++/*
+  * RATOC REX-USB60F
+  */
+ #define RATOC_VENDOR_ID		0x0584
+@@ -1168,3 +1181,9 @@
+  */
+ /* TagTracer MIFARE*/
+ #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID	0xF7C0
++
++/*
++ * Rainforest Automation
++ */
++/* ZigBee controller */
++#define FTDI_RF_R106		0x8A28
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 0aac00a..8a90d58 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2677,15 +2677,7 @@ cleanup:
+ 
+ static void edge_disconnect(struct usb_serial *serial)
+ {
+-	int i;
+-	struct edgeport_port *edge_port;
+-
+ 	dbg("%s", __func__);
+-
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		edge_port = usb_get_serial_port_data(serial->port[i]);
+-		edge_remove_sysfs_attrs(edge_port->port);
+-	}
+ }
+ 
+ static void edge_release(struct usb_serial *serial)
+@@ -2764,6 +2756,7 @@ static struct usb_serial_driver edgeport_1port_device = {
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
+ 	.port_probe		= edge_create_sysfs_attrs,
++	.port_remove		= edge_remove_sysfs_attrs,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+@@ -2795,6 +2788,7 @@ static struct usb_serial_driver edgeport_2port_device = {
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
+ 	.port_probe		= edge_create_sysfs_attrs,
++	.port_remove		= edge_remove_sysfs_attrs,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c96b6b6..2a9ed6e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -480,6 +480,10 @@ static void option_instat_callback(struct urb *urb);
+ #define ZD_VENDOR_ID				0x0685
+ #define ZD_PRODUCT_7000				0x7000
+ 
++/* LG products */
++#define LG_VENDOR_ID				0x1004
++#define LG_PRODUCT_L02C				0x618f
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -1183,6 +1187,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index 30b73e6..a348198 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -36,6 +36,7 @@
+ #define UTSTARCOM_PRODUCT_UM175_V1		0x3712
+ #define UTSTARCOM_PRODUCT_UM175_V2		0x3714
+ #define UTSTARCOM_PRODUCT_UM175_ALLTEL		0x3715
++#define PANTECH_PRODUCT_UML190_VZW		0x3716
+ #define PANTECH_PRODUCT_UML290_VZW		0x3718
+ 
+ /* CMOTECH devices */
+@@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) },  /* NMEA */
++	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) },  /* WMC */
++	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },  /* DIAG */
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 7cf5c3e..c6602d2 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
+ 			(unsigned long long)(extent_base + extent_offset), rc);
+ 		goto out;
+ 	}
+-	if (unlikely(ecryptfs_verbosity > 0)) {
+-		ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
+-				"with iv:\n");
+-		ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
+-		ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
+-				"encryption:\n");
+-		ecryptfs_dump_hex((char *)
+-				  (page_address(page)
+-				   + (extent_offset * crypt_stat->extent_size)),
+-				  8);
+-	}
+ 	rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
+ 					  page, (extent_offset
+ 						 * crypt_stat->extent_size),
+@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
+ 		goto out;
+ 	}
+ 	rc = 0;
+-	if (unlikely(ecryptfs_verbosity > 0)) {
+-		ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
+-			"rc = [%d]\n",
+-			(unsigned long long)(extent_base + extent_offset), rc);
+-		ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
+-				"encryption:\n");
+-		ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
+-	}
+ out:
+ 	return rc;
+ }
+@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
+ 			(unsigned long long)(extent_base + extent_offset), rc);
+ 		goto out;
+ 	}
+-	if (unlikely(ecryptfs_verbosity > 0)) {
+-		ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
+-				"with iv:\n");
+-		ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
+-		ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
+-				"decryption:\n");
+-		ecryptfs_dump_hex((char *)
+-				  (page_address(enc_extent_page)
+-				   + (extent_offset * crypt_stat->extent_size)),
+-				  8);
+-	}
+ 	rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
+ 					  (extent_offset
+ 					   * crypt_stat->extent_size),
+@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
+ 		goto out;
+ 	}
+ 	rc = 0;
+-	if (unlikely(ecryptfs_verbosity > 0)) {
+-		ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
+-			"rc = [%d]\n",
+-			(unsigned long long)(extent_base + extent_offset), rc);
+-		ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
+-				"decryption:\n");
+-		ecryptfs_dump_hex((char *)(page_address(page)
+-					   + (extent_offset
+-					      * crypt_stat->extent_size)), 8);
+-	}
+ out:
+ 	return rc;
+ }
+@@ -1618,7 +1578,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ 		rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
+ 		if (rc) {
+ 			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
+-			       "file header region or xattr region\n");
++			       "file header region or xattr region, inode %lu\n",
++				ecryptfs_inode->i_ino);
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+@@ -1627,7 +1588,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ 						ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
+ 		if (rc) {
+ 			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
+-			       "file xattr region either\n");
++			       "file xattr region either, inode %lu\n",
++				ecryptfs_inode->i_ino);
+ 			rc = -EINVAL;
+ 		}
+ 		if (crypt_stat->mount_crypt_stat->flags
+@@ -1638,7 +1600,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ 			       "crypto metadata only in the extended attribute "
+ 			       "region, but eCryptfs was mounted without "
+ 			       "xattr support enabled. eCryptfs will not treat "
+-			       "this like an encrypted file.\n");
++			       "this like an encrypted file, inode %lu\n",
++				ecryptfs_inode->i_ino);
+ 			rc = -EINVAL;
+ 		}
+ 	}
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 4a4fad7f..e3562f2 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -854,18 +854,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
+ 		size_t num_zeros = (PAGE_CACHE_SIZE
+ 				    - (ia->ia_size & ~PAGE_CACHE_MASK));
+ 
+-
+-		/*
+-		 * XXX(truncate) this should really happen at the begginning
+-		 * of ->setattr.  But the code is too messy to that as part
+-		 * of a larger patch.  ecryptfs is also totally missing out
+-		 * on the inode_change_ok check at the beginning of
+-		 * ->setattr while would include this.
+-		 */
+-		rc = inode_newsize_ok(inode, ia->ia_size);
+-		if (rc)
+-			goto out;
+-
+ 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
+ 			truncate_setsize(inode, ia->ia_size);
+ 			lower_ia->ia_size = ia->ia_size;
+@@ -915,6 +903,28 @@ out:
+ 	return rc;
+ }
+ 
++static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
++{
++	struct ecryptfs_crypt_stat *crypt_stat;
++	loff_t lower_oldsize, lower_newsize;
++
++	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
++	lower_oldsize = upper_size_to_lower_size(crypt_stat,
++						 i_size_read(inode));
++	lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
++	if (lower_newsize > lower_oldsize) {
++		/*
++		 * The eCryptfs inode and the new *lower* size are mixed here
++		 * because we may not have the lower i_mutex held and/or it may
++		 * not be appropriate to call inode_newsize_ok() with inodes
++		 * from other filesystems.
++		 */
++		return inode_newsize_ok(inode, lower_newsize);
++	}
++
++	return 0;
++}
++
+ /**
+  * ecryptfs_truncate
+  * @dentry: The ecryptfs layer dentry
+@@ -931,6 +941,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+ 	struct iattr lower_ia = { .ia_valid = 0 };
+ 	int rc;
+ 
++	rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
++	if (rc)
++		return rc;
++
+ 	rc = truncate_upper(dentry, &ia, &lower_ia);
+ 	if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
+ 		struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+@@ -1012,6 +1026,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ 		}
+ 	}
+ 	mutex_unlock(&crypt_stat->cs_mutex);
++
++	rc = inode_change_ok(inode, ia);
++	if (rc)
++		goto out;
++	if (ia->ia_valid & ATTR_SIZE) {
++		rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
++		if (rc)
++			goto out;
++	}
++
+ 	if (S_ISREG(inode->i_mode)) {
+ 		rc = filemap_write_and_wait(inode->i_mapping);
+ 		if (rc)
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index 940a82e..0dc5a3d 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+ 	ssize_t sz = 0;
+ 	char *data;
+ 	uid_t euid = current_euid();
++	unsigned char packet_size_peek[3];
+ 	int rc;
+ 
+-	if (count == 0)
++	if (count == 0) {
+ 		goto out;
++	} else if (count == (1 + 4)) {
++		/* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
++		goto memdup;
++	} else if (count < (1 + 4 + 1)
++		   || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
++			       + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
++		printk(KERN_WARNING "%s: Acceptable packet size range is "
++		       "[%d-%lu], but amount of data written is [%zu].",
++		       __func__, (1 + 4 + 1),
++		       (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
++			+ ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
++		return -EINVAL;
++	}
++
++	if (copy_from_user(packet_size_peek, (buf + 1 + 4),
++			   sizeof(packet_size_peek))) {
++		printk(KERN_WARNING "%s: Error while inspecting packet size\n",
++		       __func__);
++		return -EFAULT;
++	}
++
++	rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
++					  &packet_size_length);
++	if (rc) {
++		printk(KERN_WARNING "%s: Error parsing packet length; "
++		       "rc = [%d]\n", __func__, rc);
++		return rc;
++	}
++
++	if ((1 + 4 + packet_size_length + packet_size) != count) {
++		printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
++		       packet_size);
++		return -EINVAL;
++	}
+ 
++memdup:
+ 	data = memdup_user(buf, count);
+ 	if (IS_ERR(data)) {
+ 		printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
+@@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+ 		}
+ 		memcpy(&counter_nbo, &data[i], 4);
+ 		seq = be32_to_cpu(counter_nbo);
+-		i += 4;
+-		rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
+-						  &packet_size_length);
+-		if (rc) {
+-			printk(KERN_WARNING "%s: Error parsing packet length; "
+-			       "rc = [%d]\n", __func__, rc);
+-			goto out_free;
+-		}
+-		i += packet_size_length;
+-		if ((1 + 4 + packet_size_length + packet_size) != count) {
+-			printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
+-			       " + packet_size([%zd]))([%zd]) != "
+-			       "count([%zd]). Invalid packet format.\n",
+-			       __func__, packet_size_length, packet_size,
+-			       (1 + packet_size_length + packet_size), count);
+-			goto out_free;
+-		}
++		i += 4 + packet_size_length;
+ 		rc = ecryptfs_miscdev_response(&data[i], packet_size,
+ 					       euid, current_user_ns(),
+ 					       task_pid(current), seq);
+diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
+index 3745f7c..54eb14c 100644
+--- a/fs/ecryptfs/read_write.c
++++ b/fs/ecryptfs/read_write.c
+@@ -132,6 +132,11 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ 		size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+ 		size_t total_remaining_bytes = ((offset + size) - pos);
+ 
++		if (fatal_signal_pending(current)) {
++			rc = -EINTR;
++			break;
++		}
++
+ 		if (num_bytes > total_remaining_bytes)
+ 			num_bytes = total_remaining_bytes;
+ 		if (pos < offset) {
+@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ 		}
+ 		pos += num_bytes;
+ 	}
+-	if ((offset + size) > ecryptfs_file_size) {
+-		i_size_write(ecryptfs_inode, (offset + size));
++	if (pos > ecryptfs_file_size) {
++		i_size_write(ecryptfs_inode, pos);
+ 		if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
+-			rc = ecryptfs_write_inode_size_to_metadata(
++			int rc2;
++
++			rc2 = ecryptfs_write_inode_size_to_metadata(
+ 								ecryptfs_inode);
+-			if (rc) {
++			if (rc2) {
+ 				printk(KERN_ERR	"Problem with "
+ 				       "ecryptfs_write_inode_size_to_metadata; "
+-				       "rc = [%d]\n", rc);
++				       "rc = [%d]\n", rc2);
++				if (!rc)
++					rc = rc2;
+ 				goto out;
+ 			}
+ 		}
+diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
+index 244e797..572494f 100644
+--- a/fs/xfs/linux-2.6/xfs_discard.c
++++ b/fs/xfs/linux-2.6/xfs_discard.c
+@@ -68,7 +68,7 @@ xfs_trim_extents(
+ 	 * Look up the longest btree in the AGF and start with it.
+ 	 */
+ 	error = xfs_alloc_lookup_le(cur, 0,
+-				    XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
++			    be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
+ 	if (error)
+ 		goto out_del_cursor;
+ 
+@@ -84,7 +84,7 @@ xfs_trim_extents(
+ 		if (error)
+ 			goto out_del_cursor;
+ 		XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+-		ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
++		ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
+ 
+ 		/*
+ 		 * Too small?  Give up.
+diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
+index 6cc4d41..59509ae 100644
+--- a/fs/xfs/xfs_vnodeops.c
++++ b/fs/xfs/xfs_vnodeops.c
+@@ -554,7 +554,8 @@ xfs_readlink(
+ 			 __func__, (unsigned long long) ip->i_ino,
+ 			 (long long) pathlen);
+ 		ASSERT(0);
+-		return XFS_ERROR(EFSCORRUPTED);
++		error = XFS_ERROR(EFSCORRUPTED);
++		goto out;
+ 	}
+ 
+ 
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 738b3a5..40aaebf 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1323,6 +1323,7 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv);
+ extern int drm_authmagic(struct drm_device *dev, void *data,
+ 			 struct drm_file *file_priv);
++extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
+ 
+ /* Cache management (drm_cache.c) */
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
+index 3419bf5..d55f434 100644
+--- a/include/net/netns/generic.h
++++ b/include/net/netns/generic.h
+@@ -41,6 +41,7 @@ static inline void *net_generic(const struct net *net, int id)
+ 	ptr = ng->ptr[id - 1];
+ 	rcu_read_unlock();
+ 
++	BUG_ON(!ptr);
+ 	return ptr;
+ }
+ #endif
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 084982f..3fc4708 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -318,8 +318,10 @@ static int check_syslog_permissions(int type, bool from_file)
+ 			return 0;
+ 		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+ 		if (capable(CAP_SYS_ADMIN)) {
+-			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+-				 "but no CAP_SYSLOG (deprecated).\n");
++			printk_once(KERN_WARNING "%s (%d): "
++				 "Attempt to access syslog with CAP_SYS_ADMIN "
++				 "but no CAP_SYSLOG (deprecated).\n",
++				 current->comm, task_pid_nr(current));
+ 			return 0;
+ 		}
+ 		return -EPERM;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index ef9271b..9f8e2e1 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -952,7 +952,7 @@ struct ftrace_func_probe {
+ };
+ 
+ enum {
+-	FTRACE_ENABLE_CALLS		= (1 << 0),
++	FTRACE_UPDATE_CALLS		= (1 << 0),
+ 	FTRACE_DISABLE_CALLS		= (1 << 1),
+ 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
+ 	FTRACE_START_FUNC_RET		= (1 << 3),
+@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+ 	return NULL;
+ }
+ 
++static void
++ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
++static void
++ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
++
+ static int
+-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
++ftrace_hash_move(struct ftrace_ops *ops, int enable,
++		 struct ftrace_hash **dst, struct ftrace_hash *src)
+ {
+ 	struct ftrace_func_entry *entry;
+ 	struct hlist_node *tp, *tn;
+@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ 	unsigned long key;
+ 	int size = src->count;
+ 	int bits = 0;
++	int ret;
+ 	int i;
+ 
+ 	/*
++	 * Remove the current set, update the hash and add
++	 * them back.
++	 */
++	ftrace_hash_rec_disable(ops, enable);
++
++	/*
+ 	 * If the new source is empty, just free dst and assign it
+ 	 * the empty_hash.
+ 	 */
+@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ 	if (bits > FTRACE_HASH_MAX_BITS)
+ 		bits = FTRACE_HASH_MAX_BITS;
+ 
++	ret = -ENOMEM;
+ 	new_hash = alloc_ftrace_hash(bits);
+ 	if (!new_hash)
+-		return -ENOMEM;
++		goto out;
+ 
+ 	size = 1 << src->size_bits;
+ 	for (i = 0; i < size; i++) {
+@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ 	rcu_assign_pointer(*dst, new_hash);
+ 	free_ftrace_hash_rcu(old_hash);
+ 
+-	return 0;
++	ret = 0;
++ out:
++	/*
++	 * Enable regardless of ret:
++	 *  On success, we enable the new hash.
++	 *  On failure, we re-enable the original hash.
++	 */
++	ftrace_hash_rec_enable(ops, enable);
++
++	return ret;
+ }
+ 
+ /*
+@@ -1498,7 +1521,7 @@ int ftrace_text_reserved(void *start, void *end)
+ 
+ 
+ static int
+-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
++__ftrace_replace_code(struct dyn_ftrace *rec, int update)
+ {
+ 	unsigned long ftrace_addr;
+ 	unsigned long flag = 0UL;
+@@ -1506,17 +1529,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ 	ftrace_addr = (unsigned long)FTRACE_ADDR;
+ 
+ 	/*
+-	 * If we are enabling tracing:
++	 * If we are updating calls:
+ 	 *
+ 	 *   If the record has a ref count, then we need to enable it
+ 	 *   because someone is using it.
+ 	 *
+ 	 *   Otherwise we make sure its disabled.
+ 	 *
+-	 * If we are disabling tracing, then disable all records that
++	 * If we are disabling calls, then disable all records that
+ 	 * are enabled.
+ 	 */
+-	if (enable && (rec->flags & ~FTRACE_FL_MASK))
++	if (update && (rec->flags & ~FTRACE_FL_MASK))
+ 		flag = FTRACE_FL_ENABLED;
+ 
+ 	/* If the state of this record hasn't changed, then do nothing */
+@@ -1532,7 +1555,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ 	return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+ 
+-static void ftrace_replace_code(int enable)
++static void ftrace_replace_code(int update)
+ {
+ 	struct dyn_ftrace *rec;
+ 	struct ftrace_page *pg;
+@@ -1546,7 +1569,7 @@ static void ftrace_replace_code(int enable)
+ 		if (rec->flags & FTRACE_FL_FREE)
+ 			continue;
+ 
+-		failed = __ftrace_replace_code(rec, enable);
++		failed = __ftrace_replace_code(rec, update);
+ 		if (failed) {
+ 			ftrace_bug(failed, rec->ip);
+ 			/* Stop processing */
+@@ -1596,7 +1619,7 @@ static int __ftrace_modify_code(void *data)
+ {
+ 	int *command = data;
+ 
+-	if (*command & FTRACE_ENABLE_CALLS)
++	if (*command & FTRACE_UPDATE_CALLS)
+ 		ftrace_replace_code(1);
+ 	else if (*command & FTRACE_DISABLE_CALLS)
+ 		ftrace_replace_code(0);
+@@ -1652,7 +1675,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ 		return -ENODEV;
+ 
+ 	ftrace_start_up++;
+-	command |= FTRACE_ENABLE_CALLS;
++	command |= FTRACE_UPDATE_CALLS;
+ 
+ 	/* ops marked global share the filter hashes */
+ 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+@@ -1704,8 +1727,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 	if (ops != &global_ops || !global_start_up)
+ 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ 
+-	if (!ftrace_start_up)
+-		command |= FTRACE_DISABLE_CALLS;
++	command |= FTRACE_UPDATE_CALLS;
+ 
+ 	if (saved_ftrace_func != ftrace_trace_function) {
+ 		saved_ftrace_func = ftrace_trace_function;
+@@ -1727,7 +1749,7 @@ static void ftrace_startup_sysctl(void)
+ 	saved_ftrace_func = NULL;
+ 	/* ftrace_start_up is true if we want ftrace running */
+ 	if (ftrace_start_up)
+-		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ }
+ 
+ static void ftrace_shutdown_sysctl(void)
+@@ -2877,7 +2899,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ 		ftrace_match_records(hash, buf, len);
+ 
+ 	mutex_lock(&ftrace_lock);
+-	ret = ftrace_hash_move(orig_hash, hash);
++	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
++	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
++	    && ftrace_enabled)
++		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
++
+ 	mutex_unlock(&ftrace_lock);
+ 
+ 	mutex_unlock(&ftrace_regex_lock);
+@@ -3060,18 +3086,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
+ 			orig_hash = &iter->ops->notrace_hash;
+ 
+ 		mutex_lock(&ftrace_lock);
+-		/*
+-		 * Remove the current set, update the hash and add
+-		 * them back.
+-		 */
+-		ftrace_hash_rec_disable(iter->ops, filter_hash);
+-		ret = ftrace_hash_move(orig_hash, iter->hash);
+-		if (!ret) {
+-			ftrace_hash_rec_enable(iter->ops, filter_hash);
+-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
+-			    && ftrace_enabled)
+-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+-		}
++		ret = ftrace_hash_move(iter->ops, filter_hash,
++				       orig_hash, iter->hash);
++		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
++		    && ftrace_enabled)
++			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
++
+ 		mutex_unlock(&ftrace_lock);
+ 	}
+ 	free_ftrace_hash(iter->hash);
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index dbdaa95..5ba4366 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -53,7 +53,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
+ 	struct caif_net *caifn;
+ 	BUG_ON(!net);
+ 	caifn = net_generic(net, caif_net_id);
+-	BUG_ON(!caifn);
+ 	return caifn->cfg;
+ }
+ EXPORT_SYMBOL(get_cfcnfg);
+@@ -63,7 +62,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
+ 	struct caif_net *caifn;
+ 	BUG_ON(!net);
+ 	caifn = net_generic(net, caif_net_id);
+-	BUG_ON(!caifn);
+ 	return &caifn->caifdevs;
+ }
+ 
+@@ -92,7 +90,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+ 	struct caif_device_entry *caifd;
+ 
+ 	caifdevs = caif_device_list(dev_net(dev));
+-	BUG_ON(!caifdevs);
+ 
+ 	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ 	if (!caifd)
+@@ -108,7 +105,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
+ 	struct caif_device_entry_list *caifdevs =
+ 	    caif_device_list(dev_net(dev));
+ 	struct caif_device_entry *caifd;
+-	BUG_ON(!caifdevs);
++
+ 	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+ 		if (caifd->netdev == dev)
+ 			return caifd;
+@@ -349,7 +346,7 @@ static struct notifier_block caif_device_notifier = {
+ static int caif_init_net(struct net *net)
+ {
+ 	struct caif_net *caifn = net_generic(net, caif_net_id);
+-	BUG_ON(!caifn);
++
+ 	INIT_LIST_HEAD(&caifn->caifdevs.list);
+ 	mutex_init(&caifn->caifdevs.lock);
+ 
+@@ -414,7 +411,7 @@ static int __init caif_device_init(void)
+ {
+ 	int result;
+ 
+-	result = register_pernet_device(&caif_net_ops);
++	result = register_pernet_subsys(&caif_net_ops);
+ 
+ 	if (result)
+ 		return result;
+@@ -427,7 +424,7 @@ static int __init caif_device_init(void)
+ 
+ static void __exit caif_device_exit(void)
+ {
+-	unregister_pernet_device(&caif_net_ops);
++	unregister_pernet_subsys(&caif_net_ops);
+ 	unregister_netdevice_notifier(&caif_device_notifier);
+ 	dev_remove_pack(&caif_packet_type);
+ }
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
+index 52fe33b..bca32d7 100644
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -313,7 +313,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
+ 	int err;
+ 	struct cfctrl_link_param param;
+ 	struct cfcnfg *cfg = get_cfcnfg(net);
+-	caif_assert(cfg != NULL);
+ 
+ 	rcu_read_lock();
+ 	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index ea489db..0b0211d 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -29,6 +29,20 @@ EXPORT_SYMBOL(init_net);
+ 
+ #define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
+ 
++static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
++
++static struct net_generic *net_alloc_generic(void)
++{
++	struct net_generic *ng;
++	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
++
++	ng = kzalloc(generic_size, GFP_KERNEL);
++	if (ng)
++		ng->len = max_gen_ptrs;
++
++	return ng;
++}
++
+ static int net_assign_generic(struct net *net, int id, void *data)
+ {
+ 	struct net_generic *ng, *old_ng;
+@@ -42,8 +56,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
+ 	if (old_ng->len >= id)
+ 		goto assign;
+ 
+-	ng = kzalloc(sizeof(struct net_generic) +
+-			id * sizeof(void *), GFP_KERNEL);
++	ng = net_alloc_generic();
+ 	if (ng == NULL)
+ 		return -ENOMEM;
+ 
+@@ -58,7 +71,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
+ 	 * the old copy for kfree after a grace period.
+ 	 */
+ 
+-	ng->len = id;
+ 	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
+ 
+ 	rcu_assign_pointer(net->gen, ng);
+@@ -159,18 +171,6 @@ out_undo:
+ 	goto out;
+ }
+ 
+-static struct net_generic *net_alloc_generic(void)
+-{
+-	struct net_generic *ng;
+-	size_t generic_size = sizeof(struct net_generic) +
+-		INITIAL_NET_GEN_PTRS * sizeof(void *);
+-
+-	ng = kzalloc(generic_size, GFP_KERNEL);
+-	if (ng)
+-		ng->len = INITIAL_NET_GEN_PTRS;
+-
+-	return ng;
+-}
+ 
+ #ifdef CONFIG_NET_NS
+ static struct kmem_cache *net_cachep;
+@@ -481,6 +481,7 @@ again:
+ 			}
+ 			return error;
+ 		}
++		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
+ 	}
+ 	error = __register_pernet_operations(list, ops);
+ 	if (error) {
+diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
+index c7056b2..36d1440 100644
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -369,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
+ 		if (err == -EINPROGRESS)
+ 			goto out;
+ 
+-		if (err == -EBUSY)
+-			err = NET_XMIT_DROP;
+ 		goto out_free;
+ 	}
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 69790aa..53b0125 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -630,7 +630,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ 	arg.iov[0].iov_len  = sizeof(rep.th);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+-	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
++	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
+ 	if (key) {
+ 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+ 				   (TCPOPT_NOP << 16) |
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 882e0b0..faf257b 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1134,11 +1134,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
+ 	sk_mem_uncharge(sk, len);
+ 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ 
+-	/* Any change of skb->len requires recalculation of tso
+-	 * factor and mss.
+-	 */
++	/* Any change of skb->len requires recalculation of tso factor. */
+ 	if (tcp_skb_pcount(skb) > 1)
+-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
++		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+ 
+ 	return 0;
+ }
+diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
+index 7a33aaa..4c0f894 100644
+--- a/net/ipv6/ah6.c
++++ b/net/ipv6/ah6.c
+@@ -581,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
+ 		if (err == -EINPROGRESS)
+ 			goto out;
+ 
+-		if (err == -EBUSY)
+-			err = NET_XMIT_DROP;
+ 		goto out_free;
+ 	}
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 296510a..51587a0 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1096,7 +1096,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	if (sk)
+-		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
++		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
+ #endif
+ 
+ 	if (th->ack)
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index b6466e7..858ca23 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ 	int rc;
+ 
+-	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+-		goto drop;
+-
+-	nf_reset(skb);
+-
+ 	/* Charge it to the socket, dropping if the queue is full. */
+ 	rc = sock_queue_rcv_skb(sk, skb);
+ 	if (rc < 0)
+diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
+index bb6ad81..424ff62 100644
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct rds_sock *rs;
+-	unsigned long flags;
+ 
+ 	if (!sk)
+ 		goto out;
+@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
+ 	rds_rdma_drop_keys(rs);
+ 	rds_notify_queue_get(rs, NULL);
+ 
+-	spin_lock_irqsave(&rds_sock_lock, flags);
++	spin_lock_bh(&rds_sock_lock);
+ 	list_del_init(&rs->rs_item);
+ 	rds_sock_count--;
+-	spin_unlock_irqrestore(&rds_sock_lock, flags);
++	spin_unlock_bh(&rds_sock_lock);
+ 
+ 	rds_trans_put(rs->rs_transport);
+ 
+@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
+ 
+ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
+ {
+-	unsigned long flags;
+ 	struct rds_sock *rs;
+ 
+ 	sock_init_data(sock, sk);
+@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
+ 	spin_lock_init(&rs->rs_rdma_lock);
+ 	rs->rs_rdma_keys = RB_ROOT;
+ 
+-	spin_lock_irqsave(&rds_sock_lock, flags);
++	spin_lock_bh(&rds_sock_lock);
+ 	list_add_tail(&rs->rs_item, &rds_sock_list);
+ 	rds_sock_count++;
+-	spin_unlock_irqrestore(&rds_sock_lock, flags);
++	spin_unlock_bh(&rds_sock_lock);
+ 
+ 	return 0;
+ }
+@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
+ {
+ 	struct rds_sock *rs;
+ 	struct rds_incoming *inc;
+-	unsigned long flags;
+ 	unsigned int total = 0;
+ 
+ 	len /= sizeof(struct rds_info_message);
+ 
+-	spin_lock_irqsave(&rds_sock_lock, flags);
++	spin_lock_bh(&rds_sock_lock);
+ 
+ 	list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ 		read_lock(&rs->rs_recv_lock);
+@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
+ 		read_unlock(&rs->rs_recv_lock);
+ 	}
+ 
+-	spin_unlock_irqrestore(&rds_sock_lock, flags);
++	spin_unlock_bh(&rds_sock_lock);
+ 
+ 	lens->nr = total;
+ 	lens->each = sizeof(struct rds_info_message);
+@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
+ {
+ 	struct rds_info_socket sinfo;
+ 	struct rds_sock *rs;
+-	unsigned long flags;
+ 
+ 	len /= sizeof(struct rds_info_socket);
+ 
+-	spin_lock_irqsave(&rds_sock_lock, flags);
++	spin_lock_bh(&rds_sock_lock);
+ 
+ 	if (len < rds_sock_count)
+ 		goto out;
+@@ -529,7 +525,7 @@ out:
+ 	lens->nr = rds_sock_count;
+ 	lens->each = sizeof(struct rds_info_socket);
+ 
+-	spin_unlock_irqrestore(&rds_sock_lock, flags);
++	spin_unlock_bh(&rds_sock_lock);
+ }
+ 
+ static void rds_exit(void)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index eb0a141..51412e1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -16419,6 +16419,7 @@ static const struct alc_config_preset alc861_presets[] = {
+ /* Pin config fixes */
+ enum {
+ 	PINFIX_FSC_AMILO_PI1505,
++	PINFIX_ASUS_A6RP,
+ };
+ 
+ static const struct alc_fixup alc861_fixups[] = {
+@@ -16430,9 +16431,19 @@ static const struct alc_fixup alc861_fixups[] = {
+ 			{ }
+ 		}
+ 	},
++	[PINFIX_ASUS_A6RP] = {
++		.type = ALC_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* node 0x0f VREF seems controlling the master output */
++			{ 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
++	SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP),
++	SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
+ 	SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
+ 	{}
+ };
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 0d8db75..43d88c7 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4162,13 +4162,15 @@ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
+ 	return 1;
+ }
+ 
+-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
++static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+ {
+ 	int i;
+ 	for (i = 0; i < cfg->hp_outs; i++)
+ 		if (cfg->hp_pins[i] == nid)
+ 			return 1; /* nid is a HP-Out */
+-
++	for (i = 0; i < cfg->line_outs; i++)
++		if (cfg->line_out_pins[i] == nid)
++			return 1; /* nid is a line-Out */
+ 	return 0; /* nid is not a HP-Out */
+ };
+ 
+@@ -4354,7 +4356,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ 			continue;
+ 		}
+ 
+-		if (is_nid_hp_pin(cfg, nid))
++		if (is_nid_out_jack_pin(cfg, nid))
+ 			continue; /* already has an unsol event */
+ 
+ 		pinctl = snd_hda_codec_read(codec, nid, 0,

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7	Sun Feb 12 11:58:18 2012	(r18686)
@@ -0,0 +1,6 @@
++ bugfix/all/stable/3.0.15.patch
+- bugfix/all/oom-fix-integer-overflow-of-points-in-oom_badness.patch
++ bugfix/all/stable/3.0.16.patch
++ bugfix/all/stable/3.0.17.patch
++ bugfix/all/stable/3.0.18.patch
++ bugfix/all/stable/3.0.19.patch