[kernel] r16145 - in dists/trunk/linux-2.6/debian: . patches/bugfix/all/stable patches/series

Maximilian Attems maks at alioth.debian.org
Sat Aug 14 11:27:04 UTC 2010


Author: maks
Date: Sat Aug 14 11:27:01 2010
New Revision: 16145

Log:
Add stable releases 2.6.35.1 and 2.6.35.2.

Both apply cleanly, without conflicts.
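
The new patches/series/1~experimental.2 file (listed under Added below, contents not shown in this message) would normally just enqueue the two stable patches. A minimal sketch, assuming the usual "+ <patch path>" format of the Debian linux-2.6 series files rather than the exact committed contents:

   + bugfix/all/stable/2.6.35.1.patch
   + bugfix/all/stable/2.6.35.2.patch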

Added:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.1.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.2.patch
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.2
Modified:
   dists/trunk/linux-2.6/debian/changelog

Modified: dists/trunk/linux-2.6/debian/changelog
==============================================================================
--- dists/trunk/linux-2.6/debian/changelog	Sat Aug 14 02:20:49 2010	(r16144)
+++ dists/trunk/linux-2.6/debian/changelog	Sat Aug 14 11:27:01 2010	(r16145)
@@ -1,6 +1,7 @@
 linux-2.6 (2.6.35-1~experimental.2) UNRELEASED; urgency=low
 
   * images: Nuke modules.devname on removal.
+  * Add stable 2.6.35.1 and 2.6.35.2.
 
  -- maximilian attems <maks at debian.org>  Tue, 04 Aug 2010 20:21:16 +0200
 

Added: dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.1.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.1.patch	Sat Aug 14 11:27:01 2010	(r16145)
@@ -0,0 +1,1617 @@
+diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
+index 71437c6..9ebbd31 100644
+--- a/arch/arm/plat-mxc/gpio.c
++++ b/arch/arm/plat-mxc/gpio.c
+@@ -214,13 +214,16 @@ static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
+ 	struct mxc_gpio_port *port =
+ 		container_of(chip, struct mxc_gpio_port, chip);
+ 	u32 l;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&port->lock, flags);
+ 	l = __raw_readl(port->base + GPIO_GDIR);
+ 	if (dir)
+ 		l |= 1 << offset;
+ 	else
+ 		l &= ~(1 << offset);
+ 	__raw_writel(l, port->base + GPIO_GDIR);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+@@ -229,9 +232,12 @@ static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 		container_of(chip, struct mxc_gpio_port, chip);
+ 	void __iomem *reg = port->base + GPIO_DR;
+ 	u32 l;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&port->lock, flags);
+ 	l = (__raw_readl(reg) & (~(1 << offset))) | (value << offset);
+ 	__raw_writel(l, reg);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
+@@ -285,6 +291,8 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
+ 		port[i].chip.base = i * 32;
+ 		port[i].chip.ngpio = 32;
+ 
++		spin_lock_init(&port[i].lock);
++
+ 		/* its a serious configuration bug when it fails */
+ 		BUG_ON( gpiochip_add(&port[i].chip) < 0 );
+ 
+diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
+index 894d2f8..6bd932c 100644
+--- a/arch/arm/plat-mxc/include/mach/gpio.h
++++ b/arch/arm/plat-mxc/include/mach/gpio.h
+@@ -36,6 +36,7 @@ struct mxc_gpio_port {
+ 	int virtual_irq_start;
+ 	struct gpio_chip chip;
+ 	u32 both_edges;
++	spinlock_t lock;
+ };
+ 
+ int mxc_gpio_init(struct mxc_gpio_port*, int);
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index 4c247e0..df971fa 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -1123,7 +1123,6 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
+  */
+ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ {
+-	static int posx;        /* for simple TAB-Simulation... */
+ 	unsigned int i;
+ 	unsigned long flags;
+ 
+@@ -1133,19 +1132,12 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ 			iodc_dbuf[i+0] = '\r';
+ 			iodc_dbuf[i+1] = '\n';
+ 			i += 2;
+-			posx = 0;
+ 			goto print;
+-		case '\t':
+-			while (posx & 7) {
+-				iodc_dbuf[i] = ' ';
+-				i++, posx++;
+-			}
+-			break;
+ 		case '\b':	/* BS */
+-			posx -= 2;
++			i--; /* overwrite last */
+ 		default:
+ 			iodc_dbuf[i] = str[i];
+-			i++, posx++;
++			i++;
+ 			break;
+ 		}
+ 	}
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 65d8d79..27dff06 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -927,7 +927,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
+ };
+ 
+ static const struct pv_time_ops xen_time_ops __initdata = {
+-	.sched_clock = xen_sched_clock,
++	.sched_clock = xen_clocksource_read,
+ };
+ 
+ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index b3c6c59..a86df42 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -155,45 +155,6 @@ static void do_stolen_accounting(void)
+ 	account_idle_ticks(ticks);
+ }
+ 
+-/*
+- * Xen sched_clock implementation.  Returns the number of unstolen
+- * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
+- * states.
+- */
+-unsigned long long xen_sched_clock(void)
+-{
+-	struct vcpu_runstate_info state;
+-	cycle_t now;
+-	u64 ret;
+-	s64 offset;
+-
+-	/*
+-	 * Ideally sched_clock should be called on a per-cpu basis
+-	 * anyway, so preempt should already be disabled, but that's
+-	 * not current practice at the moment.
+-	 */
+-	preempt_disable();
+-
+-	now = xen_clocksource_read();
+-
+-	get_runstate_snapshot(&state);
+-
+-	WARN_ON(state.state != RUNSTATE_running);
+-
+-	offset = now - state.state_entry_time;
+-	if (offset < 0)
+-		offset = 0;
+-
+-	ret = state.time[RUNSTATE_blocked] +
+-		state.time[RUNSTATE_running] +
+-		offset;
+-
+-	preempt_enable();
+-
+-	return ret;
+-}
+-
+-
+ /* Get the TSC speed from Xen */
+ unsigned long xen_tsc_khz(void)
+ {
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index ac9f798..c20a6c9 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -178,7 +178,7 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
+ 
+ 	default:
+ 		amd64_printk(KERN_ERR, "Unsupported family!\n");
+-		break;
++		return -EINVAL;
+ 	}
+ 	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
+ 			min_scrubrate);
+@@ -1430,7 +1430,7 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
+ 	u64 chan_off;
+ 
+ 	if (hi_range_sel) {
+-		if (!(dct_sel_base_addr & 0xFFFFF800) &&
++		if (!(dct_sel_base_addr & 0xFFFF0000) &&
+ 		   hole_valid && (sys_addr >= 0x100000000ULL))
+ 			chan_off = hole_off << 16;
+ 		else
+@@ -1679,7 +1679,7 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+ 	 * ganged. Otherwise @chan should already contain the channel at
+ 	 * this point.
+ 	 */
+-	if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
++	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ 		chan = get_channel_from_ecc_syndrome(mci, syndrome);
+ 
+ 	if (chan >= 0)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 5e21b31..8a84306 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -42,6 +42,7 @@
+ bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+ static void intel_update_watermarks(struct drm_device *dev);
+ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
++static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+ 
+ typedef struct {
+     /* given values */
+@@ -3403,6 +3404,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
++	/* Ensure that the cursor is valid for the new mode before changing... */
++	intel_crtc_update_cursor(crtc);
++
+ 	if (is_lvds && dev_priv->lvds_downclock_avail) {
+ 		has_reduced_clock = limit->find_pll(limit, crtc,
+ 							    dev_priv->lvds_downclock,
+@@ -3939,6 +3943,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
+ 	}
+ }
+ 
++/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
++static void intel_crtc_update_cursor(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	int x = intel_crtc->cursor_x;
++	int y = intel_crtc->cursor_y;
++	uint32_t base, pos;
++	bool visible;
++
++	pos = 0;
++
++	if (crtc->fb) {
++		base = intel_crtc->cursor_addr;
++		if (x > (int) crtc->fb->width)
++			base = 0;
++
++		if (y > (int) crtc->fb->height)
++			base = 0;
++	} else
++		base = 0;
++
++	if (x < 0) {
++		if (x + intel_crtc->cursor_width < 0)
++			base = 0;
++
++		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
++		x = -x;
++	}
++	pos |= x << CURSOR_X_SHIFT;
++
++	if (y < 0) {
++		if (y + intel_crtc->cursor_height < 0)
++			base = 0;
++
++		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
++		y = -y;
++	}
++	pos |= y << CURSOR_Y_SHIFT;
++
++	visible = base != 0;
++	if (!visible && !intel_crtc->cursor_visble)
++		return;
++
++	I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
++	if (intel_crtc->cursor_visble != visible) {
++		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
++		if (base) {
++			/* Hooray for CUR*CNTR differences */
++			if (IS_MOBILE(dev) || IS_I9XX(dev)) {
++				cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
++				cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++				cntl |= pipe << 28; /* Connect to correct pipe */
++			} else {
++				cntl &= ~(CURSOR_FORMAT_MASK);
++				cntl |= CURSOR_ENABLE;
++				cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
++			}
++		} else {
++			if (IS_MOBILE(dev) || IS_I9XX(dev)) {
++				cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
++				cntl |= CURSOR_MODE_DISABLE;
++			} else {
++				cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
++			}
++		}
++		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
++
++		intel_crtc->cursor_visble = visible;
++	}
++	/* and commit changes on next vblank */
++	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
++
++	if (visible)
++		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
++}
++
+ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 				 struct drm_file *file_priv,
+ 				 uint32_t handle,
+@@ -3949,11 +4032,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	struct drm_gem_object *bo;
+ 	struct drm_i915_gem_object *obj_priv;
+-	int pipe = intel_crtc->pipe;
+-	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+-	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+-	uint32_t temp = I915_READ(control);
+-	size_t addr;
++	uint32_t addr;
+ 	int ret;
+ 
+ 	DRM_DEBUG_KMS("\n");
+@@ -3961,12 +4040,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	/* if we want to turn off the cursor ignore width and height */
+ 	if (!handle) {
+ 		DRM_DEBUG_KMS("cursor off\n");
+-		if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+-			temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+-			temp |= CURSOR_MODE_DISABLE;
+-		} else {
+-			temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+-		}
+ 		addr = 0;
+ 		bo = NULL;
+ 		mutex_lock(&dev->struct_mutex);
+@@ -4008,7 +4081,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 
+ 		addr = obj_priv->gtt_offset;
+ 	} else {
+-		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
++		ret = i915_gem_attach_phys_object(dev, bo,
++						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+ 		if (ret) {
+ 			DRM_ERROR("failed to attach phys object\n");
+ 			goto fail_locked;
+@@ -4019,21 +4093,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	if (!IS_I9XX(dev))
+ 		I915_WRITE(CURSIZE, (height << 12) | width);
+ 
+-	/* Hooray for CUR*CNTR differences */
+-	if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+-		temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+-		temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+-		temp |= (pipe << 28); /* Connect to correct pipe */
+-	} else {
+-		temp &= ~(CURSOR_FORMAT_MASK);
+-		temp |= CURSOR_ENABLE;
+-		temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+-	}
+-
+  finish:
+-	I915_WRITE(control, temp);
+-	I915_WRITE(base, addr);
+-
+ 	if (intel_crtc->cursor_bo) {
+ 		if (dev_priv->info->cursor_needs_physical) {
+ 			if (intel_crtc->cursor_bo != bo)
+@@ -4047,6 +4107,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 
+ 	intel_crtc->cursor_addr = addr;
+ 	intel_crtc->cursor_bo = bo;
++	intel_crtc->cursor_width = width;
++	intel_crtc->cursor_height = height;
++
++	intel_crtc_update_cursor(crtc);
+ 
+ 	return 0;
+ fail_unpin:
+@@ -4060,34 +4124,12 @@ fail:
+ 
+ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ {
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	struct intel_framebuffer *intel_fb;
+-	int pipe = intel_crtc->pipe;
+-	uint32_t temp = 0;
+-	uint32_t adder;
+-
+-	if (crtc->fb) {
+-		intel_fb = to_intel_framebuffer(crtc->fb);
+-		intel_mark_busy(dev, intel_fb->obj);
+-	}
+-
+-	if (x < 0) {
+-		temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+-		x = -x;
+-	}
+-	if (y < 0) {
+-		temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+-		y = -y;
+-	}
+ 
+-	temp |= x << CURSOR_X_SHIFT;
+-	temp |= y << CURSOR_Y_SHIFT;
++	intel_crtc->cursor_x = x;
++	intel_crtc->cursor_y = y;
+ 
+-	adder = intel_crtc->cursor_addr;
+-	I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+-	I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++	intel_crtc_update_cursor(crtc);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 2f7970b..2702652 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -143,8 +143,6 @@ struct intel_crtc {
+ 	struct drm_crtc base;
+ 	enum pipe pipe;
+ 	enum plane plane;
+-	struct drm_gem_object *cursor_bo;
+-	uint32_t cursor_addr;
+ 	u8 lut_r[256], lut_g[256], lut_b[256];
+ 	int dpms_mode;
+ 	bool busy; /* is scanout buffer being updated frequently? */
+@@ -153,6 +151,12 @@ struct intel_crtc {
+ 	struct intel_overlay *overlay;
+ 	struct intel_unpin_work *unpin_work;
+ 	int fdi_lanes;
++
++	struct drm_gem_object *cursor_bo;
++	uint32_t cursor_addr;
++	int16_t cursor_x, cursor_y;
++	int16_t cursor_width, cursor_height;
++	bool cursor_visble;
+ };
+ 
+ #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index d7ad513..fe05ba2 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev,
+ 	    || rec->src_width < N_HORIZ_Y_TAPS*4)
+ 		return -EINVAL;
+ 
+-	/* check alingment constrains */
++	/* check alignment constraints */
+ 	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ 		case I915_OVERLAY_RGB:
+ 			/* not implemented */
+@@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev,
+ 		return -EINVAL;
+ 
+ 	/* stride checking */
+-	stride_mask = 63;
++	if (IS_I830(dev) || IS_845G(dev))
++		stride_mask = 255;
++	else
++		stride_mask = 63;
+ 
+ 	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index e100f69..bb3de01 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -869,7 +869,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
+ 	u32 tmp;
+ 
+ 	/* flush hdp cache so updates hit vram */
+-	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
++		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
++		u32 tmp;
++
++		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
++		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
++		 */
++		WREG32(HDP_DEBUG1, 0);
++		tmp = readl((void __iomem *)ptr);
++	} else
++		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ 
+ 	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
+ 	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+@@ -3512,5 +3522,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+  */
+ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+ {
+-	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
++	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
++	 */
++	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
++		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
++		u32 tmp;
++
++		WREG32(HDP_DEBUG1, 0);
++		tmp = readl((void __iomem *)ptr);
++	} else
++		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ }
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index 59c1f87..84bc28e 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -245,6 +245,7 @@
+ #define	HDP_NONSURFACE_SIZE				0x2C0C
+ #define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+ #define	HDP_TILING_CONFIG				0x2F3C
++#define HDP_DEBUG1                                      0x2F34
+ 
+ #define MC_VM_AGP_TOP					0x2184
+ #define MC_VM_AGP_BOT					0x2188
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index d5b9373..d33b6c9 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ 	bo->surface_reg = -1;
+ 	INIT_LIST_HEAD(&bo->list);
+ 
++retry:
+ 	radeon_ttm_placement_from_domain(bo, domain);
+ 	/* Kernel allocation are uninterruptible */
+ 	mutex_lock(&rdev->vram_mutex);
+@@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ 			&radeon_ttm_bo_destroy);
+ 	mutex_unlock(&rdev->vram_mutex);
+ 	if (unlikely(r != 0)) {
+-		if (r != -ERESTARTSYS)
++		if (r != -ERESTARTSYS) {
++			if (domain == RADEON_GEM_DOMAIN_VRAM) {
++				domain |= RADEON_GEM_DOMAIN_GTT;
++				goto retry;
++			}
+ 			dev_err(rdev->dev,
+ 				"object_init failed for (%lu, 0x%08X)\n",
+ 				size, domain);
++		}
+ 		return r;
+ 	}
+ 	*bo_ptr = bo;
+@@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
+ {
+ 	struct radeon_bo_list *lobj;
+ 	struct radeon_bo *bo;
++	u32 domain;
+ 	int r;
+ 
+ 	list_for_each_entry(lobj, head, list) {
+@@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
+ 	list_for_each_entry(lobj, head, list) {
+ 		bo = lobj->bo;
+ 		if (!bo->pin_count) {
+-			if (lobj->wdomain) {
+-				radeon_ttm_placement_from_domain(bo,
+-								lobj->wdomain);
+-			} else {
+-				radeon_ttm_placement_from_domain(bo,
+-								lobj->rdomain);
+-			}
++			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
++
++		retry:
++			radeon_ttm_placement_from_domain(bo, domain);
+ 			r = ttm_bo_validate(&bo->tbo, &bo->placement,
+ 						true, false, false);
+-			if (unlikely(r))
++			if (unlikely(r)) {
++				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
++					domain |= RADEON_GEM_DOMAIN_GTT;
++					goto retry;
++				}
+ 				return r;
++			}
+ 		}
+ 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ 		lobj->tiling_flags = bo->tiling_flags;
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index ce4ecbe..76c768b 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
+ 	struct drm_display_mode *mode1 = NULL;
+ 	struct rs690_watermark wm0;
+ 	struct rs690_watermark wm1;
+-	u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
++	u32 tmp;
++	u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
++	u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+ 	fixed20_12 a, b;
+ 
+@@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
+ 			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ 			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ 		}
+-		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+-		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+-		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+-		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ 	} else if (mode0) {
+ 		if (dfixed_trunc(wm0.dbpp) > 64)
+ 			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+@@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
+ 		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ 		if (rdev->disp_priority == 2)
+ 			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+-		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+-		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+-		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
+-			S_006D48_D2MODE_PRIORITY_A_OFF(1));
+-		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
+-			S_006D4C_D2MODE_PRIORITY_B_OFF(1));
+-	} else {
++	} else if (mode1) {
+ 		if (dfixed_trunc(wm1.dbpp) > 64)
+ 			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ 		else
+@@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
+ 		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ 		if (rdev->disp_priority == 2)
+ 			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+-		WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
+-			S_006548_D1MODE_PRIORITY_A_OFF(1));
+-		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
+-			S_00654C_D1MODE_PRIORITY_B_OFF(1));
+-		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+-		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ 	}
++
++	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
++	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
++	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
++	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ }
+ 
+ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index 0c9c169..7e4fbdb 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -925,7 +925,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+ 	struct drm_display_mode *mode1 = NULL;
+ 	struct rv515_watermark wm0;
+ 	struct rv515_watermark wm1;
+-	u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
++	u32 tmp;
++	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
++	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+ 	fixed20_12 a, b;
+ 
+@@ -999,10 +1001,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+ 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ 		}
+-		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+-		WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+-		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+-		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ 	} else if (mode0) {
+ 		if (dfixed_trunc(wm0.dbpp) > 64)
+ 			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+@@ -1032,11 +1030,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+ 		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ 		if (rdev->disp_priority == 2)
+ 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+-		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+-		WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+-		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
+-		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
+-	} else {
++	} else if (mode1) {
+ 		if (dfixed_trunc(wm1.dbpp) > 64)
+ 			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+ 		else
+@@ -1065,11 +1059,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+ 		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ 		if (rdev->disp_priority == 2)
+ 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+-		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
+-		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
+-		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+-		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ 	}
++
++	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
++	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
++	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
++	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ }
+ 
+ void rv515_bandwidth_update(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index b7fd820..1d6fb79 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -189,7 +189,10 @@ static void rv770_mc_program(struct radeon_device *rdev)
+ 		WREG32((0x2c20 + j), 0x00000000);
+ 		WREG32((0x2c24 + j), 0x00000000);
+ 	}
+-	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
++	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
++	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
++	 */
++	tmp = RREG32(HDP_DEBUG1);
+ 
+ 	rv515_mc_stop(rdev, &save);
+ 	if (r600_mc_wait_for_idle(rdev)) {
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 9506f8c..6111a02 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -128,6 +128,7 @@
+ #define	HDP_NONSURFACE_SIZE				0x2C0C
+ #define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+ #define	HDP_TILING_CONFIG				0x2F3C
++#define HDP_DEBUG1                                      0x2F34
+ 
+ #define MC_SHARED_CHMAP						0x2004
+ #define		NOOFCHAN_SHIFT					12
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 48c84a5..00e5fcac8 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -285,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev,
+ 	cdev->groups = enclosure_groups;
+ 
+ 	err = device_register(cdev);
+-	if (err)
+-		ERR_PTR(err);
++	if (err) {
++		ecomp->number = -1;
++		put_device(cdev);
++		return ERR_PTR(err);
++	}
+ 
+ 	return ecomp;
+ }
+diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
+index 5d1220d..664ed58 100644
+--- a/drivers/net/e1000e/hw.h
++++ b/drivers/net/e1000e/hw.h
+@@ -308,7 +308,7 @@ enum e1e_registers {
+ #define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
+ #define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
+ #define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
+-#define E1000_KMRNCTRLSTA_K1_ENABLE	0x140E
++#define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
+ #define E1000_KMRNCTRLSTA_K1_DISABLE	0x1400
+ 
+ #define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
+diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
+index 57a7e41..79e38dc 100644
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -3419,13 +3419,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
+ 
+ 	/* disable SERR in case the MSI write causes a master abort */
+ 	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+-	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+-			      pci_cmd & ~PCI_COMMAND_SERR);
++	if (pci_cmd & PCI_COMMAND_SERR)
++		pci_write_config_word(adapter->pdev, PCI_COMMAND,
++				      pci_cmd & ~PCI_COMMAND_SERR);
+ 
+ 	err = e1000_test_msi_interrupt(adapter);
+ 
+-	/* restore previous setting of command word */
+-	pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
++	/* re-enable SERR */
++	if (pci_cmd & PCI_COMMAND_SERR) {
++		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
++		pci_cmd |= PCI_COMMAND_SERR;
++		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
++	}
+ 
+ 	/* success ! */
+ 	if (!err)
+diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
+index cea37e0..df2a6d7 100644
+--- a/drivers/net/igb/igb_main.c
++++ b/drivers/net/igb/igb_main.c
+@@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
+ 			for (; i < adapter->rss_queues; i++)
+ 				adapter->rx_ring[i]->reg_idx = rbase_offset +
+ 				                               Q_IDX_82576(i);
+-			for (; j < adapter->rss_queues; j++)
+-				adapter->tx_ring[j]->reg_idx = rbase_offset +
+-				                               Q_IDX_82576(j);
+ 		}
+ 	case e1000_82575:
+ 	case e1000_82580:
+@@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+ 
+ 	/* Number of supported queues. */
+ 	adapter->num_rx_queues = adapter->rss_queues;
+-	adapter->num_tx_queues = adapter->rss_queues;
++	if (adapter->vfs_allocated_count)
++		adapter->num_tx_queues = 1;
++	else
++		adapter->num_tx_queues = adapter->rss_queues;
+ 
+ 	/* start with one vector for every rx queue */
+ 	numvecs = adapter->num_rx_queues;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+index 5fdbb53..dabafb8 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+@@ -239,7 +239,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+ 			if (qCoff > 15)
+ 				qCoff = 15;
+ 			else if (qCoff <= -16)
+-				qCoff = 16;
++				qCoff = -16;
+ 
+ 			ath_print(common, ATH_DBG_CALIBRATE,
+ 				  "Chn %d : iCoff = 0x%x  qCoff = 0x%x\n",
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index 23eb60e..cb4e7da 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -944,7 +944,7 @@ static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
+ 	return 1;
+ }
+ 
+-static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
++static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ 						  struct ath9k_channel *chan)
+ {
+ 	return -EINVAL;
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
+index 21354c1..5f01a0f 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom.h
++++ b/drivers/net/wireless/ath/ath9k/eeprom.h
+@@ -669,7 +669,7 @@ struct eeprom_ops {
+ 	int (*get_eeprom_ver)(struct ath_hw *hw);
+ 	int (*get_eeprom_rev)(struct ath_hw *hw);
+ 	u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
+-	u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
++	u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
+ 				      struct ath9k_channel *chan);
+ 	void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
+ 	void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+index 41a77d1..1576bbb 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+@@ -1149,13 +1149,13 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
+ 	}
+ }
+ 
+-static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
++static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ 					      struct ath9k_channel *chan)
+ {
+ 	struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ 	struct modal_eep_4k_header *pModal = &eep->modalHeader;
+ 
+-	return pModal->antCtrlCommon & 0xFFFF;
++	return pModal->antCtrlCommon;
+ }
+ 
+ static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+index b471db5..2705eb0 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+@@ -1131,13 +1131,13 @@ static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah,
+ 	return 1;
+ }
+ 
+-static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
++static u32 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ 						  struct ath9k_channel *chan)
+ {
+ 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ 	struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
+ 
+-	return pModal->antCtrlCommon & 0xFFFF;
++	return pModal->antCtrlCommon;
+ }
+ 
+ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
+index 7e1ed78..54ce34e 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
+@@ -729,7 +729,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 				    vpdTableI[i][sizeCurrVpdTable - 2]);
+ 		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+ 
+-		if (tgtIndex > maxIndex) {
++		if (tgtIndex >= maxIndex) {
+ 			while ((ss <= tgtIndex) &&
+ 			       (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ 				tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
+@@ -1437,14 +1437,14 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
+ 	return num_ant_config;
+ }
+ 
+-static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
++static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ 					       struct ath9k_channel *chan)
+ {
+ 	struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ 	struct modal_eep_header *pModal =
+ 		&(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+ 
+-	return pModal->antCtrlCommon & 0xFFFF;
++	return pModal->antCtrlCommon;
+ }
+ 
+ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index c33f17d..2feee1d 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -537,7 +537,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
+ 
+ 	if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ 		if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+-		    (AR_SREV_9280(ah) && !ah->is_pciexpress)) {
++		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
++		     !ah->is_pciexpress)) {
+ 			ah->config.serialize_regmode =
+ 				SER_REG_MODE_ON;
+ 		} else {
+@@ -1232,9 +1233,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ 
+ 	if (!ah->chip_fullsleep) {
+ 		ath9k_hw_abortpcurecv(ah);
+-		if (!ath9k_hw_stopdmarecv(ah))
++		if (!ath9k_hw_stopdmarecv(ah)) {
+ 			ath_print(common, ATH_DBG_XMIT,
+ 				"Failed to stop receive dma\n");
++			bChannelChange = false;
++		}
+ 	}
+ 
+ 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+@@ -1265,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ 	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
+ 
+ 	/* For chips on which RTC reset is done, save TSF before it gets cleared */
+-	if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
++	if (AR_SREV_9100(ah) ||
++	    (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
+ 		tsf = ath9k_hw_gettsf64(ah);
+ 
+ 	saveLedState = REG_READ(ah, AR_CFG_LED) &
+@@ -1297,7 +1301,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ 	}
+ 
+ 	/* Restore TSF */
+-	if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
++	if (tsf)
+ 		ath9k_hw_settsf64(ah, tsf);
+ 
+ 	if (AR_SREV_9280_10_OR_LATER(ah))
+@@ -1307,6 +1311,17 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ 	if (r)
+ 		return r;
+ 
++	/*
++	 * Some AR91xx SoC devices frequently fail to accept TSF writes
++	 * right after the chip reset. When that happens, write a new
++	 * value after the initvals have been applied, with an offset
++	 * based on measured time difference
++	 */
++	if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
++		tsf += 1500;
++		ath9k_hw_settsf64(ah, tsf);
++	}
++
+ 	/* Setup MFP options for CCMP */
+ 	if (AR_SREV_9280_20_OR_LATER(ah)) {
+ 		/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 859aa4a..d8dd503 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 	u32 ba[WME_BA_BMP_SIZE >> 5];
+ 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
+ 	bool rc_update = true;
++	struct ieee80211_tx_rate rates[4];
+ 
+ 	skb = bf->bf_mpdu;
+ 	hdr = (struct ieee80211_hdr *)skb->data;
+@@ -335,12 +336,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 	tx_info = IEEE80211_SKB_CB(skb);
+ 	hw = bf->aphy->hw;
+ 
++	memcpy(rates, tx_info->control.rates, sizeof(rates));
++
+ 	rcu_read_lock();
+ 
+ 	/* XXX: use ieee80211_find_sta! */
+ 	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
+ 	if (!sta) {
+ 		rcu_read_unlock();
++
++		INIT_LIST_HEAD(&bf_head);
++		while (bf) {
++			bf_next = bf->bf_next;
++
++			bf->bf_state.bf_type |= BUF_XRETRY;
++			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
++			    !bf->bf_stale || bf_next != NULL)
++				list_move_tail(&bf->list, &bf_head);
++
++			ath_tx_rc_status(bf, ts, 0, 0, false);
++			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
++				0, 0);
++
++			bf = bf_next;
++		}
+ 		return;
+ 	}
+ 
+@@ -375,6 +394,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 		txfail = txpending = 0;
+ 		bf_next = bf->bf_next;
+ 
++		skb = bf->bf_mpdu;
++		tx_info = IEEE80211_SKB_CB(skb);
++
+ 		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
+ 			/* transmit completion, subframe is
+ 			 * acked by block ack */
+@@ -428,6 +450,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 			spin_unlock_bh(&txq->axq_lock);
+ 
+ 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
++				memcpy(tx_info->control.rates, rates, sizeof(rates));
+ 				ath_tx_rc_status(bf, ts, nbad, txok, true);
+ 				rc_update = false;
+ 			} else {
+@@ -487,6 +510,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 		bf = bf_next;
+ 	}
+ 
++	/* prepend un-acked frames to the beginning of the pending frame queue */
++	if (!list_empty(&bf_pending)) {
++		spin_lock_bh(&txq->axq_lock);
++		list_splice(&bf_pending, &tid->buf_q);
++		ath_tx_queue_tid(txq, tid);
++		spin_unlock_bh(&txq->axq_lock);
++	}
++
+ 	if (tid->state & AGGR_CLEANUP) {
+ 		if (tid->baw_head == tid->baw_tail) {
+ 			tid->state &= ~AGGR_ADDBA_COMPLETE;
+@@ -499,14 +530,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 		return;
+ 	}
+ 
+-	/* prepend un-acked frames to the beginning of the pending frame queue */
+-	if (!list_empty(&bf_pending)) {
+-		spin_lock_bh(&txq->axq_lock);
+-		list_splice(&bf_pending, &tid->buf_q);
+-		ath_tx_queue_tid(txq, tid);
+-		spin_unlock_bh(&txq->axq_lock);
+-	}
+-
+ 	rcu_read_unlock();
+ 
+ 	if (needreset)
+@@ -2050,7 +2073,7 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
+ 		tx_info->status.rates[i].idx = -1;
+ 	}
+ 
+-	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
++	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+ }
+ 
+ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
+@@ -2161,7 +2184,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+ 			 * This frame is sent out as a single frame.
+ 			 * Use hardware retry status for this frame.
+ 			 */
+-			bf->bf_retries = ts.ts_longretry;
+ 			if (ts.ts_status & ATH9K_TXERR_XRETRY)
+ 				bf->bf_state.bf_type |= BUF_XRETRY;
+ 			ath_tx_rc_status(bf, &ts, 0, txok, true);
+@@ -2280,7 +2302,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
+ 		txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+ 
+ 		if (!bf_isampdu(bf)) {
+-			bf->bf_retries = txs.ts_longretry;
+ 			if (txs.ts_status & ATH9K_TXERR_XRETRY)
+ 				bf->bf_state.bf_type |= BUF_XRETRY;
+ 			ath_tx_rc_status(bf, &txs, 0, txok, true);
+@@ -2449,37 +2470,37 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+ 
+ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
+ {
+-	int i;
+-	struct ath_atx_ac *ac, *ac_tmp;
+-	struct ath_atx_tid *tid, *tid_tmp;
++	struct ath_atx_ac *ac;
++	struct ath_atx_tid *tid;
+ 	struct ath_txq *txq;
++	int i, tidno;
+ 
+-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+-		if (ATH_TXQ_SETUP(sc, i)) {
+-			txq = &sc->tx.txq[i];
++	for (tidno = 0, tid = &an->tid[tidno];
++	     tidno < WME_NUM_TID; tidno++, tid++) {
++		i = tid->ac->qnum;
+ 
+-			spin_lock_bh(&txq->axq_lock);
++		if (!ATH_TXQ_SETUP(sc, i))
++			continue;
+ 
+-			list_for_each_entry_safe(ac,
+-					ac_tmp, &txq->axq_acq, list) {
+-				tid = list_first_entry(&ac->tid_q,
+-						struct ath_atx_tid, list);
+-				if (tid && tid->an != an)
+-					continue;
+-				list_del(&ac->list);
+-				ac->sched = false;
+-
+-				list_for_each_entry_safe(tid,
+-						tid_tmp, &ac->tid_q, list) {
+-					list_del(&tid->list);
+-					tid->sched = false;
+-					ath_tid_drain(sc, txq, tid);
+-					tid->state &= ~AGGR_ADDBA_COMPLETE;
+-					tid->state &= ~AGGR_CLEANUP;
+-				}
+-			}
++		txq = &sc->tx.txq[i];
++		ac = tid->ac;
+ 
+-			spin_unlock_bh(&txq->axq_lock);
++		spin_lock_bh(&txq->axq_lock);
++
++		if (tid->sched) {
++			list_del(&tid->list);
++			tid->sched = false;
++		}
++
++		if (ac->sched) {
++			list_del(&ac->list);
++			tid->ac->sched = false;
+ 		}
++
++		ath_tid_drain(sc, txq, tid);
++		tid->state &= ~AGGR_ADDBA_COMPLETE;
++		tid->state &= ~AGGR_CLEANUP;
++
++		spin_unlock_bh(&txq->axq_lock);
+ 	}
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
+index 386c5f9..e1af9fd 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
+@@ -420,11 +420,10 @@ void iwl_bg_scan_check(struct work_struct *data)
+ 		return;
+ 
+ 	mutex_lock(&priv->mutex);
+-	if (test_bit(STATUS_SCANNING, &priv->status) ||
+-	    test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+-		IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting "
+-			"adapter (%dms)\n",
+-			jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
++	if (test_bit(STATUS_SCANNING, &priv->status) &&
++	    !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
++		IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
++			       jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
+ 
+ 		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ 			iwl_send_scan_abort(priv);
+@@ -489,12 +488,11 @@ void iwl_bg_abort_scan(struct work_struct *work)
+ 	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+ 		return;
+ 
+-	mutex_lock(&priv->mutex);
+-
+-	cancel_delayed_work_sync(&priv->scan_check);
+-	set_bit(STATUS_SCAN_ABORTING, &priv->status);
+-	iwl_send_scan_abort(priv);
++	cancel_delayed_work(&priv->scan_check);
+ 
++	mutex_lock(&priv->mutex);
++	if (test_bit(STATUS_SCAN_ABORTING, &priv->status))
++		iwl_send_scan_abort(priv);
+ 	mutex_unlock(&priv->mutex);
+ }
+ EXPORT_SYMBOL(iwl_bg_abort_scan);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index d504e2b..b50fedc 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev,
+ 		if (xennet_connect(netdev) != 0)
+ 			break;
+ 		xenbus_switch_state(dev, XenbusStateConnected);
++		netif_notify_peers(netdev);
+ 		break;
+ 
+ 	case XenbusStateClosing:
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index 188bc84..d02be78 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -176,16 +176,18 @@ static ssize_t led_proc_write(struct file *file, const char *buf,
+ 	size_t count, loff_t *pos)
+ {
+ 	void *data = PDE(file->f_path.dentry->d_inode)->data;
+-	char *cur, lbuf[count + 1];
++	char *cur, lbuf[32];
+ 	int d;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	memset(lbuf, 0, count + 1);
++	if (count >= sizeof(lbuf))
++		count = sizeof(lbuf)-1;
+ 
+ 	if (copy_from_user(lbuf, buf, count))
+ 		return -EFAULT;
++	lbuf[count] = 0;
+ 
+ 	cur = lbuf;
+ 
+diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
+index 59ae76b..d9c7e54 100644
+--- a/drivers/ssb/driver_chipcommon.c
++++ b/drivers/ssb/driver_chipcommon.c
+@@ -235,6 +235,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
+ 		return; /* We don't have a ChipCommon */
+ 	if (cc->dev->id.revision >= 11)
+ 		cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
++	ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
+ 	ssb_pmu_init(cc);
+ 	chipco_powercontrol_init(cc);
+ 	ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
+diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
+index 6dcda86..6e88d2b 100644
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
+ 		return -ENODEV;
+ 	}
+ 	if (bus->chipco.dev) {	/* can be unavailible! */
+-		bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+-			SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
++		/*
++		 * get SPROM offset: SSB_SPROM_BASE1 except for
++		 * chipcommon rev >= 31 or chip ID is 0x4312 and
++		 * chipcommon status & 3 == 2
++		 */
++		if (bus->chipco.dev->id.revision >= 31)
++			bus->sprom_offset = SSB_SPROM_BASE31;
++		else if (bus->chip_id == 0x4312 &&
++			 (bus->chipco.status & 0x03) == 2)
++			bus->sprom_offset = SSB_SPROM_BASE31;
++		else
++			bus->sprom_offset = SSB_SPROM_BASE1;
+ 	} else {
+ 		bus->sprom_offset = SSB_SPROM_BASE1;
+ 	}
++	ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
+ 
+ 	buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
+ 	if (!buf)
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 99d6af8..b3171fb 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -681,8 +681,8 @@ retry:
+ 	if (!bd_may_claim(bdev, whole, holder))
+ 		return -EBUSY;
+ 
+-	/* if someone else is claiming, wait for it to finish */
+-	if (whole->bd_claiming && whole->bd_claiming != holder) {
++	/* if claiming is already in progress, wait for it to finish */
++	if (whole->bd_claiming) {
+ 		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+ 		DEFINE_WAIT(wait);
+ 
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 2d428b0..3a9940e 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -146,6 +146,8 @@
+ 	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+@@ -161,6 +163,7 @@
+ 	{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+@@ -174,6 +177,7 @@
+ 	{0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+@@ -314,6 +318,7 @@
+ 	{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+@@ -324,6 +329,7 @@
+ 	{0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+@@ -366,6 +372,7 @@
+ 	{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index b21e405..142bf18 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1775,6 +1775,8 @@ extern void netif_carrier_on(struct net_device *dev);
+ 
+ extern void netif_carrier_off(struct net_device *dev);
+ 
++extern void netif_notify_peers(struct net_device *dev);
++
+ /**
+  *	netif_dormant_on - mark device as dormant.
+  *	@dev: network device
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 540703b..22c2abb 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret)
+ #define NETDEV_POST_INIT	0x0010
+ #define NETDEV_UNREGISTER_BATCH 0x0011
+ #define NETDEV_BONDING_DESLAVE  0x0012
++#define NETDEV_NOTIFY_PEERS	0x0012
+ 
+ #define SYS_DOWN	0x0001	/* Notify of system down */
+ #define SYS_RESTART	SYS_DOWN
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 906ae5a..bded651 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -637,7 +637,7 @@ static inline bool si_fromuser(const struct siginfo *info)
+ 
+ /*
+  * Bad permissions for sending the signal
+- * - the caller must hold at least the RCU read lock
++ * - the caller must hold the RCU read lock
+  */
+ static int check_kill_permission(int sig, struct siginfo *info,
+ 				 struct task_struct *t)
+@@ -1127,11 +1127,14 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
+ 
+ /*
+  * send signal info to all the members of a group
+- * - the caller must hold the RCU read lock at least
+  */
+ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+ {
+-	int ret = check_kill_permission(sig, info, p);
++	int ret;
++
++	rcu_read_lock();
++	ret = check_kill_permission(sig, info, p);
++	rcu_read_unlock();
+ 
+ 	if (!ret && sig)
+ 		ret = do_send_sig_info(sig, info, p, true);
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 98ce9bc..c85109d 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
+ 
+ 	csocket = NULL;
+ 
+-	if (strlen(addr) > UNIX_PATH_MAX) {
++	if (strlen(addr) >= UNIX_PATH_MAX) {
+ 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
+ 			addr);
+ 		return -ENAMETOOLONG;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 382bc76..da14c49 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
+ 		}
+ 		ip_mc_up(in_dev);
+ 		/* fall through */
++	case NETDEV_NOTIFY_PEERS:
+ 	case NETDEV_CHANGEADDR:
+ 		/* Send gratuitous ARP to notify of link change */
+ 		if (IN_DEV_ARP_NOTIFY(in_dev)) {
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index 3cd5f7b..ea13a80 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+ {
+ 	atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
+ 	mesh_accept_plinks_update(sdata);
+-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ }
+ 
+ static inline
+@@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+ {
+ 	atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
+ 	mesh_accept_plinks_update(sdata);
+-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ }
+ 
+ /**
+@@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
+ }
+ 
+ /**
+- * mesh_plink_deactivate - deactivate mesh peer link
++ * __mesh_plink_deactivate - deactivate mesh peer link
+  *
+  * @sta: mesh peer link to deactivate
+  *
+@@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
+  *
+  * Locking: the caller must hold sta->lock
+  */
+-static void __mesh_plink_deactivate(struct sta_info *sta)
++static bool __mesh_plink_deactivate(struct sta_info *sta)
+ {
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
++	bool deactivated = false;
+ 
+-	if (sta->plink_state == PLINK_ESTAB)
++	if (sta->plink_state == PLINK_ESTAB) {
+ 		mesh_plink_dec_estab_count(sdata);
++		deactivated = true;
++	}
+ 	sta->plink_state = PLINK_BLOCKED;
+ 	mesh_path_flush_by_nexthop(sta);
++
++	return deactivated;
+ }
+ 
+ /**
+- * __mesh_plink_deactivate - deactivate mesh peer link
++ * mesh_plink_deactivate - deactivate mesh peer link
+  *
+  * @sta: mesh peer link to deactivate
+  *
+@@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
+  */
+ void mesh_plink_deactivate(struct sta_info *sta)
+ {
++	struct ieee80211_sub_if_data *sdata = sta->sdata;
++	bool deactivated;
++
+ 	spin_lock_bh(&sta->lock);
+-	__mesh_plink_deactivate(sta);
++	deactivated = __mesh_plink_deactivate(sta);
+ 	spin_unlock_bh(&sta->lock);
++
++	if (deactivated)
++		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ }
+ 
+ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+@@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta)
+ 
+ void mesh_plink_block(struct sta_info *sta)
+ {
++	struct ieee80211_sub_if_data *sdata = sta->sdata;
++	bool deactivated;
++
+ 	spin_lock_bh(&sta->lock);
+-	__mesh_plink_deactivate(sta);
++	deactivated = __mesh_plink_deactivate(sta);
+ 	sta->plink_state = PLINK_BLOCKED;
+ 	spin_unlock_bh(&sta->lock);
++
++	if (deactivated)
++		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ }
+ 
+ 
+@@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
+ 	enum plink_event event;
+ 	enum plink_frame_type ftype;
+ 	size_t baselen;
++	bool deactivated;
+ 	u8 ie_len;
+ 	u8 *baseaddr;
+ 	__le16 plid, llid, reason;
+@@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
+ 		case CNF_ACPT:
+ 			del_timer(&sta->plink_timer);
+ 			sta->plink_state = PLINK_ESTAB;
+-			mesh_plink_inc_estab_count(sdata);
+ 			spin_unlock_bh(&sta->lock);
++			mesh_plink_inc_estab_count(sdata);
++			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ 				sta->sta.addr);
+ 			break;
+@@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
+ 		case OPN_ACPT:
+ 			del_timer(&sta->plink_timer);
+ 			sta->plink_state = PLINK_ESTAB;
+-			mesh_plink_inc_estab_count(sdata);
+ 			spin_unlock_bh(&sta->lock);
++			mesh_plink_inc_estab_count(sdata);
++			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ 				sta->sta.addr);
+ 			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
+@@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
+ 		case CLS_ACPT:
+ 			reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ 			sta->reason = reason;
+-			__mesh_plink_deactivate(sta);
++			deactivated = __mesh_plink_deactivate(sta);
+ 			sta->plink_state = PLINK_HOLDING;
+ 			llid = sta->llid;
+ 			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
+ 			spin_unlock_bh(&sta->lock);
++			if (deactivated)
++				ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ 			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
+ 					    plid, reason);
+ 			break;
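+
The mesh_plink hunks above follow one pattern throughout: the helper that runs under sta->lock only records, via a bool return value, that the established-link count changed, and ieee80211_bss_info_change_notify() is called only after the lock has been dropped. Below is a minimal userspace sketch of that "decide under the lock, notify after unlock" shape using pthreads; every name in it is illustrative and none of it is kernel code.

/* Userspace sketch (not kernel code): defer a heavyweight notification
 * until after the lock is dropped, as the mesh_plink hunks above do. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int estab_links = 1;

/* Caller must hold 'lock'; returns true if a notification is due. */
static bool __deactivate_link(void)
{
	bool deactivated = false;

	if (estab_links > 0) {
		estab_links--;
		deactivated = true;
	}
	return deactivated;
}

static void notify_beacon_change(void)
{
	/* Stand-in for ieee80211_bss_info_change_notify(); it may sleep or
	 * take other locks, so it must not run under 'lock'. */
	printf("beacon info changed, %d links established\n", estab_links);
}

static void deactivate_link(void)
{
	bool deactivated;

	pthread_mutex_lock(&lock);
	deactivated = __deactivate_link();
	pthread_mutex_unlock(&lock);

	if (deactivated)
		notify_beacon_change();
}

int main(void)
{
	deactivate_link();	/* notifies */
	deactivate_link();	/* nothing left established, stays quiet */
	return 0;
}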
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index a63029e..bd1892f 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_carrier_off);
+ 
++/**
++ * 	netif_notify_peers - notify network peers about existence of @dev
++ * 	@dev: network device
++ *
++ * Generate traffic such that interested network peers are aware of
++ * @dev, such as by generating a gratuitous ARP. This may be used when
++ * a device wants to inform the rest of the network about some sort of
++ * reconfiguration such as a failover event or virtual machine
++ * migration.
++ */
++void netif_notify_peers(struct net_device *dev)
++{
++	rtnl_lock();
++	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
++	rtnl_unlock();
++}
++EXPORT_SYMBOL(netif_notify_peers);
++
+ /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
+    under all circumstances. It is difficult to invent anything faster or
+    cheaper.
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 48ead6f..ef17fcf 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -44,10 +44,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
+ 		}
+ 	}
+ 
+-	WARN_ON(!done);
+-
+-	nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
+-	cfg80211_sme_rx_auth(dev, buf, len);
++	if (done) {
++		nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
++		cfg80211_sme_rx_auth(dev, buf, len);
++	}
+ 
+ 	wdev_unlock(wdev);
+ }
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 58401d2..5ca8c71 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -275,6 +275,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ {
+ 	struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+ 	struct cfg80211_internal_bss *bss, *res = NULL;
++	unsigned long now = jiffies;
+ 
+ 	spin_lock_bh(&dev->bss_lock);
+ 
+@@ -283,6 +284,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ 			continue;
+ 		if (channel && bss->pub.channel != channel)
+ 			continue;
++		/* Don't get expired BSS structs */
++		if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
++		    !atomic_read(&bss->hold))
++			continue;
+ 		if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
+ 			res = bss;
+ 			kref_get(&res->ref);
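+
The scan.c hunk just above skips BSS entries whose timestamp plus IEEE80211_SCAN_RESULT_EXPIRE lies in the past, relying on the kernel's wrap-safe time_after(). The small userspace sketch below shows the comparison trick it depends on (signed subtraction of unsigned counters); the values and names are made up for illustration only.

/* Userspace sketch of the wrap-safe time comparison used in the scan.c
 * hunk above (the kernel's time_after()). Illustrative only. */
#include <stdio.h>
#include <stdint.h>

/* True if a is after b, even across counter wraparound, as long as the
 * two values are less than half the counter range apart. */
static int time_after_u32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t ts = 0xfffffff0u;	/* stamped just before the counter wraps */
	uint32_t expire = 0x20;		/* expiry interval */

	printf("now=0x08 expired: %s\n",
	       time_after_u32(0x08, ts + expire) ? "yes" : "no");	/* no  */
	printf("now=0x18 expired: %s\n",
	       time_after_u32(0x18, ts + expire) ? "yes" : "no");	/* yes */
	return 0;
}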

Added: dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.2.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.35.2.patch	Sat Aug 14 11:27:01 2010	(r16145)
@@ -0,0 +1,2824 @@
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 98922f7..4824fb4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1027,6 +1027,18 @@ config PL310_ERRATA_588369
+ 	   is not correctly implemented in PL310 as clean lines are not
+ 	   invalidated as a result of these operations. Note that this errata
+ 	   uses Texas Instrument's secure monitor api.
++
++config ARM_ERRATA_720789
++	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
++	depends on CPU_V7 && SMP
++	help
++	  This option enables the workaround for the 720789 Cortex-A9 (prior to
++	  r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
++	  broadcasted CP15 TLB maintenance operations TLBIASIDIS and TLBIMVAIS.
++	  As a consequence of this erratum, some TLB entries which should be
++	  invalidated are not, resulting in an incoherency in the system page
++	  tables. The workaround changes the TLB flushing routines to invalidate
++	  entries regardless of the ASID.
+ endmenu
+ 
+ source "arch/arm/common/Kconfig"
+diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
+index bd863d8..33b546a 100644
+--- a/arch/arm/include/asm/tlbflush.h
++++ b/arch/arm/include/asm/tlbflush.h
+@@ -378,7 +378,11 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
+ 	if (tlb_flag(TLB_V6_I_ASID))
+ 		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+ 	if (tlb_flag(TLB_V7_UIS_ASID))
++#ifdef CONFIG_ARM_ERRATA_720789
++		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
++#else
+ 		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
++#endif
+ 
+ 	if (tlb_flag(TLB_BTB)) {
+ 		/* flush the branch target cache */
+@@ -424,7 +428,11 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+ 	if (tlb_flag(TLB_V6_I_PAGE))
+ 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+ 	if (tlb_flag(TLB_V7_UIS_PAGE))
++#ifdef CONFIG_ARM_ERRATA_720789
++		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
++#else
+ 		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
++#endif
+ 
+ 	if (tlb_flag(TLB_BTB)) {
+ 		/* flush the branch target cache */
+diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
+index fdda6be..d717b49 100644
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -745,9 +745,10 @@ static void __init cm_x300_init(void)
+ {
+ 	cm_x300_init_mfp();
+ 
+-	pxa_set_ffuart_info(NULL);
+ 	pxa_set_btuart_info(NULL);
+ 	pxa_set_stuart_info(NULL);
++	if (cpu_is_pxa300())
++		pxa_set_ffuart_info(NULL);
+ 
+ 	cm_x300_init_da9030();
+ 	cm_x300_init_dm9000();
+diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
+index 6bd932c..7a0dc5a 100644
+--- a/arch/arm/plat-mxc/include/mach/gpio.h
++++ b/arch/arm/plat-mxc/include/mach/gpio.h
+@@ -19,6 +19,7 @@
+ #ifndef __ASM_ARCH_MXC_GPIO_H__
+ #define __ASM_ARCH_MXC_GPIO_H__
+ 
++#include <linux/spinlock.h>
+ #include <mach/hardware.h>
+ #include <asm-generic/gpio.h>
+ 
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 77cfe7a..5d2f17d 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -163,9 +163,11 @@ drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
+ # Default to zImage, override when needed
+ all: zImage
+ 
+-BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
++# With make 3.82 we cannot mix normal and wildcard targets
++BOOT_TARGETS1 := zImage zImage.initrd uImage

++BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+ 
+-PHONY += $(BOOT_TARGETS)
++PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
+ 
+ boot := arch/$(ARCH)/boot
+ 
+@@ -180,10 +182,16 @@ relocs_check: arch/powerpc/relocs_check.pl vmlinux
+ zImage: relocs_check
+ endif
+ 
+-$(BOOT_TARGETS): vmlinux
++$(BOOT_TARGETS1): vmlinux
++	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++$(BOOT_TARGETS2): vmlinux
++	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++
++
++bootwrapper_install:
+ 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ 
+-bootwrapper_install %.dtb:
++%.dtb:
+ 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ 
+ define archhelp
+diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
+index babccee..4339d20 100644
+--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
++++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
+@@ -569,6 +569,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
+ 		struct perf_sample_data data;
+ 
+ 		perf_sample_data_init(&data, 0);
++		data.period = event->hw.last_period;
+ 
+ 		if (perf_event_overflow(event, nmi, &data, regs)) {
+ 			/*
+diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
+index 8859e12..c1cf59d 100644
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -27,20 +27,20 @@ struct __xchg_dummy {
+ 	switch (size) {							\
+ 	case 1:								\
+ 		asm volatile("xchgb %b0,%1"				\
+-			     : "=q" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=q" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 2:								\
+ 		asm volatile("xchgw %w0,%1"				\
+-			     : "=r" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=r" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 4:								\
+ 		asm volatile("xchgl %0,%1"				\
+-			     : "=r" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=r" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	default:							\
+@@ -70,14 +70,14 @@ static inline void __set_64bit(unsigned long long *ptr,
+ 			       unsigned int low, unsigned int high)
+ {
+ 	asm volatile("\n1:\t"
+-		     "movl (%0), %%eax\n\t"
+-		     "movl 4(%0), %%edx\n\t"
+-		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
++		     "movl (%1), %%eax\n\t"
++		     "movl 4(%1), %%edx\n\t"
++		     LOCK_PREFIX "cmpxchg8b (%1)\n\t"
+ 		     "jnz 1b"
+-		     : /* no outputs */
+-		     : "D"(ptr),
+-		       "b"(low),
+-		       "c"(high)
++		     : "=m" (*ptr)
++		     : "D" (ptr),
++		       "b" (low),
++		       "c" (high)
+ 		     : "ax", "dx", "memory");
+ }
+ 
+@@ -121,21 +121,21 @@ extern void __cmpxchg_wrong_size(void);
+ 	__typeof__(*(ptr)) __new = (new);				\
+ 	switch (size) {							\
+ 	case 1:								\
+-		asm volatile(lock "cmpxchgb %b1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgb %b2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "q" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 2:								\
+-		asm volatile(lock "cmpxchgw %w1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgw %w2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "r" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 4:								\
+-		asm volatile(lock "cmpxchgl %1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgl %2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "r" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	default:							\
+@@ -180,12 +180,12 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr,
+ 					     unsigned long long new)
+ {
+ 	unsigned long long prev;
+-	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+-		     : "=A"(prev)
+-		     : "b"((unsigned long)new),
+-		       "c"((unsigned long)(new >> 32)),
+-		       "m"(*__xg(ptr)),
+-		       "0"(old)
++	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
++		     : "=A" (prev),
++		       "+m" (*__xg(ptr))
++		     : "b" ((unsigned long)new),
++		       "c" ((unsigned long)(new >> 32)),
++		       "0" (old)
+ 		     : "memory");
+ 	return prev;
+ }
+@@ -195,12 +195,12 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+ 						   unsigned long long new)
+ {
+ 	unsigned long long prev;
+-	asm volatile("cmpxchg8b %3"
+-		     : "=A"(prev)
+-		     : "b"((unsigned long)new),
+-		       "c"((unsigned long)(new >> 32)),
+-		       "m"(*__xg(ptr)),
+-		       "0"(old)
++	asm volatile("cmpxchg8b %1"
++		     : "=A" (prev),
++		       "+m" (*__xg(ptr))
++		     : "b" ((unsigned long)new),
++		       "c" ((unsigned long)(new >> 32)),
++		       "0" (old)
+ 		     : "memory");
+ 	return prev;
+ }
+diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
+index 485ae41..b92f147 100644
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -26,26 +26,26 @@ extern void __cmpxchg_wrong_size(void);
+ 	switch (size) {							\
+ 	case 1:								\
+ 		asm volatile("xchgb %b0,%1"				\
+-			     : "=q" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=q" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 2:								\
+ 		asm volatile("xchgw %w0,%1"				\
+-			     : "=r" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=r" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 4:								\
+ 		asm volatile("xchgl %k0,%1"				\
+-			     : "=r" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=r" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 8:								\
+ 		asm volatile("xchgq %0,%1"				\
+-			     : "=r" (__x)				\
+-			     : "m" (*__xg(ptr)), "0" (__x)		\
++			     : "=r" (__x), "+m" (*__xg(ptr))		\
++			     : "0" (__x)				\
+ 			     : "memory");				\
+ 		break;							\
+ 	default:							\
+@@ -71,27 +71,27 @@ extern void __cmpxchg_wrong_size(void);
+ 	__typeof__(*(ptr)) __new = (new);				\
+ 	switch (size) {							\
+ 	case 1:								\
+-		asm volatile(lock "cmpxchgb %b1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgb %b2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "q" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 2:								\
+-		asm volatile(lock "cmpxchgw %w1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgw %w2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "r" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 4:								\
+-		asm volatile(lock "cmpxchgl %k1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgl %k2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "r" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	case 8:								\
+-		asm volatile(lock "cmpxchgq %1,%2"			\
+-			     : "=a"(__ret)				\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
++		asm volatile(lock "cmpxchgq %2,%1"			\
++			     : "=a" (__ret), "+m" (*__xg(ptr))		\
++			     : "r" (__new), "0" (__old)			\
+ 			     : "memory");				\
+ 		break;							\
+ 	default:							\
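+
Both cmpxchg headers above are converted from describing the memory location as an "m" input (paired with a matching "0" constraint) to a "+m" read-write operand, so the compiler knows *ptr is both read and written. Here is a stand-alone x86 sketch of that operand style; it is only an illustration, not the kernel macros themselves.

/* Userspace x86 sketch of the "+m" read-write operand style adopted by
 * the cmpxchg hunks above. Build with gcc on x86 or x86-64. */
#include <stdio.h>

static unsigned long xchg_ulong(unsigned long *ptr, unsigned long val)
{
	/* xchg with a memory operand locks implicitly; "+m" tells the
	 * compiler *ptr is read and written, "+r" does the same for val. */
	asm volatile("xchg %0, %1"
		     : "+r" (val), "+m" (*ptr)
		     :
		     : "memory");
	return val;	/* previous value of *ptr */
}

int main(void)
{
	unsigned long v = 1;
	unsigned long old = xchg_ulong(&v, 5);

	printf("old=%lu new=%lu\n", old, v);	/* old=1 new=5 */
	return 0;
}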
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 79556bd..01c0f3e 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -35,6 +35,7 @@
+ 
+ #include <linux/types.h> /* FIXME: kvm_para.h needs this */
+ 
++#include <linux/stop_machine.h>
+ #include <linux/kvm_para.h>
+ #include <linux/uaccess.h>
+ #include <linux/module.h>
+@@ -143,22 +144,28 @@ struct set_mtrr_data {
+ 	mtrr_type	smp_type;
+ };
+ 
++static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
++
+ /**
+- * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
++ * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
+  * @info: pointer to mtrr configuration data
+  *
+  * Returns nothing.
+  */
+-static void ipi_handler(void *info)
++static int mtrr_work_handler(void *info)
+ {
+ #ifdef CONFIG_SMP
+ 	struct set_mtrr_data *data = info;
+ 	unsigned long flags;
+ 
++	atomic_dec(&data->count);
++	while (!atomic_read(&data->gate))
++		cpu_relax();
++
+ 	local_irq_save(flags);
+ 
+ 	atomic_dec(&data->count);
+-	while (!atomic_read(&data->gate))
++	while (atomic_read(&data->gate))
+ 		cpu_relax();
+ 
+ 	/*  The master has cleared me to execute  */
+@@ -173,12 +180,13 @@ static void ipi_handler(void *info)
+ 	}
+ 
+ 	atomic_dec(&data->count);
+-	while (atomic_read(&data->gate))
++	while (!atomic_read(&data->gate))
+ 		cpu_relax();
+ 
+ 	atomic_dec(&data->count);
+ 	local_irq_restore(flags);
+ #endif
++	return 0;
+ }
+ 
+ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+@@ -198,7 +206,7 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+  *
+  * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
+  *
+- * 1. Send IPI to do the following:
++ * 1. Queue work to do the following on all processors:
+  * 2. Disable Interrupts
+  * 3. Wait for all procs to do so
+  * 4. Enter no-fill cache mode
+@@ -215,14 +223,17 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+  * 15. Enable interrupts.
+  *
+  * What does that mean for us? Well, first we set data.count to the number
+- * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
+- * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
+- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
++ * of CPUs. As each CPU announces that it started the rendezvous handler by
++ * decrementing the count, we reset data.count and set the data.gate flag
++ * allowing all the CPUs to proceed with the work. As each CPU disables
++ * interrupts, it'll decrement data.count once. We wait until it hits 0 and
++ * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
++ * are waiting for that flag to be cleared. Once it's cleared, each
+  * CPU goes through the transition of updating MTRRs.
+  * The CPU vendors may each do it differently,
+  * so we call mtrr_if->set() callback and let them take care of it.
+  * When they're done, they again decrement data->count and wait for data.gate
+- * to be reset.
++ * to be set.
+  * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
+  * Everyone then enables interrupts and we all continue on.
+  *
+@@ -234,6 +245,9 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
+ {
+ 	struct set_mtrr_data data;
+ 	unsigned long flags;
++	int cpu;
++
++	preempt_disable();
+ 
+ 	data.smp_reg = reg;
+ 	data.smp_base = base;
+@@ -246,10 +260,15 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
+ 	atomic_set(&data.gate, 0);
+ 
+ 	/* Start the ball rolling on other CPUs */
+-	if (smp_call_function(ipi_handler, &data, 0) != 0)
+-		panic("mtrr: timed out waiting for other CPUs\n");
++	for_each_online_cpu(cpu) {
++		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
++
++		if (cpu == smp_processor_id())
++			continue;
++
++		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
++	}
+ 
+-	local_irq_save(flags);
+ 
+ 	while (atomic_read(&data.count))
+ 		cpu_relax();
+@@ -259,6 +278,16 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
+ 	smp_wmb();
+ 	atomic_set(&data.gate, 1);
+ 
++	local_irq_save(flags);
++
++	while (atomic_read(&data.count))
++		cpu_relax();
++
++	/* Ok, reset count and toggle gate */
++	atomic_set(&data.count, num_booting_cpus() - 1);
++	smp_wmb();
++	atomic_set(&data.gate, 0);
++
+ 	/* Do our MTRR business */
+ 
+ 	/*
+@@ -279,7 +308,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
+ 
+ 	atomic_set(&data.count, num_booting_cpus() - 1);
+ 	smp_wmb();
+-	atomic_set(&data.gate, 0);
++	atomic_set(&data.gate, 1);
+ 
+ 	/*
+ 	 * Wait here for everyone to have seen the gate change
+@@ -289,6 +318,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
+ 		cpu_relax();
+ 
+ 	local_irq_restore(flags);
++	preempt_enable();
+ }
+ 
+ /**
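+
The reworked comment above describes a rendezvous built from data.count (how many CPUs still have to check in) and data.gate (the flag they spin on). The sketch below reduces that to a single phase in userspace with C11 atomics and pthreads; it only illustrates the count-and-gate idea and is not the MTRR code.

/* Userspace sketch of a count-and-gate rendezvous, one phase only. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 3

static atomic_int count;
static atomic_int gate;

static void *worker(void *arg)
{
	(void)arg;
	/* Announce arrival, then wait for the leader to open the gate. */
	atomic_fetch_sub(&count, 1);
	while (!atomic_load(&gate))
		;	/* cpu_relax() equivalent: just spin */

	/* ... this is where each CPU would rewrite its MTRRs ... */
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 0);

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* Leader: wait until every worker has checked in, then open the gate. */
	while (atomic_load(&count))
		;
	atomic_store(&gate, 1);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);

	printf("all %d workers passed the rendezvous\n", NWORKERS);
	return 0;
}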
+diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
+index b9d1ff5..227b044 100644
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -51,7 +51,7 @@ static inline int __vmware_platform(void)
+ 
+ static unsigned long vmware_get_tsc_khz(void)
+ {
+-	uint64_t tsc_hz;
++	uint64_t tsc_hz, lpj;
+ 	uint32_t eax, ebx, ecx, edx;
+ 
+ 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+@@ -62,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(void)
+ 	printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+ 			 (unsigned long) tsc_hz / 1000,
+ 			 (unsigned long) tsc_hz % 1000);
++
++	if (!preset_lpj) {
++		lpj = ((u64)tsc_hz * 1000);
++		do_div(lpj, HZ);
++		preset_lpj = lpj;
++	}
++
+ 	return tsc_hz;
+ }
+ 
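+
For the vmware hunk above, preset_lpj works out to the TSC frequency in Hz divided by HZ, since tsc_hz is reported in kHz. A throwaway check of that arithmetic with made-up example values:

/* preset_lpj = (tsc_hz_in_kHz * 1000) / HZ, illustrative values only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tsc_khz = 2400000;	/* 2.4 GHz TSC reported in kHz */
	unsigned int hz = 250;		/* kernel tick rate */
	uint64_t lpj = tsc_khz * 1000 / hz;

	printf("preset_lpj = %llu\n", (unsigned long long)lpj);	/* 9600000 */
	return 0;
}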
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index c4f33b2..11015fd 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -816,6 +816,13 @@ do_rest:
+ 			if (cpumask_test_cpu(cpu, cpu_callin_mask))
+ 				break;	/* It has booted */
+ 			udelay(100);
++			/*
++			 * Allow other tasks to run while we wait for the
++			 * AP to come online. This also gives a chance
++			 * for the MTRR work(triggered by the AP coming online)
++			 * to be completed in the stop machine context.
++			 */
++			schedule();
+ 		}
+ 
+ 		if (cpumask_test_cpu(cpu, cpu_callin_mask))
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index f627779..4c4508e 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -802,8 +802,10 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ 	up_read(&mm->mmap_sem);
+ 
+ 	/* Kernel mode? Handle exceptions or die: */
+-	if (!(error_code & PF_USER))
++	if (!(error_code & PF_USER)) {
+ 		no_context(regs, error_code, address);
++		return;
++	}
+ 
+ 	/* User-space => ok to do another page fault: */
+ 	if (is_prefetch(regs, error_code, address))
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 5d0e67f..e5d5e2c 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -45,6 +45,8 @@ struct kmmio_fault_page {
+ 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
+ 	 */
+ 	int count;
++
++	bool scheduled_for_release;
+ };
+ 
+ struct kmmio_delayed_release {
+@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
+ 	BUG_ON(f->count < 0);
+ 	if (!f->count) {
+ 		disarm_kmmio_fault_page(f);
+-		f->release_next = *release_list;
+-		*release_list = f;
++		if (!f->scheduled_for_release) {
++			f->release_next = *release_list;
++			*release_list = f;
++			f->scheduled_for_release = true;
++		}
+ 	}
+ }
+ 
+@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
+ 			prevp = &f->release_next;
+ 		} else {
+ 			*prevp = f->release_next;
++			f->release_next = NULL;
++			f->scheduled_for_release = false;
+ 		}
+-		f = f->release_next;
++		f = *prevp;
+ 	}
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+ 
+@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ 	kmmio_count--;
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+ 
++	if (!release_list)
++		return;
++
+ 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
+ 	if (!drelease) {
+ 		pr_crit("leaking kmmio_fault_page objects.\n");
+diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
+index 8565d94..38868ad 100644
+--- a/arch/x86/mm/testmmiotrace.c
++++ b/arch/x86/mm/testmmiotrace.c
+@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
+ 	iounmap(p);
+ }
+ 
++/*
++ * Tests how mmiotrace behaves in the face of multiple ioremap / iounmaps in
++ * a short time. We had a bug in the deferred freeing procedure which tried
++ * to free this region multiple times (ioremap can reuse the same address
++ * for many mappings).
++ */
++static void do_test_bulk_ioremapping(void)
++{
++	void __iomem *p;
++	int i;
++
++	for (i = 0; i < 10; ++i) {
++		p = ioremap_nocache(mmio_address, PAGE_SIZE);
++		if (p)
++			iounmap(p);
++	}
++
++	/* Force freeing. If it will crash we will know why. */
++	/* Force freeing. If it crashes, we will know why. */
++}
++
+ static int __init init(void)
+ {
+ 	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+@@ -104,6 +125,7 @@ static int __init init(void)
+ 		   "and writing 16 kB of rubbish in there.\n",
+ 		   size >> 10, mmio_address);
+ 	do_test(size);
++	do_test_bulk_ioremapping();
+ 	pr_info("All done.\n");
+ 	return 0;
+ }
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 2ec04c4..15466c0 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -34,6 +34,15 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
+ 		},
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
++	/* 2006 AMD HT/VIA system with two host bridges */
++        {
++		.callback = set_use_crs,
++		.ident = "ASRock ALiveSATA2-GLAN",
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
++                },
++        },
+ 	{}
+ };
+ 
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 9d9434f..df332c1 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -96,6 +96,14 @@ config CRYPTO_MANAGER2
+ 	select CRYPTO_BLKCIPHER2
+ 	select CRYPTO_PCOMP
+ 
++config CRYPTO_MANAGER_TESTS
++	bool "Run algorithms' self-tests"
++	default y
++	depends on CRYPTO_MANAGER2
++	help
++	  Run cryptomanager's tests for the new crypto algorithms being
++	  registered.
++
+ config CRYPTO_GF128MUL
+ 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
+ 	depends on EXPERIMENTAL
+diff --git a/crypto/algboss.c b/crypto/algboss.c
+index c3c196b..40bd391 100644
+--- a/crypto/algboss.c
++++ b/crypto/algboss.c
+@@ -206,6 +206,7 @@ err:
+ 	return NOTIFY_OK;
+ }
+ 
++#ifdef CONFIG_CRYPTO_MANAGER_TESTS
+ static int cryptomgr_test(void *data)
+ {
+ 	struct crypto_test_param *param = data;
+@@ -266,6 +267,7 @@ err_put_module:
+ err:
+ 	return NOTIFY_OK;
+ }
++#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+ 
+ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
+ 			    void *data)
+@@ -273,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
+ 	switch (msg) {
+ 	case CRYPTO_MSG_ALG_REQUEST:
+ 		return cryptomgr_schedule_probe(data);
++#ifdef CONFIG_CRYPTO_MANAGER_TESTS
+ 	case CRYPTO_MSG_ALG_REGISTER:
+ 		return cryptomgr_schedule_test(data);
++#endif
+ 	}
+ 
+ 	return NOTIFY_DONE;
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 5c8aaa0..abd980c 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -22,6 +22,17 @@
+ #include <crypto/rng.h>
+ 
+ #include "internal.h"
++
++#ifndef CONFIG_CRYPTO_MANAGER_TESTS
++
++/* a perfect nop */
++int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
++{
++	return 0;
++}
++
++#else
++
+ #include "testmgr.h"
+ 
+ /*
+@@ -2530,4 +2541,7 @@ notest:
+ non_fips_alg:
+ 	return -EINVAL;
+ }
++
++#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
++
+ EXPORT_SYMBOL_GPL(alg_test);
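+
The crypto hunks above gate the self-test machinery behind CONFIG_CRYPTO_MANAGER_TESTS and, when it is off, compile alg_test() down to a stub that always succeeds. Below is a compact userspace sketch of that config-gated-stub pattern; the function bodies and main() are invented for illustration.

/* Sketch of the config-gated stub pattern used by the crypto hunks above. */
#include <stdio.h>

/* #define CONFIG_CRYPTO_MANAGER_TESTS 1 */	/* toggle to enable the tests */

#ifndef CONFIG_CRYPTO_MANAGER_TESTS
static int alg_test(const char *alg)
{
	(void)alg;
	return 0;	/* a perfect nop, as the patch puts it */
}
#else
static int alg_test(const char *alg)
{
	printf("running self-tests for %s\n", alg);
	return 0;
}
#endif

int main(void)
{
	return alg_test("sha256");
}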
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 7409f98..3971bc0 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -158,6 +158,7 @@ struct piix_map_db {
+ struct piix_host_priv {
+ 	const int *map;
+ 	u32 saved_iocfg;
++	spinlock_t sidpr_lock;	/* FIXME: remove once locking in EH is fixed */
+ 	void __iomem *sidpr;
+ };
+ 
+@@ -951,12 +952,15 @@ static int piix_sidpr_scr_read(struct ata_link *link,
+ 			       unsigned int reg, u32 *val)
+ {
+ 	struct piix_host_priv *hpriv = link->ap->host->private_data;
++	unsigned long flags;
+ 
+ 	if (reg >= ARRAY_SIZE(piix_sidx_map))
+ 		return -EINVAL;
+ 
++	spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+ 	piix_sidpr_sel(link, reg);
+ 	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
++	spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+ 	return 0;
+ }
+ 
+@@ -964,12 +968,15 @@ static int piix_sidpr_scr_write(struct ata_link *link,
+ 				unsigned int reg, u32 val)
+ {
+ 	struct piix_host_priv *hpriv = link->ap->host->private_data;
++	unsigned long flags;
+ 
+ 	if (reg >= ARRAY_SIZE(piix_sidx_map))
+ 		return -EINVAL;
+ 
++	spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+ 	piix_sidpr_sel(link, reg);
+ 	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++	spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+ 	return 0;
+ }
+ 
+@@ -1566,6 +1573,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
+ 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ 	if (!hpriv)
+ 		return -ENOMEM;
++	spin_lock_init(&hpriv->sidpr_lock);
+ 
+ 	/* Save IOCFG, this will be used for cable detection, quirk
+ 	 * detection and restoration on detach.  This is necessary
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index ded76c4..3613422 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -781,7 +781,8 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
+ 	sk_for_each(s, node, head) {
+ 		vcc = atm_sk(s);
+ 		if (vcc->dev == dev && vcc->vci == vci &&
+-		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE)
++		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
++		    test_bit(ATM_VF_READY, &vcc->flags))
+ 			goto out;
+ 	}
+ 	vcc = NULL;
+@@ -907,6 +908,10 @@ static void pclose(struct atm_vcc *vcc)
+ 	clear_bit(ATM_VF_ADDR, &vcc->flags);
+ 	clear_bit(ATM_VF_READY, &vcc->flags);
+ 
++	/* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
++	   tasklet has finished processing any incoming packets (and, more to
++	   the point, using the vcc pointer). */
++	tasklet_unlock_wait(&card->tlet);
+ 	return;
+ }
+ 
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 7258c95..60bbfcf 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2371,11 +2371,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+ 
+ static void consider_delay_probes(struct drbd_conf *mdev)
+ {
+-	if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
+-		return;
+-
+-	if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
+-		drbd_send_delay_probes(mdev);
++	return;
+ }
+ 
+ static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+@@ -2660,9 +2656,24 @@ static void drbd_unplug_fn(struct request_queue *q)
+ 
+ static void drbd_set_defaults(struct drbd_conf *mdev)
+ {
+-	mdev->sync_conf.after      = DRBD_AFTER_DEF;
+-	mdev->sync_conf.rate       = DRBD_RATE_DEF;
+-	mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
++	/* This way we get a compile error when sync_conf grows,
++	   and we forget to initialize it here */
++	mdev->sync_conf = (struct syncer_conf) {
++		/* .rate = */		DRBD_RATE_DEF,
++		/* .after = */		DRBD_AFTER_DEF,
++		/* .al_extents = */	DRBD_AL_EXTENTS_DEF,
++		/* .dp_volume = */	DRBD_DP_VOLUME_DEF,
++		/* .dp_interval = */	DRBD_DP_INTERVAL_DEF,
++		/* .throttle_th = */	DRBD_RS_THROTTLE_TH_DEF,
++		/* .hold_off_th = */	DRBD_RS_HOLD_OFF_TH_DEF,
++		/* .verify_alg = */	{}, 0,
++		/* .cpu_mask = */	{}, 0,
++		/* .csums_alg = */	{}, 0,
++		/* .use_rle = */	0
++	};
++
++	/* Have to use that way, because the layout differs between
++	   big endian and little endian */
+ 	mdev->state = (union drbd_state) {
+ 		{ .role = R_SECONDARY,
+ 		  .peer = R_UNKNOWN,
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 5d9cc53..6fcb971 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -59,6 +59,9 @@ static struct usb_device_id btusb_table[] = {
+ 	/* Generic Bluetooth USB device */
+ 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ 
++	/* Apple iMac11,1 */
++	{ USB_DEVICE(0x05ac, 0x8215) },
++
+ 	/* AVM BlueFRITZ! USB v2.0 */
+ 	{ USB_DEVICE(0x057c, 0x3800) },
+ 
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 094bdc3..ff68e7c 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2176,6 +2176,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
+ 	info->io.addr_data = res->start;
+ 
+ 	info->io.regspacing = DEFAULT_REGSPACING;
++	res = pnp_get_resource(dev,
++			       (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
++					IORESOURCE_IO : IORESOURCE_MEM,
++			       1);
++	if (res) {
++		if (res->start > info->io.addr_data)
++			info->io.regspacing = res->start - info->io.addr_data;
++	}
+ 	info->io.regsize = DEFAULT_REGSPACING;
+ 	info->io.regshift = 0;
+ 
+diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
+index a663800..18af923 100644
+--- a/drivers/char/nozomi.c
++++ b/drivers/char/nozomi.c
+@@ -1611,6 +1611,8 @@ static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
+ 	ret = tty_init_termios(tty);
+ 	if (ret == 0) {
+ 		tty_driver_kref_get(driver);
++		tty->count++;
++		tty->driver_data = port;
+ 		driver->ttys[tty->index] = tty;
+ 	}
+ 	return ret;
+@@ -1639,7 +1641,7 @@ static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
+ 
+ static int ntty_open(struct tty_struct *tty, struct file *filp)
+ {
+-	struct port *port = get_port_by_tty(tty);
++	struct port *port = tty->driver_data;
+ 	return tty_port_open(&port->port, tty, filp);
+ }
+ 
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index 6d3a73b..5216c8a 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -97,6 +97,7 @@ struct ioat_chan_common {
+ 	#define IOAT_RESET_PENDING 2
+ 	#define IOAT_KOBJ_INIT_FAIL 3
+ 	#define IOAT_RESHAPE_PENDING 4
++	#define IOAT_RUN 5
+ 	struct timer_list timer;
+ 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
+ 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
+diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
+index 3c8b32a..216f9d3 100644
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data)
+ 			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ 			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ 				__func__, chanerr);
+-			BUG_ON(is_ioat_bug(chanerr));
++			if (test_bit(IOAT_RUN, &chan->state))
++				BUG_ON(is_ioat_bug(chanerr));
++			else /* we never got off the ground */
++				return;
+ 		}
+ 
+ 		/* if we haven't made progress and we have already
+@@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
+ 	return ring;
+ }
+ 
++void ioat2_free_chan_resources(struct dma_chan *c);
++
+ /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
+  * @chan: channel to be initialized
+  */
+@@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ 	struct ioat_chan_common *chan = &ioat->base;
+ 	struct ioat_ring_ent **ring;
++	u64 status;
+ 	int order;
+ 
+ 	/* have we already been set up? */
+@@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ 	tasklet_enable(&chan->cleanup_task);
+ 	ioat2_start_null_desc(ioat);
+ 
+-	return 1 << ioat->alloc_order;
++	/* check that we got off the ground */
++	udelay(5);
++	status = ioat_chansts(chan);
++	if (is_ioat_active(status) || is_ioat_idle(status)) {
++		set_bit(IOAT_RUN, &chan->state);
++		return 1 << ioat->alloc_order;
++	} else {
++		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
++
++		dev_WARN(to_dev(chan),
++			"failed to start channel chanerr: %#x\n", chanerr);
++		ioat2_free_chan_resources(c);
++		return -EFAULT;
++	}
+ }
+ 
+ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
+@@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
+ 	del_timer_sync(&chan->timer);
+ 	device->cleanup_fn((unsigned long) c);
+ 	device->reset_hw(chan);
++	clear_bit(IOAT_RUN, &chan->state);
+ 
+ 	spin_lock_bh(&chan->cleanup_lock);
+ 	spin_lock_bh(&ioat->prep_lock);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 1cdd22e..d0f4990 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data)
+ 			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ 			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ 				__func__, chanerr);
+-			BUG_ON(is_ioat_bug(chanerr));
++			if (test_bit(IOAT_RUN, &chan->state))
++				BUG_ON(is_ioat_bug(chanerr));
++			else /* we never got off the ground */
++				return;
+ 		}
+ 
+ 		/* if we haven't made progress and we have already
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index 64207df..2de76cc 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -506,15 +506,22 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
+ 	return (flags & REQ_FAILED) ? -EIO : 0;
+ }
+ 
+-static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
++/*
++ * returns true if rq has been completed
++ */
++static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
+ {
+ 	unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
+ 
+ 	if (cmd->tf_flags & IDE_TFLAG_WRITE)
+ 		nr_bytes -= cmd->last_xfer_len;
+ 
+-	if (nr_bytes > 0)
++	if (nr_bytes > 0) {
+ 		ide_complete_rq(drive, 0, nr_bytes);
++		return true;
++	}
++
++	return false;
+ }
+ 
+ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+@@ -679,7 +686,8 @@ out_end:
+ 		}
+ 
+ 		if (uptodate == 0 && rq->bio)
+-			ide_cd_error_cmd(drive, cmd);
++			if (ide_cd_error_cmd(drive, cmd))
++				return ide_stopped;
+ 
+ 		/* make sure it's fully ended */
+ 		if (blk_fs_request(rq) == 0) {
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index cb20d0b..7476d95 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -532,13 +532,17 @@ static void mddev_unlock(mddev_t * mddev)
+ 		 * an access to the files will try to take reconfig_mutex
+ 		 * while holding the file unremovable, which leads to
+ 		 * a deadlock.
+-		 * So hold open_mutex instead - we are allowed to take
+-		 * it while holding reconfig_mutex, and md_run can
+-		 * use it to wait for the remove to complete.
++		 * So hold set sysfs_active while the remove in happeing,
++		 * So set sysfs_active while the remove is happening,
++		 * and anything else which might set ->to_remove or may
++		 * -EBUSY if sysfs_active is still set.
++		 * We set sysfs_active under reconfig_mutex and elsewhere
++		 * test it under the same mutex to ensure its correct value
++		 * is seen.
+ 		 */
+ 		struct attribute_group *to_remove = mddev->to_remove;
+ 		mddev->to_remove = NULL;
+-		mutex_lock(&mddev->open_mutex);
++		mddev->sysfs_active = 1;
+ 		mutex_unlock(&mddev->reconfig_mutex);
+ 
+ 		if (to_remove != &md_redundancy_group)
+@@ -550,7 +554,7 @@ static void mddev_unlock(mddev_t * mddev)
+ 				sysfs_put(mddev->sysfs_action);
+ 			mddev->sysfs_action = NULL;
+ 		}
+-		mutex_unlock(&mddev->open_mutex);
++		mddev->sysfs_active = 0;
+ 	} else
+ 		mutex_unlock(&mddev->reconfig_mutex);
+ 
+@@ -2960,7 +2964,9 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
+ 	 *  - new personality will access other array.
+ 	 */
+ 
+-	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
++	if (mddev->sync_thread ||
++	    mddev->reshape_position != MaxSector ||
++	    mddev->sysfs_active)
+ 		return -EBUSY;
+ 
+ 	if (!mddev->pers->quiesce) {
+@@ -4344,13 +4350,9 @@ static int md_run(mddev_t *mddev)
+ 
+ 	if (mddev->pers)
+ 		return -EBUSY;
+-
+-	/* These two calls synchronise us with the
+-	 * sysfs_remove_group calls in mddev_unlock,
+-	 * so they must have completed.
+-	 */
+-	mutex_lock(&mddev->open_mutex);
+-	mutex_unlock(&mddev->open_mutex);
++	/* Cannot run until previous stop completes properly */
++	if (mddev->sysfs_active)
++		return -EBUSY;
+ 
+ 	/*
+ 	 * Analyze all RAID superblock(s)
+@@ -4711,12 +4713,13 @@ out:
+  */
+ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
+ {
+-	int err = 0;
++	int err = 0, revalidate = 0;
+ 	struct gendisk *disk = mddev->gendisk;
+ 	mdk_rdev_t *rdev;
+ 
+ 	mutex_lock(&mddev->open_mutex);
+-	if (atomic_read(&mddev->openers) > is_open) {
++	if (atomic_read(&mddev->openers) > is_open ||
++	    mddev->sysfs_active) {
+ 		printk("md: %s still in use.\n",mdname(mddev));
+ 		err = -EBUSY;
+ 	} else if (mddev->pers) {
+@@ -4740,7 +4743,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
+ 			}
+ 
+ 		set_capacity(disk, 0);
+-		revalidate_disk(disk);
++		revalidate = 1;
+ 
+ 		if (mddev->ro)
+ 			mddev->ro = 0;
+@@ -4748,6 +4751,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
+ 		err = 0;
+ 	}
+ 	mutex_unlock(&mddev->open_mutex);
++	if (revalidate)
++		revalidate_disk(disk);
+ 	if (err)
+ 		return err;
+ 	/*
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 10597bf..9ec208e 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -125,6 +125,10 @@ struct mddev_s
+ 	int				suspended;
+ 	atomic_t			active_io;
+ 	int				ro;
++	int				sysfs_active; /* set when sysfs deletes
++						       * are happening, so run/
++						       * takeover/stop are not safe
++						       */
+ 
+ 	struct gendisk			*gendisk;
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 42e64e4..d1d6891 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
+ 		 */
+ 		bp = bio_split(bio,
+ 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
++
++		/* Each of these 'make_request' calls will call 'wait_barrier'.
++		 * If the first succeeds but the second blocks due to the resync
++		 * thread raising the barrier, we will deadlock because the
++		 * IO to the underlying device will be queued in generic_make_request
++		 * and will never complete, so will never reduce nr_pending.
++		 * So increment nr_waiting here so no new raise_barriers will
++		 * succeed, and so the second wait_barrier cannot block.
++		 */
++		spin_lock_irq(&conf->resync_lock);
++		conf->nr_waiting++;
++		spin_unlock_irq(&conf->resync_lock);
++
+ 		if (make_request(mddev, &bp->bio1))
+ 			generic_make_request(&bp->bio1);
+ 		if (make_request(mddev, &bp->bio2))
+ 			generic_make_request(&bp->bio2);
+ 
++		spin_lock_irq(&conf->resync_lock);
++		conf->nr_waiting--;
++		wake_up(&conf->wait_barrier);
++		spin_unlock_irq(&conf->resync_lock);
++
+ 		bio_pair_release(bp);
+ 		return 0;
+ 	bad_map:
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index 82e9438..8878503 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -604,8 +604,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ 	/* Command pre-processing step */
+ 	switch (command) {
+ 	case NAND_CMD_RESET:
+-		send_cmd(host, command, false);
+ 		preset(mtd);
++		send_cmd(host, command, false);
+ 		break;
+ 
+ 	case NAND_CMD_STATUS:
+diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
+index 8d46731..90e143e 100644
+--- a/drivers/mtd/nand/plat_nand.c
++++ b/drivers/mtd/nand/plat_nand.c
+@@ -91,7 +91,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Scan to find existance of the device */
+-	if (nand_scan(&data->mtd, 1)) {
++	if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ 		err = -ENXIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/e100.c b/drivers/net/e100.c
+index b194bad..8e2eab4 100644
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -1779,6 +1779,7 @@ static int e100_tx_clean(struct nic *nic)
+ 	for (cb = nic->cb_to_clean;
+ 	    cb->status & cpu_to_le16(cb_complete);
+ 	    cb = nic->cb_to_clean = cb->next) {
++		rmb(); /* read skb after status */
+ 		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ 			     "cb[%d]->status = 0x%04X\n",
+ 			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+@@ -1927,6 +1928,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
+ 
+ 	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ 		     "status=0x%04X\n", rfd_status);
++	rmb(); /* read size after status bit */
+ 
+ 	/* If data isn't ready, nothing to indicate */
+ 	if (unlikely(!(rfd_status & cb_complete))) {
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+index 68a8089..a2680bf 100644
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -3448,6 +3448,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->count)) {
+ 		bool cleaned = false;
++		rmb();	/* read buffer_info after eop_desc */
+ 		for ( ; !cleaned; count++) {
+ 			tx_desc = E1000_TX_DESC(*tx_ring, i);
+ 			buffer_info = &tx_ring->buffer_info[i];
+@@ -3637,6 +3638,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ 		if (*work_done >= work_to_do)
+ 			break;
+ 		(*work_done)++;
++		rmb(); /* read descriptor and rx_buffer_info after status DD */
+ 
+ 		status = rx_desc->status;
+ 		skb = buffer_info->skb;
+@@ -3843,6 +3845,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ 		if (*work_done >= work_to_do)
+ 			break;
+ 		(*work_done)++;
++		rmb(); /* read descriptor and rx_buffer_info after status DD */
+ 
+ 		status = rx_desc->status;
+ 		skb = buffer_info->skb;
+diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
+index 79e38dc..ba24679 100644
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -774,6 +774,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ 		if (*work_done >= work_to_do)
+ 			break;
+ 		(*work_done)++;
++		rmb();	/* read descriptor and rx_buffer_info after status DD */
+ 
+ 		status = rx_desc->status;
+ 		skb = buffer_info->skb;
+@@ -984,6 +985,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+ 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->count)) {
+ 		bool cleaned = false;
++		rmb(); /* read buffer_info after eop_desc */
+ 		for (; !cleaned; count++) {
+ 			tx_desc = E1000_TX_DESC(*tx_ring, i);
+ 			buffer_info = &tx_ring->buffer_info[i];
+@@ -1080,6 +1082,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ 			break;
+ 		(*work_done)++;
+ 		skb = buffer_info->skb;
++		rmb();	/* read descriptor and rx_buffer_info after status DD */
+ 
+ 		/* in the packet split case this is header only */
+ 		prefetch(skb->data - NET_IP_ALIGN);
+@@ -1279,6 +1282,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ 		if (*work_done >= work_to_do)
+ 			break;
+ 		(*work_done)++;
++		rmb();	/* read descriptor and rx_buffer_info after status DD */
+ 
+ 		status = rx_desc->status;
+ 		skb = buffer_info->skb;
+diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
+index df2a6d7..c9cb9c4 100644
+--- a/drivers/net/igb/igb_main.c
++++ b/drivers/net/igb/igb_main.c
+@@ -5344,6 +5344,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+ 
+ 	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->count)) {
++		rmb();	/* read buffer_info after eop_desc status */
+ 		for (cleaned = false; !cleaned; count++) {
+ 			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+ 			buffer_info = &tx_ring->buffer_info[i];
+@@ -5549,6 +5550,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+ 		if (*work_done >= budget)
+ 			break;
+ 		(*work_done)++;
++		rmb(); /* read descriptor and rx_buffer_info after status DD */
+ 
+ 		skb = buffer_info->skb;
+ 		prefetch(skb->data - NET_IP_ALIGN);
+diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
+index 5e2b2a8..57b5fee 100644
+--- a/drivers/net/igbvf/netdev.c
++++ b/drivers/net/igbvf/netdev.c
+@@ -248,6 +248,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
+ 		if (*work_done >= work_to_do)
+ 			break;
+ 		(*work_done)++;
++		rmb(); /* read descriptor and rx_buffer_info after status DD */
+ 
+ 		buffer_info = &rx_ring->buffer_info[i];
+ 
+@@ -780,6 +781,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
+ 
+ 	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->count)) {
++		rmb();	/* read buffer_info after eop_desc status */
+ 		for (cleaned = false; !cleaned; count++) {
+ 			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
+ 			buffer_info = &tx_ring->buffer_info[i];
+diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
+index c6b75c8..45fc89b 100644
+--- a/drivers/net/ixgb/ixgb_main.c
++++ b/drivers/net/ixgb/ixgb_main.c
+@@ -1816,6 +1816,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
+ 
+ 	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
+ 
++		rmb(); /* read buffer_info after eop_desc */
+ 		for (cleaned = false; !cleaned; ) {
+ 			tx_desc = IXGB_TX_DESC(*tx_ring, i);
+ 			buffer_info = &tx_ring->buffer_info[i];
+@@ -1976,6 +1977,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
+ 			break;
+ 
+ 		(*work_done)++;
++		rmb();	/* read descriptor and rx_buffer_info after status DD */
+ 		status = rx_desc->status;
+ 		skb = buffer_info->skb;
+ 		buffer_info->skb = NULL;
+diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
+index 74d9b6d..649c867 100644
+--- a/drivers/net/ixgbe/ixgbe_main.c
++++ b/drivers/net/ixgbe/ixgbe_main.c
+@@ -748,6 +748,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
+ 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->work_limit)) {
+ 		bool cleaned = false;
++		rmb(); /* read buffer_info after eop_desc */
+ 		for ( ; !cleaned; count++) {
+ 			struct sk_buff *skb;
+ 			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
+index a16cff7..3ea59f1 100644
+--- a/drivers/net/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ixgbevf/ixgbevf_main.c
+@@ -231,6 +231,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ 	       (count < tx_ring->work_limit)) {
+ 		bool cleaned = false;
++		rmb(); /* read buffer_info after eop_desc */
+ 		for ( ; !cleaned; count++) {
+ 			struct sk_buff *skb;
+ 			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+@@ -518,6 +519,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ 			break;
+ 		(*work_done)++;
+ 
++		rmb(); /* read descriptor and rx_buffer_info after status DD */
+ 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ 			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
+index cc55974..7a7b01a 100644
+--- a/drivers/net/smsc911x.c
++++ b/drivers/net/smsc911x.c
+@@ -84,8 +84,7 @@ struct smsc911x_data {
+ 	 */
+ 	spinlock_t mac_lock;
+ 
+-	/* spinlock to ensure 16-bit accesses are serialised.
+-	 * unused with a 32-bit bus */
++	/* spinlock to ensure register accesses are serialised */
+ 	spinlock_t dev_lock;
+ 
+ 	struct phy_device *phy_dev;
+@@ -118,37 +117,33 @@ struct smsc911x_data {
+ 	unsigned int hashlo;
+ };
+ 
+-/* The 16-bit access functions are significantly slower, due to the locking
+- * necessary.  If your bus hardware can be configured to do this for you
+- * (in response to a single 32-bit operation from software), you should use
+- * the 32-bit access functions instead. */
+-
+-static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
++static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+ {
+ 	if (pdata->config.flags & SMSC911X_USE_32BIT)
+ 		return readl(pdata->ioaddr + reg);
+ 
+-	if (pdata->config.flags & SMSC911X_USE_16BIT) {
+-		u32 data;
+-		unsigned long flags;
+-
+-		/* these two 16-bit reads must be performed consecutively, so
+-		 * must not be interrupted by our own ISR (which would start
+-		 * another read operation) */
+-		spin_lock_irqsave(&pdata->dev_lock, flags);
+-		data = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
++	if (pdata->config.flags & SMSC911X_USE_16BIT)
++		return ((readw(pdata->ioaddr + reg) & 0xFFFF) |
+ 			((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
+-		spin_unlock_irqrestore(&pdata->dev_lock, flags);
+-
+-		return data;
+-	}
+ 
+ 	BUG();
+ 	return 0;
+ }
+ 
+-static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+-				      u32 val)
++static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
++{
++	u32 data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&pdata->dev_lock, flags);
++	data = __smsc911x_reg_read(pdata, reg);
++	spin_unlock_irqrestore(&pdata->dev_lock, flags);
++
++	return data;
++}
++
++static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
++					u32 val)
+ {
+ 	if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ 		writel(val, pdata->ioaddr + reg);
+@@ -156,44 +151,54 @@ static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+ 	}
+ 
+ 	if (pdata->config.flags & SMSC911X_USE_16BIT) {
+-		unsigned long flags;
+-
+-		/* these two 16-bit writes must be performed consecutively, so
+-		 * must not be interrupted by our own ISR (which would start
+-		 * another read operation) */
+-		spin_lock_irqsave(&pdata->dev_lock, flags);
+ 		writew(val & 0xFFFF, pdata->ioaddr + reg);
+ 		writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
+-		spin_unlock_irqrestore(&pdata->dev_lock, flags);
+ 		return;
+ 	}
+ 
+ 	BUG();
+ }
+ 
++static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
++				      u32 val)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&pdata->dev_lock, flags);
++	__smsc911x_reg_write(pdata, reg, val);
++	spin_unlock_irqrestore(&pdata->dev_lock, flags);
++}
++
+ /* Writes a packet to the TX_DATA_FIFO */
+ static inline void
+ smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
+ 		      unsigned int wordcount)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&pdata->dev_lock, flags);
++
+ 	if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ 		while (wordcount--)
+-			smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++));
+-		return;
++			__smsc911x_reg_write(pdata, TX_DATA_FIFO,
++					     swab32(*buf++));
++		goto out;
+ 	}
+ 
+ 	if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ 		writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ 		while (wordcount--)
+-			smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
+-		return;
++			__smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
++		goto out;
+ 	}
+ 
+ 	BUG();
++out:
++	spin_unlock_irqrestore(&pdata->dev_lock, flags);
+ }
+ 
+ /* Reads a packet out of the RX_DATA_FIFO */
+@@ -201,24 +206,31 @@ static inline void
+ smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
+ 		     unsigned int wordcount)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&pdata->dev_lock, flags);
++
+ 	if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ 		while (wordcount--)
+-			*buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO));
+-		return;
++			*buf++ = swab32(__smsc911x_reg_read(pdata,
++							    RX_DATA_FIFO));
++		goto out;
+ 	}
+ 
+ 	if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ 		readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ 		while (wordcount--)
+-			*buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+-		return;
++			*buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO);
++		goto out;
+ 	}
+ 
+ 	BUG();
++out:
++	spin_unlock_irqrestore(&pdata->dev_lock, flags);
+ }
+ 
+ /* waits for MAC not busy, with timeout.  Only called by smsc911x_mac_read
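+
The smsc911x rework above splits every accessor into a lock-free __smsc911x_* helper plus a wrapper that takes dev_lock, so the FIFO paths can hold the lock once across a whole burst instead of re-acquiring it per word. Below is a userspace sketch of that double-underscore convention with a pthread mutex; all names are illustrative.

/* Userspace sketch of the __helper-plus-locking-wrapper split used above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fake_reg;

/* Lock-free helper: caller must already hold dev_lock. */
static void __reg_write(unsigned int val)
{
	fake_reg = val;
}

/* Public single-register write: takes the lock around one access. */
static void reg_write(unsigned int val)
{
	pthread_mutex_lock(&dev_lock);
	__reg_write(val);
	pthread_mutex_unlock(&dev_lock);
}

/* Burst write: takes the lock once for the whole FIFO-style sequence. */
static void fifo_write(const unsigned int *buf, unsigned int words)
{
	pthread_mutex_lock(&dev_lock);
	while (words--)
		__reg_write(*buf++);
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	unsigned int packet[4] = { 1, 2, 3, 4 };

	reg_write(0xdead);
	fifo_write(packet, 4);
	printf("last value written: %u\n", fake_reg);	/* 4 */
	return 0;
}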
+diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+index ae7319b..4cf864c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
++++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+@@ -193,7 +193,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
+ 		__entry->framelen = buf0_len + buf1_len;
+ 		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+ 		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+-		memcpy(__get_dynamic_array(buf1), buf1, buf0_len);
++		memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ 	),
+ 	TP_printk("[%p] TX %.2x (%zu bytes)",
+ 		  __entry->priv,
+diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
+index 515817d..15f09e8 100644
+--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
++++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
+@@ -688,6 +688,8 @@ void rtl8180_beacon_work(struct work_struct *work)
+ 
+ 	/* grab a fresh beacon */
+ 	skb = ieee80211_beacon_get(dev, vif);
++	if (!skb)
++		goto resched;
+ 
+ 	/*
+ 	 * update beacon timestamp w/ TSF value
+diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
+index c9171be..435fbbc 100644
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -3030,6 +3030,34 @@ static void __init iommu_exit_mempool(void)
+ 
+ }
+ 
++static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
++{
++	struct dmar_drhd_unit *drhd;
++	u32 vtbar;
++	int rc;
++
++	/* We know that this device on this chipset has its own IOMMU.
++	 * If we find it under a different IOMMU, then the BIOS is lying
++	 * to us. Hope that the IOMMU for this device is actually
++	 * disabled, and it needs no translation...
++	 */
++	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
++	if (rc) {
++		/* "can't" happen */
++		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
++		return;
++	}
++	vtbar &= 0xffff0000;
++
++	/* we know that the this iommu should be at offset 0xa000 from vtbar */
++	drhd = dmar_find_matched_drhd_unit(pdev);
++	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
++			    TAINT_FIRMWARE_WORKAROUND,
++			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
++		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
++}
++DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
++
+ static void __init init_no_remapping_devices(void)
+ {
+ 	struct dmar_drhd_unit *drhd;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 477345d..e28524e 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2115,6 +2115,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disabl
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
+ 
+ /* Disable MSI on chipsets that are known to not support it */
+ static void __devinit quirk_disable_msi(struct pci_dev *dev)
+@@ -2390,6 +2391,9 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
+ 	int pos;
+ 	int found;
+ 
++	if (!pci_msi_enabled())
++		return;
++
+ 	/* check if there is HT MSI cap or enabled on this device */
+ 	found = ht_check_msi_mapping(dev);
+ 
+diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
+index a4cd9ad..015e274 100644
+--- a/drivers/pcmcia/pcmcia_resource.c
++++ b/drivers/pcmcia/pcmcia_resource.c
+@@ -651,7 +651,7 @@ EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
+ #ifdef CONFIG_PCMCIA_PROBE
+ 
+ /* mask of IRQs already reserved by other cards, we should avoid using them */
+-static u8 pcmcia_used_irq[NR_IRQS];
++static u8 pcmcia_used_irq[32];
+ 
+ static irqreturn_t test_action(int cpl, void *dev_id)
+ {
+@@ -674,6 +674,9 @@ static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
+ 	for (try = 0; try < 64; try++) {
+ 		irq = try % 32;
+ 
++		if (irq > NR_IRQS)
++			continue;
++
+ 		/* marked as available by driver, not blocked by userspace? */
+ 		if (!((mask >> irq) & 1))
+ 			continue;
+diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
+index 746a446..53be4d3 100644
+--- a/drivers/serial/8250_pci.c
++++ b/drivers/serial/8250_pci.c
+@@ -994,6 +994,7 @@ static int skip_tx_en_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_TITAN_800E	0xA014
+ #define PCI_DEVICE_ID_TITAN_200EI	0xA016
+ #define PCI_DEVICE_ID_TITAN_200EISI	0xA017
++#define PCI_DEVICE_ID_OXSEMI_16PCI958	0x9538
+ 
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+@@ -1542,6 +1543,8 @@ enum pci_board_num_t {
+ 	pbn_b2_4_921600,
+ 	pbn_b2_8_921600,
+ 
++	pbn_b2_8_1152000,
++
+ 	pbn_b2_bt_1_115200,
+ 	pbn_b2_bt_2_115200,
+ 	pbn_b2_bt_4_115200,
+@@ -1960,6 +1963,13 @@ static struct pciserial_board pci_boards[] __devinitdata = {
+ 		.uart_offset	= 8,
+ 	},
+ 
++	[pbn_b2_8_1152000] = {
++		.flags		= FL_BASE2,
++		.num_ports	= 8,
++		.base_baud	= 1152000,
++		.uart_offset	= 8,
++	},
++
+ 	[pbn_b2_bt_1_115200] = {
+ 		.flags		= FL_BASE2|FL_BASE_BARS,
+ 		.num_ports	= 1,
+@@ -2875,6 +2885,9 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 		pbn_b0_bt_2_921600 },
++	{	PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958,
++		PCI_ANY_ID , PCI_ANY_ID, 0, 0,
++		pbn_b2_8_1152000 },
+ 
+ 	/*
+ 	 * Oxford Semiconductor Inc. Tornado PCI express device range.
+diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
+index 97480f5..7455c80 100644
+--- a/drivers/staging/hv/Kconfig
++++ b/drivers/staging/hv/Kconfig
+@@ -17,7 +17,7 @@ config HYPERV_STORAGE
+ 
+ config HYPERV_BLOCK
+ 	tristate "Microsoft Hyper-V virtual block driver"
+-	depends on BLOCK && SCSI && LBDAF
++	depends on BLOCK && SCSI && (LBDAF || 64BIT)
+ 	default HYPERV
+ 	help
+ 	  Select this option to enable the Hyper-V virtual block driver.
+diff --git a/drivers/staging/line6/Kconfig b/drivers/staging/line6/Kconfig
+index 7852d4a..bc1ffbe 100644
+--- a/drivers/staging/line6/Kconfig
++++ b/drivers/staging/line6/Kconfig
+@@ -2,6 +2,7 @@ config LINE6_USB
+ 	tristate "Line6 USB support"
+ 	depends on USB && SND
+ 	select SND_RAWMIDI
++	select SND_PCM
+ 	help
+ 	  This is a driver for the guitar amp, cab, and effects modeller
+ 	  PODxt Pro by Line6 (and similar devices), supporting the
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 9ca0e9e..6474c3a 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -2179,6 +2179,7 @@ int panel_init(void)
+ 		if (pprt) {
+ 			parport_release(pprt);
+ 			parport_unregister_device(pprt);
++			pprt = NULL;
+ 		}
+ 		parport_unregister_driver(&panel_driver);
+ 		printk(KERN_ERR "Panel driver version " PANEL_VERSION
+@@ -2228,6 +2229,7 @@ static void __exit panel_cleanup_module(void)
+ 		/* TODO: free all input signals */
+ 		parport_release(pprt);
+ 		parport_unregister_device(pprt);
++		pprt = NULL;
+ 	}
+ 	parport_unregister_driver(&panel_driver);
+ }
+diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
+index 674769d..c48e85d 100644
+--- a/drivers/staging/rt2860/usb_main_dev.c
++++ b/drivers/staging/rt2860/usb_main_dev.c
+@@ -64,6 +64,7 @@ struct usb_device_id rtusb_usb_id[] = {
+ 	{USB_DEVICE(0x14B2, 0x3C07)},	/* AL */
+ 	{USB_DEVICE(0x050D, 0x8053)},	/* Belkin */
+ 	{USB_DEVICE(0x050D, 0x825B)},	/* Belkin */
++	{USB_DEVICE(0x050D, 0x935B)},	/* Belkin F6D4050 v2 */
+ 	{USB_DEVICE(0x14B2, 0x3C23)},	/* Airlink */
+ 	{USB_DEVICE(0x14B2, 0x3C27)},	/* Airlink */
+ 	{USB_DEVICE(0x07AA, 0x002F)},	/* Corega */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 70cccc7..ffc80e3 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -20,6 +20,7 @@
+ #include <linux/usb.h>
+ #include <linux/usbdevice_fs.h>
+ #include <linux/usb/hcd.h>
++#include <linux/usb/quirks.h>
+ #include <linux/kthread.h>
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
+@@ -1801,7 +1802,6 @@ int usb_new_device(struct usb_device *udev)
+ 	pm_runtime_set_active(&udev->dev);
+ 	pm_runtime_enable(&udev->dev);
+ 
+-	usb_detect_quirks(udev);
+ 	err = usb_enumerate_device(udev);	/* Read descriptors */
+ 	if (err < 0)
+ 		goto fail;
+@@ -3111,6 +3111,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ 		if (status < 0)
+ 			goto loop;
+ 
++		usb_detect_quirks(udev);
++		if (udev->quirks & USB_QUIRK_DELAY_INIT)
++			msleep(1000);
++
+ 		/* consecutive bus-powered hubs aren't reliable; they can
+ 		 * violate the voltage drop budget.  if the new child has
+ 		 * a "powered" LED, users should notice we didn't enable it
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index db99c08..25719da 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -38,6 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Logitech Harmony 700-series */
++	{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Philips PSC805 audio device */
+ 	{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 7c05555..419e6b3 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -137,6 +137,16 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
+ }
+ EXPORT_SYMBOL_GPL(usb_anchor_urb);
+ 
++/* Callers must hold anchor->lock */
++static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
++{
++	urb->anchor = NULL;
++	list_del(&urb->anchor_list);
++	usb_put_urb(urb);
++	if (list_empty(&anchor->urb_list))
++		wake_up(&anchor->wait);
++}
++
+ /**
+  * usb_unanchor_urb - unanchors an URB
+  * @urb: pointer to the urb to anchor
+@@ -156,17 +166,14 @@ void usb_unanchor_urb(struct urb *urb)
+ 		return;
+ 
+ 	spin_lock_irqsave(&anchor->lock, flags);
+-	if (unlikely(anchor != urb->anchor)) {
+-		/* we've lost the race to another thread */
+-		spin_unlock_irqrestore(&anchor->lock, flags);
+-		return;
+-	}
+-	urb->anchor = NULL;
+-	list_del(&urb->anchor_list);
++	/*
++	 * At this point, we could be competing with another thread which
++	 * has the same intention. To protect the urb from being unanchored
++	 * twice, only the winner of the race gets the job.
++	 */
++	if (likely(anchor == urb->anchor))
++		__usb_unanchor_urb(urb, anchor);
+ 	spin_unlock_irqrestore(&anchor->lock, flags);
+-	usb_put_urb(urb);
+-	if (list_empty(&anchor->urb_list))
+-		wake_up(&anchor->wait);
+ }
+ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
+ 
+@@ -749,20 +756,11 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
+ void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
+ {
+ 	struct urb *victim;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&anchor->lock, flags);
+-	while (!list_empty(&anchor->urb_list)) {
+-		victim = list_entry(anchor->urb_list.prev, struct urb,
+-				    anchor_list);
+-		usb_get_urb(victim);
+-		spin_unlock_irqrestore(&anchor->lock, flags);
+-		/* this will unanchor the URB */
++	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
+ 		usb_unlink_urb(victim);
+ 		usb_put_urb(victim);
+-		spin_lock_irqsave(&anchor->lock, flags);
+ 	}
+-	spin_unlock_irqrestore(&anchor->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
+ 
+@@ -799,12 +797,11 @@ struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
+ 		victim = list_entry(anchor->urb_list.next, struct urb,
+ 				    anchor_list);
+ 		usb_get_urb(victim);
+-		spin_unlock_irqrestore(&anchor->lock, flags);
+-		usb_unanchor_urb(victim);
++		__usb_unanchor_urb(victim, anchor);
+ 	} else {
+-		spin_unlock_irqrestore(&anchor->lock, flags);
+ 		victim = NULL;
+ 	}
++	spin_unlock_irqrestore(&anchor->lock, flags);
+ 
+ 	return victim;
+ }
+@@ -826,12 +823,7 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
+ 	while (!list_empty(&anchor->urb_list)) {
+ 		victim = list_entry(anchor->urb_list.prev, struct urb,
+ 				    anchor_list);
+-		usb_get_urb(victim);
+-		spin_unlock_irqrestore(&anchor->lock, flags);
+-		/* this may free the URB */
+-		usb_unanchor_urb(victim);
+-		usb_put_urb(victim);
+-		spin_lock_irqsave(&anchor->lock, flags);
++		__usb_unanchor_urb(victim, anchor);
+ 	}
+ 	spin_unlock_irqrestore(&anchor->lock, flags);
+ }
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index d43d176..19f5070 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -114,6 +114,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ 		break;
+ 	case PCI_VENDOR_ID_INTEL:
+ 		ehci->need_io_watchdog = 0;
++		ehci->fs_i_thresh = 1;
+ 		if (pdev->device == 0x27cc) {
+ 			ehci->broken_periodic = 1;
+ 			ehci_info(ehci, "using broken periodic workaround\n");
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 805ec63..93f58e5 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1400,7 +1400,6 @@ iso_stream_schedule (
+ 	int			status;
+ 	unsigned		mod = ehci->periodic_size << 3;
+ 	struct ehci_iso_sched	*sched = urb->hcpriv;
+-	struct pci_dev		*pdev;
+ 
+ 	if (sched->span > (mod - SCHEDULE_SLOP)) {
+ 		ehci_dbg (ehci, "iso request %p too long\n", urb);
+@@ -1427,15 +1426,14 @@ iso_stream_schedule (
+ 	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ 	 */
+ 	if (likely (!list_empty (&stream->td_list))) {
+-		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
+ 		start = stream->next_uframe;
+ 
+ 		/* For high speed devices, allow scheduling within the
+-		 * isochronous scheduling threshold.  For full speed devices,
+-		 * don't. (Work around for Intel ICH9 bug.)
++		 * isochronous scheduling threshold.  For full speed devices
++		 * and Intel PCI-based controllers, don't (work around for
++		 * Intel ICH9 bug).
+ 		 */
+-		if (!stream->highspeed &&
+-				pdev->vendor == PCI_VENDOR_ID_INTEL)
++		if (!stream->highspeed && ehci->fs_i_thresh)
+ 			next = now + ehci->i_thresh;
+ 		else
+ 			next = now;
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 650a687..8b3d9c8 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -130,6 +130,7 @@ struct ehci_hcd {			/* one per controller */
+ 	unsigned		has_amcc_usb23:1;
+ 	unsigned		need_io_watchdog:1;
+ 	unsigned		broken_periodic:1;
++	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
+ 
+ 	/* required for usb32 quirk */
+ 	#define OHCI_CTRL_HCFS          (3 << 6)
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 16dffe9..c3049c7 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -1378,7 +1378,6 @@ static void iso_callback (struct urb *urb)
+ 			break;
+ 		}
+ 	}
+-	simple_free_urb (urb);
+ 
+ 	ctx->pending--;
+ 	if (ctx->pending == 0) {
+@@ -1495,6 +1494,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
+ 			}
+ 
+ 			simple_free_urb (urbs [i]);
++			urbs[i] = NULL;
+ 			context.pending--;
+ 			context.submit_error = 1;
+ 			break;
+@@ -1504,6 +1504,10 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
+ 
+ 	wait_for_completion (&context.done);
+ 
++	for (i = 0; i < param->sglen; i++) {
++		if (urbs[i])
++			simple_free_urb(urbs[i]);
++	}
+ 	/*
+ 	 * Isochronous transfers are expected to fail sometimes.  As an
+ 	 * arbitrary limit, we will report an error if any submissions
+diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
+index 61c76b1..56c93ca 100644
+--- a/drivers/usb/mon/mon_bin.c
++++ b/drivers/usb/mon/mon_bin.c
+@@ -1009,7 +1009,7 @@ static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 
+ 		mutex_lock(&rp->fetch_lock);
+ 		spin_lock_irqsave(&rp->b_lock, flags);
+-		mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
++		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+ 		kfree(rp->b_vec);
+ 		rp->b_vec  = vec;
+ 		rp->b_size = size;
+diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
+index bba76af..c79a5e3 100644
+--- a/drivers/usb/musb/musb_debugfs.c
++++ b/drivers/usb/musb/musb_debugfs.c
+@@ -92,29 +92,29 @@ static const struct musb_register_map musb_regmap[] = {
+ 	{ "LS_EOF1",		0x7E,	8 },
+ 	{ "SOFT_RST",		0x7F,	8 },
+ 	{ "DMA_CNTLch0",	0x204,	16 },
+-	{ "DMA_ADDRch0",	0x208,	16 },
+-	{ "DMA_COUNTch0",	0x20C,	16 },
++	{ "DMA_ADDRch0",	0x208,	32 },
++	{ "DMA_COUNTch0",	0x20C,	32 },
+ 	{ "DMA_CNTLch1",	0x214,	16 },
+-	{ "DMA_ADDRch1",	0x218,	16 },
+-	{ "DMA_COUNTch1",	0x21C,	16 },
++	{ "DMA_ADDRch1",	0x218,	32 },
++	{ "DMA_COUNTch1",	0x21C,	32 },
+ 	{ "DMA_CNTLch2",	0x224,	16 },
+-	{ "DMA_ADDRch2",	0x228,	16 },
+-	{ "DMA_COUNTch2",	0x22C,	16 },
++	{ "DMA_ADDRch2",	0x228,	32 },
++	{ "DMA_COUNTch2",	0x22C,	32 },
+ 	{ "DMA_CNTLch3",	0x234,	16 },
+-	{ "DMA_ADDRch3",	0x238,	16 },
+-	{ "DMA_COUNTch3",	0x23C,	16 },
++	{ "DMA_ADDRch3",	0x238,	32 },
++	{ "DMA_COUNTch3",	0x23C,	32 },
+ 	{ "DMA_CNTLch4",	0x244,	16 },
+-	{ "DMA_ADDRch4",	0x248,	16 },
+-	{ "DMA_COUNTch4",	0x24C,	16 },
++	{ "DMA_ADDRch4",	0x248,	32 },
++	{ "DMA_COUNTch4",	0x24C,	32 },
+ 	{ "DMA_CNTLch5",	0x254,	16 },
+-	{ "DMA_ADDRch5",	0x258,	16 },
+-	{ "DMA_COUNTch5",	0x25C,	16 },
++	{ "DMA_ADDRch5",	0x258,	32 },
++	{ "DMA_COUNTch5",	0x25C,	32 },
+ 	{ "DMA_CNTLch6",	0x264,	16 },
+-	{ "DMA_ADDRch6",	0x268,	16 },
+-	{ "DMA_COUNTch6",	0x26C,	16 },
++	{ "DMA_ADDRch6",	0x268,	32 },
++	{ "DMA_COUNTch6",	0x26C,	32 },
+ 	{ "DMA_CNTLch7",	0x274,	16 },
+-	{ "DMA_ADDRch7",	0x278,	16 },
+-	{ "DMA_COUNTch7",	0x27C,	16 },
++	{ "DMA_ADDRch7",	0x278,	32 },
++	{ "DMA_COUNTch7",	0x27C,	32 },
+ 	{  }	/* Terminating Entry */
+ };
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 8b8c797..2bef441 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -126,6 +126,10 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
++	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
++	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
++	{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
++	{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ 	{ } /* Terminating Entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e298dc4..eb12d9b 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -157,6 +157,9 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
+@@ -746,6 +749,7 @@ static struct usb_device_id id_table_combined [] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ 	{ },					/* Optional parameter entry */
+ 	{ }					/* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index d01946d..6e612c5 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -40,6 +40,11 @@
+ 
+ #define FTDI_NXTCAM_PID		0xABB8 /* NXTCam for Mindstorms NXT */
+ 
++/* US Interface Navigator (http://www.usinterface.com/) */
++#define FTDI_USINT_CAT_PID	0xb810	/* Navigator CAT and 2nd PTT lines */
++#define FTDI_USINT_WKEY_PID	0xb811	/* Navigator WKEY and FSK lines */
++#define FTDI_USINT_RS232_PID	0xb812	/* Navigator RS232 and CONFIG lines */
++
+ /* OOCDlink by Joern Kaipf <joernk at web.de>
+  * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
+ #define FTDI_OOCDLINK_PID	0xbaf8	/* Amontec JTAGkey */
+@@ -1032,3 +1037,8 @@
+ #define XVERVE_SIGNALYZER_SH2_PID	0xBCA2
+ #define XVERVE_SIGNALYZER_SH4_PID	0xBCA4
+ 
++/*
++ * Segway Robotic Mobility Platform USB interface (using VID 0x0403)
++ * Submitted by John G. Rogers
++ */
++#define SEGWAY_RMP200_PID	0xe729
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index a817ced..ca92f67 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -208,18 +208,23 @@ retry:
+ 	urb->transfer_buffer_length = count;
+ 	usb_serial_debug_data(debug, &port->dev, __func__, count,
+ 						urb->transfer_buffer);
++	spin_lock_irqsave(&port->lock, flags);
++	port->tx_bytes += count;
++	spin_unlock_irqrestore(&port->lock, flags);
++
++	clear_bit(i, &port->write_urbs_free);
+ 	result = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (result) {
+ 		dev_err(&port->dev, "%s - error submitting urb: %d\n",
+ 						__func__, result);
++		set_bit(i, &port->write_urbs_free);
++		spin_lock_irqsave(&port->lock, flags);
++		port->tx_bytes -= count;
++		spin_unlock_irqrestore(&port->lock, flags);
++
+ 		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+ 		return result;
+ 	}
+-	clear_bit(i, &port->write_urbs_free);
+-
+-	spin_lock_irqsave(&port->lock, flags);
+-	port->tx_bytes += count;
+-	spin_unlock_irqrestore(&port->lock, flags);
+ 
+ 	/* Try sending off another urb, unless in irq context (in which case
+ 	 * there will be no free urb). */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5cd30e4..5c35b3a 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -145,7 +145,10 @@ static void option_instat_callback(struct urb *urb);
+ #define HUAWEI_PRODUCT_E143D			0x143D
+ #define HUAWEI_PRODUCT_E143E			0x143E
+ #define HUAWEI_PRODUCT_E143F			0x143F
++#define HUAWEI_PRODUCT_K4505			0x1464
++#define HUAWEI_PRODUCT_K3765			0x1465
+ #define HUAWEI_PRODUCT_E14AC			0x14AC
++#define HUAWEI_PRODUCT_ETS1220			0x1803
+ 
+ #define QUANTA_VENDOR_ID			0x0408
+ #define QUANTA_PRODUCT_Q101			0xEA02
+@@ -482,6 +485,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
+ 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
+@@ -1017,6 +1023,13 @@ static int option_probe(struct usb_serial *serial,
+ 		serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
+ 		return -ENODEV;
+ 
++	/* Don't bind network interfaces on Huawei K3765 & K4505 */
++	if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
++		(serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
++			serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
++		serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
++		return -ENODEV;
++
+ 	data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+ 
+ 	if (!data)
+diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
+index e66b8b1..d8b12c3 100644
+--- a/drivers/video/w100fb.c
++++ b/drivers/video/w100fb.c
+@@ -858,9 +858,9 @@ unsigned long w100fb_gpio_read(int port)
+ void w100fb_gpio_write(int port, unsigned long value)
+ {
+ 	if (port==W100_GPIO_PORT_A)
+-		value = writel(value, remapped_regs + mmGPIO_DATA);
++		writel(value, remapped_regs + mmGPIO_DATA);
+ 	else
+-		value = writel(value, remapped_regs + mmGPIO_DATA2);
++		writel(value, remapped_regs + mmGPIO_DATA2);
+ }
+ EXPORT_SYMBOL(w100fb_gpio_read);
+ EXPORT_SYMBOL(w100fb_gpio_write);
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index db8f506..28f133a 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -536,6 +536,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ 	if (irq < 0)
+ 		return irq;
+ 
++	irqflags |= IRQF_NO_SUSPEND;
+ 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ 	if (retval != 0) {
+ 		unbind_from_irq(irq);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index b3171fb..4c54c86 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1339,10 +1339,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 	/*
+ 	 * hooks: /n/, see "layering violations".
+ 	 */
+-	ret = devcgroup_inode_permission(bdev->bd_inode, perm);
+-	if (ret != 0) {
+-		bdput(bdev);
+-		return ret;
++	if (!for_part) {
++		ret = devcgroup_inode_permission(bdev->bd_inode, perm);
++		if (ret != 0) {
++			bdput(bdev);
++			return ret;
++		}
+ 	}
+ 
+ 	lock_kernel();
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index e8fcf4e..622c9514 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -199,7 +199,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ 			       "the persistent file for the dentry with name "
+ 			       "[%s]; rc = [%d]\n", __func__,
+ 			       ecryptfs_dentry->d_name.name, rc);
+-			goto out;
++			goto out_free;
+ 		}
+ 	}
+ 	if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+@@ -207,7 +207,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ 		rc = -EPERM;
+ 		printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ 		       "file must hence be opened RO\n", __func__);
+-		goto out;
++		goto out_free;
+ 	}
+ 	ecryptfs_set_file_lower(
+ 		file, ecryptfs_inode_to_private(inode)->lower_file);
+@@ -292,12 +292,40 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
+ 	return rc;
+ }
+ 
+-static int ecryptfs_ioctl(struct inode *inode, struct file *file,
+-			  unsigned int cmd, unsigned long arg);
++static long
++ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct file *lower_file = NULL;
++	long rc = -ENOTTY;
++
++	if (ecryptfs_file_to_private(file))
++		lower_file = ecryptfs_file_to_lower(file);
++	if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
++		rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
++	return rc;
++}
++
++#ifdef CONFIG_COMPAT
++static long
++ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct file *lower_file = NULL;
++	long rc = -ENOIOCTLCMD;
++
++	if (ecryptfs_file_to_private(file))
++		lower_file = ecryptfs_file_to_lower(file);
++	if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl)
++		rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
++	return rc;
++}
++#endif
+ 
+ const struct file_operations ecryptfs_dir_fops = {
+ 	.readdir = ecryptfs_readdir,
+-	.ioctl = ecryptfs_ioctl,
++	.unlocked_ioctl = ecryptfs_unlocked_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = ecryptfs_compat_ioctl,
++#endif
+ 	.open = ecryptfs_open,
+ 	.flush = ecryptfs_flush,
+ 	.release = ecryptfs_release,
+@@ -313,7 +341,10 @@ const struct file_operations ecryptfs_main_fops = {
+ 	.write = do_sync_write,
+ 	.aio_write = generic_file_aio_write,
+ 	.readdir = ecryptfs_readdir,
+-	.ioctl = ecryptfs_ioctl,
++	.unlocked_ioctl = ecryptfs_unlocked_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = ecryptfs_compat_ioctl,
++#endif
+ 	.mmap = generic_file_mmap,
+ 	.open = ecryptfs_open,
+ 	.flush = ecryptfs_flush,
+@@ -322,20 +353,3 @@ const struct file_operations ecryptfs_main_fops = {
+ 	.fasync = ecryptfs_fasync,
+ 	.splice_read = generic_file_splice_read,
+ };
+-
+-static int
+-ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+-	       unsigned long arg)
+-{
+-	int rc = 0;
+-	struct file *lower_file = NULL;
+-
+-	if (ecryptfs_file_to_private(file))
+-		lower_file = ecryptfs_file_to_lower(file);
+-	if (lower_file && lower_file->f_op && lower_file->f_op->ioctl)
+-		rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode),
+-					     lower_file, cmd, arg);
+-	else
+-		rc = -ENOTTY;
+-	return rc;
+-}
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 31ef525..8cd617b 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -264,7 +264,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
+ 		printk(KERN_ERR "%s: Out of memory whilst attempting "
+ 		       "to allocate ecryptfs_dentry_info struct\n",
+ 			__func__);
+-		goto out_dput;
++		goto out_put;
+ 	}
+ 	ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
+ 	ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
+@@ -339,8 +339,9 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
+ out_free_kmem:
+ 	kmem_cache_free(ecryptfs_header_cache_2, page_virt);
+ 	goto out;
+-out_dput:
++out_put:
+ 	dput(lower_dentry);
++	mntput(lower_mnt);
+ 	d_drop(ecryptfs_dentry);
+ out:
+ 	return rc;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 4e8983a..a45ced9 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -241,7 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
+ 	if (sb->s_flags & MS_RDONLY)
+ 		return ERR_PTR(-EROFS);
+ 
+-	vfs_check_frozen(sb, SB_FREEZE_WRITE);
++	vfs_check_frozen(sb, SB_FREEZE_TRANS);
+ 	/* Special case here: if the journal has aborted behind our
+ 	 * backs (eg. EIO in the commit thread), then we still need to
+ 	 * take the FS itself readonly cleanly. */
+@@ -3491,7 +3491,7 @@ int ext4_force_commit(struct super_block *sb)
+ 
+ 	journal = EXT4_SB(sb)->s_journal;
+ 	if (journal) {
+-		vfs_check_frozen(sb, SB_FREEZE_WRITE);
++		vfs_check_frozen(sb, SB_FREEZE_TRANS);
+ 		ret = ext4_journal_force_commit(journal);
+ 	}
+ 
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index fa96bbb..2d7f165 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -86,46 +86,25 @@ struct ea_buffer {
+ #define EA_MALLOC	0x0008
+ 
+ 
++static int is_known_namespace(const char *name)
++{
++	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
++	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
++	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
++	    strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++		return false;
++
++	return true;
++}
++
+ /*
+  * These three routines are used to recognize on-disk extended attributes
+  * that are in a recognized namespace.  If the attribute is not recognized,
+  * "os2." is prepended to the name
+  */
+-static inline int is_os2_xattr(struct jfs_ea *ea)
++static int is_os2_xattr(struct jfs_ea *ea)
+ {
+-	/*
+-	 * Check for "system."
+-	 */
+-	if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
+-	    !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+-		return false;
+-	/*
+-	 * Check for "user."
+-	 */
+-	if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
+-	    !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+-		return false;
+-	/*
+-	 * Check for "security."
+-	 */
+-	if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
+-	    !strncmp(ea->name, XATTR_SECURITY_PREFIX,
+-		     XATTR_SECURITY_PREFIX_LEN))
+-		return false;
+-	/*
+-	 * Check for "trusted."
+-	 */
+-	if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
+-	    !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+-		return false;
+-	/*
+-	 * Add any other valid namespace prefixes here
+-	 */
+-
+-	/*
+-	 * We assume it's OS/2's flat namespace
+-	 */
+-	return true;
++	return !is_known_namespace(ea->name);
+ }
+ 
+ static inline int name_size(struct jfs_ea *ea)
+@@ -764,13 +743,23 @@ static int can_set_xattr(struct inode *inode, const char *name,
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return can_set_system_xattr(inode, name, value, value_len);
+ 
++	if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
++		/*
++		 * This makes sure that we aren't trying to set an
++		 * attribute in a different namespace by prefixing it
++		 * with "os2."
++		 */
++		if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
++				return -EOPNOTSUPP;
++		return 0;
++	}
++
+ 	/*
+ 	 * Don't allow setting an attribute in an unknown namespace.
+ 	 */
+ 	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
+ 	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+-	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+-	    strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))
++	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ 		return -EOPNOTSUPP;
+ 
+ 	return 0;
+@@ -952,19 +941,8 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 	int xattr_size;
+ 	ssize_t size;
+ 	int namelen = strlen(name);
+-	char *os2name = NULL;
+ 	char *value;
+ 
+-	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+-		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
+-				  GFP_KERNEL);
+-		if (!os2name)
+-			return -ENOMEM;
+-		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
+-		name = os2name;
+-		namelen -= XATTR_OS2_PREFIX_LEN;
+-	}
+-
+ 	down_read(&JFS_IP(inode)->xattr_sem);
+ 
+ 	xattr_size = ea_get(inode, &ea_buf, 0);
+@@ -1002,8 +980,6 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+       out:
+ 	up_read(&JFS_IP(inode)->xattr_sem);
+ 
+-	kfree(os2name);
+-
+ 	return size;
+ }
+ 
+@@ -1012,6 +988,19 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
+ {
+ 	int err;
+ 
++	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
++		/*
++		 * skip past "os2." prefix
++		 */
++		name += XATTR_OS2_PREFIX_LEN;
++		/*
++		 * Don't allow retrieving properly prefixed attributes
++		 * by prepending them with "os2."
++		 */
++		if (is_known_namespace(name))
++			return -EOPNOTSUPP;
++	}
++
+ 	err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
+ 
+ 	return err;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 88058de..32dcd24 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1984,7 +1984,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+ 	if (flags & MS_RDONLY)
+ 		mnt_flags |= MNT_READONLY;
+ 
+-	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
++	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+ 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ 		   MS_STRICTATIME);
+ 
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index f329849c..1c5a6ad 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -88,6 +88,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
+ 		 err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
+ 		 err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
+ 		 err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
++		 err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
+ 		break;
+ 	case __SI_POLL:
+ 		err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
+@@ -111,6 +112,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
+ 		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
+ 		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
+ 		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
++		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
+ 		break;
+ 	default:
+ 		/*
+diff --git a/fs/super.c b/fs/super.c
+index 938119a..c7765bd 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -305,8 +305,13 @@ retry:
+ 			if (s) {
+ 				up_write(&s->s_umount);
+ 				destroy_super(s);
++				s = NULL;
+ 			}
+ 			down_write(&old->s_umount);
++			if (unlikely(!(old->s_flags & MS_BORN))) {
++				deactivate_locked_super(old);
++				goto retry;
++			}
+ 			return old;
+ 		}
+ 	}
+@@ -909,6 +914,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
+ 		goto out_free_secdata;
+ 	BUG_ON(!mnt->mnt_sb);
+ 	WARN_ON(!mnt->mnt_sb->s_bdi);
++	mnt->mnt_sb->s_flags |= MS_BORN;
+ 
+ 	error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
+ 	if (error)
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 2fc8e14..9aa9bca 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -276,6 +276,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
+ 		  $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
+ unifdef-y += kvm_para.h
+ endif
++unifdef-y += l2tp.h
+ unifdef-y += llc.h
+ unifdef-y += loop.h
+ unifdef-y += lp.h
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 68ca1b0..f0f447a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -145,11 +145,11 @@ struct inodes_stat_t {
+  *
+  */
+ #define RW_MASK		1
+-#define RWA_MASK	2
++#define RWA_MASK		16
+ #define READ 0
+ #define WRITE 1
+-#define READA 2		/* read-ahead  - don't block if no resources */
+-#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
++#define READA			16 /* readahead - don't block if no resources */
++#define SWRITE			17 /* for ll_rw_block(), wait for buffer lock */
+ #define READ_SYNC	(READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+ #define READ_META	(READ | (1 << BIO_RW_META))
+ #define WRITE_SYNC_PLUG	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
+@@ -209,6 +209,7 @@ struct inodes_stat_t {
+ #define MS_KERNMOUNT	(1<<22) /* this is a kern_mount call */
+ #define MS_I_VERSION	(1<<23) /* Update inode I_version field */
+ #define MS_STRICTATIME	(1<<24) /* Always perform atime updates */
++#define MS_BORN		(1<<29)
+ #define MS_ACTIVE	(1<<30)
+ #define MS_NOUSER	(1<<31)
+ 
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index c233113..a0384a4 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -53,16 +53,21 @@
+  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
+  *                Used by threaded interrupts which need to keep the
+  *                irq line disabled until the threaded handler has been run.
++ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
++ *
+  */
+ #define IRQF_DISABLED		0x00000020
+ #define IRQF_SAMPLE_RANDOM	0x00000040
+ #define IRQF_SHARED		0x00000080
+ #define IRQF_PROBE_SHARED	0x00000100
+-#define IRQF_TIMER		0x00000200
++#define __IRQF_TIMER		0x00000200
+ #define IRQF_PERCPU		0x00000400
+ #define IRQF_NOBALANCING	0x00000800
+ #define IRQF_IRQPOLL		0x00001000
+ #define IRQF_ONESHOT		0x00002000
++#define IRQF_NO_SUSPEND		0x00004000
++
++#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
+ 
+ /*
+  * Bits used by threaded handlers:
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 22c2abb..b2f1a4d 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -210,7 +210,7 @@ static inline int notifier_to_errno(int ret)
+ #define NETDEV_POST_INIT	0x0010
+ #define NETDEV_UNREGISTER_BATCH 0x0011
+ #define NETDEV_BONDING_DESLAVE  0x0012
+-#define NETDEV_NOTIFY_PEERS	0x0012
++#define NETDEV_NOTIFY_PEERS	0x0013
+ 
+ #define SYS_DOWN	0x0001	/* Notify of system down */
+ #define SYS_RESTART	SYS_DOWN
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 0478888..5ee397e 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -274,17 +274,11 @@ extern cpumask_var_t nohz_cpu_mask;
+ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+ extern int select_nohz_load_balancer(int cpu);
+ extern int get_nohz_load_balancer(void);
+-extern int nohz_ratelimit(int cpu);
+ #else
+ static inline int select_nohz_load_balancer(int cpu)
+ {
+ 	return 0;
+ }
+-
+-static inline int nohz_ratelimit(int cpu)
+-{
+-	return 0;
+-}
+ #endif
+ 
+ /*
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 16b7f33..3e93de7 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -26,4 +26,8 @@
+    and can't handle talking to these interfaces */
+ #define USB_QUIRK_HONOR_BNUMINTERFACES	0x00000020
+ 
++/* device needs a pause during initialization, after we read the device
++   descriptor */
++#define USB_QUIRK_DELAY_INIT		0x00000040
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index e149748..c3003e9 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -216,7 +216,7 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+ {
+ 	if (suspend) {
+-		if (!desc->action || (desc->action->flags & IRQF_TIMER))
++		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
+ 			return;
+ 		desc->status |= IRQ_SUSPENDED;
+ 	}
+diff --git a/kernel/sched.c b/kernel/sched.c
+index f52a880..63b4a14 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1232,16 +1232,6 @@ void wake_up_idle_cpu(int cpu)
+ 		smp_send_reschedule(cpu);
+ }
+ 
+-int nohz_ratelimit(int cpu)
+-{
+-	struct rq *rq = cpu_rq(cpu);
+-	u64 diff = rq->clock - rq->nohz_stamp;
+-
+-	rq->nohz_stamp = rq->clock;
+-
+-	return diff < (NSEC_PER_SEC / HZ) >> 1;
+-}
+-
+ #endif /* CONFIG_NO_HZ */
+ 
+ static u64 sched_avg_period(void)
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 813993b..f898af6 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -325,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle)
+ 	} while (read_seqretry(&xtime_lock, seq));
+ 
+ 	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+-	    arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) {
++	    arch_needs_cpu(cpu)) {
+ 		next_jiffies = last_jiffies + 1;
+ 		delta_jiffies = 1;
+ 	} else {
+diff --git a/mm/memory.c b/mm/memory.c
+index bde42c6..aaaedbd 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2760,6 +2760,26 @@ out_release:
+ }
+ 
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++	address &= PAGE_MASK;
++	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++		address -= PAGE_SIZE;
++		if (find_vma(vma->vm_mm, address) != vma)
++			return -ENOMEM;
++
++		expand_stack(vma, address);
++	}
++	return 0;
++}
++
++/*
+  * We enter with non-exclusive mmap_sem (to exclude vma changes,
+  * but allow concurrent faults), and pte mapped but not yet locked.
+  * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2772,6 +2792,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	spinlock_t *ptl;
+ 	pte_t entry;
+ 
++	if (check_stack_guard_page(vma, address) < 0) {
++		pte_unmap(page_table);
++		return VM_FAULT_SIGBUS;
++	}
++
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 03aa2d5..f08d165 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -318,8 +318,10 @@ checks:
+ 	if (offset > si->highest_bit)
+ 		scan_base = offset = si->lowest_bit;
+ 
+-	/* reuse swap entry of cache-only swap if not busy. */
+-	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
++	/* reuse swap entry of cache-only swap if not hibernation. */
++	if (vm_swap_full()
++		&& usage == SWAP_HAS_CACHE
++		&& si->swap_map[offset] == SWAP_HAS_CACHE) {
+ 		int swap_was_freed;
+ 		spin_unlock(&swap_lock);
+ 		swap_was_freed = __try_to_reclaim_swap(si, offset);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 596ea2f..aa7cc51 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5183,6 +5183,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
+ 
+ static struct snd_pci_quirk beep_white_list[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
++	SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
+ 	{}
+ };
+ 

Added: dists/trunk/linux-2.6/debian/patches/series/1~experimental.2
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.2	Sat Aug 14 11:27:01 2010	(r16145)
@@ -0,0 +1,2 @@
++ bugfix/all/stable/2.6.35.1.patch
++ bugfix/all/stable/2.6.35.2.patch


