[kernel] r14233 - in dists/sid/linux-2.6/debian: . patches/bugfix/all/stable patches/series
Bastian Blank
waldi at alioth.debian.org
Tue Sep 15 12:19:12 UTC 2009
Author: waldi
Date: Tue Sep 15 12:19:09 2009
New Revision: 14233
Log:
* Add stable release 2.6.30.5.
* Add stable release 2.6.30.6.
* debian/changelog: Update.
* debian/patches/bugfix/all/stable/2.6.30.5.patch,
debian/patches/bugfix/all/stable/2.6.30.6.patch: Add.
* debian/patches/series/7
- Add new patches.
- Remove already applied variants.
- Reapply old ABI breaker.
Added:
dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.5.patch
dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.6.patch
Modified:
dists/sid/linux-2.6/debian/changelog
dists/sid/linux-2.6/debian/patches/series/7
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Mon Sep 14 10:20:45 2009 (r14232)
+++ dists/sid/linux-2.6/debian/changelog Tue Sep 15 12:19:09 2009 (r14233)
@@ -17,6 +17,10 @@
* intel-agp: Fix cache flushing on i8xx chipsets, avoiding graphics
corruption and GPU lock-ups (Closes: #541307)
+ [ Bastian Blank ]
+ * Add stable release 2.6.30.5.
+ * Add stable release 2.6.30.6.
+
-- Martin Michlmayr <tbm at cyrius.com> Sun, 23 Aug 2009 22:42:34 +0900
linux-2.6 (2.6.30-6) unstable; urgency=high
Added: dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.5.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.5.patch Tue Sep 15 12:19:09 2009 (r14233)
@@ -0,0 +1,2676 @@
+diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
+index 7da84fd..261d10c 100644
+--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
++++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
+@@ -167,7 +167,7 @@
+ interrupt-parent = <&ipic>;
+ interrupts = <39 0x8>;
+ phy_type = "ulpi";
+- port1;
++ port0;
+ };
+ /* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
+ usb at 23000 {
+diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
+index 1ae38f0..e540d44 100644
+--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
++++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
+@@ -156,7 +156,7 @@
+ interrupt-parent = <&ipic>;
+ interrupts = <39 0x8>;
+ phy_type = "ulpi";
+- port1;
++ port0;
+ };
+
+ usb at 23000 {
+diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
+index d9f0a23..a667fe7 100644
+--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
++++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
+@@ -153,7 +153,7 @@
+ interrupt-parent = <&ipic>;
+ interrupts = <39 0x8>;
+ phy_type = "ulpi";
+- port1;
++ port0;
+ };
+ /* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
+ usb at 23000 {
+diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
+index a36dbbc..c7e1c4b 100644
+--- a/arch/powerpc/boot/dts/sbc8349.dts
++++ b/arch/powerpc/boot/dts/sbc8349.dts
+@@ -144,7 +144,7 @@
+ interrupt-parent = <&ipic>;
+ interrupts = <39 0x8>;
+ phy_type = "ulpi";
+- port1;
++ port0;
+ };
+ /* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
+ usb at 23000 {
+diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
+index 83cfe51..d1dc5b0 100644
+--- a/arch/powerpc/platforms/83xx/mpc83xx.h
++++ b/arch/powerpc/platforms/83xx/mpc83xx.h
+@@ -22,8 +22,8 @@
+ /* system i/o configuration register low */
+ #define MPC83XX_SICRL_OFFS 0x114
+ #define MPC834X_SICRL_USB_MASK 0x60000000
+-#define MPC834X_SICRL_USB0 0x40000000
+-#define MPC834X_SICRL_USB1 0x20000000
++#define MPC834X_SICRL_USB0 0x20000000
++#define MPC834X_SICRL_USB1 0x40000000
+ #define MPC831X_SICRL_USB_MASK 0x00000c00
+ #define MPC831X_SICRL_USB_ULPI 0x00000800
+ #define MPC8315_SICRL_USB_MASK 0x000000fc
+diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
+index 11e1fac..3ba4bb7 100644
+--- a/arch/powerpc/platforms/83xx/usb.c
++++ b/arch/powerpc/platforms/83xx/usb.c
+@@ -47,25 +47,25 @@ int mpc834x_usb_cfg(void)
+ sccr |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */
+
+ prop = of_get_property(np, "phy_type", NULL);
++ port1_is_dr = 1;
+ if (prop && (!strcmp(prop, "utmi") ||
+ !strcmp(prop, "utmi_wide"))) {
+ sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1;
+ sicrh |= MPC834X_SICRH_USB_UTMI;
+- port1_is_dr = 1;
++ port0_is_dr = 1;
+ } else if (prop && !strcmp(prop, "serial")) {
+ dr_mode = of_get_property(np, "dr_mode", NULL);
+ if (dr_mode && !strcmp(dr_mode, "otg")) {
+ sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1;
+- port1_is_dr = 1;
++ port0_is_dr = 1;
+ } else {
+- sicrl |= MPC834X_SICRL_USB0;
++ sicrl |= MPC834X_SICRL_USB1;
+ }
+ } else if (prop && !strcmp(prop, "ulpi")) {
+- sicrl |= MPC834X_SICRL_USB0;
++ sicrl |= MPC834X_SICRL_USB1;
+ } else {
+ printk(KERN_WARNING "834x USB PHY type not supported\n");
+ }
+- port0_is_dr = 1;
+ of_node_put(np);
+ }
+ np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph");
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index 2bdab21..c6ccbe7 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void)
+ {
+ unsigned long flags;
+
++ /*
++ * Note: this needs to be "=r" not "=rm", because we have the
++ * stack offset from what gcc expects at the time the "pop" is
++ * executed, and so a memory reference with respect to the stack
++ * would end up using the wrong address.
++ */
+ asm volatile("# __raw_save_flags\n\t"
+ "pushf ; pop %0"
+- : "=g" (flags)
++ : "=r" (flags)
+ : /* no input */
+ : "memory");
+
+diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
+index 95deb9f..c6a61d2 100644
+--- a/arch/x86/kernel/vmi_32.c
++++ b/arch/x86/kernel/vmi_32.c
+@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
+ ap.ds = __USER_DS;
+ ap.es = __USER_DS;
+ ap.fs = __KERNEL_PERCPU;
+- ap.gs = 0;
++ ap.gs = __KERNEL_STACK_CANARY;
+
+ ap.eflags = 0;
+
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index e17efed..133bdba 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -590,9 +590,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ unsigned int level;
+ pte_t *kpte, old_pte;
+
+- if (cpa->flags & CPA_PAGES_ARRAY)
+- address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+- else if (cpa->flags & CPA_ARRAY)
++ if (cpa->flags & CPA_PAGES_ARRAY) {
++ struct page *page = cpa->pages[cpa->curpage];
++ if (unlikely(PageHighMem(page)))
++ return 0;
++ address = (unsigned long)page_address(page);
++ } else if (cpa->flags & CPA_ARRAY)
+ address = cpa->vaddr[cpa->curpage];
+ else
+ address = *cpa->vaddr;
+@@ -695,9 +698,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
+ * No need to redo, when the primary call touched the direct
+ * mapping already:
+ */
+- if (cpa->flags & CPA_PAGES_ARRAY)
+- vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+- else if (cpa->flags & CPA_ARRAY)
++ if (cpa->flags & CPA_PAGES_ARRAY) {
++ struct page *page = cpa->pages[cpa->curpage];
++ if (unlikely(PageHighMem(page)))
++ return 0;
++ vaddr = (unsigned long)page_address(page);
++ } else if (cpa->flags & CPA_ARRAY)
+ vaddr = cpa->vaddr[cpa->curpage];
+ else
+ vaddr = *cpa->vaddr;
+@@ -996,12 +1002,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
+ int _set_memory_wc(unsigned long addr, int numpages)
+ {
+ int ret;
++ unsigned long addr_copy = addr;
++
+ ret = change_page_attr_set(&addr, numpages,
+ __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+-
+ if (!ret) {
+- ret = change_page_attr_set(&addr, numpages,
+- __pgprot(_PAGE_CACHE_WC), 0);
++ ret = change_page_attr_set_clr(&addr_copy, numpages,
++ __pgprot(_PAGE_CACHE_WC),
++ __pgprot(_PAGE_CACHE_MASK),
++ 0, 0, NULL);
+ }
+ return ret;
+ }
+@@ -1118,7 +1127,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
+ int free_idx;
+
+ for (i = 0; i < addrinarray; i++) {
+- start = (unsigned long)page_address(pages[i]);
++ if (PageHighMem(pages[i]))
++ continue;
++ start = page_to_pfn(pages[i]) << PAGE_SHIFT;
+ end = start + PAGE_SIZE;
+ if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+ goto err_out;
+@@ -1131,7 +1142,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
+ err_out:
+ free_idx = i;
+ for (i = 0; i < free_idx; i++) {
+- start = (unsigned long)page_address(pages[i]);
++ if (PageHighMem(pages[i]))
++ continue;
++ start = page_to_pfn(pages[i]) << PAGE_SHIFT;
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+@@ -1160,7 +1173,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
+ return retval;
+
+ for (i = 0; i < addrinarray; i++) {
+- start = (unsigned long)page_address(pages[i]);
++ if (PageHighMem(pages[i]))
++ continue;
++ start = page_to_pfn(pages[i]) << PAGE_SHIFT;
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+diff --git a/block/Kconfig b/block/Kconfig
+index e7d1278..f817911 100644
+--- a/block/Kconfig
++++ b/block/Kconfig
+@@ -45,9 +45,9 @@ config LBD
+ If unsure, say N.
+
+ config BLK_DEV_BSG
+- bool "Block layer SG support v4 (EXPERIMENTAL)"
+- depends on EXPERIMENTAL
+- ---help---
++ bool "Block layer SG support v4"
++ default y
++ help
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any block device.
+
+@@ -57,7 +57,10 @@ config BLK_DEV_BSG
+ protocols (e.g. Task Management Functions and SMP in Serial
+ Attached SCSI).
+
+- If unsure, say N.
++ This option is required by recent UDEV versions to properly
++ access device serial numbers, etc.
++
++ If unsure, say Y.
+
+ config BLK_DEV_INTEGRITY
+ bool "Block layer data integrity support"
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 01574a0..42159a2 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -397,6 +397,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ },
+ },
+ {
++ .callback = init_set_sci_en_on_resume,
++ .ident = "Hewlett-Packard HP G7000 Notebook PC",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
++ },
++ },
++ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Panasonic CF51-2L",
+ .matches = {
+diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
+index 2406c2c..d4ec605 100644
+--- a/drivers/edac/x38_edac.c
++++ b/drivers/edac/x38_edac.c
+@@ -30,7 +30,7 @@
+ /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
+
+ #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+-#define X38_MCHBAR_HIGH 0x4b
++#define X38_MCHBAR_HIGH 0x4c
+ #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+ #define X38_MMR_WINDOW_SIZE 16384
+
+diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
+index 2bcf515..9b0a094 100644
+--- a/drivers/firewire/fw-sbp2.c
++++ b/drivers/firewire/fw-sbp2.c
+@@ -190,6 +190,12 @@ struct sbp2_target {
+ #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
+
+ /*
++ * There is no transport protocol limit to the CDB length, but we implement
++ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
++ */
++#define SBP2_MAX_CDB_SIZE 16
++
++/*
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+@@ -301,7 +307,7 @@ struct sbp2_command_orb {
+ struct sbp2_pointer next;
+ struct sbp2_pointer data_descriptor;
+ __be32 misc;
+- u8 command_block[12];
++ u8 command_block[SBP2_MAX_CDB_SIZE];
+ } request;
+ struct scsi_cmnd *cmd;
+ scsi_done_fn_t done;
+@@ -1135,6 +1141,8 @@ static int sbp2_probe(struct device *dev)
+ if (fw_device_enable_phys_dma(device) < 0)
+ goto fail_shost_put;
+
++ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
++
+ if (scsi_add_host(shost, &unit->device) < 0)
+ goto fail_shost_put;
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 0ccb63e..bb58797 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1153,8 +1153,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ #endif
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+- if (IS_GM45(dev))
++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++ if (IS_G4X(dev)) {
++ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
++ }
+
+ i915_gem_load(dev);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c431fa5..fcaa544 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -285,6 +285,13 @@ typedef struct drm_i915_private {
+ u8 saveDACMASK;
+ u8 saveCR[37];
+ uint64_t saveFENCE[16];
++ u32 saveCURACNTR;
++ u32 saveCURAPOS;
++ u32 saveCURABASE;
++ u32 saveCURBCNTR;
++ u32 saveCURBPOS;
++ u32 saveCURBBASE;
++ u32 saveCURSIZE;
+
+ struct {
+ struct drm_mm gtt_space;
+@@ -642,6 +649,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
+ void i915_gem_free_all_phys_object(struct drm_device *dev);
+ int i915_gem_object_get_pages(struct drm_gem_object *obj);
+ void i915_gem_object_put_pages(struct drm_gem_object *obj);
++void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+
+ /* i915_gem_tiling.c */
+ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 39f5c65..91ad93d 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -4154,6 +4154,7 @@ i915_gem_lastclose(struct drm_device *dev)
+ void
+ i915_gem_load(struct drm_device *dev)
+ {
++ int i;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ spin_lock_init(&dev_priv->mm.active_list_lock);
+@@ -4173,6 +4174,18 @@ i915_gem_load(struct drm_device *dev)
+ else
+ dev_priv->num_fence_regs = 8;
+
++ /* Initialize fence registers to zero */
++ if (IS_I965G(dev)) {
++ for (i = 0; i < 16; i++)
++ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
++ } else {
++ for (i = 0; i < 8; i++)
++ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
++ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
++ for (i = 0; i < 8; i++)
++ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
++ }
++
+ i915_gem_detect_bit_6_swizzle(dev);
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 98bb4c8..8ee0969 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -572,8 +572,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+-
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index ce8a213..a98e283 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev)
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
++ /* Cursor state */
++ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
++ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
++ dev_priv->saveCURABASE = I915_READ(CURABASE);
++ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
++ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
++ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
++ if (!IS_I9XX(dev))
++ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
++
+ /* CRT state */
+ dev_priv->saveADPA = I915_READ(ADPA);
+
+@@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev)
+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+
++ /* Cursor state */
++ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
++ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
++ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
++ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
++ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
++ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
++ if (!IS_I9XX(dev))
++ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
++
+ /* CRT state */
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 9d78cff..cf2a971 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -185,10 +185,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+
+ if (dev_priv->lvds_use_ssc) {
+- if (IS_I855(dev_priv->dev))
+- dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+- else
+- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
++ if (IS_I85X(dev_priv->dev))
++ dev_priv->lvds_ssc_freq =
++ general->ssc_freq ? 66 : 48;
++ else
++ dev_priv->lvds_ssc_freq =
++ general->ssc_freq ? 100 : 96;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c9d6f10..5469f2c 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1590,6 +1590,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+ }
+
+ encoder->crtc = crtc;
++ intel_output->base.encoder = encoder;
+ intel_output->load_detect_temp = true;
+
+ intel_crtc = to_intel_crtc(crtc);
+@@ -1625,6 +1626,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
+
+ if (intel_output->load_detect_temp) {
+ encoder->crtc = NULL;
++ intel_output->base.encoder = NULL;
+ intel_output->load_detect_temp = false;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ drm_helper_disable_unused_functions(dev);
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index e4652dc..7a66b91 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -857,9 +857,15 @@ void intelfb_restore(void)
+ drm_crtc_helper_set_config(&kernelfb_mode);
+ }
+
++static void intelfb_restore_work_fn(struct work_struct *ignored)
++{
++ intelfb_restore();
++}
++static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
++
+ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
+ {
+- intelfb_restore();
++ schedule_work(&intelfb_restore_work);
+ }
+
+ static struct sysrq_key_op sysrq_intelfb_restore_op = {
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 53cccfa..1e06379 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -420,8 +420,21 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+ },
+ },
+-
+- /* FIXME: add a check for the Aopen Mini PC */
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "AOpen Mini PC",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Aopen i945GTt-VFA",
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index bff0103..fe4fa29 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
+ sensor->data = data;
+ sensor->id = flags->integer.value;
+ sensor->limit1 = limit1->integer.value;
+- sensor->limit2 = limit2->integer.value;
++ if (data->old_interface)
++ sensor->limit2 = limit2->integer.value;
++ else
++ /* The upper limit is expressed as delta from lower limit */
++ sensor->limit2 = sensor->limit1 + limit2->integer.value;
+
+ snprintf(sensor->input_attr_name, ATTR_NAME_SIZE,
+ "%s%d_input", base_name, start + *num);
+diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
+index a92dbb9..ba75bfc 100644
+--- a/drivers/hwmon/smsc47m1.c
++++ b/drivers/hwmon/smsc47m1.c
+@@ -86,6 +86,7 @@ superio_exit(void)
+ #define SUPERIO_REG_ACT 0x30
+ #define SUPERIO_REG_BASE 0x60
+ #define SUPERIO_REG_DEVID 0x20
++#define SUPERIO_REG_DEVREV 0x21
+
+ /* Logical device registers */
+
+@@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr,
+ * The LPC47M292 (device id 0x6B) is somewhat compatible, but it
+ * supports a 3rd fan, and the pin configuration registers are
+ * unfortunately different.
++ * The LPC47M233 has the same device id (0x6B) but is not compatible.
++ * We check the high bit of the device revision register to
++ * differentiate them.
+ */
+ switch (val) {
+ case 0x51:
+@@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr,
+ sio_data->type = smsc47m1;
+ break;
+ case 0x6B:
++ if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
++ pr_debug(DRVNAME ": "
++ "Found SMSC LPC47M233, unsupported\n");
++ superio_exit();
++ return -ENODEV;
++ }
++
+ pr_info(DRVNAME ": Found SMSC LPC47M292\n");
+ sio_data->type = smsc47m2;
+ break;
+diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
+index 1a9cc13..b96f302 100644
+--- a/drivers/i2c/chips/tsl2550.c
++++ b/drivers/i2c/chips/tsl2550.c
+@@ -27,7 +27,7 @@
+ #include <linux/delay.h>
+
+ #define TSL2550_DRV_NAME "tsl2550"
+-#define DRIVER_VERSION "1.1.1"
++#define DRIVER_VERSION "1.1.2"
+
+ /*
+ * Defines
+@@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
+ u8 r = 128;
+
+ /* Avoid division by 0 and count 1 cannot be greater than count 0 */
+- if (c0 && (c1 <= c0))
+- r = c1 * 128 / c0;
++ if (c1 <= c0)
++ if (c0) {
++ r = c1 * 128 / c0;
++
++ /* Calculate LUX */
++ lux = ((c0 - c1) * ratio_lut[r]) / 256;
++ } else
++ lux = 0;
+ else
+- return -1;
+-
+- /* Calculate LUX */
+- lux = ((c0 - c1) * ratio_lut[r]) / 256;
++ return -EAGAIN;
+
+ /* LUX range check */
+ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index a9fbe2c..08f0fe0 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -413,6 +413,7 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->special = cmd;
++ cmd->rq = rq;
+ }
+
+ ide_devset_get(multcount, mult_count);
+diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
+index a0b8cab..3194e1d 100644
+--- a/drivers/ide/ide-dma.c
++++ b/drivers/ide/ide-dma.c
+@@ -362,9 +362,6 @@ static int ide_tune_dma(ide_drive_t *drive)
+ if (__ide_dma_bad_drive(drive))
+ return 0;
+
+- if (ide_id_dma_bug(drive))
+- return 0;
+-
+ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ return config_drive_for_dma(drive);
+
+@@ -395,24 +392,6 @@ static int ide_dma_check(ide_drive_t *drive)
+ return -1;
+ }
+
+-int ide_id_dma_bug(ide_drive_t *drive)
+-{
+- u16 *id = drive->id;
+-
+- if (id[ATA_ID_FIELD_VALID] & 4) {
+- if ((id[ATA_ID_UDMA_MODES] >> 8) &&
+- (id[ATA_ID_MWDMA_MODES] >> 8))
+- goto err_out;
+- } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+- (id[ATA_ID_SWDMA_MODES] >> 8))
+- goto err_out;
+-
+- return 0;
+-err_out:
+- printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
+- return 1;
+-}
+-
+ int ide_set_dma(ide_drive_t *drive)
+ {
+ int rc;
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index 06fe002..dd5c557 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -306,9 +306,6 @@ int ide_driveid_update(ide_drive_t *drive)
+
+ kfree(id);
+
+- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
+- ide_dma_off(drive);
+-
+ return 1;
+ out_err:
+ SELECT_MASK(drive, 0);
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index c895ed5..34846cc 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -830,6 +830,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
+ return j;
+ }
+
++static void ide_host_enable_irqs(struct ide_host *host)
++{
++ ide_hwif_t *hwif;
++ int i;
++
++ ide_host_for_each_port(i, hwif, host) {
++ if (hwif == NULL)
++ continue;
++
++ /* clear any pending IRQs */
++ hwif->tp_ops->read_status(hwif);
++
++ /* unmask IRQs */
++ if (hwif->io_ports.ctl_addr)
++ hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
++ }
++}
++
+ /*
+ * This routine sets up the IRQ for an IDE interface.
+ */
+@@ -843,9 +861,6 @@ static int init_irq (ide_hwif_t *hwif)
+ if (irq_handler == NULL)
+ irq_handler = ide_intr;
+
+- if (io_ports->ctl_addr)
+- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
+-
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
+
+@@ -1389,6 +1404,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
+ ide_port_tune_devices(hwif);
+ }
+
++ ide_host_enable_irqs(host);
++
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
+index a51ab23..f599f49 100644
+--- a/drivers/ieee1394/sbp2.c
++++ b/drivers/ieee1394/sbp2.c
+@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
+ }
+
+ shost->hostdata[0] = (unsigned long)lu;
++ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
+
+ if (!scsi_add_host(shost, &ud->device)) {
+ lu->shost = shost;
+diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
+index c5036f1..64a3a66 100644
+--- a/drivers/ieee1394/sbp2.h
++++ b/drivers/ieee1394/sbp2.h
+@@ -25,6 +25,12 @@
+ #define SBP2_DEVICE_NAME "sbp2"
+
+ /*
++ * There is no transport protocol limit to the CDB length, but we implement
++ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
++ */
++#define SBP2_MAX_CDB_SIZE 16
++
++/*
+ * SBP-2 specific definitions
+ */
+
+@@ -51,7 +57,7 @@ struct sbp2_command_orb {
+ u32 data_descriptor_hi;
+ u32 data_descriptor_lo;
+ u32 misc;
+- u8 cdb[12];
++ u8 cdb[SBP2_MAX_CDB_SIZE];
+ } __attribute__((packed));
+
+ #define SBP2_LOGIN_REQUEST 0x0
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index eb1b73f..751a315 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1410,8 +1410,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+ if (rdev2->desc_nr+1 > max_dev)
+ max_dev = rdev2->desc_nr+1;
+
+- if (max_dev > le32_to_cpu(sb->max_dev))
++ if (max_dev > le32_to_cpu(sb->max_dev)) {
++ int bmask;
+ sb->max_dev = cpu_to_le32(max_dev);
++ rdev->sb_size = max_dev * 2 + 256;
++ bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
++ if (rdev->sb_size & bmask)
++ rdev->sb_size = (rdev->sb_size | bmask) + 1;
++ }
+ for (i=0; i<max_dev;i++)
+ sb->dev_roles[i] = cpu_to_le16(0xfffe);
+
+@@ -2680,6 +2686,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
+ ssize_t rv = len;
+ struct mdk_personality *pers;
+ void *priv;
++ mdk_rdev_t *rdev;
+
+ if (mddev->pers == NULL) {
+ if (len == 0)
+@@ -2759,6 +2766,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
+ mddev_suspend(mddev);
+ mddev->pers->stop(mddev);
+ module_put(mddev->pers->owner);
++ /* Invalidate devices that are now superfluous */
++ list_for_each_entry(rdev, &mddev->disks, same_set)
++ if (rdev->raid_disk >= mddev->raid_disks) {
++ rdev->raid_disk = -1;
++ clear_bit(In_sync, &rdev->flags);
++ }
+ mddev->pers = pers;
+ mddev->private = priv;
+ strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 1f98ea4..75e0ecc 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -4310,6 +4310,15 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+ return sectors * (raid_disks - conf->max_degraded);
+ }
+
++static void free_conf(raid5_conf_t *conf)
++{
++ shrink_stripes(conf);
++ safe_put_page(conf->spare_page);
++ kfree(conf->disks);
++ kfree(conf->stripe_hashtbl);
++ kfree(conf);
++}
++
+ static raid5_conf_t *setup_conf(mddev_t *mddev)
+ {
+ raid5_conf_t *conf;
+@@ -4439,11 +4448,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
+
+ abort:
+ if (conf) {
+- shrink_stripes(conf);
+- safe_put_page(conf->spare_page);
+- kfree(conf->disks);
+- kfree(conf->stripe_hashtbl);
+- kfree(conf);
++ free_conf(conf);
+ return ERR_PTR(-EIO);
+ } else
+ return ERR_PTR(-ENOMEM);
+@@ -4609,12 +4614,8 @@ abort:
+ md_unregister_thread(mddev->thread);
+ mddev->thread = NULL;
+ if (conf) {
+- shrink_stripes(conf);
+ print_raid5_conf(conf);
+- safe_put_page(conf->spare_page);
+- kfree(conf->disks);
+- kfree(conf->stripe_hashtbl);
+- kfree(conf);
++ free_conf(conf);
+ }
+ mddev->private = NULL;
+ printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
+@@ -4629,13 +4630,10 @@ static int stop(mddev_t *mddev)
+
+ md_unregister_thread(mddev->thread);
+ mddev->thread = NULL;
+- shrink_stripes(conf);
+- kfree(conf->stripe_hashtbl);
+ mddev->queue->backing_dev_info.congested_fn = NULL;
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
+- kfree(conf->disks);
+- kfree(conf);
++ free_conf(conf);
+ mddev->private = NULL;
+ return 0;
+ }
+diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
+index e1658ef..2a1120a 100644
+--- a/drivers/net/atl1c/atl1c.h
++++ b/drivers/net/atl1c/atl1c.h
+@@ -188,14 +188,14 @@ struct atl1c_tpd_ext_desc {
+ #define RRS_HDS_TYPE_DATA 2
+
+ #define RRS_IS_NO_HDS_TYPE(flag) \
+- (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0)
++ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
+
+ #define RRS_IS_HDS_HEAD(flag) \
+- (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
++ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
+ RRS_HDS_TYPE_HEAD)
+
+ #define RRS_IS_HDS_DATA(flag) \
+- (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
++ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
+ RRS_HDS_TYPE_DATA)
+
+ /* rrs word 3 bit 0:31 */
+@@ -245,7 +245,7 @@ struct atl1c_tpd_ext_desc {
+ #define RRS_PACKET_TYPE_802_3 1
+ #define RRS_PACKET_TYPE_ETH 0
+ #define RRS_PACKET_IS_ETH(word) \
+- (((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \
++ ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
+ RRS_PACKET_TYPE_ETH)
+ #define RRS_RXD_IS_VALID(word) \
+ ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
+diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
+index 45c5b73..e4afbd6 100644
+--- a/drivers/net/atl1c/atl1c_ethtool.c
++++ b/drivers/net/atl1c/atl1c_ethtool.c
+@@ -271,7 +271,7 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+- WAKE_MCAST | WAKE_BCAST | WAKE_MCAST))
++ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
+index 83a1212..6cf3608 100644
+--- a/drivers/net/atl1c/atl1c_main.c
++++ b/drivers/net/atl1c/atl1c_main.c
+@@ -1701,7 +1701,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+ if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
+ rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
+ RRS_RX_RFD_CNT_MASK;
+- if (unlikely(rfd_num) != 1)
++ if (unlikely(rfd_num != 1))
+ /* TODO support mul rfd*/
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev,
+diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
+index b4bb06f..97e45e0 100644
+--- a/drivers/net/benet/be.h
++++ b/drivers/net/benet/be.h
+@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
+ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+
+ #define BE_MAX_LRO_DESCRIPTORS 16
+-#define BE_MAX_FRAGS_PER_FRAME 16
++#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
+
+ struct be_dma_mem {
+ void *va;
+diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
+index 9592f22..cccc541 100644
+--- a/drivers/net/benet/be_ethtool.c
++++ b/drivers/net/benet/be_ethtool.c
+@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+ return -EINVAL;
+
+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
+- if (adapter->max_rx_coal > MAX_SKB_FRAGS)
+- adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
++ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
++ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
+
+ /* if AIC is being turned on now, start with an EQD of 0 */
+ if (rx_eq->enable_aic == 0 &&
+diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
+index 5c378b5..db19d17 100644
+--- a/drivers/net/benet/be_main.c
++++ b/drivers/net/benet/be_main.c
+@@ -682,7 +682,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
+ {
+ struct be_queue_info *rxq = &adapter->rx_obj.q;
+ struct be_rx_page_info *page_info;
+- u16 rxq_idx, i, num_rcvd;
++ u16 rxq_idx, i, num_rcvd, j;
+ u32 pktsize, hdr_len, curr_frag_len;
+ u8 *start;
+
+@@ -725,22 +725,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
+
+ /* More frags present for this completion */
+ pktsize -= curr_frag_len; /* account for above copied frag */
+- for (i = 1; i < num_rcvd; i++) {
++ for (i = 1, j = 0; i < num_rcvd; i++) {
+ index_inc(&rxq_idx, rxq->len);
+ page_info = get_rx_page_info(adapter, rxq_idx);
+
+ curr_frag_len = min(pktsize, rx_frag_size);
+
+- skb_shinfo(skb)->frags[i].page = page_info->page;
+- skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
+- skb_shinfo(skb)->frags[i].size = curr_frag_len;
++ /* Coalesce all frags from the same physical page in one slot */
++ if (page_info->page_offset == 0) {
++ /* Fresh page */
++ j++;
++ skb_shinfo(skb)->frags[j].page = page_info->page;
++ skb_shinfo(skb)->frags[j].page_offset =
++ page_info->page_offset;
++ skb_shinfo(skb)->frags[j].size = 0;
++ skb_shinfo(skb)->nr_frags++;
++ } else {
++ put_page(page_info->page);
++ }
++
++ skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb->len += curr_frag_len;
+ skb->data_len += curr_frag_len;
+- skb_shinfo(skb)->nr_frags++;
+ pktsize -= curr_frag_len;
+
+ memset(page_info, 0, sizeof(*page_info));
+ }
++ BUG_ON(j > MAX_SKB_FRAGS);
+
+ be_rx_stats_update(adapter, pktsize, num_rcvd);
+ return;
+@@ -803,7 +814,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
+ struct be_queue_info *rxq = &adapter->rx_obj.q;
+ u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
+- u16 i, rxq_idx = 0, vid;
++ u16 i, rxq_idx = 0, vid, j;
+
+ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+@@ -811,20 +822,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
+ rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+
+ remaining = pkt_size;
+- for (i = 0; i < num_rcvd; i++) {
++ for (i = 0, j = -1; i < num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxq_idx);
+
+ curr_frag_len = min(remaining, rx_frag_size);
+
+- rx_frags[i].page = page_info->page;
+- rx_frags[i].page_offset = page_info->page_offset;
+- rx_frags[i].size = curr_frag_len;
+- remaining -= curr_frag_len;
++ /* Coalesce all frags from the same physical page in one slot */
++ if (i == 0 || page_info->page_offset == 0) {
++ /* First frag or Fresh page */
++ j++;
++ rx_frags[j].page = page_info->page;
++ rx_frags[j].page_offset = page_info->page_offset;
++ rx_frags[j].size = 0;
++ } else {
++ put_page(page_info->page);
++ }
++ rx_frags[j].size += curr_frag_len;
+
++ remaining -= curr_frag_len;
+ index_inc(&rxq_idx, rxq->len);
+-
+ memset(page_info, 0, sizeof(*page_info));
+ }
++ BUG_ON(j > MAX_SKB_FRAGS);
+
+ if (likely(!vlanf)) {
+ lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
+diff --git a/drivers/net/e100.c b/drivers/net/e100.c
+index 0f9ee13..014dfb6 100644
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -1762,6 +1762,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
+
+ if (ioread8(&nic->csr->scb.status) & rus_no_res)
+ nic->ru_running = RU_SUSPENDED;
++ pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
++ sizeof(struct rfd),
++ PCI_DMA_BIDIRECTIONAL);
+ return -ENODATA;
+ }
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 6714a9d..bb59ba4 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -2441,7 +2441,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+ if (likely(status >> 16 == (status & 0xffff))) {
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+- skb->csum = status & 0xffff;
++ skb->csum = le16_to_cpu(status);
+ } else {
+ printk(KERN_NOTICE PFX "%s: hardware receive "
+ "checksum problem (status = %#x)\n",
+diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
+index c66b9c3..ca39ace 100644
+--- a/drivers/net/usb/cdc_subset.c
++++ b/drivers/net/usb/cdc_subset.c
+@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = {
+ USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
+ .driver_info = (unsigned long) &blob_info,
+ }, {
+- // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
+- // e.g. Gumstix, current OpenZaurus, ...
+- USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
++ // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
++ // e.g. Gumstix, current OpenZaurus, ... or anything else
++ // that just enables this gadget option.
++ USB_DEVICE (0x0525, 0xa4a2),
+ .driver_info = (unsigned long) &linuxdev_info,
+ },
+ #endif
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index f46ba24..cd20df1 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -3101,27 +3101,10 @@ static ssize_t show_power_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+ {
+ struct iwl_priv *priv = dev_get_drvdata(d);
+- int mode = priv->power_data.user_power_setting;
+- int system = priv->power_data.system_power_setting;
+ int level = priv->power_data.power_mode;
+ char *p = buf;
+
+- switch (system) {
+- case IWL_POWER_SYS_AUTO:
+- p += sprintf(p, "SYSTEM:auto");
+- break;
+- case IWL_POWER_SYS_AC:
+- p += sprintf(p, "SYSTEM:ac");
+- break;
+- case IWL_POWER_SYS_BATTERY:
+- p += sprintf(p, "SYSTEM:battery");
+- break;
+- }
+-
+- p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
+- "fixed" : "auto");
+- p += sprintf(p, "\tINDEX:%d", level);
+- p += sprintf(p, "\n");
++ p += sprintf(p, "%d\n", level);
+ return p - buf + 1;
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index ff4d0e4..f7cdae6 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -4623,27 +4623,10 @@ static ssize_t show_power_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+ {
+ struct iwl_priv *priv = dev_get_drvdata(d);
+- int mode = priv->power_data.user_power_setting;
+- int system = priv->power_data.system_power_setting;
+ int level = priv->power_data.power_mode;
+ char *p = buf;
+
+- switch (system) {
+- case IWL_POWER_SYS_AUTO:
+- p += sprintf(p, "SYSTEM:auto");
+- break;
+- case IWL_POWER_SYS_AC:
+- p += sprintf(p, "SYSTEM:ac");
+- break;
+- case IWL_POWER_SYS_BATTERY:
+- p += sprintf(p, "SYSTEM:battery");
+- break;
+- }
+-
+- p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
+- "fixed" : "auto");
+- p += sprintf(p, "\tINDEX:%d", level);
+- p += sprintf(p, "\n");
++ p += sprintf(p, "%d\n", level);
+ return p - buf + 1;
+ }
+
+diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
+index 685d94e..8c0b26e 100644
+--- a/drivers/parisc/eisa_eeprom.c
++++ b/drivers/parisc/eisa_eeprom.c
+@@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file,
+ ssize_t ret;
+ int i;
+
+- if (*ppos >= HPEE_MAX_LENGTH)
++ if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH)
+ return 0;
+
+ count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 284ebac..0b159b5 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -265,6 +265,7 @@ config THINKPAD_ACPI_DOCK
+ bool "Legacy Docking Station Support"
+ depends on THINKPAD_ACPI
+ depends on ACPI_DOCK=n
++ depends on BROKEN
+ default n
+ ---help---
+ Allows the thinkpad_acpi driver to handle docking station events.
+@@ -278,7 +279,8 @@ config THINKPAD_ACPI_DOCK
+ config THINKPAD_ACPI_BAY
+ bool "Legacy Removable Bay Support"
+ depends on THINKPAD_ACPI
+- default y
++ depends on BROKEN
++ default n
+ ---help---
+ Allows the thinkpad_acpi driver to handle removable bays. It will
+ electrically disable the device in the bay, and also generate
+diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
+index e6ac59c..fe8b74c 100644
+--- a/drivers/scsi/libsas/sas_port.c
++++ b/drivers/scsi/libsas/sas_port.c
+@@ -56,7 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
+ }
+ }
+
+- /* find a port */
++ /* see if the phy should be part of a wide port */
+ spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ port = sas_ha->sas_port[i];
+@@ -69,12 +69,23 @@ static void sas_form_port(struct asd_sas_phy *phy)
+ SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
+ port->id);
+ break;
+- } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) {
+- memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
+- break;
+ }
+ spin_unlock(&port->phy_list_lock);
+ }
++ /* The phy does not match any existing port, create a new one */
++ if (i == sas_ha->num_phys) {
++ for (i = 0; i < sas_ha->num_phys; i++) {
++ port = sas_ha->sas_port[i];
++ spin_lock(&port->phy_list_lock);
++ if (*(u64 *)port->sas_addr == 0
++ && port->num_phys == 0) {
++ memcpy(port->sas_addr, phy->sas_addr,
++ SAS_ADDR_SIZE);
++ break;
++ }
++ spin_unlock(&port->phy_list_lock);
++ }
++ }
+
+ if (i >= sas_ha->num_phys) {
+ printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
+diff --git a/drivers/staging/rt2870/2870_main_dev.c b/drivers/staging/rt2870/2870_main_dev.c
+index 9d59e31..04c764d 100644
+--- a/drivers/staging/rt2870/2870_main_dev.c
++++ b/drivers/staging/rt2870/2870_main_dev.c
+@@ -265,7 +265,7 @@ INT MlmeThread(
+ */
+ DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
+
+- pObj->MLMEThr_task = NULL;
++ pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
+
+ complete_and_exit (&pAd->mlmeComplete, 0);
+ return 0;
+@@ -373,7 +373,7 @@ INT RTUSBCmdThread(
+ */
+ DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n"));
+
+- pObj->RTUSBCmdThr_task = NULL;
++ pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
+
+ complete_and_exit (&pAd->CmdQComplete, 0);
+ return 0;
+@@ -467,7 +467,7 @@ INT TimerQThread(
+ */
+ DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
+
+- pObj->TimerQThr_task = NULL;
++ pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE;
+
+ complete_and_exit(&pAd->TimerQComplete, 0);
+ return 0;
+@@ -944,46 +944,69 @@ VOID RT28xxThreadTerminate(
+ RTUSBCancelPendingIRPs(pAd);
+
+ // Terminate Threads
+- BUG_ON(pObj->TimerQThr_task == NULL);
+- CHECK_PID_LEGALITY(task_pid(pObj->TimerQThr_task))
++ CHECK_PID_LEGALITY(pObj->TimerQThr_pid)
+ {
+ POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie;
+
+- printk(KERN_DEBUG "Terminate the TimerQThr pid=%d!\n",
+- pid_nr(task_pid(pObj->TimerQThr_task)));
++ printk("Terminate the TimerQThr_pid=%d!\n", GET_PID_NUMBER(pObj->TimerQThr_pid));
+ mb();
+ pAd->TimerFunc_kill = 1;
+ mb();
+- kthread_stop(pObj->TimerQThr_task);
+- pObj->TimerQThr_task = NULL;
++ ret = KILL_THREAD_PID(pObj->TimerQThr_pid, SIGTERM, 1);
++ if (ret)
++ {
++ printk(KERN_WARNING "%s: unable to stop TimerQThread, pid=%d, ret=%d!\n",
++ pAd->net_dev->name, GET_PID_NUMBER(pObj->TimerQThr_pid), ret);
++ }
++ else
++ {
++ wait_for_completion(&pAd->TimerQComplete);
++ pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE;
++ }
+ }
+
+- BUG_ON(pObj->MLMEThr_task == NULL);
+- CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task))
++ CHECK_PID_LEGALITY(pObj->MLMEThr_pid)
+ {
+- printk(KERN_DEBUG "Terminate the MLMEThr pid=%d!\n",
+- pid_nr(task_pid(pObj->MLMEThr_task)));
++ printk("Terminate the MLMEThr_pid=%d!\n", GET_PID_NUMBER(pObj->MLMEThr_pid));
+ mb();
+ pAd->mlme_kill = 1;
+ //RT28XX_MLME_HANDLER(pAd);
+ mb();
+- kthread_stop(pObj->MLMEThr_task);
+- pObj->MLMEThr_task = NULL;
++ ret = KILL_THREAD_PID(pObj->MLMEThr_pid, SIGTERM, 1);
++ if (ret)
++ {
++ printk (KERN_WARNING "%s: unable to Mlme thread, pid=%d, ret=%d!\n",
++ pAd->net_dev->name, GET_PID_NUMBER(pObj->MLMEThr_pid), ret);
++ }
++ else
++ {
++ //wait_for_completion (&pAd->notify);
++ wait_for_completion (&pAd->mlmeComplete);
++ pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
++ }
+ }
+
+- BUG_ON(pObj->RTUSBCmdThr_task == NULL);
+- CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task))
++ CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid)
+ {
+- printk(KERN_DEBUG "Terminate the RTUSBCmdThr pid=%d!\n",
+- pid_nr(task_pid(pObj->RTUSBCmdThr_task)));
++ printk("Terminate the RTUSBCmdThr_pid=%d!\n", GET_PID_NUMBER(pObj->RTUSBCmdThr_pid));
+ mb();
+ NdisAcquireSpinLock(&pAd->CmdQLock);
+ pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED;
+ NdisReleaseSpinLock(&pAd->CmdQLock);
+ mb();
+ //RTUSBCMDUp(pAd);
+- kthread_stop(pObj->RTUSBCmdThr_task);
+- pObj->RTUSBCmdThr_task = NULL;
++ ret = KILL_THREAD_PID(pObj->RTUSBCmdThr_pid, SIGTERM, 1);
++ if (ret)
++ {
++ printk(KERN_WARNING "%s: unable to RTUSBCmd thread, pid=%d, ret=%d!\n",
++ pAd->net_dev->name, GET_PID_NUMBER(pObj->RTUSBCmdThr_pid), ret);
++ }
++ else
++ {
++ //wait_for_completion (&pAd->notify);
++ wait_for_completion (&pAd->CmdQComplete);
++ pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
++ }
+ }
+
+
+@@ -1044,7 +1067,7 @@ BOOLEAN RT28XXChipsetCheck(
+ if (dev_p->descriptor.idVendor == rtusb_usb_id[i].idVendor &&
+ dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct)
+ {
+- printk(KERN_DEBUG "rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
++ printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
+ dev_p->descriptor.idVendor, dev_p->descriptor.idProduct);
+ break;
+ }
+diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c
+index cb16d2f..9f5143b 100644
+--- a/drivers/staging/rt2870/common/2870_rtmp_init.c
++++ b/drivers/staging/rt2870/common/2870_rtmp_init.c
+@@ -727,8 +727,8 @@ NDIS_STATUS AdapterBlockAllocateMemory(
+
+ usb_dev = pObj->pUsb_Dev;
+
+- pObj->MLMEThr_task = NULL;
+- pObj->RTUSBCmdThr_task = NULL;
++ pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
++ pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
+
+ *ppAd = (PVOID)vmalloc(sizeof(RTMP_ADAPTER));
+
+@@ -765,7 +765,7 @@ NDIS_STATUS CreateThreads(
+ {
+ PRTMP_ADAPTER pAd = net_dev->ml_priv;
+ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;
+- struct task_struct *tsk;
++ pid_t pid_number = -1;
+
+ //init_MUTEX(&(pAd->usbdev_semaphore));
+
+@@ -779,39 +779,36 @@ NDIS_STATUS CreateThreads(
+ init_completion (&pAd->TimerQComplete);
+
+ // Creat MLME Thread
+- pObj->MLMEThr_task = NULL;
+- tsk = kthread_run(MlmeThread, pAd, pAd->net_dev->name);
+-
+- if (IS_ERR(tsk)) {
++ pObj->MLMEThr_pid= THREAD_PID_INIT_VALUE;
++ pid_number = kernel_thread(MlmeThread, pAd, CLONE_VM);
++ if (pid_number < 0)
++ {
+ printk (KERN_WARNING "%s: unable to start Mlme thread\n",pAd->net_dev->name);
+ return NDIS_STATUS_FAILURE;
+ }
+-
+- pObj->MLMEThr_task = tsk;
++ pObj->MLMEThr_pid = GET_PID(pid_number);
+ // Wait for the thread to start
+ wait_for_completion(&(pAd->mlmeComplete));
+
+ // Creat Command Thread
+- pObj->RTUSBCmdThr_task = NULL;
+- tsk = kthread_run(RTUSBCmdThread, pAd, pAd->net_dev->name);
+-
+- if (IS_ERR(tsk) < 0)
++ pObj->RTUSBCmdThr_pid= THREAD_PID_INIT_VALUE;
++ pid_number = kernel_thread(RTUSBCmdThread, pAd, CLONE_VM);
++ if (pid_number < 0)
+ {
+ printk (KERN_WARNING "%s: unable to start RTUSBCmd thread\n",pAd->net_dev->name);
+ return NDIS_STATUS_FAILURE;
+ }
+-
+- pObj->RTUSBCmdThr_task = tsk;
++ pObj->RTUSBCmdThr_pid = GET_PID(pid_number);
+ wait_for_completion(&(pAd->CmdQComplete));
+
+- pObj->TimerQThr_task = NULL;
+- tsk = kthread_run(TimerQThread, pAd, pAd->net_dev->name);
+- if (IS_ERR(tsk) < 0)
++ pObj->TimerQThr_pid= THREAD_PID_INIT_VALUE;
++ pid_number = kernel_thread(TimerQThread, pAd, CLONE_VM);
++ if (pid_number < 0)
+ {
+ printk (KERN_WARNING "%s: unable to start TimerQThread\n",pAd->net_dev->name);
+ return NDIS_STATUS_FAILURE;
+ }
+- pObj->TimerQThr_task = tsk;
++ pObj->TimerQThr_pid = GET_PID(pid_number);
+ // Wait for the thread to start
+ wait_for_completion(&(pAd->TimerQComplete));
+
+diff --git a/drivers/staging/rt2870/common/cmm_data.c b/drivers/staging/rt2870/common/cmm_data.c
+index f8e0ebd..fd809ab 100644
+--- a/drivers/staging/rt2870/common/cmm_data.c
++++ b/drivers/staging/rt2870/common/cmm_data.c
+@@ -709,6 +709,9 @@ BOOLEAN RTMP_FillTxBlkInfo(
+ }
+
+ return TRUE;
++
++FillTxBlkErr:
++ return FALSE;
+ }
+
+
+diff --git a/drivers/staging/rt2870/common/rtmp_init.c b/drivers/staging/rt2870/common/rtmp_init.c
+index 099b6a8..870a00d 100644
+--- a/drivers/staging/rt2870/common/rtmp_init.c
++++ b/drivers/staging/rt2870/common/rtmp_init.c
+@@ -3655,7 +3655,7 @@ VOID UserCfgInit(
+ #ifdef RALINK_28xx_QA
+ //pAd->ate.Repeat = 0;
+ pAd->ate.TxStatus = 0;
+- pAd->ate.AtePid = NULL;
++ pAd->ate.AtePid = THREAD_PID_INIT_VALUE;
+ #endif // RALINK_28xx_QA //
+ #endif // RALINK_ATE //
+
+diff --git a/drivers/staging/rt2870/common/rtusb_io.c b/drivers/staging/rt2870/common/rtusb_io.c
+index afde136..6db443e 100644
+--- a/drivers/staging/rt2870/common/rtusb_io.c
++++ b/drivers/staging/rt2870/common/rtusb_io.c
+@@ -958,8 +958,7 @@ NDIS_STATUS RTUSBEnqueueCmdFromNdis(
+ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;
+
+
+- BUG_ON(pObj->RTUSBCmdThr_task == NULL);
+- CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task))
++ CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid)
+ return (NDIS_STATUS_RESOURCES);
+
+ status = RTMPAllocateMemory((PVOID *)&cmdqelmt, sizeof(CmdQElmt));
+diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h
+index fef14a4..39fa918 100644
+--- a/drivers/staging/rt2870/rt2870.h
++++ b/drivers/staging/rt2870/rt2870.h
+@@ -580,16 +580,14 @@ VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs);
+ #define RTUSBMlmeUp(pAd) \
+ { \
+ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \
+- BUG_ON(pObj->MLMEThr_task == NULL); \
+- CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) \
++ CHECK_PID_LEGALITY(pObj->MLMEThr_pid) \
+ up(&(pAd->mlme_semaphore)); \
+ }
+
+ #define RTUSBCMDUp(pAd) \
+ { \
+ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \
+- BUG_ON(pObj->RTUSBCmdThr_task == NULL); \
+- CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) \
++ CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) \
+ up(&(pAd->RTUSBCmd_semaphore)); \
+ }
+
+diff --git a/drivers/staging/rt2870/rt_linux.h b/drivers/staging/rt2870/rt_linux.h
+index 5a6ee6a..49ad37f 100644
+--- a/drivers/staging/rt2870/rt_linux.h
++++ b/drivers/staging/rt2870/rt_linux.h
+@@ -44,7 +44,6 @@
+ #include <linux/module.h>
+ #include <linux/version.h>
+ #include <linux/kernel.h>
+-#include <linux/kthread.h>
+
+ #include <linux/spinlock.h>
+ #include <linux/init.h>
+@@ -166,12 +165,14 @@ typedef int (*HARD_START_XMIT_FUNC)(struct sk_buff *skb, struct net_device *net_
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+ typedef struct pid * THREAD_PID;
++#define THREAD_PID_INIT_VALUE NULL
+ #define GET_PID(_v) find_get_pid(_v)
+ #define GET_PID_NUMBER(_v) pid_nr(_v)
+ #define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0)
+ #define KILL_THREAD_PID(_A, _B, _C) kill_pid(_A, _B, _C)
+ #else
+ typedef pid_t THREAD_PID;
++#define THREAD_PID_INIT_VALUE -1
+ #define GET_PID(_v) _v
+ #define GET_PID_NUMBER(_v) _v
+ #define CHECK_PID_LEGALITY(_pid) if (_pid >= 0)
+@@ -187,11 +188,11 @@ struct os_lock {
+ struct os_cookie {
+
+ #ifdef RT2870
+- struct usb_device *pUsb_Dev;
++ struct usb_device *pUsb_Dev;
+
+- struct task_struct *MLMEThr_task;
+- struct task_struct *RTUSBCmdThr_task;
+- struct task_struct *TimerQThr_task;
++ THREAD_PID MLMEThr_pid;
++ THREAD_PID RTUSBCmdThr_pid;
++ THREAD_PID TimerQThr_pid;
+ #endif // RT2870 //
+
+ struct tasklet_struct rx_done_task;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index ef03927..096badf 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -582,7 +582,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
+ if (!ps)
+ goto out;
+
+- ret = -ENOENT;
++ ret = -ENODEV;
+
+ /* usbdev device-node */
+ if (imajor(inode) == USB_DEVICE_MAJOR)
+@@ -1308,7 +1308,8 @@ static int get_urb32(struct usbdevfs_urb *kurb,
+ struct usbdevfs_urb32 __user *uurb)
+ {
+ __u32 uptr;
+- if (get_user(kurb->type, &uurb->type) ||
++ if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) ||
++ __get_user(kurb->type, &uurb->type) ||
+ __get_user(kurb->endpoint, &uurb->endpoint) ||
+ __get_user(kurb->status, &uurb->status) ||
+ __get_user(kurb->flags, &uurb->flags) ||
+@@ -1523,8 +1524,9 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
+ u32 udata;
+
+ uioc = compat_ptr((long)arg);
+- if (get_user(ctrl.ifno, &uioc->ifno) ||
+- get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
++ if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) ||
++ __get_user(ctrl.ifno, &uioc->ifno) ||
++ __get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
+ __get_user(udata, &uioc->data))
+ return -EFAULT;
+ ctrl.data = compat_ptr(udata);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 9722512..91c0e01 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -672,6 +672,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
++ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
+index 12330fa..3f89e3c 100644
+--- a/drivers/usb/serial/ftdi_sio.h
++++ b/drivers/usb/serial/ftdi_sio.h
+@@ -926,6 +926,20 @@
+ #define MARVELL_SHEEVAPLUG_PID 0x9e8f
+
+ /*
++ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
++ * http://winglucofacts.com/cables/
++ */
++#define BAYER_VID 0x1A79
++#define BAYER_CONTOUR_CABLE_PID 0x6001
++
++/*
++ * Marvell OpenRD Base, Client
++ * http://www.open-rd.org
++ * OpenRD Base, Client use VID 0x0403
++ */
++#define MARVELL_OPENRD_PID 0x9e90
++
++/*
+ * BmRequestType: 1100 0000b
+ * bRequest: FTDI_E2_READ
+ * wValue: 0
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index fcb3202..e20dc52 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
+ US_BULK_GET_MAX_LUN,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
+- 0, us->ifnum, us->iobuf, 1, HZ);
++ 0, us->ifnum, us->iobuf, 1, 10*HZ);
+
+ US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
+ result, us->iobuf[0]);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 4b8b690..1d76bf0 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -838,6 +838,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
++/* Reported by Rogerio Brito <rbrito at ime.usp.br> */
++UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
++ "Prolific Technology, Inc.",
++ "Mass Storage Device",
++ US_SC_DEVICE, US_PR_DEVICE, NULL,
++ US_FL_NOT_LOCKABLE ),
++
+ /* Reported by Richard -=[]=- <micro_flyer at hotmail.com> */
+ /* Change to bcdDeviceMin (0x0100 to 0x0001) reported by
+ * Thomas Bartosik <tbartdev at gmx-topmail.de> */
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 697f6b5..e92f229 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
+ if (IS_ERR(bprm.file))
+ return res;
+
++ bprm.cred = prepare_exec_creds();
++ res = -ENOMEM;
++ if (!bprm.cred)
++ goto out;
++
+ res = prepare_binprm(&bprm);
+
+ if (res <= (unsigned long)-4096)
+ res = load_flat_file(&bprm, libs, id, NULL);
+- if (bprm.file) {
+- allow_write_access(bprm.file);
+- fput(bprm.file);
+- bprm.file = NULL;
+- }
++
++ abort_creds(bprm.cred);
++
++out:
++ allow_write_access(bprm.file);
++ fput(bprm.file);
++
+ return(res);
+ }
+
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 83d6275..3236d1a 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void)
+ * i.e. strips from UNC trailing path that is not part of share
+ * name and fixup missing '\' in the begining of DFS node refferal
+ * if neccessary.
+- * Returns pointer to share name on success or NULL on error.
++ * Returns pointer to share name on success or ERR_PTR on error.
+ * Caller is responsible for freeing returned string.
+ */
+ static char *cifs_get_share_name(const char *node_name)
+@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name)
+ UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
+ GFP_KERNEL);
+ if (!UNC)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ /* get share name and server name */
+ if (node_name[1] != '\\') {
+@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name)
+ cERROR(1, ("%s: no server name end in node name: %s",
+ __func__, node_name));
+ kfree(UNC);
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ /* find sharename end */
+@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ return ERR_PTR(-EINVAL);
+
+ *devname = cifs_get_share_name(ref->node_name);
++ if (IS_ERR(*devname)) {
++ rc = PTR_ERR(*devname);
++ *devname = NULL;
++ goto compose_mount_options_err;
++ }
++
+ rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
+ if (rc != 0) {
+ cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 1dc14f2..0b0b9df 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2563,11 +2563,20 @@ remote_path_check:
+
+ if (mount_data != mount_data_global)
+ kfree(mount_data);
++
+ mount_data = cifs_compose_mount_options(
+ cifs_sb->mountdata, full_path + 1,
+ referrals, &fake_devname);
+- kfree(fake_devname);
++
+ free_dfs_info_array(referrals, num_referrals);
++ kfree(fake_devname);
++ kfree(full_path);
++
++ if (IS_ERR(mount_data)) {
++ rc = PTR_ERR(mount_data);
++ mount_data = NULL;
++ goto mount_fail_check;
++ }
+
+ if (tcon)
+ cifs_put_tcon(tcon);
+@@ -2575,8 +2584,6 @@ remote_path_check:
+ cifs_put_smb_ses(pSesInfo);
+
+ cleanup_volume_info(&volume_info);
+- FreeXid(xid);
+- kfree(full_path);
+ referral_walks_count++;
+ goto try_mount_again;
+ }
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index b83f6bc..e2a82c1 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -1915,6 +1915,7 @@ COMPATIBLE_IOCTL(FIONCLEX)
+ COMPATIBLE_IOCTL(FIOASYNC)
+ COMPATIBLE_IOCTL(FIONBIO)
+ COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
++COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
+ /* 0x00 */
+ COMPATIBLE_IOCTL(FIBMAP)
+ COMPATIBLE_IOCTL(FIGETBSZ)
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 08f6b04..630feb3 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -255,7 +255,7 @@ static void nfs_direct_read_release(void *calldata)
+
+ if (put_dreq(dreq))
+ nfs_direct_complete(dreq);
+- nfs_readdata_release(calldata);
++ nfs_readdata_free(data);
+ }
+
+ static const struct rpc_call_ops nfs_read_direct_ops = {
+@@ -311,14 +311,14 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+ data->npages, 1, 0, data->pagevec, NULL);
+ up_read(¤t->mm->mmap_sem);
+ if (result < 0) {
+- nfs_readdata_release(data);
++ nfs_readdata_free(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ bytes = result * PAGE_SIZE;
+ if (bytes <= pgbase) {
+ nfs_direct_release_pages(data->pagevec, result);
+- nfs_readdata_release(data);
++ nfs_readdata_free(data);
+ break;
+ }
+ bytes -= pgbase;
+@@ -331,7 +331,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+ data->inode = inode;
+ data->cred = msg.rpc_cred;
+ data->args.fh = NFS_FH(inode);
+- data->args.context = get_nfs_open_context(ctx);
++ data->args.context = ctx;
+ data->args.offset = pos;
+ data->args.pgbase = pgbase;
+ data->args.pages = data->pagevec;
+@@ -438,7 +438,7 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
+ struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
+ list_del(&data->pages);
+ nfs_direct_release_pages(data->pagevec, data->npages);
+- nfs_writedata_release(data);
++ nfs_writedata_free(data);
+ }
+ }
+
+@@ -531,7 +531,7 @@ static void nfs_direct_commit_release(void *calldata)
+
+ dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
+ nfs_direct_write_complete(dreq, data->inode);
+- nfs_commitdata_release(calldata);
++ nfs_commit_free(data);
+ }
+
+ static const struct rpc_call_ops nfs_commit_direct_ops = {
+@@ -564,7 +564,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
+ data->args.fh = NFS_FH(data->inode);
+ data->args.offset = 0;
+ data->args.count = 0;
+- data->args.context = get_nfs_open_context(dreq->ctx);
++ data->args.context = dreq->ctx;
+ data->res.count = 0;
+ data->res.fattr = &data->fattr;
+ data->res.verf = &data->verf;
+@@ -725,14 +725,14 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+ data->npages, 0, 0, data->pagevec, NULL);
+ up_read(¤t->mm->mmap_sem);
+ if (result < 0) {
+- nfs_writedata_release(data);
++ nfs_writedata_free(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ bytes = result * PAGE_SIZE;
+ if (bytes <= pgbase) {
+ nfs_direct_release_pages(data->pagevec, result);
+- nfs_writedata_release(data);
++ nfs_writedata_free(data);
+ break;
+ }
+ bytes -= pgbase;
+@@ -747,7 +747,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+ data->inode = inode;
+ data->cred = msg.rpc_cred;
+ data->args.fh = NFS_FH(inode);
+- data->args.context = get_nfs_open_context(ctx);
++ data->args.context = ctx;
+ data->args.offset = pos;
+ data->args.pgbase = pgbase;
+ data->args.pages = data->pagevec;
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 4ace3c5..eea5bd2 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -59,17 +59,15 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+ return p;
+ }
+
+-static void nfs_readdata_free(struct nfs_read_data *p)
++void nfs_readdata_free(struct nfs_read_data *p)
+ {
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
+ mempool_free(p, nfs_rdata_mempool);
+ }
+
+-void nfs_readdata_release(void *data)
++static void nfs_readdata_release(struct nfs_read_data *rdata)
+ {
+- struct nfs_read_data *rdata = data;
+-
+ put_nfs_open_context(rdata->args.context);
+ nfs_readdata_free(rdata);
+ }
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index e560a78..9cce370 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -84,17 +84,15 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+ return p;
+ }
+
+-static void nfs_writedata_free(struct nfs_write_data *p)
++void nfs_writedata_free(struct nfs_write_data *p)
+ {
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
+ mempool_free(p, nfs_wdata_mempool);
+ }
+
+-void nfs_writedata_release(void *data)
++static void nfs_writedata_release(struct nfs_write_data *wdata)
+ {
+- struct nfs_write_data *wdata = data;
+-
+ put_nfs_open_context(wdata->args.context);
+ nfs_writedata_free(wdata);
+ }
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 1779ddc..18ffb2e 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1876,12 +1876,26 @@ static void nilfs_end_page_io(struct page *page, int err)
+ if (!page)
+ return;
+
+- if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page))
++ if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
+ /*
+ * For b-tree node pages, this function may be called twice
+ * or more because they might be split in a segment.
+ */
++ if (PageDirty(page)) {
++ /*
++ * For pages holding split b-tree node buffers, dirty
++ * flag on the buffers may be cleared discretely.
++ * In that case, the page is once redirtied for
++ * remaining buffers, and it must be cancelled if
++ * all the buffers get cleaned later.
++ */
++ lock_page(page);
++ if (nilfs_page_buffers_clean(page))
++ __nilfs_clear_page_dirty(page);
++ unlock_page(page);
++ }
+ return;
++ }
+
+ __nilfs_end_page_io(page, err);
+ }
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 3326bbf..f705cfd 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task)
+
+ struct mm_struct *mm_for_maps(struct task_struct *task)
+ {
+- struct mm_struct *mm = get_task_mm(task);
+- if (!mm)
++ struct mm_struct *mm;
++
++ if (mutex_lock_killable(&task->cred_exec_mutex))
+ return NULL;
+- down_read(&mm->mmap_sem);
+- task_lock(task);
+- if (task->mm != mm)
+- goto out;
+- if (task->mm != current->mm &&
+- __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
+- goto out;
+- task_unlock(task);
++
++ mm = get_task_mm(task);
++ if (mm && mm != current->mm &&
++ !ptrace_may_access(task, PTRACE_MODE_READ)) {
++ mmput(mm);
++ mm = NULL;
++ }
++ mutex_unlock(&task->cred_exec_mutex);
++
+ return mm;
+-out:
+- task_unlock(task);
+- up_read(&mm->mmap_sem);
+- mmput(mm);
+- return NULL;
+ }
+
+ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 6f61b7c..9bd8be1 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ mm = mm_for_maps(priv->task);
+ if (!mm)
+ return NULL;
++ down_read(&mm->mmap_sem);
+
+ tail_vma = get_gate_vma(priv->task);
+ priv->tail_vma = tail_vma;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 64a72e2..8f5c05d 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ priv->task = NULL;
+ return NULL;
+ }
++ down_read(&mm->mmap_sem);
+
+ /* start from the Nth VMA */
+ for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index d88d0fa..14f2d71 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -939,8 +939,10 @@ again:
+ /* Remove from old parent's list and insert into new parent's list. */
+ sysfs_unlink_sibling(sd);
+ sysfs_get(new_parent_sd);
++ drop_nlink(old_parent->d_inode);
+ sysfs_put(sd->s_parent);
+ sd->s_parent = new_parent_sd;
++ inc_nlink(new_parent->d_inode);
+ sysfs_link_sibling(sd);
+
+ out_unlock:
+diff --git a/include/linux/ide.h b/include/linux/ide.h
+index 9fed365..ca051f3 100644
+--- a/include/linux/ide.h
++++ b/include/linux/ide.h
+@@ -1384,7 +1384,6 @@ int ide_in_drive_list(u16 *, const struct drive_list_entry *);
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+ int ide_dma_good_drive(ide_drive_t *);
+ int __ide_dma_bad_drive(ide_drive_t *);
+-int ide_id_dma_bug(ide_drive_t *);
+
+ u8 ide_find_dma_mode(ide_drive_t *, u8);
+
+@@ -1425,7 +1424,6 @@ void ide_dma_lost_irq(ide_drive_t *);
+ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
+
+ #else
+-static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
+ static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
+ static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
+ static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5a96a1a..a28daad 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1856,15 +1856,14 @@ static inline int net_gso_ok(int features, int gso_type)
+
+ static inline int skb_gso_ok(struct sk_buff *skb, int features)
+ {
+- return net_gso_ok(features, skb_shinfo(skb)->gso_type);
++ return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
++ (!skb_shinfo(skb)->frag_list || (features & NETIF_F_FRAGLIST));
+ }
+
+ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+ {
+ return skb_is_gso(skb) &&
+ (!skb_gso_ok(skb, dev->features) ||
+- (skb_shinfo(skb)->frag_list &&
+- !(dev->features & NETIF_F_FRAGLIST)) ||
+ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ }
+
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index fdffb41..f6b9024 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
+ extern int nfs_flush_incompatible(struct file *file, struct page *page);
+ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
+-extern void nfs_writedata_release(void *);
+
+ /*
+ * Try to write back everything synchronously (but check the
+@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+ extern int nfs_commit_inode(struct inode *, int);
+ extern struct nfs_write_data *nfs_commitdata_alloc(void);
+ extern void nfs_commit_free(struct nfs_write_data *wdata);
+-extern void nfs_commitdata_release(void *wdata);
+ #else
+ static inline int
+ nfs_commit_inode(struct inode *inode, int how)
+@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
+ * Allocate nfs_write_data structures
+ */
+ extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
++extern void nfs_writedata_free(struct nfs_write_data *);
+
+ /*
+ * linux/fs/nfs/read.c
+@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
+ extern int nfs_readpages(struct file *, struct address_space *,
+ struct list_head *, unsigned);
+ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
+-extern void nfs_readdata_release(void *data);
+ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ struct page *);
+
+@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ * Allocate nfs_read_data structures
+ */
+ extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
++extern void nfs_readdata_free(struct nfs_read_data *);
+
+ /*
+ * linux/fs/nfs3proc.c
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 9f80a76..d16a304 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -448,6 +448,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ {
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
++ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_sock_rfree;
+ atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 4bb1ff9..9bc2c83 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -103,15 +103,15 @@ struct net;
+
+ /**
+ * struct sock_common - minimal network layer representation of sockets
++ * @skc_node: main hash linkage for various protocol lookup tables
++ * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
++ * @skc_refcnt: reference count
++ * @skc_hash: hash value used with various protocol lookup tables
+ * @skc_family: network address family
+ * @skc_state: Connection state
+ * @skc_reuse: %SO_REUSEADDR setting
+ * @skc_bound_dev_if: bound device index if != 0
+- * @skc_node: main hash linkage for various protocol lookup tables
+- * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ * @skc_bind_node: bind hash linkage for various protocol lookup tables
+- * @skc_refcnt: reference count
+- * @skc_hash: hash value used with various protocol lookup tables
+ * @skc_prot: protocol handlers inside a network family
+ * @skc_net: reference to the network namespace of this socket
+ *
+@@ -119,17 +119,21 @@ struct net;
+ * for struct sock and struct inet_timewait_sock.
+ */
+ struct sock_common {
+- unsigned short skc_family;
+- volatile unsigned char skc_state;
+- unsigned char skc_reuse;
+- int skc_bound_dev_if;
++ /*
++ * first fields are not copied in sock_copy()
++ */
+ union {
+ struct hlist_node skc_node;
+ struct hlist_nulls_node skc_nulls_node;
+ };
+- struct hlist_node skc_bind_node;
+ atomic_t skc_refcnt;
++
+ unsigned int skc_hash;
++ unsigned short skc_family;
++ volatile unsigned char skc_state;
++ unsigned char skc_reuse;
++ int skc_bound_dev_if;
++ struct hlist_node skc_bind_node;
+ struct proto *skc_prot;
+ #ifdef CONFIG_NET_NS
+ struct net *skc_net;
+@@ -207,15 +211,17 @@ struct sock {
+ * don't add nothing before this first member (__sk_common) --acme
+ */
+ struct sock_common __sk_common;
++#define sk_node __sk_common.skc_node
++#define sk_nulls_node __sk_common.skc_nulls_node
++#define sk_refcnt __sk_common.skc_refcnt
++
++#define sk_copy_start __sk_common.skc_hash
++#define sk_hash __sk_common.skc_hash
+ #define sk_family __sk_common.skc_family
+ #define sk_state __sk_common.skc_state
+ #define sk_reuse __sk_common.skc_reuse
+ #define sk_bound_dev_if __sk_common.skc_bound_dev_if
+-#define sk_node __sk_common.skc_node
+-#define sk_nulls_node __sk_common.skc_nulls_node
+ #define sk_bind_node __sk_common.skc_bind_node
+-#define sk_refcnt __sk_common.skc_refcnt
+-#define sk_hash __sk_common.skc_hash
+ #define sk_prot __sk_common.skc_prot
+ #define sk_net __sk_common.skc_net
+ unsigned char sk_shutdown : 2,
+@@ -1225,6 +1231,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+
+ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ {
++ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 875ffbd..9c1f52d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -568,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ * the value intact in a core dump, and to save the unnecessary
+ * trouble otherwise. Userland only wants this done for a sys_exit.
+ */
+- if (tsk->clear_child_tid
+- && !(tsk->flags & PF_SIGNALED)
+- && atomic_read(&mm->mm_users) > 1) {
+- u32 __user * tidptr = tsk->clear_child_tid;
++ if (tsk->clear_child_tid) {
++ if (!(tsk->flags & PF_SIGNALED) &&
++ atomic_read(&mm->mm_users) > 1) {
++ /*
++ * We don't check the error code - if userspace has
++ * not set up a proper pointer then tough luck.
++ */
++ put_user(0, tsk->clear_child_tid);
++ sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
++ 1, NULL, NULL, 0);
++ }
+ tsk->clear_child_tid = NULL;
+-
+- /*
+- * We don't check the error code - if userspace has
+- * not set up a proper pointer then tough luck.
+- */
+- put_user(0, tidptr);
+- sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
+ }
+ }
+
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 052ec4d..d089d05 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer)
+ return -EOPNOTSUPP;
+ }
+
++static int no_nsleep(const clockid_t which_clock, int flags,
++ struct timespec *tsave, struct timespec __user *rmtp)
++{
++ return -EOPNOTSUPP;
++}
++
+ /*
+ * Return nonzero if we know a priori this clockid_t value is bogus.
+ */
+@@ -254,6 +260,7 @@ static __init int init_posix_timers(void)
+ .clock_get = posix_get_monotonic_raw,
+ .clock_set = do_posix_clock_nosettime,
+ .timer_create = no_timer_create,
++ .nsleep = no_nsleep,
+ };
+
+ register_posix_clock(CLOCK_REALTIME, &clock_realtime);
+diff --git a/kernel/smp.c b/kernel/smp.c
+index ad63d85..94188b8 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_BAD;
+ break;
+
+-#ifdef CONFIG_CPU_HOTPLUG
++#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 960cbf4..f99b792 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -695,6 +695,7 @@ ring_buffer_free(struct ring_buffer *buffer)
+
+ put_online_cpus();
+
++ kfree(buffer->buffers);
+ free_cpumask_var(buffer->cpumask);
+
+ kfree(buffer);
+@@ -2101,7 +2102,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ * the box. Return the padding, and we will release
+ * the current locks, and try again.
+ */
+- rb_advance_reader(cpu_buffer);
+ return event;
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+@@ -2218,6 +2218,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ again:
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_buffer_peek(buffer, cpu, ts);
++ if (event && event->type == RINGBUF_TYPE_PADDING)
++ rb_advance_reader(cpu_buffer);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+@@ -2282,12 +2284,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ event = rb_buffer_peek(buffer, cpu, ts);
+- if (!event)
+- goto out_unlock;
+-
+- rb_advance_reader(cpu_buffer);
++ if (event)
++ rb_advance_reader(cpu_buffer);
+
+- out_unlock:
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ out:
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index cda81ec..3928aee 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2993,7 +2993,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
+ break;
+ }
+
+- trace_consume(iter);
++ if (ret != TRACE_TYPE_NO_CONSUME)
++ trace_consume(iter);
+ rem -= count;
+ if (!find_next_entry_inc(iter)) {
+ rem = 0;
+@@ -4122,8 +4123,11 @@ static void __ftrace_dump(bool disable_tracing)
+ iter.pos = -1;
+
+ if (find_next_entry_inc(&iter) != NULL) {
+- print_trace_line(&iter);
+- trace_consume(&iter);
++ int ret;
++
++ ret = print_trace_line(&iter);
++ if (ret != TRACE_TYPE_NO_CONSUME)
++ trace_consume(&iter);
+ }
+
+ trace_printk_seq(&iter.seq);
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index d28687e..8ef6a93 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -798,9 +798,16 @@ print_graph_function(struct trace_iterator *iter)
+
+ switch (entry->type) {
+ case TRACE_GRAPH_ENT: {
+- struct ftrace_graph_ent_entry *field;
++ /*
++ * print_graph_entry() may consume the current event,
++ * thus @field may become invalid, so we need to save it.
++ * sizeof(struct ftrace_graph_ent_entry) is very small,
++ * it can be safely saved at the stack.
++ */
++ struct ftrace_graph_ent_entry *field, saved;
+ trace_assign_type(field, entry);
+- return print_graph_entry(field, s, iter);
++ saved = *field;
++ return print_graph_entry(&saved, s, iter);
+ }
+ case TRACE_GRAPH_RET: {
+ struct ftrace_graph_ret_entry *field;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index e83ad2c..2403eb9 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2341,7 +2341,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
+
+ spin_lock(&inode->i_lock);
+- inode->i_blocks -= blocks_per_huge_page(h);
++ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
+ spin_unlock(&inode->i_lock);
+
+ hugetlb_put_quota(inode->i_mapping, (chg - freed));
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 480907c..6bf3cc4 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -858,7 +858,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
+ */
+ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list,
+- int migratetype)
++ int migratetype, int cold)
+ {
+ int i;
+
+@@ -877,7 +877,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ * merge IO requests if the physical pages are ordered
+ * properly.
+ */
+- list_add(&page->lru, list);
++ if (likely(cold == 0))
++ list_add(&page->lru, list);
++ else
++ list_add_tail(&page->lru, list);
+ set_page_private(page, migratetype);
+ list = &page->lru;
+ }
+@@ -1077,7 +1080,8 @@ again:
+ local_irq_save(flags);
+ if (!pcp->count) {
+ pcp->count = rmqueue_bulk(zone, 0,
+- pcp->batch, &pcp->list, migratetype);
++ pcp->batch, &pcp->list,
++ migratetype, cold);
+ if (unlikely(!pcp->count))
+ goto failed;
+ }
+@@ -1096,7 +1100,8 @@ again:
+ /* Allocate more to the pcp list if necessary */
+ if (unlikely(&page->lru == &pcp->list)) {
+ pcp->count += rmqueue_bulk(zone, 0,
+- pcp->batch, &pcp->list, migratetype);
++ pcp->batch, &pcp->list,
++ migratetype, cold);
+ page = list_entry(pcp->list.next, struct page, lru);
+ }
+
+diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
+index 5f1d210..de56d39 100644
+--- a/net/ax25/ax25_in.c
++++ b/net/ax25/ax25_in.c
+@@ -437,8 +437,7 @@ free:
+ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+ {
+- skb->sk = NULL; /* Initially we don't know who it's for */
+- skb->destructor = NULL; /* Who initializes this, dammit?! */
++ skb_orphan(skb);
+
+ if (!net_eq(dev_net(dev), &init_net)) {
+ kfree_skb(skb);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index e2e9e4a..e545067 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2284,8 +2284,6 @@ ncls:
+ if (!skb)
+ goto out;
+
+- skb_orphan(skb);
+-
+ type = skb->protocol;
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+@@ -2788,9 +2786,11 @@ static void net_rx_action(struct softirq_action *h)
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(work == weight)) {
+- if (unlikely(napi_disable_pending(n)))
+- __napi_complete(n);
+- else
++ if (unlikely(napi_disable_pending(n))) {
++ local_irq_enable();
++ napi_complete(n);
++ local_irq_disable();
++ } else
+ list_move_tail(&n->poll_list, list);
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 7dbf3ff..3f5e77e 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -915,13 +915,19 @@ static inline void sock_lock_init(struct sock *sk)
+ af_family_keys + sk->sk_family);
+ }
+
++/*
++ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
++ * even temporarly, because of RCU lookups. sk_node should also be left as is.
++ */
+ static void sock_copy(struct sock *nsk, const struct sock *osk)
+ {
+ #ifdef CONFIG_SECURITY_NETWORK
+ void *sptr = nsk->sk_security;
+ #endif
+-
+- memcpy(nsk, osk, osk->sk_prot->obj_size);
++ BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
++ sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
++ memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
++ osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
+ #ifdef CONFIG_SECURITY_NETWORK
+ nsk->sk_security = sptr;
+ security_sk_clone(osk, nsk);
+@@ -935,8 +941,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+ struct kmem_cache *slab;
+
+ slab = prot->slab;
+- if (slab != NULL)
+- sk = kmem_cache_alloc(slab, priority);
++ if (slab != NULL) {
++ sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
++ if (!sk)
++ return sk;
++ if (priority & __GFP_ZERO) {
++ /*
++ * caches using SLAB_DESTROY_BY_RCU should let
++ * sk_node.next un-modified. Special care is taken
++ * when initializing object to zero.
++ */
++ if (offsetof(struct sock, sk_node.next) != 0)
++ memset(sk, 0, offsetof(struct sock, sk_node.next));
++ memset(&sk->sk_node.pprev, 0,
++ prot->obj_size - offsetof(struct sock,
++ sk_node.pprev));
++ }
++ }
+ else
+ sk = kmalloc(prot->obj_size, priority);
+
+@@ -1103,6 +1124,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
++ /*
++ * Before updating sk_refcnt, we must commit prior changes to memory
++ * (Documentation/RCU/rculist_nulls.txt for details)
++ */
++ smp_wmb();
+ atomic_set(&newsk->sk_refcnt, 2);
+
+ /*
+@@ -1794,6 +1820,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+
+ sk->sk_stamp = ktime_set(-1L, 0);
+
++ /*
++ * Before updating sk_refcnt, we must commit prior changes to memory
++ * (Documentation/RCU/rculist_nulls.txt for details)
++ */
++ smp_wmb();
+ atomic_set(&sk->sk_refcnt, 1);
+ atomic_set(&sk->sk_drops, 0);
+ }
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 1a58a6f..0bc7bf5 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -437,6 +437,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ /* Remove any debris in the socket control block */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
++ /* Must drop socket now because of tproxy. */
++ skb_orphan(skb);
++
+ return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ ip_rcv_finish);
+
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 8f04bd9..c0cd26a 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -139,6 +139,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
+
+ rcu_read_unlock();
+
++ /* Must drop socket now because of tproxy. */
++ skb_orphan(skb);
++
+ return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ ip6_rcv_finish);
+ err:
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 3eb5bcc..e0fbcff 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -913,9 +913,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+ /* Clean up the original one to keep it in listen state */
+ irttp_listen(self->tsap);
+
+- /* Wow ! What is that ? Jean II */
+- skb->sk = NULL;
+- skb->destructor = NULL;
+ kfree_skb(skb);
+ sk->sk_ack_backlog--;
+
+diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
+index 67c99d2..7ba9661 100644
+--- a/net/irda/ircomm/ircomm_lmp.c
++++ b/net/irda/ircomm/ircomm_lmp.c
+@@ -196,6 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
+ /* Don't forget to refcount it - see ircomm_tty_do_softint() */
+ skb_get(skb);
+
++ skb_orphan(skb);
+ skb->destructor = ircomm_lmp_flow_control;
+
+ if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
+diff --git a/net/socket.c b/net/socket.c
+index 791d71a..6d47165 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -736,7 +736,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
+ if (more)
+ flags |= MSG_MORE;
+
+- return sock->ops->sendpage(sock, page, offset, size, flags);
++ return kernel_sendpage(sock, page, offset, size, flags);
+ }
+
+ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index bc7f788..f5be6a8 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -118,7 +118,7 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
+
+ if (!ie1 && !ie2)
+ return 0;
+- if (!ie1)
++ if (!ie1 || !ie2)
+ return -1;
+
+ r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1]));
+@@ -171,6 +171,8 @@ static bool is_mesh(struct cfg80211_bss *a,
+ ie = find_ie(WLAN_EID_MESH_CONFIG,
+ a->information_elements,
+ a->len_information_elements);
++ if (!ie)
++ return false;
+ if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
+ return false;
+
+diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
+index 96036cf..79d78f4 100644
+--- a/net/xfrm/xfrm_algo.c
++++ b/net/xfrm/xfrm_algo.c
+@@ -292,8 +292,8 @@ static struct xfrm_algo_desc ealg_list[] = {
+ }
+ },
+ {
+- .name = "cbc(cast128)",
+- .compat = "cast128",
++ .name = "cbc(cast5)",
++ .compat = "cast5",
+
+ .uinfo = {
+ .encr = {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 18e8dad..1df7692 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -13150,6 +13150,8 @@ static int patch_alc269(struct hda_codec *codec)
+ set_capture_mixer(spec);
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+
++ spec->vmaster_nid = 0x02;
++
+ codec->patch_ops = alc_patch_ops;
+ if (board_config == ALC269_AUTO)
+ spec->init_hook = alc269_auto_init;
Added: dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.6.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/all/stable/2.6.30.6.patch Tue Sep 15 12:19:09 2009 (r14233)
@@ -0,0 +1,3878 @@
+diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
+index eb98738..391f637 100644
+--- a/arch/ia64/kernel/pci-dma.c
++++ b/arch/ia64/kernel/pci-dma.c
+@@ -67,11 +67,6 @@ iommu_dma_init(void)
+
+ int iommu_dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
+-
+- if (ops->dma_supported)
+- return ops->dma_supported(dev, mask);
+-
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
+diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
+index b178a1e..40b5cb4 100644
+--- a/arch/powerpc/platforms/ps3/time.c
++++ b/arch/powerpc/platforms/ps3/time.c
+@@ -21,6 +21,7 @@
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+
++#include <asm/firmware.h>
+ #include <asm/rtc.h>
+ #include <asm/lv1call.h>
+ #include <asm/ps3.h>
+@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void)
+ {
+ struct platform_device *pdev;
+
++ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
++ return -ENODEV;
++
+ pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index a93d1cc..9a9efb0 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -185,6 +185,7 @@ union kvm_mmu_page_role {
+ unsigned access:3;
+ unsigned invalid:1;
+ unsigned cr4_pge:1;
++ unsigned nxe:1;
+ };
+ };
+
+@@ -513,6 +514,8 @@ struct kvm_x86_ops {
+ void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
++ void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
++ u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+ void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+ unsigned char *hypercall_addr);
+ int (*get_irq)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h
+index 6a15973..b7ed2c4 100644
+--- a/arch/x86/include/asm/kvm_x86_emulate.h
++++ b/arch/x86/include/asm/kvm_x86_emulate.h
+@@ -143,6 +143,9 @@ struct decode_cache {
+ struct fetch_cache fetch;
+ };
+
++#define X86_SHADOW_INT_MOV_SS 1
++#define X86_SHADOW_INT_STI 2
++
+ struct x86_emulate_ctxt {
+ /* Register state before/after emulation. */
+ struct kvm_vcpu *vcpu;
+@@ -152,6 +155,9 @@ struct x86_emulate_ctxt {
+ int mode;
+ u32 cs_base;
+
++ /* interruptibility state, as a result of execution of STI or MOV SS */
++ int interruptibility;
++
+ /* decode cache */
+ struct decode_cache decode;
+ };
+diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
+index dbf5445..6ef00ba 100644
+--- a/arch/x86/kernel/apic/ipi.c
++++ b/arch/x86/kernel/apic/ipi.c
+@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
+ unsigned long mask = cpumask_bits(cpumask)[0];
+ unsigned long flags;
+
++ if (WARN_ONCE(!mask, "empty IPI mask"))
++ return;
++
+ local_irq_save(flags);
+ WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
+ __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index d869b3b..61a592e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+ #endif
+ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+ /* check CPU config space for extended APIC ID */
+- if (c->x86 >= 0xf) {
++ if (cpu_has_apic && c->x86 >= 0xf) {
+ unsigned int val;
+ val = read_pci_config(0, 24, 0, 0x68);
+ if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 32cf11e..d7ce26b 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -490,16 +490,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
+ *
+ * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
+ * containing more mappings.
++ *
++ * Returns the number of rmap entries before the spte was added or zero if
++ * the spte was not added.
++ *
+ */
+-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
++static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+ {
+ struct kvm_mmu_page *sp;
+ struct kvm_rmap_desc *desc;
+ unsigned long *rmapp;
+- int i;
++ int i, count = 0;
+
+ if (!is_rmap_pte(*spte))
+- return;
++ return count;
+ gfn = unalias_gfn(vcpu->kvm, gfn);
+ sp = page_header(__pa(spte));
+ sp->gfns[spte - sp->spt] = gfn;
+@@ -516,8 +520,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+ } else {
+ rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
+ desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
+- while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
++ while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
+ desc = desc->more;
++ count += RMAP_EXT;
++ }
+ if (desc->shadow_ptes[RMAP_EXT-1]) {
+ desc->more = mmu_alloc_rmap_desc(vcpu);
+ desc = desc->more;
+@@ -526,6 +532,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+ ;
+ desc->shadow_ptes[i] = spte;
+ }
++ return count;
+ }
+
+ static void rmap_desc_remove_entry(unsigned long *rmapp,
+@@ -755,6 +762,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+ return young;
+ }
+
++#define RMAP_RECYCLE_THRESHOLD 1000
++
++static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
++{
++ unsigned long *rmapp;
++
++ gfn = unalias_gfn(vcpu->kvm, gfn);
++ rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
++
++ kvm_unmap_rmapp(vcpu->kvm, rmapp);
++ kvm_flush_remote_tlbs(vcpu->kvm);
++}
++
+ int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+ {
+ return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+@@ -1417,24 +1437,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+ */
+ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+ {
++ int used_pages;
++
++ used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
++ used_pages = max(0, used_pages);
++
+ /*
+ * If we set the number of mmu pages to be smaller be than the
+ * number of actived pages , we must to free some mmu pages before we
+ * change the value
+ */
+
+- if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
+- kvm_nr_mmu_pages) {
+- int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
+- - kvm->arch.n_free_mmu_pages;
+-
+- while (n_used_mmu_pages > kvm_nr_mmu_pages) {
++ if (used_pages > kvm_nr_mmu_pages) {
++ while (used_pages > kvm_nr_mmu_pages) {
+ struct kvm_mmu_page *page;
+
+ page = container_of(kvm->arch.active_mmu_pages.prev,
+ struct kvm_mmu_page, link);
+ kvm_mmu_zap_page(kvm, page);
+- n_used_mmu_pages--;
++ used_pages--;
+ }
+ kvm->arch.n_free_mmu_pages = 0;
+ }
+@@ -1770,6 +1791,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+ {
+ int was_rmapped = 0;
+ int was_writeble = is_writeble_pte(*shadow_pte);
++ int rmap_count;
+
+ pgprintk("%s: spte %llx access %x write_fault %d"
+ " user_fault %d gfn %lx\n",
+@@ -1811,9 +1833,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+
+ page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+ if (!was_rmapped) {
+- rmap_add(vcpu, shadow_pte, gfn, largepage);
++ rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
+ if (!is_rmap_pte(*shadow_pte))
+ kvm_release_pfn_clean(pfn);
++ if (rmap_count > RMAP_RECYCLE_THRESHOLD)
++ rmap_recycle(vcpu, gfn, largepage);
+ } else {
+ if (was_writeble)
+ kvm_release_pfn_dirty(pfn);
+@@ -1942,7 +1966,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
+ vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+ }
+
+-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
++static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
++{
++ int ret = 0;
++
++ if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
++ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
++ ret = 1;
++ }
++
++ return ret;
++}
++
++static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
+ {
+ int i;
+ gfn_t root_gfn;
+@@ -1957,13 +1993,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+ ASSERT(!VALID_PAGE(root));
+ if (tdp_enabled)
+ direct = 1;
++ if (mmu_check_root(vcpu, root_gfn))
++ return 1;
+ sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
+ PT64_ROOT_LEVEL, direct,
+ ACC_ALL, NULL);
+ root = __pa(sp->spt);
+ ++sp->root_count;
+ vcpu->arch.mmu.root_hpa = root;
+- return;
++ return 0;
+ }
+ direct = !is_paging(vcpu);
+ if (tdp_enabled)
+@@ -1980,6 +2018,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+ root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+ } else if (vcpu->arch.mmu.root_level == 0)
+ root_gfn = 0;
++ if (mmu_check_root(vcpu, root_gfn))
++ return 1;
+ sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+ PT32_ROOT_LEVEL, direct,
+ ACC_ALL, NULL);
+@@ -1988,6 +2028,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+ vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+ }
+ vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
++ return 0;
+ }
+
+ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+@@ -2006,7 +2047,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ for (i = 0; i < 4; ++i) {
+ hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+- if (root) {
++ if (root && VALID_PAGE(root)) {
+ root &= PT64_BASE_ADDR_MASK;
+ sp = page_header(root);
+ mmu_sync_children(vcpu, sp);
+@@ -2290,9 +2331,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
+ goto out;
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_free_some_pages(vcpu);
+- mmu_alloc_roots(vcpu);
++ r = mmu_alloc_roots(vcpu);
+ mmu_sync_roots(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
++ if (r)
++ goto out;
+ kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+ kvm_mmu_flush_tlb(vcpu);
+ out:
+@@ -2638,14 +2681,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
+ static void free_mmu_pages(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_mmu_page *sp;
+-
+- while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
+- sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
+- struct kvm_mmu_page, link);
+- kvm_mmu_zap_page(vcpu->kvm, sp);
+- cond_resched();
+- }
+ free_page((unsigned long)vcpu->arch.mmu.pae_root);
+ }
+
+@@ -2710,7 +2745,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+ {
+ struct kvm_mmu_page *sp;
+
+- spin_lock(&kvm->mmu_lock);
+ list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
+ int i;
+ u64 *pt;
+@@ -2725,7 +2759,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+ pt[i] &= ~PT_WRITABLE_MASK;
+ }
+ kvm_flush_remote_tlbs(kvm);
+- spin_unlock(&kvm->mmu_lock);
+ }
+
+ void kvm_mmu_zap_all(struct kvm *kvm)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 1f8510c..5700009 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -227,6 +227,27 @@ static int is_external_interrupt(u32 info)
+ return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
+ }
+
++static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
++{
++ struct vcpu_svm *svm = to_svm(vcpu);
++ u32 ret = 0;
++
++ if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
++ ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
++ return ret & mask;
++}
++
++static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
++{
++ struct vcpu_svm *svm = to_svm(vcpu);
++
++ if (mask == 0)
++ svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
++ else
++ svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
++
++}
++
+ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+@@ -240,7 +261,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ __func__, kvm_rip_read(vcpu), svm->next_rip);
+
+ kvm_rip_write(vcpu, svm->next_rip);
+- svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
++ svm_set_interrupt_shadow(vcpu, 0);
+
+ vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
+ }
+@@ -715,6 +736,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ svm->vmcb->control.tsc_offset += delta;
+ vcpu->cpu = cpu;
+ kvm_migrate_timers(vcpu);
++ svm->asid_generation = 0;
+ }
+
+ for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+@@ -1025,7 +1047,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+ }
+
+- svm->vcpu.cpu = svm_data->cpu;
+ svm->asid_generation = svm_data->asid_generation;
+ svm->vmcb->control.asid = svm_data->next_asid++;
+ }
+@@ -2237,8 +2258,8 @@ static void pre_svm_run(struct vcpu_svm *svm)
+ struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+- if (svm->vcpu.cpu != cpu ||
+- svm->asid_generation != svm_data->asid_generation)
++ /* FIXME: handle wraparound of asid_generation */
++ if (svm->asid_generation != svm_data->asid_generation)
+ new_asid(svm, svm_data);
+ }
+
+@@ -2667,6 +2688,8 @@ static struct kvm_x86_ops svm_x86_ops = {
+ .run = svm_vcpu_run,
+ .handle_exit = handle_exit,
+ .skip_emulated_instruction = skip_emulated_instruction,
++ .set_interrupt_shadow = svm_set_interrupt_shadow,
++ .get_interrupt_shadow = svm_get_interrupt_shadow,
+ .patch_hypercall = svm_patch_hypercall,
+ .get_irq = svm_get_irq,
+ .set_irq = svm_set_irq,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index fa0adcd..1a0d5cd 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -732,23 +732,45 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
++static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
++{
++ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
++ int ret = 0;
++
++ if (interruptibility & GUEST_INTR_STATE_STI)
++ ret |= X86_SHADOW_INT_STI;
++ if (interruptibility & GUEST_INTR_STATE_MOV_SS)
++ ret |= X86_SHADOW_INT_MOV_SS;
++
++ return ret & mask;
++}
++
++static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
++{
++ u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
++ u32 interruptibility = interruptibility_old;
++
++ interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
++
++ if (mask & X86_SHADOW_INT_MOV_SS)
++ interruptibility |= GUEST_INTR_STATE_MOV_SS;
++ if (mask & X86_SHADOW_INT_STI)
++ interruptibility |= GUEST_INTR_STATE_STI;
++
++ if ((interruptibility != interruptibility_old))
++ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
++}
++
+ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ {
+ unsigned long rip;
+- u32 interruptibility;
+
+ rip = kvm_rip_read(vcpu);
+ rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ kvm_rip_write(vcpu, rip);
+
+- /*
+- * We emulated an instruction, so temporary interrupt blocking
+- * should be removed, if set.
+- */
+- interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+- if (interruptibility & 3)
+- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+- interruptibility & ~3);
++ /* skipping an emulated instruction also counts */
++ vmx_set_interrupt_shadow(vcpu, 0);
+ vcpu->arch.interrupt_window_open = 1;
+ }
+
+@@ -3738,6 +3760,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
+ .run = vmx_vcpu_run,
+ .handle_exit = kvm_handle_exit,
+ .skip_emulated_instruction = skip_emulated_instruction,
++ .set_interrupt_shadow = vmx_set_interrupt_shadow,
++ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .patch_hypercall = vmx_patch_hypercall,
+ .get_irq = vmx_get_irq,
+ .set_irq = vmx_inject_irq,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ee4714b..0b1bfc6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -523,6 +523,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ efer |= vcpu->arch.shadow_efer & EFER_LMA;
+
+ vcpu->arch.shadow_efer = efer;
++
++ vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
++ kvm_mmu_reset_context(vcpu);
+ }
+
+ void kvm_enable_efer_bits(u64 mask)
+@@ -703,11 +706,48 @@ static bool msr_mtrr_valid(unsigned msr)
+ return false;
+ }
+
++static bool valid_pat_type(unsigned t)
++{
++ return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
++}
++
++static bool valid_mtrr_type(unsigned t)
++{
++ return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
++}
++
++static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ int i;
++
++ if (!msr_mtrr_valid(msr))
++ return false;
++
++ if (msr == MSR_IA32_CR_PAT) {
++ for (i = 0; i < 8; i++)
++ if (!valid_pat_type((data >> (i * 8)) & 0xff))
++ return false;
++ return true;
++ } else if (msr == MSR_MTRRdefType) {
++ if (data & ~0xcff)
++ return false;
++ return valid_mtrr_type(data & 0xff);
++ } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
++ for (i = 0; i < 8 ; i++)
++ if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
++ return false;
++ return true;
++ }
++
++ /* variable MTRRs */
++ return valid_mtrr_type(data & 0xff);
++}
++
+ static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ {
+ u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
+- if (!msr_mtrr_valid(msr))
++ if (!mtrr_valid(vcpu, msr, data))
+ return 1;
+
+ if (msr == MSR_MTRRdefType) {
+@@ -895,6 +935,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ case MSR_IA32_LASTINTFROMIP:
+ case MSR_IA32_LASTINTTOIP:
+ case MSR_VM_HSAVE_PA:
++ case MSR_P6_EVNTSEL0:
++ case MSR_P6_EVNTSEL1:
++ case MSR_K7_EVNTSEL0:
+ data = 0;
+ break;
+ case MSR_MTRRcap:
+@@ -1074,14 +1117,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
+ goto out;
+ r = -E2BIG;
+- if (n < num_msrs_to_save)
++ if (n < msr_list.nmsrs)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+ num_msrs_to_save * sizeof(u32)))
+ goto out;
+- if (copy_to_user(user_msr_list->indices
+- + num_msrs_to_save * sizeof(u32),
++ if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
+ &emulated_msrs,
+ ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
+ goto out;
+@@ -1250,9 +1292,12 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
+ bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
+ bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
++ bit(X86_FEATURE_MCE) |
+ bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
+- bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
+- bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
++ bit(X86_FEATURE_SEP) | bit(X86_FEATURE_MTRR) |
++ bit(X86_FEATURE_PGE) | bit(X86_FEATURE_MCA) |
++ bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PAT) |
++ bit(X86_FEATURE_PSE36) |
+ bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
+ bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
+ bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
+@@ -1608,10 +1653,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
+ return -EINVAL;
+
+ down_write(&kvm->slots_lock);
++ spin_lock(&kvm->mmu_lock);
+
+ kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
+ kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
+
++ spin_unlock(&kvm->mmu_lock);
+ up_write(&kvm->slots_lock);
+ return 0;
+ }
+@@ -1787,7 +1834,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
++ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
++ spin_unlock(&kvm->mmu_lock);
+ kvm_flush_remote_tlbs(kvm);
+ memslot = &kvm->memslots[log->slot];
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+@@ -2362,7 +2411,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
+ u16 error_code,
+ int emulation_type)
+ {
+- int r;
++ int r, shadow_mask;
+ struct decode_cache *c;
+
+ kvm_clear_exception_queue(vcpu);
+@@ -2411,6 +2460,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
+ }
+
+ r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
++ shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
++
++ if (r == 0)
++ kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
+
+ if (vcpu->arch.pio.string)
+ return EMULATE_DO_MMIO;
+@@ -4419,12 +4472,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
+ }
+ }
+
++ spin_lock(&kvm->mmu_lock);
+ if (!kvm->arch.n_requested_mmu_pages) {
+ unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+ kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+ }
+
+ kvm_mmu_slot_remove_write_access(kvm, mem->slot);
++ spin_unlock(&kvm->mmu_lock);
+ kvm_flush_remote_tlbs(kvm);
+
+ return 0;
+@@ -4433,6 +4488,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
+ void kvm_arch_flush_shadow(struct kvm *kvm)
+ {
+ kvm_mmu_zap_all(kvm);
++ kvm_reload_remote_mmus(kvm);
+ }
+
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
+index ca91749..d80126f 100644
+--- a/arch/x86/kvm/x86_emulate.c
++++ b/arch/x86/kvm/x86_emulate.c
+@@ -1349,6 +1349,20 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
+ return 0;
+ }
+
++void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
++{
++ u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
++ /*
++ * an sti; sti; sequence only disable interrupts for the first
++ * instruction. So, if the last instruction, be it emulated or
++ * not, left the system with the INT_STI flag enabled, it
++ * means that the last instruction is an sti. We should not
++ * leave the flag on in this case. The same goes for mov ss
++ */
++ if (!(int_shadow & mask))
++ ctxt->interruptibility = mask;
++}
++
+ int
+ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+ {
+@@ -1360,6 +1374,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+ int io_dir_in;
+ int rc = 0;
+
++ ctxt->interruptibility = 0;
++
+ /* Shadow copy of register state. Committed on successful emulation.
+ * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
+ * modify them.
+@@ -1609,6 +1625,9 @@ special_insn:
+ int err;
+
+ sel = c->src.val;
++ if (c->modrm_reg == VCPU_SREG_SS)
++ toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
++
+ if (c->modrm_reg <= 5) {
+ type_bits = (c->modrm_reg == 1) ? 9 : 1;
+ err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
+@@ -1865,6 +1884,7 @@ special_insn:
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xfb: /* sti */
++ toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+ ctxt->eflags |= X86_EFLAGS_IF;
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 821e970..c814e14 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -183,18 +183,17 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+
+ f->flush_mm = mm;
+ f->flush_va = va;
+- cpumask_andnot(to_cpumask(f->flush_cpumask),
+- cpumask, cpumask_of(smp_processor_id()));
+-
+- /*
+- * We have to send the IPI only to
+- * CPUs affected.
+- */
+- apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+- INVALIDATE_TLB_VECTOR_START + sender);
++ if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
++ INVALIDATE_TLB_VECTOR_START + sender);
+
+- while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
+- cpu_relax();
++ while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
++ cpu_relax();
++ }
+
+ f->flush_mm = NULL;
+ f->flush_va = 0;
+diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
+index 39838c6..31adda1 100644
+--- a/drivers/acpi/processor_thermal.c
++++ b/drivers/acpi/processor_thermal.c
+@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
+ if (pr->limit.thermal.tx > tx)
+ tx = pr->limit.thermal.tx;
+
+- result = acpi_processor_set_throttling(pr, tx);
++ result = acpi_processor_set_throttling(pr, tx, false);
+ if (result)
+ goto end;
+ }
+@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
+
+ if (state <= max_pstate) {
+ if (pr->flags.throttling && pr->throttling.state)
+- result = acpi_processor_set_throttling(pr, 0);
++ result = acpi_processor_set_throttling(pr, 0, false);
+ cpufreq_set_cur_state(pr->id, state);
+ } else {
+ cpufreq_set_cur_state(pr->id, max_pstate);
+ result = acpi_processor_set_throttling(pr,
+- state - max_pstate);
++ state - max_pstate, false);
+ }
+ return result;
+ }
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index 2275437..841be4e 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -62,7 +62,8 @@ struct throttling_tstate {
+ #define THROTTLING_POSTCHANGE (2)
+
+ static int acpi_processor_get_throttling(struct acpi_processor *pr);
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
++int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force);
+
+ static int acpi_processor_update_tsd_coord(void)
+ {
+@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+ */
+ target_state = throttling_limit;
+ }
+- return acpi_processor_set_throttling(pr, target_state);
++ return acpi_processor_set_throttling(pr, target_state, false);
+ }
+
+ /*
+@@ -842,7 +843,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+ ACPI_WARNING((AE_INFO,
+ "Invalid throttling state, reset"));
+ state = 0;
+- ret = acpi_processor_set_throttling(pr, state);
++ ret = acpi_processor_set_throttling(pr, state, true);
+ if (ret)
+ return ret;
+ }
+@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+ }
+
+ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+- int state)
++ int state, bool force)
+ {
+ u32 value = 0;
+ u32 duty_mask = 0;
+@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+- if (state == pr->throttling.state)
++ if (!force && (state == pr->throttling.state))
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+ }
+
+ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+- int state)
++ int state, bool force)
+ {
+ int ret;
+ acpi_integer value;
+@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+- if (state == pr->throttling.state)
++ if (!force && (state == pr->throttling.state))
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ return 0;
+ }
+
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
++int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force)
+ {
+ cpumask_var_t saved_mask;
+ int ret = 0;
+@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+ ret = p_throttling->acpi_processor_set_throttling(pr,
+- t_state.target_state);
++ t_state.target_state, force);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+ set_cpus_allowed_ptr(current, cpumask_of(i));
+ ret = match_pr->throttling.
+ acpi_processor_set_throttling(
+- match_pr, t_state.target_state);
++ match_pr, t_state.target_state, force);
+ }
+ }
+ /*
+@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabling throttling (was T%d)\n",
+ pr->throttling.state));
+- result = acpi_processor_set_throttling(pr, 0);
++ result = acpi_processor_set_throttling(pr, 0, false);
+ if (result)
+ goto end;
+ }
+@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
+ if (strcmp(tmpbuf, charp) != 0)
+ return -EINVAL;
+
+- result = acpi_processor_set_throttling(pr, state_val);
++ result = acpi_processor_set_throttling(pr, state_val, false);
+ if (result)
+ return result;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c924230..7b2f499 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4271,6 +4271,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+
++ /* this one allows HPA unlocking but fails IOs on the area */
++ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
++
+ /* Devices which report 1 sector over size HPA */
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
+diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
+index 19e0bc6..504f849 100644
+--- a/drivers/media/video/gspca/ov534.c
++++ b/drivers/media/video/gspca/ov534.c
+@@ -832,9 +832,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
+ __u32 this_pts;
+ u16 this_fid;
+ int remaining_len = len;
++ int payload_len;
+
++ payload_len = (sd->sensor == SENSOR_OV772X) ? 2048 : 2040;
+ do {
+- len = min(remaining_len, 2040); /*fixme: was 2048*/
++ len = min(remaining_len, payload_len);
+
+ /* Payloads are prefixed with a UVC-style header. We
+ consider a frame to start when the FID toggles, or the PTS
+diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
+index 16a4138..4567e90 100644
+--- a/drivers/net/ehea/ehea.h
++++ b/drivers/net/ehea/ehea.h
+@@ -40,7 +40,7 @@
+ #include <asm/io.h>
+
+ #define DRV_NAME "ehea"
+-#define DRV_VERSION "EHEA_0101"
++#define DRV_VERSION "EHEA_0102"
+
+ /* eHEA capability flags */
+ #define DLPAR_PORT_ADD_REM 1
+diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
+index 0a7a288..9bc4775 100644
+--- a/drivers/net/ehea/ehea_main.c
++++ b/drivers/net/ehea/ehea_main.c
+@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
+ {
+ int ret, i;
+
++ if (pr->qp)
++ netif_napi_del(&pr->napi);
++
+ ret = ehea_destroy_qp(pr->qp);
+
+ if (!ret) {
+diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c
+index 5996ff9..f839c83 100644
+--- a/drivers/net/wireless/ar9170/main.c
++++ b/drivers/net/wireless/ar9170/main.c
+@@ -1486,13 +1486,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ int ret;
+
+ mutex_lock(&ar->mutex);
+- if ((param) && !(queue > ar->hw->queues)) {
++ if (queue < __AR9170_NUM_TXQ) {
+ memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
+ param, sizeof(*param));
+
+ ret = ar9170_set_qos(ar);
+- } else
++ } else {
+ ret = -EINVAL;
++ }
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+index 2ad9faf..fc3a95f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
++++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+@@ -53,22 +53,31 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ if (iwl_is_rfkill_hw(priv)) {
++ /* pass error to rfkill core, make it state HARD
++ * BLOCKED (rfkill->mutex taken) and disable
++ * software kill switch */
+ err = -EBUSY;
+- goto out_unlock;
++ priv->rfkill->state = RFKILL_STATE_HARD_BLOCKED;
+ }
+ iwl_radio_kill_sw_enable_radio(priv);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ iwl_radio_kill_sw_disable_radio(priv);
++ /* rfkill->mutex is taken */
++ if (priv->rfkill->state == RFKILL_STATE_HARD_BLOCKED) {
++ /* force rfkill core state to be SOFT BLOCKED,
++ * otherwise core will be unable to disable software
++ * kill switch */
++ priv->rfkill->state = RFKILL_STATE_SOFT_BLOCKED;
++ }
+ break;
+ default:
+ IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
+ state);
+ break;
+ }
+-out_unlock:
+- mutex_unlock(&priv->mutex);
+
++ mutex_unlock(&priv->mutex);
+ return err;
+ }
+
+@@ -132,14 +141,11 @@ void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
+ if (!priv->rfkill)
+ return;
+
+- if (iwl_is_rfkill_hw(priv)) {
++ if (iwl_is_rfkill_sw(priv))
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
++ else if (iwl_is_rfkill_hw(priv))
+ rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
+- return;
+- }
+-
+- if (!iwl_is_rfkill_sw(priv))
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
+ else
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
+ }
+ EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
+index 44ab03a..da2e2d4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
+@@ -560,6 +560,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
++ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
++ keyconf->keyidx);
+
+ if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
+ IWL_ERR(priv, "index %d not used in uCode key table.\n",
+@@ -567,6 +569,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
+
+ priv->default_wep_key--;
+ memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
++ if (iwl_is_rfkill(priv)) {
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
++ spin_unlock_irqrestore(&priv->sta_lock, flags);
++ return 0;
++ }
+ ret = iwl_send_static_wepkey_cmd(priv, 1);
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+ keyconf->keyidx, ret);
+@@ -847,6 +854,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
++ if (iwl_is_rfkill(priv)) {
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
++ spin_unlock_irqrestore(&priv->sta_lock, flags);
++ return 0;
++ }
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index 84bd6f1..c242b54 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -814,13 +814,15 @@ struct rt2x00_dev {
+ static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, u32 *data)
+ {
+- *data = rt2x00dev->rf[word];
++ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
++ *data = rt2x00dev->rf[word - 1];
+ }
+
+ static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, u32 data)
+ {
+- rt2x00dev->rf[word] = data;
++ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
++ rt2x00dev->rf[word - 1] = data;
+ }
+
+ /*
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 043b208..f215a59 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object params[3];
+- char method[4] = "WM";
++ char method[5] = "WM";
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+@@ -328,8 +328,8 @@ struct acpi_buffer *out)
+ acpi_status status, wc_status = AE_ERROR;
+ struct acpi_object_list input, wc_input;
+ union acpi_object wc_params[1], wq_params[1];
+- char method[4];
+- char wc_method[4] = "WC";
++ char method[5];
++ char wc_method[5] = "WC";
+
+ if (!guid_string || !out)
+ return AE_BAD_PARAMETER;
+@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[2];
+- char method[4] = "WS";
++ char method[5] = "WS";
+
+ if (!guid_string || !in)
+ return AE_BAD_DATA;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index f3da592..35a1386 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work)
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ }
+
++/**
++ * mpt2sas_base_start_watchdog - start the fault_reset_work_q
++ * @ioc: pointer to scsi command object
++ * Context: sleep.
++ *
++ * Return nothing.
++ */
++void
++mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
++{
++ unsigned long flags;
++
++ if (ioc->fault_reset_work_q)
++ return;
++
++ /* initialize fault polling */
++ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
++ snprintf(ioc->fault_reset_work_q_name,
++ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
++ ioc->fault_reset_work_q =
++ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
++ if (!ioc->fault_reset_work_q) {
++ printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
++ ioc->name, __func__, __LINE__);
++ return;
++ }
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
++ if (ioc->fault_reset_work_q)
++ queue_delayed_work(ioc->fault_reset_work_q,
++ &ioc->fault_reset_work,
++ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++}
++
++/**
++ * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
++ * @ioc: pointer to scsi command object
++ * Context: sleep.
++ *
++ * Return nothing.
++ */
++void
++mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
++{
++ unsigned long flags;
++ struct workqueue_struct *wq;
++
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
++ wq = ioc->fault_reset_work_q;
++ ioc->fault_reset_work_q = NULL;
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ if (wq) {
++ if (!cancel_delayed_work(&ioc->fault_reset_work))
++ flush_workqueue(wq);
++ destroy_workqueue(wq);
++ }
++}
++
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ /**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+@@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
++ /* each nexus loss loginfo */
++ if (log_info == 0x31170000)
++ return;
++
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+@@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
+ }
+ }
+
+- pci_set_drvdata(pdev, ioc->shost);
+ _base_mask_interrupts(ioc);
+ r = _base_enable_msix(ioc);
+ if (r)
+@@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
+ ioc->pci_irq = -1;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+ return r;
+ }
+
+@@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+ return;
+ }
+
+@@ -3205,7 +3264,6 @@ int
+ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ {
+ int r, i;
+- unsigned long flags;
+
+ dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
+ __func__));
+@@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ if (r)
+ return r;
+
++ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+@@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ if (r)
+ goto out_free_resources;
+
+- /* initialize fault polling */
+- INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+- snprintf(ioc->fault_reset_work_q_name,
+- sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+- ioc->fault_reset_work_q =
+- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+- if (!ioc->fault_reset_work_q) {
+- printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
+- ioc->name, __func__, __LINE__);
+- goto out_free_resources;
+- }
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->fault_reset_work_q)
+- queue_delayed_work(ioc->fault_reset_work_q,
+- &ioc->fault_reset_work,
+- msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ mpt2sas_base_start_watchdog(ioc);
+ return 0;
+
+ out_free_resources:
+@@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ ioc->remove_host = 1;
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
++ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+@@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ void
+ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
+ {
+- unsigned long flags;
+- struct workqueue_struct *wq;
+
+ dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
+ __func__));
+
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- wq = ioc->fault_reset_work_q;
+- ioc->fault_reset_work_q = NULL;
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+- if (!cancel_delayed_work(&ioc->fault_reset_work))
+- flush_workqueue(wq);
+- destroy_workqueue(wq);
+-
++ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
++ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 36b1d10..1dd7c9a 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -672,6 +672,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
+
+ /* base shared API */
+ extern struct list_head mpt2sas_ioc_list;
++void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
++void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
+
+ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
+ void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
+index 58cfb97..6ddee16 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
+@@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count;
+- u8 issue_reset;
++ u8 issue_host_reset = 0;
+ u16 wait_state_count;
+
++ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
++ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+ retry_count = 0;
+
+ retry_config:
++ if (retry_count) {
++ if (retry_count > 2) /* attempt only 2 retries */
++ return -EFAULT;
++ printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
++ ioc->name, __func__, retry_count);
++ }
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+@@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return -EFAULT;
++ r = -EFAULT;
++ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+@@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return -EAGAIN;
++ r = -EAGAIN;
++ goto out;
+ }
+
+ r = 0;
+@@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+- if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
+- issue_reset = 1;
+- goto issue_host_reset;
++ retry_count++;
++ if (ioc->config_cmds.smid == smid)
++ mpt2sas_base_free_smid(ioc, smid);
++ if ((ioc->shost_recovery) ||
++ (ioc->config_cmds.status & MPT2_CMD_RESET))
++ goto retry_config;
++ issue_host_reset = 1;
++ r = -EFAULT;
++ goto out;
+ }
+ if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+@@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ if (retry_count)
+ printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
+ ioc->name, __func__);
++out:
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return r;
+-
+- issue_host_reset:
+- if (issue_reset)
++ mutex_unlock(&ioc->config_cmds.mutex);
++ if (issue_host_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- if (!retry_count) {
+- printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
+- ioc->name, __func__);
+- retry_count++;
+- goto retry_config;
+- }
+- return -EFAULT;
++ return r;
+ }
+
+ /**
+@@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ struct config_request mem;
+-
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sz);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sz);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ struct config_request mem;
+ u16 ioc_status;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ memset(config_page, 0, sz);
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+ struct config_request mem;
+ u16 ioc_status;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index e3a7967..7dacc68 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -2560,6 +2560,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
++ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
++
++ if (log_info == 0x31170000)
++ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+@@ -3205,7 +3209,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ __le64 sas_address;
+ int i;
+ unsigned long flags;
+- struct _sas_port *mpt2sas_port;
++ struct _sas_port *mpt2sas_port = NULL;
+ int rc = 0;
+
+ if (!handle)
+@@ -3297,12 +3301,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ &expander_pg1, i, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+- continue;
++ rc = -1;
++ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+- mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
+- expander_pg1, sas_expander->parent_dev);
++
++ if ((mpt2sas_transport_add_expander_phy(ioc,
++ &sas_expander->phy[i], expander_pg1,
++ sas_expander->parent_dev))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ rc = -1;
++ goto out_fail;
++ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+@@ -3319,8 +3331,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+
+ out_fail:
+
+- if (sas_expander)
+- kfree(sas_expander->phy);
++ if (mpt2sas_port)
++ mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
++ sas_expander->parent_handle);
+ kfree(sas_expander);
+ return rc;
+ }
+@@ -3442,12 +3455,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
+ sas_device->hidden_raid_component = is_pd;
+
+ /* get enclosure_logical_id */
+- if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
+- MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+- sas_device->enclosure_handle))) {
++ if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
++ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
++ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+- }
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+@@ -4029,12 +4041,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+-#if 0 /* RAID_HACKS */
+- if (le32_to_cpu(event_data->Flags) &
+- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+- return;
+-#endif
+-
+ mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ printk(MPT2SAS_ERR_FMT
+@@ -4089,12 +4095,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
+ unsigned long flags;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+
+-#if 0 /* RAID_HACKS */
+- if (le32_to_cpu(event_data->Flags) &
+- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+- return;
+-#endif
+-
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+@@ -4207,14 +4207,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
++ Mpi2ConfigReply_t mpi_reply;
++ Mpi2SasDevicePage0_t sas_device_pg0;
++ u32 ioc_status;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+- if (sas_device)
++ if (sas_device) {
+ sas_device->hidden_raid_component = 1;
+- else
+- _scsih_add_device(ioc, handle, 0, 1);
++ return;
++ }
++
++ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
++ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ _scsih_link_change(ioc,
++ le16_to_cpu(sas_device_pg0.ParentDevHandle),
++ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++ _scsih_add_device(ioc, handle, 0, 1);
+ }
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+@@ -4314,12 +4338,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ {
+ Mpi2EventIrConfigElement_t *element;
+ int i;
++ u8 foreign_config;
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+ #endif
++ foreign_config = (le32_to_cpu(event_data->Flags) &
++ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+@@ -4327,11 +4354,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+- _scsih_sas_volume_add(ioc, element);
++ if (!foreign_config)
++ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+- _scsih_sas_volume_delete(ioc, element);
++ if (!foreign_config)
++ _scsih_sas_volume_delete(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+@@ -4450,6 +4479,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
++ Mpi2ConfigReply_t mpi_reply;
++ Mpi2SasDevicePage0_t sas_device_pg0;
++ u32 ioc_status;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+@@ -4466,22 +4498,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ switch (state) {
+-#if 0
+- case MPI2_RAID_PD_STATE_OFFLINE:
+- if (sas_device)
+- _scsih_remove_device(ioc, handle);
+- break;
+-#endif
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+- if (sas_device)
++ if (sas_device) {
+ sas_device->hidden_raid_component = 1;
+- else
+- _scsih_add_device(ioc, handle, 0, 1);
++ return;
++ }
++
++ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
++ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
++ handle))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ _scsih_link_change(ioc,
++ le16_to_cpu(sas_device_pg0.ParentDevHandle),
++ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++ _scsih_add_device(ioc, handle, 0, 1);
++
+ break;
+
++ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+@@ -5549,6 +5599,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 device_state;
+
++ mpt2sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+@@ -5591,6 +5642,7 @@ scsih_resume(struct pci_dev *pdev)
+
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
++ mpt2sas_base_start_watchdog(ioc);
+ return 0;
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
+index e7d4479..798f362 100644
+--- a/drivers/usb/core/hcd.h
++++ b/drivers/usb/core/hcd.h
+@@ -224,6 +224,10 @@ struct hc_driver {
+ void (*relinquish_port)(struct usb_hcd *, int);
+ /* has a port been handed over to a companion? */
+ int (*port_handed_over)(struct usb_hcd *, int);
++
++ /* CLEAR_TT_BUFFER completion callback */
++ void (*clear_tt_buffer_complete)(struct usb_hcd *,
++ struct usb_host_endpoint *);
+ };
+
+ extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index be86ae3..2fc5b57 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -448,10 +448,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+ * talking to TTs must queue control transfers (not just bulk and iso), so
+ * both can talk to the same hub concurrently.
+ */
+-static void hub_tt_kevent (struct work_struct *work)
++static void hub_tt_work(struct work_struct *work)
+ {
+ struct usb_hub *hub =
+- container_of(work, struct usb_hub, tt.kevent);
++ container_of(work, struct usb_hub, tt.clear_work);
+ unsigned long flags;
+ int limit = 100;
+
+@@ -460,6 +460,7 @@ static void hub_tt_kevent (struct work_struct *work)
+ struct list_head *temp;
+ struct usb_tt_clear *clear;
+ struct usb_device *hdev = hub->hdev;
++ const struct hc_driver *drv;
+ int status;
+
+ temp = hub->tt.clear_list.next;
+@@ -469,21 +470,25 @@ static void hub_tt_kevent (struct work_struct *work)
+ /* drop lock so HCD can concurrently report other TT errors */
+ spin_unlock_irqrestore (&hub->tt.lock, flags);
+ status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
+- spin_lock_irqsave (&hub->tt.lock, flags);
+-
+ if (status)
+ dev_err (&hdev->dev,
+ "clear tt %d (%04x) error %d\n",
+ clear->tt, clear->devinfo, status);
++
++ /* Tell the HCD, even if the operation failed */
++ drv = clear->hcd->driver;
++ if (drv->clear_tt_buffer_complete)
++ (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
++
+ kfree(clear);
++ spin_lock_irqsave(&hub->tt.lock, flags);
+ }
+ spin_unlock_irqrestore (&hub->tt.lock, flags);
+ }
+
+ /**
+- * usb_hub_tt_clear_buffer - clear control/bulk TT state in high speed hub
+- * @udev: the device whose split transaction failed
+- * @pipe: identifies the endpoint of the failed transaction
++ * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
++ * @urb: an URB associated with the failed or incomplete split transaction
+ *
+ * High speed HCDs use this to tell the hub driver that some split control or
+ * bulk transaction failed in a way that requires clearing internal state of
+@@ -493,8 +498,10 @@ static void hub_tt_kevent (struct work_struct *work)
+ * It may not be possible for that hub to handle additional full (or low)
+ * speed transactions until that state is fully cleared out.
+ */
+-void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
++int usb_hub_clear_tt_buffer(struct urb *urb)
+ {
++ struct usb_device *udev = urb->dev;
++ int pipe = urb->pipe;
+ struct usb_tt *tt = udev->tt;
+ unsigned long flags;
+ struct usb_tt_clear *clear;
+@@ -506,7 +513,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
+ if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
+ dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
+ /* FIXME recover somehow ... RESET_TT? */
+- return;
++ return -ENOMEM;
+ }
+
+ /* info that CLEAR_TT_BUFFER needs */
+@@ -518,14 +525,19 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
+ : (USB_ENDPOINT_XFER_BULK << 11);
+ if (usb_pipein (pipe))
+ clear->devinfo |= 1 << 15;
+-
++
++ /* info for completion callback */
++ clear->hcd = bus_to_hcd(udev->bus);
++ clear->ep = urb->ep;
++
+ /* tell keventd to clear state for this TT */
+ spin_lock_irqsave (&tt->lock, flags);
+ list_add_tail (&clear->clear_list, &tt->clear_list);
+- schedule_work (&tt->kevent);
++ schedule_work(&tt->clear_work);
+ spin_unlock_irqrestore (&tt->lock, flags);
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer);
++EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
+
+ /* If do_delay is false, return the number of milliseconds the caller
+ * needs to delay.
+@@ -816,7 +828,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+ if (hub->has_indicators)
+ cancel_delayed_work_sync(&hub->leds);
+ if (hub->tt.hub)
+- cancel_work_sync(&hub->tt.kevent);
++ cancel_work_sync(&hub->tt.clear_work);
+ }
+
+ /* caller has locked the hub device */
+@@ -933,7 +945,7 @@ static int hub_configure(struct usb_hub *hub,
+
+ spin_lock_init (&hub->tt.lock);
+ INIT_LIST_HEAD (&hub->tt.clear_list);
+- INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
++ INIT_WORK(&hub->tt.clear_work, hub_tt_work);
+ switch (hdev->descriptor.bDeviceProtocol) {
+ case 0:
+ break;
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 2a116ce..528c411 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -185,16 +185,18 @@ struct usb_tt {
+ /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
+ spinlock_t lock;
+ struct list_head clear_list; /* of usb_tt_clear */
+- struct work_struct kevent;
++ struct work_struct clear_work;
+ };
+
+ struct usb_tt_clear {
+ struct list_head clear_list;
+ unsigned tt;
+ u16 devinfo;
++ struct usb_hcd *hcd;
++ struct usb_host_endpoint *ep;
+ };
+
+-extern void usb_hub_tt_clear_buffer(struct usb_device *dev, int pipe);
++extern int usb_hub_clear_tt_buffer(struct urb *urb);
+ extern void usb_ep0_reinit(struct usb_device *);
+
+ #endif /* __LINUX_HUB_H */
+diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
+index bf69f47..5c25b1a 100644
+--- a/drivers/usb/host/ehci-au1xxx.c
++++ b/drivers/usb/host/ehci-au1xxx.c
+@@ -112,6 +112,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index 01c3da3..7fb1ef0 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -324,6 +324,8 @@ static const struct hc_driver ehci_fsl_hc_driver = {
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static int ehci_fsl_drv_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index c637207..d75b8cf 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+- WARN_ON(1);
++ /* QH might be waiting for a Clear-TT-Buffer */
++ qh_completions(ehci, qh);
+ break;
+ }
+ break;
+@@ -1003,6 +1004,8 @@ idle_timeout:
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
++ if (qh->clearing_tt)
++ goto idle_timeout;
+ if (list_empty (&qh->qtd_list)) {
+ qh_put (qh);
+ break;
+diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
+index 9c32063..8573b03 100644
+--- a/drivers/usb/host/ehci-ixp4xx.c
++++ b/drivers/usb/host/ehci-ixp4xx.c
+@@ -60,6 +60,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
+ #endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static int ixp4xx_ehci_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
+index 9d48790..64ab30a 100644
+--- a/drivers/usb/host/ehci-orion.c
++++ b/drivers/usb/host/ehci-orion.c
+@@ -164,6 +164,8 @@ static const struct hc_driver ehci_orion_hc_driver = {
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static void __init
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 5aa8bce..a26b7f7 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -408,6 +408,8 @@ static const struct hc_driver ehci_pci_hc_driver = {
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
+index ef732b7..8b6556e 100644
+--- a/drivers/usb/host/ehci-ppc-of.c
++++ b/drivers/usb/host/ehci-ppc-of.c
+@@ -78,6 +78,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
+ #endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+
+diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
+index 1ba9f9a..efefc91 100644
+--- a/drivers/usb/host/ehci-ps3.c
++++ b/drivers/usb/host/ehci-ps3.c
+@@ -74,6 +74,8 @@ static const struct hc_driver ps3_ehci_hc_driver = {
+ #endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
++
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 1976b1b..a39f2c6 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -139,6 +139,55 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
+
+ /*-------------------------------------------------------------------------*/
+
++static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
++
++static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
++ struct usb_host_endpoint *ep)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct ehci_qh *qh = ep->hcpriv;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ehci->lock, flags);
++ qh->clearing_tt = 0;
++ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
++ && HC_IS_RUNNING(hcd->state))
++ qh_link_async(ehci, qh);
++ spin_unlock_irqrestore(&ehci->lock, flags);
++}
++
++static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
++ struct urb *urb, u32 token)
++{
++
++ /* If an async split transaction gets an error or is unlinked,
++ * the TT buffer may be left in an indeterminate state. We
++ * have to clear the TT buffer.
++ *
++ * Note: this routine is never called for Isochronous transfers.
++ */
++ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
++#ifdef DEBUG
++ struct usb_device *tt = urb->dev->tt->hub;
++ dev_dbg(&tt->dev,
++ "clear tt buffer port %d, a%d ep%d t%08x\n",
++ urb->dev->ttport, urb->dev->devnum,
++ usb_pipeendpoint(urb->pipe), token);
++#endif /* DEBUG */
++ if (!ehci_is_TDI(ehci)
++ || urb->dev->tt->hub !=
++ ehci_to_hcd(ehci)->self.root_hub) {
++ if (usb_hub_clear_tt_buffer(urb) == 0)
++ qh->clearing_tt = 1;
++ } else {
++
++ /* REVISIT ARC-derived cores don't clear the root
++ * hub TT buffer in this way...
++ */
++ }
++ }
++}
++
+ static int qtd_copy_status (
+ struct ehci_hcd *ehci,
+ struct urb *urb,
+@@ -195,28 +244,6 @@ static int qtd_copy_status (
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ token, status);
+-
+- /* if async CSPLIT failed, try cleaning out the TT buffer */
+- if (status != -EPIPE
+- && urb->dev->tt
+- && !usb_pipeint(urb->pipe)
+- && ((token & QTD_STS_MMF) != 0
+- || QTD_CERR(token) == 0)
+- && (!ehci_is_TDI(ehci)
+- || urb->dev->tt->hub !=
+- ehci_to_hcd(ehci)->self.root_hub)) {
+-#ifdef DEBUG
+- struct usb_device *tt = urb->dev->tt->hub;
+- dev_dbg (&tt->dev,
+- "clear tt buffer port %d, a%d ep%d t%08x\n",
+- urb->dev->ttport, urb->dev->devnum,
+- usb_pipeendpoint (urb->pipe), token);
+-#endif /* DEBUG */
+- /* REVISIT ARC-derived cores don't clear the root
+- * hub TT buffer in this way...
+- */
+- usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
+- }
+ }
+
+ return status;
+@@ -407,9 +434,16 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(ehci, qtd->qtd_dma)
+- == qh->hw_current)
++ == qh->hw_current) {
+ token = hc32_to_cpu(ehci, qh->hw_token);
+
++ /* An unlink may leave an incomplete
++ * async transaction in the TT buffer.
++ * We have to clear it.
++ */
++ ehci_clear_tt_buffer(ehci, qh, urb, token);
++ }
++
+ /* force halt for unlinked or blocked qh, so we'll
+ * patch the qh later and so that completions can't
+ * activate it while we "know" it's stopped.
+@@ -435,6 +469,13 @@ halt:
+ && (qtd->hw_alt_next
+ & EHCI_LIST_END(ehci)))
+ last_status = -EINPROGRESS;
++
++ /* As part of low/full-speed endpoint-halt processing
++ * we must clear the TT buffer (11.17.5).
++ */
++ if (unlikely(last_status != -EINPROGRESS &&
++ last_status != -EREMOTEIO))
++ ehci_clear_tt_buffer(ehci, qh, urb, token);
+ }
+
+ /* if we're removing something not at the queue head,
+@@ -864,6 +905,10 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+ __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
+ struct ehci_qh *head;
+
++ /* Don't link a QH if there's a Clear-TT-Buffer pending */
++ if (unlikely(qh->clearing_tt))
++ return;
++
+ /* (re)start the async schedule? */
+ head = ehci->async;
+ timer_action_done (ehci, TIMER_ASYNC_OFF);
+@@ -893,6 +938,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+ head->qh_next.qh = qh;
+ head->hw_next = dma;
+
++ qh_get(qh);
+ qh->xacterrs = QH_XACTERR_MAX;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+@@ -1033,7 +1079,7 @@ submit_async (
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely (qh->qh_state == QH_STATE_IDLE))
+- qh_link_async (ehci, qh_get (qh));
++ qh_link_async(ehci, qh);
+ done:
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ if (unlikely (qh == NULL))
+@@ -1068,8 +1114,6 @@ static void end_unlink_async (struct ehci_hcd *ehci)
+ && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
+ qh_link_async (ehci, qh);
+ else {
+- qh_put (qh); // refcount from async list
+-
+ /* it's not free to turn the async schedule on/off; leave it
+ * active but idle for a while once it empties.
+ */
+@@ -1077,6 +1121,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
+ && ehci->async->qh_next.qh == NULL)
+ timer_action (ehci, TIMER_ASYNC_OFF);
+ }
++ qh_put(qh); /* refcount from async list */
+
+ if (next) {
+ ehci->reclaim = NULL;
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 6cff195..ec5af22 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -353,7 +353,9 @@ struct ehci_qh {
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+ #define NO_FRAME ((unsigned short)~0) /* pick new start */
++
+ struct usb_device *dev; /* access to TT */
++ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+ } __attribute__ ((aligned (32)));
+
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
+index 2493f05..d886bf9 100644
+--- a/drivers/video/xen-fbfront.c
++++ b/drivers/video/xen-fbfront.c
+@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
+
+ xenfb_init_shared_page(info, fb_info);
+
++ ret = xenfb_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
+ ret = register_framebuffer(fb_info);
+ if (ret) {
+ fb_deferred_io_cleanup(fb_info);
+@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
+ }
+ info->fb_info = fb_info;
+
+- ret = xenfb_connect_backend(dev, info);
+- if (ret < 0)
+- goto error;
+-
+ xenfb_make_preferred_console();
+ return 0;
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 4910612..941c78b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1165,8 +1165,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
+
+ if (!test_set_buffer_dirty(bh)) {
+ struct page *page = bh->b_page;
+- if (!TestSetPageDirty(page))
+- __set_page_dirty(page, page_mapping(page), 0);
++ if (!TestSetPageDirty(page)) {
++ struct address_space *mapping = page_mapping(page);
++ if (mapping)
++ __set_page_dirty(page, mapping, 0);
++ }
+ }
+ }
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 895823d..42414e5 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -677,8 +677,8 @@ exit:
+ }
+ EXPORT_SYMBOL(open_exec);
+
+-int kernel_read(struct file *file, unsigned long offset,
+- char *addr, unsigned long count)
++int kernel_read(struct file *file, loff_t offset,
++ char *addr, unsigned long count)
+ {
+ mm_segment_t old_fs;
+ loff_t pos = offset;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index c1462d4..7ae4e4b 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -934,26 +934,28 @@ static int can_do_hugetlb_shm(void)
+ return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
+ }
+
+-struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
++struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
++ struct user_struct **user)
+ {
+ int error = -ENOMEM;
+- int unlock_shm = 0;
+ struct file *file;
+ struct inode *inode;
+ struct dentry *dentry, *root;
+ struct qstr quick_string;
+- struct user_struct *user = current_user();
+
++ *user = NULL;
+ if (!hugetlbfs_vfsmount)
+ return ERR_PTR(-ENOENT);
+
+ if (!can_do_hugetlb_shm()) {
+- if (user_shm_lock(size, user)) {
+- unlock_shm = 1;
++ *user = current_user();
++ if (user_shm_lock(size, *user)) {
+ WARN_ONCE(1,
+ "Using mlock ulimits for SHM_HUGETLB deprecated\n");
+- } else
++ } else {
++ *user = NULL;
+ return ERR_PTR(-EPERM);
++ }
+ }
+
+ root = hugetlbfs_vfsmount->mnt_root;
+@@ -994,8 +996,10 @@ out_inode:
+ out_dentry:
+ dput(dentry);
+ out_shm_unlock:
+- if (unlock_shm)
+- user_shm_unlock(size, user);
++ if (*user) {
++ user_shm_unlock(size, *user);
++ *user = NULL;
++ }
+ return ERR_PTR(error);
+ }
+
+diff --git a/fs/inode.c b/fs/inode.c
+index bca0c61..a9e8ef0 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -118,12 +118,11 @@ static void wake_up_inode(struct inode *inode)
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
++int inode_init_always(struct super_block *sb, struct inode *inode)
+ {
+ static const struct address_space_operations empty_aops;
+ static struct inode_operations empty_iops;
+ static const struct file_operations empty_fops;
+-
+ struct address_space *const mapping = &inode->i_data;
+
+ inode->i_sb = sb;
+@@ -150,7 +149,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->dirtied_when = 0;
+
+ if (security_inode_alloc(inode))
+- goto out_free_inode;
++ goto out;
+
+ /* allocate and initialize an i_integrity */
+ if (ima_inode_alloc(inode))
+@@ -189,16 +188,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->i_private = NULL;
+ inode->i_mapping = mapping;
+
+- return inode;
++ return 0;
+
+ out_free_security:
+ security_inode_free(inode);
+-out_free_inode:
+- if (inode->i_sb->s_op->destroy_inode)
+- inode->i_sb->s_op->destroy_inode(inode);
+- else
+- kmem_cache_free(inode_cachep, (inode));
+- return NULL;
++out:
++ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(inode_init_always);
+
+@@ -211,23 +206,36 @@ static struct inode *alloc_inode(struct super_block *sb)
+ else
+ inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+- if (inode)
+- return inode_init_always(sb, inode);
+- return NULL;
++ if (!inode)
++ return NULL;
++
++ if (unlikely(inode_init_always(sb, inode))) {
++ if (inode->i_sb->s_op->destroy_inode)
++ inode->i_sb->s_op->destroy_inode(inode);
++ else
++ kmem_cache_free(inode_cachep, inode);
++ return NULL;
++ }
++
++ return inode;
+ }
+
+-void destroy_inode(struct inode *inode)
++void __destroy_inode(struct inode *inode)
+ {
+ BUG_ON(inode_has_buffers(inode));
+ ima_inode_free(inode);
+ security_inode_free(inode);
++}
++EXPORT_SYMBOL(__destroy_inode);
++
++void destroy_inode(struct inode *inode)
++{
++ __destroy_inode(inode);
+ if (inode->i_sb->s_op->destroy_inode)
+ inode->i_sb->s_op->destroy_inode(inode);
+ else
+ kmem_cache_free(inode_cachep, (inode));
+ }
+-EXPORT_SYMBOL(destroy_inode);
+-
+
+ /*
+ * These are initializations that only need to be done
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index b2c52b3..044990a 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -894,18 +894,17 @@ struct ocfs2_write_cluster_desc {
+ */
+ unsigned c_new;
+ unsigned c_unwritten;
++ unsigned c_needs_zero;
+ };
+
+-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
+-{
+- return d->c_new || d->c_unwritten;
+-}
+-
+ struct ocfs2_write_ctxt {
+ /* Logical cluster position / len of write */
+ u32 w_cpos;
+ u32 w_clen;
+
++ /* First cluster allocated in a nonsparse extend */
++ u32 w_first_new_cpos;
++
+ struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
+
+ /*
+@@ -983,6 +982,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
+ return -ENOMEM;
+
+ wc->w_cpos = pos >> osb->s_clustersize_bits;
++ wc->w_first_new_cpos = UINT_MAX;
+ cend = (pos + len - 1) >> osb->s_clustersize_bits;
+ wc->w_clen = cend - wc->w_cpos + 1;
+ get_bh(di_bh);
+@@ -1217,20 +1217,18 @@ out:
+ */
+ static int ocfs2_write_cluster(struct address_space *mapping,
+ u32 phys, unsigned int unwritten,
++ unsigned int should_zero,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_write_ctxt *wc, u32 cpos,
+ loff_t user_pos, unsigned user_len)
+ {
+- int ret, i, new, should_zero = 0;
++ int ret, i, new;
+ u64 v_blkno, p_blkno;
+ struct inode *inode = mapping->host;
+ struct ocfs2_extent_tree et;
+
+ new = phys == 0 ? 1 : 0;
+- if (new || unwritten)
+- should_zero = 1;
+-
+ if (new) {
+ u32 tmp_pos;
+
+@@ -1341,7 +1339,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
+ local_len = osb->s_clustersize - cluster_off;
+
+ ret = ocfs2_write_cluster(mapping, desc->c_phys,
+- desc->c_unwritten, data_ac, meta_ac,
++ desc->c_unwritten,
++ desc->c_needs_zero,
++ data_ac, meta_ac,
+ wc, desc->c_cpos, pos, local_len);
+ if (ret) {
+ mlog_errno(ret);
+@@ -1391,14 +1391,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
+ * newly allocated cluster.
+ */
+ desc = &wc->w_desc[0];
+- if (ocfs2_should_zero_cluster(desc))
++ if (desc->c_needs_zero)
+ ocfs2_figure_cluster_boundaries(osb,
+ desc->c_cpos,
+ &wc->w_target_from,
+ NULL);
+
+ desc = &wc->w_desc[wc->w_clen - 1];
+- if (ocfs2_should_zero_cluster(desc))
++ if (desc->c_needs_zero)
+ ocfs2_figure_cluster_boundaries(osb,
+ desc->c_cpos,
+ NULL,
+@@ -1466,13 +1466,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
+ phys++;
+ }
+
++ /*
++ * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
++ * file that got extended. w_first_new_cpos tells us
++ * where the newly allocated clusters are so we can
++ * zero them.
++ */
++ if (desc->c_cpos >= wc->w_first_new_cpos) {
++ BUG_ON(phys == 0);
++ desc->c_needs_zero = 1;
++ }
++
+ desc->c_phys = phys;
+ if (phys == 0) {
+ desc->c_new = 1;
++ desc->c_needs_zero = 1;
+ *clusters_to_alloc = *clusters_to_alloc + 1;
+ }
+- if (ext_flags & OCFS2_EXT_UNWRITTEN)
++
++ if (ext_flags & OCFS2_EXT_UNWRITTEN) {
+ desc->c_unwritten = 1;
++ desc->c_needs_zero = 1;
++ }
+
+ num_clusters--;
+ }
+@@ -1632,10 +1647,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
+ if (newsize <= i_size_read(inode))
+ return 0;
+
+- ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
++ ret = ocfs2_extend_no_holes(inode, newsize, pos);
+ if (ret)
+ mlog_errno(ret);
+
++ wc->w_first_new_cpos =
++ ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
++
+ return ret;
+ }
+
+@@ -1644,7 +1662,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
+ struct page **pagep, void **fsdata,
+ struct buffer_head *di_bh, struct page *mmap_page)
+ {
+- int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
++ int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
+ unsigned int clusters_to_alloc, extents_to_split;
+ struct ocfs2_write_ctxt *wc;
+ struct inode *inode = mapping->host;
+@@ -1722,8 +1740,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
+
+ }
+
+- ocfs2_set_target_boundaries(osb, wc, pos, len,
+- clusters_to_alloc + extents_to_split);
++ /*
++ * We have to zero sparse allocated clusters, unwritten extent clusters,
++ * and non-sparse clusters we just extended. For non-sparse writes,
++ * we know zeros will only be needed in the first and/or last cluster.
++ */
++ if (clusters_to_alloc || extents_to_split ||
++ (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
++ wc->w_desc[wc->w_clen - 1].c_needs_zero)))
++ cluster_of_pages = 1;
++ else
++ cluster_of_pages = 0;
++
++ ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+@@ -1756,8 +1785,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
+ * extent.
+ */
+ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+- clusters_to_alloc + extents_to_split,
+- mmap_page);
++ cluster_of_pages, mmap_page);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_quota;
+diff --git a/fs/select.c b/fs/select.c
+index 0fe0e14..6d76b82 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -110,6 +110,7 @@ void poll_initwait(struct poll_wqueues *pwq)
+ {
+ init_poll_funcptr(&pwq->pt, __pollwait);
+ pwq->polling_task = current;
++ pwq->triggered = 0;
+ pwq->error = 0;
+ pwq->table = NULL;
+ pwq->inline_index = 0;
+diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
+index 89b81ee..1863b0d 100644
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -63,6 +63,10 @@ xfs_inode_alloc(
+ ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ if (!ip)
+ return NULL;
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
++ kmem_zone_free(xfs_inode_zone, ip);
++ return NULL;
++ }
+
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+@@ -104,17 +108,6 @@ xfs_inode_alloc(
+ #ifdef XFS_DIR2_TRACE
+ ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
+ #endif
+- /*
+- * Now initialise the VFS inode. We do this after the xfs_inode
+- * initialisation as internal failures will result in ->destroy_inode
+- * being called and that will pass down through the reclaim path and
+- * free the XFS inode. This path requires the XFS inode to already be
+- * initialised. Hence if this call fails, the xfs_inode has already
+- * been freed and we should not reference it at all in the error
+- * handling.
+- */
+- if (!inode_init_always(mp->m_super, VFS_I(ip)))
+- return NULL;
+
+ /* prevent anyone from using this yet */
+ VFS_I(ip)->i_state = I_NEW|I_LOCK;
+@@ -122,6 +115,71 @@ xfs_inode_alloc(
+ return ip;
+ }
+
++STATIC void
++xfs_inode_free(
++ struct xfs_inode *ip)
++{
++ switch (ip->i_d.di_mode & S_IFMT) {
++ case S_IFREG:
++ case S_IFDIR:
++ case S_IFLNK:
++ xfs_idestroy_fork(ip, XFS_DATA_FORK);
++ break;
++ }
++
++ if (ip->i_afp)
++ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
++
++#ifdef XFS_INODE_TRACE
++ ktrace_free(ip->i_trace);
++#endif
++#ifdef XFS_BMAP_TRACE
++ ktrace_free(ip->i_xtrace);
++#endif
++#ifdef XFS_BTREE_TRACE
++ ktrace_free(ip->i_btrace);
++#endif
++#ifdef XFS_RW_TRACE
++ ktrace_free(ip->i_rwtrace);
++#endif
++#ifdef XFS_ILOCK_TRACE
++ ktrace_free(ip->i_lock_trace);
++#endif
++#ifdef XFS_DIR2_TRACE
++ ktrace_free(ip->i_dir_trace);
++#endif
++
++ if (ip->i_itemp) {
++ /*
++ * Only if we are shutting down the fs will we see an
++ * inode still in the AIL. If it is there, we should remove
++ * it to prevent a use-after-free from occurring.
++ */
++ xfs_log_item_t *lip = &ip->i_itemp->ili_item;
++ struct xfs_ail *ailp = lip->li_ailp;
++
++ ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
++ XFS_FORCED_SHUTDOWN(ip->i_mount));
++ if (lip->li_flags & XFS_LI_IN_AIL) {
++ spin_lock(&ailp->xa_lock);
++ if (lip->li_flags & XFS_LI_IN_AIL)
++ xfs_trans_ail_delete(ailp, lip);
++ else
++ spin_unlock(&ailp->xa_lock);
++ }
++ xfs_inode_item_destroy(ip);
++ ip->i_itemp = NULL;
++ }
++
++ /* asserts to verify all state is correct here */
++ ASSERT(atomic_read(&ip->i_iocount) == 0);
++ ASSERT(atomic_read(&ip->i_pincount) == 0);
++ ASSERT(!spin_is_locked(&ip->i_flags_lock));
++ ASSERT(completion_done(&ip->i_flush));
++
++ kmem_zone_free(xfs_inode_zone, ip);
++}
++
+ /*
+ * Check the validity of the inode we just found it the cache
+ */
+@@ -166,7 +224,7 @@ xfs_iget_cache_hit(
+ * errors cleanly, then tag it so it can be set up correctly
+ * later.
+ */
+- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
+ error = ENOMEM;
+ goto out_error;
+ }
+@@ -298,7 +356,8 @@ out_preload_end:
+ if (lock_flags)
+ xfs_iunlock(ip, lock_flags);
+ out_destroy:
+- xfs_destroy_inode(ip);
++ __destroy_inode(VFS_I(ip));
++ xfs_inode_free(ip);
+ return error;
+ }
+
+@@ -506,62 +565,7 @@ xfs_ireclaim(
+ XFS_QM_DQDETACH(ip->i_mount, ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+- switch (ip->i_d.di_mode & S_IFMT) {
+- case S_IFREG:
+- case S_IFDIR:
+- case S_IFLNK:
+- xfs_idestroy_fork(ip, XFS_DATA_FORK);
+- break;
+- }
+-
+- if (ip->i_afp)
+- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+-
+-#ifdef XFS_INODE_TRACE
+- ktrace_free(ip->i_trace);
+-#endif
+-#ifdef XFS_BMAP_TRACE
+- ktrace_free(ip->i_xtrace);
+-#endif
+-#ifdef XFS_BTREE_TRACE
+- ktrace_free(ip->i_btrace);
+-#endif
+-#ifdef XFS_RW_TRACE
+- ktrace_free(ip->i_rwtrace);
+-#endif
+-#ifdef XFS_ILOCK_TRACE
+- ktrace_free(ip->i_lock_trace);
+-#endif
+-#ifdef XFS_DIR2_TRACE
+- ktrace_free(ip->i_dir_trace);
+-#endif
+- if (ip->i_itemp) {
+- /*
+- * Only if we are shutting down the fs will we see an
+- * inode still in the AIL. If it is there, we should remove
+- * it to prevent a use-after-free from occurring.
+- */
+- xfs_log_item_t *lip = &ip->i_itemp->ili_item;
+- struct xfs_ail *ailp = lip->li_ailp;
+-
+- ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+- XFS_FORCED_SHUTDOWN(ip->i_mount));
+- if (lip->li_flags & XFS_LI_IN_AIL) {
+- spin_lock(&ailp->xa_lock);
+- if (lip->li_flags & XFS_LI_IN_AIL)
+- xfs_trans_ail_delete(ailp, lip);
+- else
+- spin_unlock(&ailp->xa_lock);
+- }
+- xfs_inode_item_destroy(ip);
+- ip->i_itemp = NULL;
+- }
+- /* asserts to verify all state is correct here */
+- ASSERT(atomic_read(&ip->i_iocount) == 0);
+- ASSERT(atomic_read(&ip->i_pincount) == 0);
+- ASSERT(!spin_is_locked(&ip->i_flags_lock));
+- ASSERT(completion_done(&ip->i_flush));
+- kmem_zone_free(xfs_inode_zone, ip);
++ xfs_inode_free(ip);
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index f879c1b..71c20ec 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -309,23 +309,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
+ }
+
+ /*
+- * Get rid of a partially initialized inode.
+- *
+- * We have to go through destroy_inode to make sure allocations
+- * from init_inode_always like the security data are undone.
+- *
+- * We mark the inode bad so that it takes the short cut in
+- * the reclaim path instead of going through the flush path
+- * which doesn't make sense for an inode that has never seen the
+- * light of day.
+- */
+-static inline void xfs_destroy_inode(struct xfs_inode *ip)
+-{
+- make_bad_inode(VFS_I(ip));
+- return destroy_inode(VFS_I(ip));
+-}
+-
+-/*
+ * i_flags helper functions
+ */
+ static inline void
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 3750f04..9dbdff3 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -3180,7 +3180,7 @@ try_again:
+ STATIC void
+ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
+ {
+- ASSERT(spin_is_locked(&log->l_icloglock));
++ assert_spin_locked(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ xlog_state_switch_iclogs(log, iclog, 0);
+diff --git a/include/acpi/processor.h b/include/acpi/processor.h
+index 4927c06..e498c79 100644
+--- a/include/acpi/processor.h
++++ b/include/acpi/processor.h
+@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
+ cpumask_var_t shared_cpu_map;
+ int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
+ int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
+- int state);
++ int state, bool force);
+
+ u32 address;
+ u8 duty_offset;
+@@ -320,7 +320,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+ /* in processor_throttling.c */
+ int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr);
+-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
++extern int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force);
+ extern const struct file_operations acpi_processor_throttling_fops;
+ extern void acpi_processor_throttling_init(void);
+ /* in processor_idle.c */
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 2878811..756d78b 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -94,13 +94,13 @@ extern void __bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+ extern void __bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+-extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
++extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+-extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
++extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+@@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ }
+ }
+
+-static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
++static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+ {
+ if (small_const_nbits(nbits))
+- *dst = *src1 & *src2;
+- else
+- __bitmap_and(dst, src1, src2, nbits);
++ return (*dst = *src1 & *src2) != 0;
++ return __bitmap_and(dst, src1, src2, nbits);
+ }
+
+ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+@@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ __bitmap_xor(dst, src1, src2, nbits);
+ }
+
+-static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
++static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+ {
+ if (small_const_nbits(nbits))
+- *dst = *src1 & ~(*src2);
+- else
+- __bitmap_andnot(dst, src1, src2, nbits);
++ return (*dst = *src1 & ~(*src2)) != 0;
++ return __bitmap_andnot(dst, src1, src2, nbits);
+ }
+
+ static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index c5ac87c..796df12 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -43,10 +43,10 @@
+ * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
+ * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
+ *
+- * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
++ * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
+- * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
++ * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void cpus_complement(dst, src) dst = ~src
+ *
+ * int cpus_equal(mask1, mask2) Does mask1 == mask2?
+@@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+ }
+
+ #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
++static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+ {
+- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
++ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+ }
+
+ #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+@@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+
+ #define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
++static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+ {
+- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
++ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+ }
+
+ #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
+@@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp)
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+-static inline void cpumask_and(struct cpumask *dstp,
++static inline int cpumask_and(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+ {
+- bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
++ return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+ }
+
+@@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp,
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+-static inline void cpumask_andnot(struct cpumask *dstp,
++static inline int cpumask_andnot(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+ {
+- bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
++ return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+ }
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 3b534e5..53618df 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2121,7 +2121,7 @@ extern struct file *do_filp_open(int dfd, const char *pathname,
+ int open_flag, int mode, int acc_mode);
+ extern int may_open(struct path *, int, int);
+
+-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
++extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+ extern struct file * open_exec(const char *);
+
+ /* fs/dcache.c -- generic fs support functions */
+@@ -2135,7 +2135,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+
+ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+
+-extern struct inode * inode_init_always(struct super_block *, struct inode *);
++extern int inode_init_always(struct super_block *, struct inode *);
+ extern void inode_init_once(struct inode *);
+ extern void inode_add_to_lists(struct super_block *, struct inode *);
+ extern void iput(struct inode *);
+@@ -2162,6 +2162,7 @@ extern void __iget(struct inode * inode);
+ extern void iget_failed(struct inode *);
+ extern void clear_inode(struct inode *);
+ extern void destroy_inode(struct inode *);
++extern void __destroy_inode(struct inode *);
+ extern struct inode *new_inode(struct super_block *);
+ extern int should_remove_suid(struct dentry *);
+ extern int file_remove_suid(struct file *);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 03be7f2..7e2f1ef 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -10,6 +10,7 @@
+ #include <asm/tlbflush.h>
+
+ struct ctl_table;
++struct user_struct;
+
+ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+ {
+@@ -139,7 +140,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
+
+ extern const struct file_operations hugetlbfs_file_operations;
+ extern struct vm_operations_struct hugetlb_vm_ops;
+-struct file *hugetlb_file_setup(const char *name, size_t, int);
++struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
++ struct user_struct **user);
+ int hugetlb_get_quota(struct address_space *mapping, long delta);
+ void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+@@ -161,7 +163,7 @@ static inline void set_file_hugepages(struct file *file)
+
+ #define is_file_hugepages(file) 0
+ #define set_file_hugepages(file) BUG()
+-#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
++#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
+
+ #endif /* !CONFIG_HUGETLBFS */
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 5eed8fa..340e909 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -110,6 +110,7 @@ struct kvm_memory_slot {
+
+ struct kvm_kernel_irq_routing_entry {
+ u32 gsi;
++ u32 type;
+ int (*set)(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int level);
+ union {
+diff --git a/init/main.c b/init/main.c
+index d721dad..303903c 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -702,13 +702,14 @@ asmlinkage void __init start_kernel(void)
+ int initcall_debug;
+ core_param(initcall_debug, initcall_debug, bool, 0644);
+
++static char msgbuf[64];
++static struct boot_trace_call call;
++static struct boot_trace_ret ret;
++
+ int do_one_initcall(initcall_t fn)
+ {
+ int count = preempt_count();
+ ktime_t calltime, delta, rettime;
+- char msgbuf[64];
+- struct boot_trace_call call;
+- struct boot_trace_ret ret;
+
+ if (initcall_debug) {
+ call.caller = task_pid_nr(current);
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 4259716..30b1265 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -174,7 +174,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ shm_unlock(shp);
+ if (!is_file_hugepages(shp->shm_file))
+ shmem_lock(shp->shm_file, 0, shp->mlock_user);
+- else
++ else if (shp->mlock_user)
+ user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
+ shp->mlock_user);
+ fput (shp->shm_file);
+@@ -369,8 +369,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ /* hugetlb_file_setup applies strict accounting */
+ if (shmflg & SHM_NORESERVE)
+ acctflag = VM_NORESERVE;
+- file = hugetlb_file_setup(name, size, acctflag);
+- shp->mlock_user = current_user();
++ file = hugetlb_file_setup(name, size, acctflag,
++ &shp->mlock_user);
+ } else {
+ /*
+ * Do not allow no accounting for OVERCOMMIT_NEVER, even
+@@ -411,6 +411,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ return error;
+
+ no_id:
++ if (shp->mlock_user) /* shmflg & SHM_HUGETLB case */
++ user_shm_unlock(size, shp->mlock_user);
+ fput(file);
+ no_file:
+ security_shm_free(shp);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 9c1f52d..f4be1ee 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -816,11 +816,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ {
+ struct signal_struct *sig;
+
+- if (clone_flags & CLONE_THREAD) {
+- atomic_inc(¤t->signal->count);
+- atomic_inc(¤t->signal->live);
++ if (clone_flags & CLONE_THREAD)
+ return 0;
+- }
+
+ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+ tsk->signal = sig;
+@@ -878,16 +875,6 @@ void __cleanup_signal(struct signal_struct *sig)
+ kmem_cache_free(signal_cachep, sig);
+ }
+
+-static void cleanup_signal(struct task_struct *tsk)
+-{
+- struct signal_struct *sig = tsk->signal;
+-
+- atomic_dec(&sig->live);
+-
+- if (atomic_dec_and_test(&sig->count))
+- __cleanup_signal(sig);
+-}
+-
+ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
+ {
+ unsigned long new_flags = p->flags;
+@@ -1237,6 +1224,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ }
+
+ if (clone_flags & CLONE_THREAD) {
++ atomic_inc(¤t->signal->count);
++ atomic_inc(¤t->signal->live);
+ p->group_leader = current->group_leader;
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+ }
+@@ -1281,7 +1270,8 @@ bad_fork_cleanup_mm:
+ if (p->mm)
+ mmput(p->mm);
+ bad_fork_cleanup_signal:
+- cleanup_signal(p);
++ if (!(clone_flags & CLONE_THREAD))
++ __cleanup_signal(p->signal);
+ bad_fork_cleanup_sighand:
+ __cleanup_sighand(p->sighand);
+ bad_fork_cleanup_fs:
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 4ebaf85..7fbaa09 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -216,12 +216,12 @@ int kthread_stop(struct task_struct *k)
+ /* Now set kthread_should_stop() to true, and wake it up. */
+ kthread_stop_info.k = k;
+ wake_up_process(k);
+- put_task_struct(k);
+
+ /* Once it dies, reset stop ptr, gather result and we're done. */
+ wait_for_completion(&kthread_stop_info.done);
+ kthread_stop_info.k = NULL;
+ ret = kthread_stop_info.err;
++ put_task_struct(k);
+ mutex_unlock(&kthread_stop_lock);
+
+ trace_sched_kthread_stop_ret(ret);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d803473..2dfc931 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2414,11 +2414,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ stack_t oss;
+ int error;
+
+- if (uoss) {
+- oss.ss_sp = (void __user *) current->sas_ss_sp;
+- oss.ss_size = current->sas_ss_size;
+- oss.ss_flags = sas_ss_flags(sp);
+- }
++ oss.ss_sp = (void __user *) current->sas_ss_sp;
++ oss.ss_size = current->sas_ss_size;
++ oss.ss_flags = sas_ss_flags(sp);
+
+ if (uss) {
+ void __user *ss_sp;
+@@ -2461,13 +2459,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ current->sas_ss_size = ss_size;
+ }
+
++ error = 0;
+ if (uoss) {
+ error = -EFAULT;
+- if (copy_to_user(uoss, &oss, sizeof(oss)))
++ if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
+ goto out;
++ error = __put_user(oss.ss_sp, &uoss->ss_sp) |
++ __put_user(oss.ss_size, &uoss->ss_size) |
++ __put_user(oss.ss_flags, &uoss->ss_flags);
+ }
+
+- error = 0;
+ out:
+ return error;
+ }
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 35a1f7f..7025658 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
+ }
+ EXPORT_SYMBOL(__bitmap_shift_left);
+
+-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
++int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+ {
+ int k;
+ int nr = BITS_TO_LONGS(bits);
++ unsigned long result = 0;
+
+ for (k = 0; k < nr; k++)
+- dst[k] = bitmap1[k] & bitmap2[k];
++ result |= (dst[k] = bitmap1[k] & bitmap2[k]);
++ return result != 0;
+ }
+ EXPORT_SYMBOL(__bitmap_and);
+
+@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ }
+ EXPORT_SYMBOL(__bitmap_xor);
+
+-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
++int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+ {
+ int k;
+ int nr = BITS_TO_LONGS(bits);
++ unsigned long result = 0;
+
+ for (k = 0; k < nr; k++)
+- dst[k] = bitmap1[k] & ~bitmap2[k];
++ result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
++ return result != 0;
+ }
+ EXPORT_SYMBOL(__bitmap_andnot);
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6bf3cc4..b91020e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2342,7 +2342,6 @@ static void build_zonelists(pg_data_t *pgdat)
+ prev_node = local_node;
+ nodes_clear(used_mask);
+
+- memset(node_load, 0, sizeof(node_load));
+ memset(node_order, 0, sizeof(node_order));
+ j = 0;
+
+@@ -2451,6 +2450,9 @@ static int __build_all_zonelists(void *dummy)
+ {
+ int nid;
+
++#ifdef CONFIG_NUMA
++ memset(node_load, 0, sizeof(node_load));
++#endif
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index d6a9243..e8e9bad 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1242,6 +1242,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
+ return -ENOBUFS;
+
+ *uaddr_len = sizeof(struct sockaddr_at);
++ memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
+
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 6aa154e..5df3bf6 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -397,6 +397,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
+ if (peer)
+ return -EOPNOTSUPP;
+
++ memset(addr, 0, sizeof(*addr));
+ addr->can_family = AF_CAN;
+ addr->can_ifindex = ro->ifindex;
+
+diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
+index 6f479fa..3bafb21 100644
+--- a/net/econet/af_econet.c
++++ b/net/econet/af_econet.c
+@@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
+ if (peer)
+ return -EOPNOTSUPP;
+
++ memset(sec, 0, sizeof(*sec));
+ mutex_lock(&econet_mutex);
+
+ sk = sock->sk;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 3e7e910..d1d88e6 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -814,6 +814,8 @@ int ip_append_data(struct sock *sk,
+ inet->cork.addr = ipc->addr;
+ }
+ rt = *rtp;
++ if (unlikely(!rt))
++ return -EFAULT;
+ /*
+ * We steal reference to this route, caller should not release it
+ */
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 61f5538..55e315a 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -294,8 +294,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ v4addr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+- chk_addr_ret != RTN_BROADCAST)
++ chk_addr_ret != RTN_BROADCAST) {
++ err = -EADDRNOTAVAIL;
+ goto out;
++ }
+ } else {
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index e0fbcff..b06224b 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+
++ memset(&saddr, 0, sizeof(saddr));
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index febae70..515d556 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct llc_sock *llc = llc_sk(sk);
+ int rc = 0;
+
++ memset(&sllc, 0, sizeof(sllc));
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index 947aaaa..baf0f77 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -376,6 +376,14 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+
++ if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
++ return;
++
++ if (WARN(!sta->ampdu_mlme.tid_tx[tid],
++ "TID %d gone but expected when splicing aggregates from"
++ "the pending queue\n", tid))
++ return;
++
+ if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* mark queue as pending, it is stopped already */
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 3be0e01..0c3e755 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
+ sax->fsa_ax25.sax25_family = AF_NETROM;
+ sax->fsa_ax25.sax25_ndigis = 1;
+ sax->fsa_ax25.sax25_call = nr->user_addr;
++ memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
+ sax->fsa_digipeater[0] = nr->dest_addr;
+ *uaddr_len = sizeof(struct full_sockaddr_ax25);
+ } else {
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 877a7f6..ebe1cc9 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct rose_sock *rose = rose_sk(sk);
+ int n;
+
++ memset(srose, 0, sizeof(*srose));
+ if (peer != 0) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 5abab09..8d02e05 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -876,6 +876,7 @@ static inline void
+ rpc_task_force_reencode(struct rpc_task *task)
+ {
+ task->tk_rqstp->rq_snd_buf.len = 0;
++ task->tk_rqstp->rq_bytes_sent = 0;
+ }
+
+ static inline void
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 50d572b..2ae3aff 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -45,9 +45,9 @@ int ima_calc_hash(struct file *file, char *digest)
+ {
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+- loff_t i_size;
++ loff_t i_size, offset = 0;
+ char *rbuf;
+- int rc, offset = 0;
++ int rc;
+
+ rc = init_desc(&desc);
+ if (rc != 0)
+@@ -67,6 +67,8 @@ int ima_calc_hash(struct file *file, char *digest)
+ rc = rbuf_len;
+ break;
+ }
++ if (rbuf_len == 0)
++ break;
+ offset += rbuf_len;
+ sg_init_one(sg, rbuf, rbuf_len);
+
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index d659995..2a2c2ca 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -876,47 +876,24 @@ static int snd_interval_ratden(struct snd_interval *i,
+ int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
+ {
+ unsigned int k;
+- int changed = 0;
++ struct snd_interval list_range;
+
+ if (!count) {
+ i->empty = 1;
+ return -EINVAL;
+ }
++ snd_interval_any(&list_range);
++ list_range.min = UINT_MAX;
++ list_range.max = 0;
+ for (k = 0; k < count; k++) {
+ if (mask && !(mask & (1 << k)))
+ continue;
+- if (i->min == list[k] && !i->openmin)
+- goto _l1;
+- if (i->min < list[k]) {
+- i->min = list[k];
+- i->openmin = 0;
+- changed = 1;
+- goto _l1;
+- }
+- }
+- i->empty = 1;
+- return -EINVAL;
+- _l1:
+- for (k = count; k-- > 0;) {
+- if (mask && !(mask & (1 << k)))
++ if (!snd_interval_test(i, list[k]))
+ continue;
+- if (i->max == list[k] && !i->openmax)
+- goto _l2;
+- if (i->max > list[k]) {
+- i->max = list[k];
+- i->openmax = 0;
+- changed = 1;
+- goto _l2;
+- }
++ list_range.min = min(list_range.min, list[k]);
++ list_range.max = max(list_range.max, list[k]);
+ }
+- i->empty = 1;
+- return -EINVAL;
+- _l2:
+- if (snd_interval_checkempty(i)) {
+- i->empty = 1;
+- return -EINVAL;
+- }
+- return changed;
++ return snd_interval_refine(i, &list_range);
+ }
+
+ EXPORT_SYMBOL(snd_interval_list);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1df7692..c734840 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6186,9 +6186,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
+ };
+
+ /*
+- * 6ch mode
++ * 4ch mode
+ */
+-static struct hda_verb alc885_mbp_ch6_init[] = {
++static struct hda_verb alc885_mbp_ch4_init[] = {
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+@@ -6197,9 +6197,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
+ { } /* end */
+ };
+
+-static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
++static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
+ { 2, alc885_mbp_ch2_init },
+- { 6, alc885_mbp_ch6_init },
++ { 4, alc885_mbp_ch4_init },
+ };
+
+
+@@ -6232,10 +6232,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
+ };
+
+ static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
+- HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
+- HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
+- HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
+- HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
++ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
++ HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
++ HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
++ HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
++ HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
+@@ -6481,14 +6482,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
++ /* HP mixer */
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* Front Pin: output 0 (0x0c) */
+ {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
+- /* HP Pin: output 0 (0x0d) */
++ /* HP Pin: output 0 (0x0e) */
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
+- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+- {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
++ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
++ {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+ /* Mic (rear) pin: input vref at 80% */
+ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+@@ -6885,10 +6890,11 @@ static struct alc_config_preset alc882_presets[] = {
+ .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mbp3_init_verbs,
+ alc880_gpio1_init_verbs },
+- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
++ .num_dacs = 2,
+ .dac_nids = alc882_dac_nids,
+- .channel_mode = alc885_mbp_6ch_modes,
+- .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
++ .hp_nid = 0x04,
++ .channel_mode = alc885_mbp_4ch_modes,
++ .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 864ac54..8f2018a 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -141,7 +141,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ unsigned gsi = pin;
+
+ list_for_each_entry(e, &kvm->irq_routing, link)
+- if (e->irqchip.irqchip == irqchip &&
++ if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&
++ e->irqchip.irqchip == irqchip &&
+ e->irqchip.pin == pin) {
+ gsi = e->gsi;
+ break;
+@@ -240,6 +241,7 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ int delta;
+
+ e->gsi = ue->gsi;
++ e->type = ue->type;
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ delta = 0;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1489829..ad38135 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -881,6 +881,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
+ #endif
+ #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
++#else
++ kvm_arch_flush_shadow(kvm);
+ #endif
+ kvm_arch_destroy_vm(kvm);
+ mmdrop(mm);
+@@ -1055,8 +1057,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
+
+ kvm_free_physmem_slot(&old, npages ? &new : NULL);
+ /* Slot deletion case: we have to update the current slot */
++ spin_lock(&kvm->mmu_lock);
+ if (!npages)
+ *memslot = old;
++ spin_unlock(&kvm->mmu_lock);
+ #ifdef CONFIG_DMAR
+ /* map the pages in iommu page table */
+ r = kvm_iommu_map_pages(kvm, base_gfn, npages);
Modified: dists/sid/linux-2.6/debian/patches/series/7
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/7 Mon Sep 14 10:20:45 2009 (r14232)
+++ dists/sid/linux-2.6/debian/patches/series/7 Tue Sep 15 12:19:09 2009 (r14233)
@@ -6,3 +6,14 @@
+ bugfix/all/drivers-gpu-drm-r128-ioctl-add-init-test.patch
+ bugfix/x86/fix-alternatives-on-486.patch
+ bugfix/x86/fix-i8xx-agp-flush.patch
+- bugfix/x86/arch-kernel-cpu-amd-pci-access.diff
+- bugfix/all/make-sock_sendpage-use-kernel_sendpage.patch
+- bugfix/parisc/isa-eeprom-fix-loff_t-usage.patch
+- bugfix/all/flat-fix-uninitialized-ptr-with-shared-libs.patch
+- bugfix/all/do_sigaltstack-avoid-copying-stack_t-as-a-structure-to-userspace.patch
+- bugfix/all/do_sigaltstack-small-cleanups.patch
+- bugfix/all/posix-timers-fix-oops-in-clock-nanosleep-with-CLOCK_MONOTONIC_RAW.patch
++ bugfix/all/block-fix-sg-sg_dxfer_to_from_dev-regression.patch
++ bugfix/all/sched_rt-fix-overload-bug-on-rt-group-scheduling.patch
++ bugfix/all/stable/2.6.30.5.patch
++ bugfix/all/stable/2.6.30.6.patch
More information about the Kernel-svn-changes
mailing list