[kernel] r18167 - in dists/sid/linux-2.6/debian: . patches/bugfix/all/stable patches/series
Bastian Blank
waldi at alioth.debian.org
Tue Oct 18 08:42:34 UTC 2011
Author: waldi
Date: Tue Oct 18 08:42:31 2011
New Revision: 18167
Log:
Add stable release 3.0.7.
* debian/changelog: Update.
* debian/patches/bugfix/all/stable/3.0.7.patch: Add.
* debian/patches/series/6: Add new patch.
Added:
dists/sid/linux-2.6/debian/patches/bugfix/all/stable/3.0.7.patch
dists/sid/linux-2.6/debian/patches/series/6
Modified:
dists/sid/linux-2.6/debian/changelog
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Mon Oct 17 23:57:08 2011 (r18166)
+++ dists/sid/linux-2.6/debian/changelog Tue Oct 18 08:42:31 2011 (r18167)
@@ -1,7 +1,45 @@
linux-2.6 (3.0.0-6) UNRELEASED; urgency=low
+ [ Uwe Kleine-König ]
* [amd64] Update rt featureset to 3.0.6-rt17
+ [ Bastian Blank ]
+ * Add stable release 3.0.7:
+ - sparc64: Force the execute bit in OpenFirmware's translation entries.
+ - sched/rt: Migrate equal priority tasks to available CPUs
+ - sched: Fix up wchan borkage
+ - ide-disk: Fix request requeuing
+ - posix-cpu-timers: Cure SMP wobbles
+ - lis3: fix regression of HP DriveGuard with 8bit chip
+ - ASoC: use a valid device for dev_err() in Zylonite
+ - ASoC: Fix setting update bits for WM8753_LADC and WM8753_RADC
+ - drm/radeon: Update AVIVO cursor coordinate origin before x/yorigin
+ calculation.
+ - drm/radeon/kms: Fix logic error in DP HPD handler
+ - drm/radeon/kms: fix regression in DP aux defer handling
+ - drm/radeon/kms: add retry limits for native DP aux defer
+ - drm/radeon/kms: fix channel_remap setup (v2)
+ - ptp: fix L2 event message recognition
+ - rt2x00: Serialize TX operations on a queue.
+ - x86/PCI: use host bridge _CRS info on ASUS M2V-MX SE
+ - qla2xxx: Fix crash in qla2x00_abort_all_cmds() on unload
+ - libsas: fix panic when single phy is disabled on a wide port
+ - md: Avoid waking up a thread after it has been freed.
+ - dm table: avoid crash if integrity profile changes
+ - mmc: mxs-mmc: fix clock rate setting
+ - exec: do not call request_module() twice from search_binary_handler()
+ - ARM: mach-ux500: enable fix for ARM errata 754322
+ - drm/radeon/kms: retry aux transactions if there are status flags
+ - drm/radeon/kms: use hardcoded dig encoder to transmitter mapping for
+ DCE4.1
+ - ipv6: fix NULL dereference in udp6_ufo_fragment()
+ - ahci: Enable SB600 64bit DMA on Asus M3A
+ - MIPS: PM: Use struct syscore_ops instead of sysdevs for PM (v2)
+ - ftrace: Fix regression of :mod:module function enabling
+ - ftrace: Fix regression where ftrace breaks when modules are loaded
+ - ftrace: Fix warning when CONFIG_FUNCTION_TRACER is not defined
+ - e1000e: workaround for packet drop on 82579 at 100Mbps
+
-- Uwe Kleine-König <u.kleine-koenig at pengutronix.de> Fri, 07 Oct 2011 15:48:22 +0200
linux-2.6 (3.0.0-5) unstable; urgency=low
Added: dists/sid/linux-2.6/debian/patches/bugfix/all/stable/3.0.7.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/all/stable/3.0.7.patch Tue Oct 18 08:42:31 2011 (r18167)
@@ -0,0 +1,1403 @@
+diff --git a/Makefile b/Makefile
+index 7767a64..11c4249 100644
+diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
+index f8b9392..9a9706c 100644
+--- a/arch/arm/mach-ux500/Kconfig
++++ b/arch/arm/mach-ux500/Kconfig
+@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
+ select ARM_GIC
+ select HAS_MTU
+ select ARM_ERRATA_753970
++ select ARM_ERRATA_754322
+
+ menu "Ux500 SoC"
+
+diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
+index 73031f7..4397972 100644
+--- a/arch/mips/jz4740/gpio.c
++++ b/arch/mips/jz4740/gpio.c
+@@ -18,7 +18,7 @@
+ #include <linux/init.h>
+
+ #include <linux/spinlock.h>
+-#include <linux/sysdev.h>
++#include <linux/syscore_ops.h>
+ #include <linux/io.h>
+ #include <linux/gpio.h>
+ #include <linux/delay.h>
+@@ -86,7 +86,6 @@ struct jz_gpio_chip {
+ spinlock_t lock;
+
+ struct gpio_chip gpio_chip;
+- struct sys_device sysdev;
+ };
+
+ static struct jz_gpio_chip jz4740_gpio_chips[];
+@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
+ JZ4740_GPIO_CHIP(D),
+ };
+
+-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
++static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
+ {
+- return container_of(dev, struct jz_gpio_chip, sysdev);
++ chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
++ writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
++ writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+ }
+
+-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
++static int jz4740_gpio_suspend(void)
+ {
+- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
++ int i;
+
+- chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
+- writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
+- writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
++ for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
++ jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
+
+ return 0;
+ }
+
+-static int jz4740_gpio_resume(struct sys_device *dev)
++static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
+ {
+- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
+ uint32_t mask = chip->suspend_mask;
+
+ writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+ writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
++}
+
+- return 0;
++static void jz4740_gpio_resume(void)
++{
++ int i;
++
++ for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
++ jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
+ }
+
+-static struct sysdev_class jz4740_gpio_sysdev_class = {
+- .name = "gpio",
++static struct syscore_ops jz4740_gpio_syscore_ops = {
+ .suspend = jz4740_gpio_suspend,
+ .resume = jz4740_gpio_resume,
+ };
+
+-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
++static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+ {
+- int ret, irq;
+-
+- chip->sysdev.id = id;
+- chip->sysdev.cls = &jz4740_gpio_sysdev_class;
+- ret = sysdev_register(&chip->sysdev);
+-
+- if (ret)
+- return ret;
++ int irq;
+
+ spin_lock_init(&chip->lock);
+
+@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+ irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
+ handle_level_irq);
+ }
+-
+- return 0;
+ }
+
+ static int __init jz4740_gpio_init(void)
+ {
+ unsigned int i;
+- int ret;
+-
+- ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
+- if (ret)
+- return ret;
+
+ for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
+ jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
+
++ register_syscore_ops(&jz4740_gpio_syscore_ops);
++
+ printk(KERN_INFO "JZ4740 GPIO initialized\n");
+
+ return 0;
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 581531d..8e073d8 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
+ for (i = 0; i < prom_trans_ents; i++)
+ prom_trans[i].data &= ~0x0003fe0000000000UL;
+ }
++
++ /* Force execute bit on. */
++ for (i = 0; i < prom_trans_ents; i++)
++ prom_trans[i].data |= (tlb_type == hypervisor ?
++ _PAGE_EXEC_4V : _PAGE_EXEC_4U);
+ }
+
+ static void __init hypervisor_tlb_lock(unsigned long vaddr,
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 68c3c13..50b3f14 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
+ },
+ },
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
++ /* 2006 AMD HT/VIA system with two host bridges */
++ {
++ .callback = set_use_crs,
++ .ident = "ASUS M2V-MX SE",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index cab6960..1e9ab9b 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -812,6 +812,18 @@ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
+ },
+ },
++ /*
++ * All BIOS versions for the Asus M3A support 64bit DMA.
++ * (all release versions from 0301 to 1206 were tested)
++ */
++ {
++ .ident = "ASUS M3A",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M3A"),
++ },
++ },
+ { }
+ };
+ const struct dmi_system_id *match;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 7ad43c6..79e8ebc 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
+ u8 msg[20];
+ int msg_bytes = send_bytes + 4;
+ u8 ack;
++ unsigned retry;
+
+ if (send_bytes > 16)
+ return -1;
+@@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
+ msg[3] = (msg_bytes << 4) | (send_bytes - 1);
+ memcpy(&msg[4], send, send_bytes);
+
+- while (1) {
++ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+ msg, msg_bytes, NULL, 0, delay, &ack);
+- if (ret < 0)
++ if (ret == -EBUSY)
++ continue;
++ else if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+- break;
++ return send_bytes;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(400);
+ else
+ return -EIO;
+ }
+
+- return send_bytes;
++ return -EIO;
+ }
+
+ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
+@@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
+ int msg_bytes = 4;
+ u8 ack;
+ int ret;
++ unsigned retry;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_READ << 4;
+ msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
+
+- while (1) {
++ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+ msg, msg_bytes, recv, recv_bytes, delay, &ack);
+- if (ret == 0)
+- return -EPROTO;
+- if (ret < 0)
++ if (ret == -EBUSY)
++ continue;
++ else if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ return ret;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(400);
++ else if (ret == 0)
++ return -EPROTO;
+ else
+ return -EIO;
+ }
++
++ return -EIO;
+ }
+
+ static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
+@@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(auxch,
+ msg, msg_bytes, reply, reply_bytes, 0, &ack);
+- if (ret < 0) {
++ if (ret == -EBUSY)
++ continue;
++ else if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index c975581..ea7a24e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1593,48 +1593,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ return backend_map;
+ }
+
+-static void evergreen_program_channel_remap(struct radeon_device *rdev)
+-{
+- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+-
+- tmp = RREG32(MC_SHARED_CHMAP);
+- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+- case 0:
+- case 1:
+- case 2:
+- case 3:
+- default:
+- /* default mapping */
+- mc_shared_chremap = 0x00fac688;
+- break;
+- }
+-
+- switch (rdev->family) {
+- case CHIP_HEMLOCK:
+- case CHIP_CYPRESS:
+- case CHIP_BARTS:
+- tcp_chan_steer_lo = 0x54763210;
+- tcp_chan_steer_hi = 0x0000ba98;
+- break;
+- case CHIP_JUNIPER:
+- case CHIP_REDWOOD:
+- case CHIP_CEDAR:
+- case CHIP_PALM:
+- case CHIP_SUMO:
+- case CHIP_SUMO2:
+- case CHIP_TURKS:
+- case CHIP_CAICOS:
+- default:
+- tcp_chan_steer_lo = 0x76543210;
+- tcp_chan_steer_hi = 0x0000ba98;
+- break;
+- }
+-
+- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+-}
+-
+ static void evergreen_gpu_init(struct radeon_device *rdev)
+ {
+ u32 cc_rb_backend_disable = 0;
+@@ -2080,8 +2038,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+- evergreen_program_channel_remap(rdev);
+-
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 0b132a3..0c460c4 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ return backend_map;
+ }
+
+-static void cayman_program_channel_remap(struct radeon_device *rdev)
+-{
+- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+-
+- tmp = RREG32(MC_SHARED_CHMAP);
+- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+- case 0:
+- case 1:
+- case 2:
+- case 3:
+- default:
+- /* default mapping */
+- mc_shared_chremap = 0x00fac688;
+- break;
+- }
+-
+- switch (rdev->family) {
+- case CHIP_CAYMAN:
+- default:
+- //tcp_chan_steer_lo = 0x54763210
+- tcp_chan_steer_lo = 0x76543210;
+- tcp_chan_steer_hi = 0x0000ba98;
+- break;
+- }
+-
+- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+-}
+-
+ static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
+ u32 disable_mask_per_se,
+ u32 max_disable_mask_per_se,
+@@ -841,8 +811,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+- cayman_program_channel_remap(rdev);
+-
+ /* primary versions */
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index d1b36f8..05b8b2c 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int saved_dpms = connector->dpms;
+
+- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+- radeon_dp_needs_link_train(radeon_connector))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- else
++ /* Only turn off the display if it's physically disconnected */
++ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ else if (radeon_dp_needs_link_train(radeon_connector))
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ connector->dpms = saved_dpms;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 3189a7e..f59a682 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -208,6 +208,13 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int xorigin = 0, yorigin = 0;
+ int w = radeon_crtc->cursor_width;
+
++ if (ASIC_IS_AVIVO(rdev)) {
++ /* avivo cursor are offset into the total surface */
++ x += crtc->x;
++ y += crtc->y;
++ }
++ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
++
+ if (x < 0)
+ xorigin = -x + 1;
+ if (y < 0)
+@@ -221,11 +228,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int i = 0;
+ struct drm_crtc *crtc_p;
+
+- /* avivo cursor are offset into the total surface */
+- x += crtc->x;
+- y += crtc->y;
+- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+-
+ /* avivo cursor image can't end on 128 pixel boundary or
+ * go past the end of the frame if both crtcs are enabled
+ */
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 13690f3..8a171b2 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -1755,9 +1755,12 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+ /* DCE4/5 */
+ if (ASIC_IS_DCE4(rdev)) {
+ dig = radeon_encoder->enc_priv;
+- if (ASIC_IS_DCE41(rdev))
+- return radeon_crtc->crtc_id;
+- else {
++ if (ASIC_IS_DCE41(rdev)) {
++ if (dig->linkb)
++ return 1;
++ else
++ return 0;
++ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 4de5189..f2516e6 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ return backend_map;
+ }
+
+-static void rv770_program_channel_remap(struct radeon_device *rdev)
+-{
+- u32 tcp_chan_steer, mc_shared_chremap, tmp;
+- bool force_no_swizzle;
+-
+- switch (rdev->family) {
+- case CHIP_RV770:
+- case CHIP_RV730:
+- force_no_swizzle = false;
+- break;
+- case CHIP_RV710:
+- case CHIP_RV740:
+- default:
+- force_no_swizzle = true;
+- break;
+- }
+-
+- tmp = RREG32(MC_SHARED_CHMAP);
+- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+- case 0:
+- case 1:
+- default:
+- /* default mapping */
+- mc_shared_chremap = 0x00fac688;
+- break;
+- case 2:
+- case 3:
+- if (force_no_swizzle)
+- mc_shared_chremap = 0x00fac688;
+- else
+- mc_shared_chremap = 0x00bbc298;
+- break;
+- }
+-
+- if (rdev->family == CHIP_RV740)
+- tcp_chan_steer = 0x00ef2a60;
+- else
+- tcp_chan_steer = 0x00fac688;
+-
+- /* RV770 CE has special chremap setup */
+- if (rdev->pdev->device == 0x944e) {
+- tcp_chan_steer = 0x00b08b08;
+- mc_shared_chremap = 0x00b08b08;
+- }
+-
+- WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+-}
+-
+ static void rv770_gpu_init(struct radeon_device *rdev)
+ {
+ int i, j, num_qd_pipes;
+@@ -784,8 +735,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+ WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+
+- rv770_program_channel_remap(rdev);
+-
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 2747980..16f69be 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
+ if (!(rq->cmd_flags & REQ_FLUSH))
+ return BLKPREP_OK;
+
+- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
++ if (rq->special) {
++ cmd = rq->special;
++ memset(cmd, 0, sizeof(*cmd));
++ } else {
++ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
++ }
+
+ /* FIXME: map struct ide_taskfile on rq->cmd[] */
+ BUG_ON(cmd == NULL);
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 451c3bb..ebdae6e 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1184,14 +1184,15 @@ static void dm_table_set_integrity(struct dm_table *t)
+ return;
+
+ template_disk = dm_table_get_integrity_disk(t, true);
+- if (!template_disk &&
+- blk_integrity_is_initialized(dm_disk(t->md))) {
++ if (template_disk)
++ blk_integrity_register(dm_disk(t->md),
++ blk_get_integrity(template_disk));
++ else if (blk_integrity_is_initialized(dm_disk(t->md)))
+ DMWARN("%s: device no longer has a valid integrity profile",
+ dm_device_name(t->md));
+- return;
+- }
+- blk_integrity_register(dm_disk(t->md),
+- blk_get_integrity(template_disk));
++ else
++ DMWARN("%s: unable to establish an integrity profile",
++ dm_device_name(t->md));
+ }
+
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 8554082..bc83428 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -61,6 +61,11 @@
+ static void autostart_arrays(int part);
+ #endif
+
++/* pers_list is a list of registered personalities protected
++ * by pers_lock.
++ * pers_lock does extra service to protect accesses to
++ * mddev->thread when the mutex cannot be held.
++ */
+ static LIST_HEAD(pers_list);
+ static DEFINE_SPINLOCK(pers_lock);
+
+@@ -690,7 +695,12 @@ static void mddev_unlock(mddev_t * mddev)
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);
+
++ /* As we've dropped the mutex we need a spinlock to
++ * make sure the thread doesn't disappear
++ */
++ spin_lock(&pers_lock);
+ md_wakeup_thread(mddev->thread);
++ spin_unlock(&pers_lock);
+ }
+
+ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
+@@ -6186,11 +6196,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
+ return thread;
+ }
+
+-void md_unregister_thread(mdk_thread_t *thread)
++void md_unregister_thread(mdk_thread_t **threadp)
+ {
++ mdk_thread_t *thread = *threadp;
+ if (!thread)
+ return;
+ dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
++ /* Locking ensures that mddev_unlock does not wake_up a
++ * non-existent thread
++ */
++ spin_lock(&pers_lock);
++ *threadp = NULL;
++ spin_unlock(&pers_lock);
+
+ kthread_stop(thread->tsk);
+ kfree(thread);
+@@ -7125,8 +7142,7 @@ static void reap_sync_thread(mddev_t *mddev)
+ mdk_rdev_t *rdev;
+
+ /* resync has finished, collect result */
+- md_unregister_thread(mddev->sync_thread);
+- mddev->sync_thread = NULL;
++ md_unregister_thread(&mddev->sync_thread);
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* success...*/
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 1c26c7a..ce4e328 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -475,7 +475,7 @@ extern int register_md_personality(struct mdk_personality *p);
+ extern int unregister_md_personality(struct mdk_personality *p);
+ extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
+ mddev_t *mddev, const char *name);
+-extern void md_unregister_thread(mdk_thread_t *thread);
++extern void md_unregister_thread(mdk_thread_t **threadp);
+ extern void md_wakeup_thread(mdk_thread_t *thread);
+ extern void md_check_recovery(mddev_t *mddev);
+ extern void md_write_start(mddev_t *mddev, struct bio *bi);
+diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
+index 3535c23..d5b5fb3 100644
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
+ {
+ multipath_conf_t *conf = mddev->private;
+
+- md_unregister_thread(mddev->thread);
+- mddev->thread = NULL;
++ md_unregister_thread(&mddev->thread);
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ mempool_destroy(conf->pool);
+ kfree(conf->multipaths);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index f7431b6..3a9e59f 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2045,8 +2045,7 @@ static int stop(mddev_t *mddev)
+ raise_barrier(conf);
+ lower_barrier(conf);
+
+- md_unregister_thread(mddev->thread);
+- mddev->thread = NULL;
++ md_unregister_thread(&mddev->thread);
+ if (conf->r1bio_pool)
+ mempool_destroy(conf->r1bio_pool);
+ kfree(conf->mirrors);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 6e84668..17cb6ab 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2331,7 +2331,7 @@ static int run(mddev_t *mddev)
+ return 0;
+
+ out_free_conf:
+- md_unregister_thread(mddev->thread);
++ md_unregister_thread(&mddev->thread);
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+ safe_put_page(conf->tmppage);
+@@ -2349,8 +2349,7 @@ static int stop(mddev_t *mddev)
+ raise_barrier(conf, 0);
+ lower_barrier(conf);
+
+- md_unregister_thread(mddev->thread);
+- mddev->thread = NULL;
++ md_unregister_thread(&mddev->thread);
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b72edf3..2581ba1 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5162,8 +5162,7 @@ static int run(mddev_t *mddev)
+
+ return 0;
+ abort:
+- md_unregister_thread(mddev->thread);
+- mddev->thread = NULL;
++ md_unregister_thread(&mddev->thread);
+ if (conf) {
+ print_raid5_conf(conf);
+ free_conf(conf);
+@@ -5177,8 +5176,7 @@ static int stop(mddev_t *mddev)
+ {
+ raid5_conf_t *conf = mddev->private;
+
+- md_unregister_thread(mddev->thread);
+- mddev->thread = NULL;
++ md_unregister_thread(&mddev->thread);
+ if (mddev->queue)
+ mddev->queue->backing_dev_info.congested_fn = NULL;
+ free_conf(conf);
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index b928bc1..8b51cd6 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
+ * both have been read. So the value read will always be correct.
+ * Set BOOT bit to refresh factory tuning values.
+ */
+- lis3->read(lis3, CTRL_REG2, &reg);
+- if (lis3->whoami == WAI_12B)
+- reg |= CTRL2_BDU | CTRL2_BOOT;
+- else
+- reg |= CTRL2_BOOT_8B;
+- lis3->write(lis3, CTRL_REG2, reg);
++ if (lis3->pdata) {
++ lis3->read(lis3, CTRL_REG2, &reg);
++ if (lis3->whoami == WAI_12B)
++ reg |= CTRL2_BDU | CTRL2_BOOT;
++ else
++ reg |= CTRL2_BOOT_8B;
++ lis3->write(lis3, CTRL_REG2, reg);
++ }
+
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 99d39a6..d513d47 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -564,40 +564,38 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+
+ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
+ {
+- unsigned int ssp_rate, bit_rate;
+- u32 div1, div2;
++ unsigned int ssp_clk, ssp_sck;
++ u32 clock_divide, clock_rate;
+ u32 val;
+
+- ssp_rate = clk_get_rate(host->clk);
++ ssp_clk = clk_get_rate(host->clk);
+
+- for (div1 = 2; div1 < 254; div1 += 2) {
+- div2 = ssp_rate / rate / div1;
+- if (div2 < 0x100)
++ for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
++ clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
++ clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
++ if (clock_rate <= 255)
+ break;
+ }
+
+- if (div1 >= 254) {
++ if (clock_divide > 254) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: cannot set clock to %d\n", __func__, rate);
+ return;
+ }
+
+- if (div2 == 0)
+- bit_rate = ssp_rate / div1;
+- else
+- bit_rate = ssp_rate / div1 / div2;
++ ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
+
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
+- val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
+- val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
++ val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
++ val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
+ writel(val, host->base + HW_SSP_TIMING);
+
+- host->clk_rate = bit_rate;
++ host->clk_rate = ssp_sck;
+
+ dev_dbg(mmc_dev(host->mmc),
+- "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
+- __func__, div1, div2, ssp_rate, bit_rate, rate);
++ "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
++ __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
+ }
+
+ static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
+index 3369d1f..ee77b94 100644
+--- a/drivers/net/e1000e/ich8lan.c
++++ b/drivers/net/e1000e/ich8lan.c
+@@ -137,8 +137,9 @@
+ #define HV_PM_CTRL PHY_REG(770, 17)
+
+ /* PHY Low Power Idle Control */
+-#define I82579_LPI_CTRL PHY_REG(772, 20)
+-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
++#define I82579_LPI_CTRL PHY_REG(772, 20)
++#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
++#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+ /* EMI Registers */
+ #define I82579_EMI_ADDR 0x10
+@@ -1611,6 +1612,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ u32 mac_reg;
++ u16 phy_reg;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+@@ -1625,12 +1627,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+- if (status_reg & HV_M_STATUS_SPEED_1000)
++ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
++ if (ret_val)
++ goto out;
++
++ if (status_reg & HV_M_STATUS_SPEED_1000) {
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+- else
++ phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
++ } else {
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+-
++ phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
++ }
+ ew32(FEXTNVM4, mac_reg);
++ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ }
+
+ out:
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index ab8c16f..2886d25 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -556,15 +556,21 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ bool local)
+ {
+ struct ieee80211_tx_info *tx_info;
+- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
++ struct queue_entry *entry;
+ struct txentry_desc txdesc;
+ struct skb_frame_desc *skbdesc;
+ u8 rate_idx, rate_flags;
++ int ret = 0;
++
++ spin_lock(&queue->tx_lock);
++
++ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+ ERROR(queue->rt2x00dev,
+ "Dropping frame due to full tx queue %d.\n", queue->qid);
+- return -ENOBUFS;
++ ret = -ENOBUFS;
++ goto out;
+ }
+
+ if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
+@@ -573,7 +579,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ "Arrived at non-free entry in the non-full queue %d.\n"
+ "Please file bug report to %s.\n",
+ queue->qid, DRV_PROJECT);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ /*
+@@ -635,7 +642,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
+ clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+ entry->skb = NULL;
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+ set_bit(ENTRY_DATA_PENDING, &entry->flags);
+@@ -644,7 +652,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ rt2x00queue_write_tx_descriptor(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(queue, &txdesc);
+
+- return 0;
++out:
++ spin_unlock(&queue->tx_lock);
++ return ret;
+ }
+
+ int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+@@ -1185,6 +1195,7 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue, enum data_queue_qid qid)
+ {
+ mutex_init(&queue->status_lock);
++ spin_lock_init(&queue->tx_lock);
+ spin_lock_init(&queue->index_lock);
+
+ queue->rt2x00dev = rt2x00dev;
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
+index 167d458..ad3d527 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
+@@ -432,6 +432,7 @@ enum data_queue_flags {
+ * @flags: Entry flags, see &enum queue_entry_flags.
+ * @status_lock: The mutex for protecting the start/stop/flush
+ * handling on this queue.
++ * @tx_lock: Spinlock to serialize tx operations on this queue.
+ * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
+ * @index_crypt needs to be changed this lock should be grabbed to prevent
+ * index corruption due to concurrency.
+@@ -458,6 +459,7 @@ struct data_queue {
+ unsigned long flags;
+
+ struct mutex status_lock;
++ spinlock_t tx_lock;
+ spinlock_t index_lock;
+
+ unsigned int count;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index c9e3dc0..16ad97d 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
+ sas_disable_routing(parent, phy->attached_sas_addr);
+ }
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+- sas_port_delete_phy(phy->port, phy->phy);
+- if (phy->port->num_phys == 0)
+- sas_port_delete(phy->port);
+- phy->port = NULL;
++ if (phy->port) {
++ sas_port_delete_phy(phy->port, phy->phy);
++ if (phy->port->num_phys == 0)
++ sas_port_delete(phy->port);
++ phy->port = NULL;
++ }
+ }
+
+ static int sas_discover_bfs_by_root_level(struct domain_device *root,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index f461925..a2a1a83 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1240,10 +1240,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+ qla2x00_sp_compl(ha, sp);
+ } else {
+ ctx = sp->ctx;
+- if (ctx->type == SRB_LOGIN_CMD ||
+- ctx->type == SRB_LOGOUT_CMD) {
+- ctx->u.iocb_cmd->free(sp);
+- } else {
++ if (ctx->type == SRB_ELS_CMD_RPT ||
++ ctx->type == SRB_ELS_CMD_HST ||
++ ctx->type == SRB_CT_CMD) {
+ struct fc_bsg_job *bsg_job =
+ ctx->u.bsg_job;
+ if (bsg_job->request->msgcode
+@@ -1255,6 +1254,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+ kfree(sp->ctx);
+ mempool_free(sp,
+ ha->srb_mempool);
++ } else {
++ ctx->u.iocb_cmd->free(sp);
+ }
+ }
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index 6075a1e..044c13f 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1411,6 +1411,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ printable(bprm->buf[2]) &&
+ printable(bprm->buf[3]))
+ break; /* -ENOEXEC */
++ if (try)
++ break; /* -ENOEXEC */
+ request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
+ #endif
+ }
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 9d88e1c..f0c0e8a 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -19,6 +19,8 @@
+
+ #include <asm/ftrace.h>
+
++struct ftrace_hash;
++
+ #ifdef CONFIG_FUNCTION_TRACER
+
+ extern int ftrace_enabled;
+@@ -29,8 +31,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+
+ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+
+-struct ftrace_hash;
+-
+ enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_GLOBAL = 1 << 1,
+@@ -123,7 +123,8 @@ stack_trace_sysctl(struct ctl_table *table, int write,
+ struct ftrace_func_command {
+ struct list_head list;
+ char *name;
+- int (*func)(char *func, char *cmd,
++ int (*func)(struct ftrace_hash *hash,
++ char *func, char *cmd,
+ char *params, int enable);
+ };
+
+diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
+index e07e274..1dc420b 100644
+--- a/include/linux/ptp_classify.h
++++ b/include/linux/ptp_classify.h
+@@ -51,6 +51,7 @@
+ #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
+
+ #define PTP_EV_PORT 319
++#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
+
+ #define OFF_ETYPE 12
+ #define OFF_IHL 14
+@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
+ {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
+ {OP_RETA, 0, 0, 0 }, /* */ \
+ /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
+-/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \
++/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
+ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
+- {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \
++ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
++ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
++ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
++ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
+ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
+ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
+ {OP_RETA, 0, 0, 0 }, /* */ \
+-/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \
++/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
++ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
++ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
++ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
+ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
+ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 14a6c7b..4ef452b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1937,7 +1937,6 @@ static inline void disable_sched_clock_irqtime(void) {}
+
+ extern unsigned long long
+ task_sched_runtime(struct task_struct *task);
+-extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
+
+ /* sched_exec is called by processes performing an exec */
+ #ifdef CONFIG_SMP
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 3b5ac1f..c39121f 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -463,7 +463,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+
+-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
++extern void ipv6_select_ident(struct frag_hdr *fhdr, struct in6_addr *addr);
+
+ /*
+ * Prototypes exported by ipv6
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 58f405b..c8008dd 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+ do {
+ times->utime = cputime_add(times->utime, t->utime);
+ times->stime = cputime_add(times->stime, t->stime);
+- times->sum_exec_runtime += t->se.sum_exec_runtime;
++ times->sum_exec_runtime += task_sched_runtime(t);
+ } while_each_thread(tsk, t);
+ out:
+ rcu_read_unlock();
+@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
+ cpu->cpu = cputime.utime;
+ break;
+ case CPUCLOCK_SCHED:
+- cpu->sched = thread_group_sched_runtime(p);
++ thread_group_cputime(p, &cputime);
++ cpu->sched = cputime.sum_exec_runtime;
+ break;
+ }
+ return 0;
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 8b37360..063d7a4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3713,30 +3713,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
+ }
+
+ /*
+- * Return sum_exec_runtime for the thread group.
+- * In case the task is currently running, return the sum plus current's
+- * pending runtime that have not been accounted yet.
+- *
+- * Note that the thread group might have other running tasks as well,
+- * so the return value not includes other pending runtime that other
+- * running tasks might have.
+- */
+-unsigned long long thread_group_sched_runtime(struct task_struct *p)
+-{
+- struct task_cputime totals;
+- unsigned long flags;
+- struct rq *rq;
+- u64 ns;
+-
+- rq = task_rq_lock(p, &flags);
+- thread_group_cputime(p, &totals);
+- ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
+- task_rq_unlock(rq, p, &flags);
+-
+- return ns;
+-}
+-
+-/*
+ * Account user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+@@ -4335,7 +4311,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ blk_schedule_flush_plug(tsk);
+ }
+
+-asmlinkage void schedule(void)
++asmlinkage void __sched schedule(void)
+ {
+ struct task_struct *tsk = current;
+
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 10d0182..17f2319 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -1038,7 +1038,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+ */
+ if (curr && unlikely(rt_task(curr)) &&
+ (curr->rt.nr_cpus_allowed < 2 ||
+- curr->prio < p->prio) &&
++ curr->prio <= p->prio) &&
+ (p->rt.nr_cpus_allowed > 1)) {
+ int target = find_lowest_rq(p);
+
+@@ -1569,7 +1569,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
+ p->rt.nr_cpus_allowed > 1 &&
+ rt_task(rq->curr) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+- rq->curr->prio < p->prio))
++ rq->curr->prio <= p->prio))
+ push_rt_tasks(rq);
+ }
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 908038f..ef9271b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1744,10 +1744,36 @@ static cycle_t ftrace_update_time;
+ static unsigned long ftrace_update_cnt;
+ unsigned long ftrace_update_tot_cnt;
+
++static int ops_traces_mod(struct ftrace_ops *ops)
++{
++ struct ftrace_hash *hash;
++
++ hash = ops->filter_hash;
++ return !!(!hash || !hash->count);
++}
++
+ static int ftrace_update_code(struct module *mod)
+ {
+ struct dyn_ftrace *p;
+ cycle_t start, stop;
++ unsigned long ref = 0;
++
++ /*
++ * When adding a module, we need to check if tracers are
++ * currently enabled and if they are set to trace all functions.
++ * If they are, we need to enable the module functions as well
++ * as update the reference counts for those function records.
++ */
++ if (mod) {
++ struct ftrace_ops *ops;
++
++ for (ops = ftrace_ops_list;
++ ops != &ftrace_list_end; ops = ops->next) {
++ if (ops->flags & FTRACE_OPS_FL_ENABLED &&
++ ops_traces_mod(ops))
++ ref++;
++ }
++ }
+
+ start = ftrace_now(raw_smp_processor_id());
+ ftrace_update_cnt = 0;
+@@ -1760,7 +1786,7 @@ static int ftrace_update_code(struct module *mod)
+
+ p = ftrace_new_addrs;
+ ftrace_new_addrs = p->newlist;
+- p->flags = 0L;
++ p->flags = ref;
+
+ /*
+ * Do the initial record conversion from mcount jump
+@@ -1783,7 +1809,7 @@ static int ftrace_update_code(struct module *mod)
+ * conversion puts the module to the correct state, thus
+ * passing the ftrace_make_call check.
+ */
+- if (ftrace_start_up) {
++ if (ftrace_start_up && ref) {
+ int failed = __ftrace_replace_code(p, 1);
+ if (failed) {
+ ftrace_bug(failed, p->ip);
+@@ -2407,10 +2433,9 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
+ */
+
+ static int
+-ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
++ftrace_mod_callback(struct ftrace_hash *hash,
++ char *func, char *cmd, char *param, int enable)
+ {
+- struct ftrace_ops *ops = &global_ops;
+- struct ftrace_hash *hash;
+ char *mod;
+ int ret = -EINVAL;
+
+@@ -2430,11 +2455,6 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+ if (!strlen(mod))
+ return ret;
+
+- if (enable)
+- hash = ops->filter_hash;
+- else
+- hash = ops->notrace_hash;
+-
+ ret = ftrace_match_module_records(hash, func, mod);
+ if (!ret)
+ ret = -EINVAL;
+@@ -2760,7 +2780,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry(p, &ftrace_commands, list) {
+ if (strcmp(p->name, command) == 0) {
+- ret = p->func(func, command, next, enable);
++ ret = p->func(hash, func, command, next, enable);
+ goto out_unlock;
+ }
+ }
+diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
+index 8d0e1cc..c7b0c6a 100644
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -324,7 +324,8 @@ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+ }
+
+ static int
+-ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
++ftrace_trace_onoff_callback(struct ftrace_hash *hash,
++ char *glob, char *cmd, char *param, int enable)
+ {
+ struct ftrace_probe_ops *ops;
+ void *count = (void *)-1;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 1661296..e17596b 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -620,9 +620,9 @@ static u32 __ipv6_select_ident(const struct in6_addr *addr)
+ return hash + newid;
+ }
+
+-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
++void ipv6_select_ident(struct frag_hdr *fhdr, struct in6_addr *addr)
+ {
+- fhdr->identification = htonl(__ipv6_select_ident(&rt->rt6i_dst.addr));
++ fhdr->identification = htonl(__ipv6_select_ident(addr));
+ }
+
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+@@ -709,7 +709,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ skb_reset_network_header(skb);
+ memcpy(skb_network_header(skb), tmp_hdr, hlen);
+
+- ipv6_select_ident(fh, rt);
++ ipv6_select_ident(fh, &rt->rt6i_dst.addr);
+ fh->nexthdr = nexthdr;
+ fh->reserved = 0;
+ fh->frag_off = htons(IP6_MF);
+@@ -855,7 +855,7 @@ slow_path:
+ fh->nexthdr = nexthdr;
+ fh->reserved = 0;
+ if (!frag_id) {
+- ipv6_select_ident(fh, rt);
++ ipv6_select_ident(fh, &rt->rt6i_dst.addr);
+ frag_id = fh->identification;
+ } else
+ fh->identification = frag_id;
+@@ -1146,7 +1146,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+ sizeof(struct frag_hdr)) & ~7;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+- ipv6_select_ident(&fhdr, rt);
++ ipv6_select_ident(&fhdr, &rt->rt6i_dst.addr);
+ skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 29213b5..0d920c5 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1309,6 +1309,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+ u8 frag_hdr_sz = sizeof(struct frag_hdr);
+ int offset;
+ __wsum csum;
++ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+@@ -1359,7 +1360,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
++ ipv6_select_ident(fptr,
++ rt ? &rt->rt6i_dst.addr : &ipv6_hdr(skb)->daddr);
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
+index ffa2ffe..aa091a0 100644
+--- a/sound/soc/codecs/wm8753.c
++++ b/sound/soc/codecs/wm8753.c
+@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
+ /* set the update bits */
+ snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
+- snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
+- snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
++ snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
++ snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
+diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
+index b644575..2b8350b 100644
+--- a/sound/soc/pxa/zylonite.c
++++ b/sound/soc/pxa/zylonite.c
+@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
+ if (clk_pout) {
+ pout = clk_get(NULL, "CLK_POUT");
+ if (IS_ERR(pout)) {
+- dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n",
++ dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
+ PTR_ERR(pout));
+ return PTR_ERR(pout);
+ }
+
+ ret = clk_enable(pout);
+ if (ret != 0) {
+- dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
++ dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
+ ret);
+ clk_put(pout);
+ return ret;
+ }
+
+- dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n",
++ dev_dbg(card->dev, "MCLK enabled at %luHz\n",
+ clk_get_rate(pout));
+ }
+
+@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
+ if (clk_pout) {
+ ret = clk_enable(pout);
+ if (ret != 0)
+- dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
++ dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
+ ret);
+ }
+
Added: dists/sid/linux-2.6/debian/patches/series/6
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/series/6 Tue Oct 18 08:42:31 2011 (r18167)
@@ -0,0 +1 @@
++ bugfix/all/stable/3.0.7.patch
More information about the Kernel-svn-changes
mailing list