[kernel] r19304 - in dists/sid/linux/debian: . patches patches/bugfix/all patches/features/all patches/features/all/cpu-devices patches/features/all/rt
Ben Hutchings
benh at alioth.debian.org
Fri Aug 3 01:37:44 UTC 2012
Author: benh
Date: Fri Aug 3 01:37:42 2012
New Revision: 19304
Log:
Update to 3.2.25
Drop one bug fix that is now included upstream.
Refresh various other patches that had more or less serious conflicts.
Deleted:
dists/sid/linux/debian/patches/bugfix/all/udf-Improve-table-length-check-to-avoid-possible-underflow.patch
Modified:
dists/sid/linux/debian/changelog
dists/sid/linux/debian/patches/features/all/cpu-devices/cpu-convert-cpu-and-machinecheck-sysdev_class-to-a-r.patch
dists/sid/linux/debian/patches/features/all/rt/0227-workqueue-Fix-cpuhotplug-trainwreck.patch
dists/sid/linux/debian/patches/features/all/rt2x00-add-rt5372-chipset-support.patch
dists/sid/linux/debian/patches/series
Modified: dists/sid/linux/debian/changelog
==============================================================================
--- dists/sid/linux/debian/changelog Thu Aug 2 13:06:18 2012 (r19303)
+++ dists/sid/linux/debian/changelog Fri Aug 3 01:37:42 2012 (r19304)
@@ -1,4 +1,4 @@
-linux (3.2.24-1) UNRELEASED; urgency=low
+linux (3.2.25-1) UNRELEASED; urgency=low
* New upstream stable update:
http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.24
@@ -27,6 +27,18 @@
- bnx2x: fix panic when TX ring is full
- eCryptfs: Gracefully refuse miscdev file ops on inherited/passed files
- ACPI / PM: Make acpi_pm_device_sleep_state() follow the specification
+ http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.25
+ - mm: Fix various performance problems, particularly affecting use of
+ transparent hugepages (Closes: #675493)
+ - target: Add range checking to UNMAP emulation
+ - target: Fix reading of data length fields for UNMAP commands
+ - target: Fix possible integer underflow in UNMAP emulation
+ - target: Check number of unmap descriptors against our limit
+ - ext4: don't let i_reserved_meta_blocks go negative
+ - ext4: undo ext4_calc_metadata_amount if we fail to claim space
+ - locks: fix checking of fcntl_setlease argument
+ - drm/radeon: fix bo creation retry path
+ - Btrfs: call the ordered free operation without any locks held
[ Ben Hutchings ]
* linux-image: Include package version in utsname version string
Modified: dists/sid/linux/debian/patches/features/all/cpu-devices/cpu-convert-cpu-and-machinecheck-sysdev_class-to-a-r.patch
==============================================================================
--- dists/sid/linux/debian/patches/features/all/cpu-devices/cpu-convert-cpu-and-machinecheck-sysdev_class-to-a-r.patch Thu Aug 2 13:06:18 2012 (r19303)
+++ dists/sid/linux/debian/patches/features/all/cpu-devices/cpu-convert-cpu-and-machinecheck-sysdev_class-to-a-r.patch Fri Aug 3 01:37:42 2012 (r19304)
@@ -98,7 +98,7 @@
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/module.h>
-@@ -26,16 +26,16 @@
+@@ -26,16 +26,16 @@ static DEFINE_PER_CPU(struct cpu, cpu_de
* XXX: If/when a SMP-capable implementation of AVR32 will ever be
* made, we must make sure that the code executes on the correct CPU.
*/
@@ -119,7 +119,7 @@
size_t count)
{
unsigned long val;
-@@ -48,16 +48,16 @@
+@@ -48,16 +48,16 @@ static ssize_t store_pc0event(struct sys
sysreg_write(PCCR, val);
return count;
}
@@ -140,7 +140,7 @@
const char *buf, size_t count)
{
unsigned long val;
-@@ -71,16 +71,16 @@
+@@ -71,16 +71,16 @@ static ssize_t store_pc0count(struct sys
return count;
}
@@ -161,7 +161,7 @@
size_t count)
{
unsigned long val;
-@@ -93,16 +93,16 @@
+@@ -93,16 +93,16 @@ static ssize_t store_pc1event(struct sys
sysreg_write(PCCR, val);
return count;
}
@@ -182,7 +182,7 @@
size_t count)
{
unsigned long val;
-@@ -116,16 +116,16 @@
+@@ -116,16 +116,16 @@ static ssize_t store_pc1count(struct sys
return count;
}
@@ -203,7 +203,7 @@
size_t count)
{
unsigned long val;
-@@ -139,16 +139,16 @@
+@@ -139,16 +139,16 @@ static ssize_t store_pccycles(struct sys
return count;
}
@@ -224,7 +224,7 @@
size_t count)
{
unsigned long pccr, val;
-@@ -167,12 +167,12 @@
+@@ -167,12 +167,12 @@ static ssize_t store_pcenable(struct sys
return count;
}
@@ -243,7 +243,7 @@
#endif /* CONFIG_PERFORMANCE_COUNTERS */
-@@ -186,12 +186,12 @@
+@@ -186,12 +186,12 @@ static int __init topology_init(void)
register_cpu(c, cpu);
#ifdef CONFIG_PERFORMANCE_COUNTERS
@@ -286,7 +286,7 @@
static u64 call_start[NR_CPUS];
static u64 phys_addr[NR_CPUS];
-@@ -55,7 +55,7 @@
+@@ -55,7 +55,7 @@ static u64 resources[NR_CPUS];
#define show(name) \
static ssize_t \
@@ -295,7 +295,7 @@
char *buf) \
{ \
u32 cpu=dev->id; \
-@@ -64,7 +64,7 @@
+@@ -64,7 +64,7 @@ show_##name(struct sys_device *dev, stru
#define store(name) \
static ssize_t \
@@ -304,7 +304,7 @@
const char *buf, size_t size) \
{ \
unsigned int cpu=dev->id; \
-@@ -78,7 +78,7 @@
+@@ -78,7 +78,7 @@ show(call_start)
* processor. The cpu number in driver is only used for storing data.
*/
static ssize_t
@@ -313,7 +313,7 @@
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
-@@ -127,7 +127,7 @@
+@@ -127,7 +127,7 @@ show(err_type_info)
store(err_type_info)
static ssize_t
@@ -322,7 +322,7 @@
char *buf)
{
unsigned int cpu=dev->id;
-@@ -135,7 +135,7 @@
+@@ -135,7 +135,7 @@ show_virtual_to_phys(struct sys_device *
}
static ssize_t
@@ -331,7 +331,7 @@
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
-@@ -159,8 +159,8 @@
+@@ -159,8 +159,8 @@ show(err_struct_info)
store(err_struct_info)
static ssize_t
@@ -342,7 +342,7 @@
{
unsigned int cpu=dev->id;
-@@ -171,8 +171,8 @@
+@@ -171,8 +171,8 @@ show_err_data_buffer(struct sys_device *
}
static ssize_t
@@ -353,7 +353,7 @@
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
-@@ -209,14 +209,14 @@
+@@ -209,14 +209,14 @@ define_one_ro(capabilities);
define_one_ro(resources);
static struct attribute *default_attrs[] = {
@@ -376,7 +376,7 @@
NULL
};
-@@ -225,12 +225,12 @@
+@@ -225,12 +225,12 @@ static struct attribute_group err_inject
.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
@@ -391,7 +391,7 @@
{
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
return 0;
-@@ -239,9 +239,9 @@
+@@ -239,9 +239,9 @@ static int __cpuinit err_inject_cpu_call
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -403,7 +403,7 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
-@@ -283,13 +283,13 @@
+@@ -283,13 +283,13 @@ static void __exit
err_inject_exit(void)
{
int i;
@@ -421,7 +421,7 @@
unregister_hotcpu_notifier(&err_inject_cpu_notifier);
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
-@@ -350,7 +350,7 @@
+@@ -350,7 +350,7 @@ static int __cpuinit cpu_cache_sysfs_ini
}
/* Add cache interface for CPU device */
@@ -430,7 +430,7 @@
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
-@@ -400,7 +400,7 @@
+@@ -400,7 +400,7 @@ static int __cpuinit cache_add_dev(struc
}
/* Remove cache interface for CPU device */
@@ -439,7 +439,7 @@
{
unsigned int cpu = sys_dev->id;
unsigned long i;
-@@ -428,9 +428,9 @@
+@@ -428,9 +428,9 @@ static int __cpuinit cache_cpu_callback(
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -451,7 +451,7 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
-@@ -454,7 +454,7 @@
+@@ -454,7 +454,7 @@ static int __init cache_sysfs_init(void)
int i;
for_each_online_cpu(i) {
@@ -471,7 +471,7 @@
#include <linux/mutex.h>
#define LS_SIZE (256 * 1024)
-@@ -166,7 +166,7 @@
+@@ -166,7 +166,7 @@ struct spu {
/* beat only */
u64 shadow_int_mask_RW[3];
@@ -480,7 +480,7 @@
int has_mem_affinity;
struct list_head aff_list;
-@@ -270,11 +270,11 @@
+@@ -270,11 +270,11 @@ struct spufs_calls {
int register_spu_syscalls(struct spufs_calls *calls);
void unregister_spu_syscalls(struct spufs_calls *calls);
@@ -507,7 +507,7 @@
struct device_node;
#ifdef CONFIG_NUMA
-@@ -86,19 +86,19 @@
+@@ -86,19 +86,19 @@ extern int __node_distance(int, int);
extern void __init dump_numa_cpu_topology(void);
@@ -533,7 +533,7 @@
}
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
-@@ -451,15 +451,15 @@
+@@ -451,15 +451,15 @@ out:
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
@@ -573,7 +573,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
-@@ -37,12 +37,12 @@
+@@ -37,12 +37,12 @@ static DEFINE_PER_CPU(struct cpu, cpu_de
/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
@@ -589,7 +589,7 @@
ssize_t ret;
long snooze;
-@@ -50,21 +50,21 @@
+@@ -50,21 +50,21 @@ static ssize_t store_smt_snooze_delay(st
if (ret != 1)
return -EINVAL;
@@ -617,7 +617,7 @@
store_smt_snooze_delay);
static int __init setup_smt_snooze_delay(char *str)
-@@ -117,25 +117,25 @@
+@@ -117,25 +117,25 @@ static void write_##NAME(void *val) \
ppc_enable_pmcs(); \
mtspr(ADDRESS, *(unsigned long *)val); \
} \
@@ -650,7 +650,7 @@
return count; \
}
-@@ -178,22 +178,22 @@
+@@ -178,22 +178,22 @@ SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
@@ -681,7 +681,7 @@
size_t count)
{
unsigned long val;
-@@ -207,15 +207,14 @@
+@@ -207,15 +207,14 @@ static ssize_t __used store_dscr_default
return count;
}
@@ -699,7 +699,7 @@
}
#endif /* CONFIG_PPC64 */
-@@ -259,72 +258,72 @@
+@@ -259,72 +258,72 @@ SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
@@ -825,7 +825,7 @@
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
-@@ -333,14 +332,14 @@
+@@ -333,14 +332,14 @@ static struct sysdev_attribute pa6t_attr
static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -843,7 +843,7 @@
#endif
/* PMC stuff */
-@@ -348,14 +347,14 @@
+@@ -348,14 +347,14 @@ static void __cpuinit register_cpu_onlin
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
@@ -860,7 +860,7 @@
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-@@ -363,7 +362,7 @@
+@@ -363,7 +362,7 @@ static void __cpuinit register_cpu_onlin
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
@@ -869,7 +869,7 @@
pmc_attrs = NULL;
break;
#endif /* HAS_PPC_PMC_PA6T */
-@@ -374,24 +373,24 @@
+@@ -374,24 +373,24 @@ static void __cpuinit register_cpu_onlin
}
for (i = 0; i < nattrs; i++)
@@ -900,7 +900,7 @@
#endif /* CONFIG_PPC64 */
cacheinfo_cpu_online(cpu);
-@@ -401,8 +400,8 @@
+@@ -401,8 +400,8 @@ static void __cpuinit register_cpu_onlin
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -911,7 +911,7 @@
int i, nattrs;
BUG_ON(!c->hotpluggable);
-@@ -410,7 +409,7 @@
+@@ -410,7 +409,7 @@ static void unregister_cpu_online(unsign
#ifdef CONFIG_PPC64
if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
cpu_has_feature(CPU_FTR_SMT))
@@ -920,7 +920,7 @@
#endif
/* PMC stuff */
-@@ -418,14 +417,14 @@
+@@ -418,14 +417,14 @@ static void unregister_cpu_online(unsign
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
@@ -937,7 +937,7 @@
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-@@ -433,7 +432,7 @@
+@@ -433,7 +432,7 @@ static void unregister_cpu_online(unsign
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
@@ -946,7 +946,7 @@
pmc_attrs = NULL;
break;
#endif /* HAS_PPC_PMC_PA6T */
-@@ -444,24 +443,24 @@
+@@ -444,24 +443,24 @@ static void unregister_cpu_online(unsign
}
for (i = 0; i < nattrs; i++)
@@ -977,7 +977,7 @@
#endif /* CONFIG_PPC64 */
cacheinfo_cpu_offline(cpu);
-@@ -513,70 +512,70 @@
+@@ -513,70 +512,70 @@ static struct notifier_block __cpuinitda
static DEFINE_MUTEX(cpu_mutex);
@@ -1064,7 +1064,7 @@
/* NUMA stuff */
-@@ -590,7 +589,7 @@
+@@ -590,7 +589,7 @@ static void register_nodes(void)
register_one_node(i);
}
@@ -1073,7 +1073,7 @@
{
struct node *node = &node_devices[nid];
return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
-@@ -598,7 +597,7 @@
+@@ -598,7 +597,7 @@ int sysfs_add_device_to_node(struct sys_
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
@@ -1082,7 +1082,7 @@
{
struct node *node = &node_devices[nid];
sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
-@@ -614,14 +613,14 @@
+@@ -614,14 +613,14 @@ static void register_nodes(void)
#endif
/* Only valid if CPU is present. */
@@ -1102,7 +1102,7 @@
static int __init topology_init(void)
{
-@@ -646,7 +645,7 @@
+@@ -646,7 +645,7 @@ static int __init topology_init(void)
if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
@@ -1113,7 +1113,7 @@
if (cpu_online(cpu))
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
-@@ -1462,7 +1462,7 @@
+@@ -1462,7 +1462,7 @@ int arch_update_cpu_topology(void)
{
int cpu, nid, old_nid;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
@@ -1122,7 +1122,7 @@
for_each_cpu(cpu,&cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
-@@ -1483,9 +1483,9 @@
+@@ -1483,9 +1483,9 @@ int arch_update_cpu_topology(void)
register_cpu_under_node(cpu, nid);
put_online_cpus();
@@ -1157,7 +1157,7 @@
.attr = { .name = __stringify(_name), .mode = _mode }, \
.show = _prefix ## _show_ ## _name, \
.store = _prefix ## _store_ ## _name, \
-@@ -76,36 +76,36 @@
+@@ -76,36 +76,36 @@ static inline u8 temp_to_reg(u8 temp)
return ((temp - TEMP_MIN) >> 1) & 0x3f;
}
@@ -1201,7 +1201,7 @@
return sprintf(buf, "%d\n", reg_to_temp(value));
}
-@@ -147,48 +147,48 @@
+@@ -147,48 +147,48 @@ static ssize_t store_throttle(struct cbe
return size;
}
@@ -1270,7 +1270,7 @@
value = in_be64(&pmd_regs->ts_ctsr2);
value = (value >> pos) & 0x3f;
-@@ -199,64 +199,64 @@
+@@ -199,64 +199,64 @@ static ssize_t ppe_show_temp(struct sys_
/* shows the temperature of the DTS on the PPE,
* located near the linear thermal sensor */
@@ -1363,7 +1363,7 @@
static struct attribute *spu_attributes[] = {
-@@ -272,19 +272,19 @@
+@@ -272,19 +272,19 @@ static struct attribute_group spu_attrib
.attrs = spu_attributes,
};
@@ -1388,7 +1388,7 @@
static struct attribute *ppe_attributes[] = {
&attr_ppe_temperature0.attr,
-@@ -307,7 +307,7 @@
+@@ -307,7 +307,7 @@ static int __init init_default_values(vo
{
int cpu;
struct cbe_pmd_regs __iomem *pmd_regs;
@@ -1397,7 +1397,7 @@
union ppe_spe_reg tpr;
union spe_reg str1;
u64 str2;
-@@ -349,14 +349,14 @@
+@@ -349,14 +349,14 @@ static int __init init_default_values(vo
for_each_possible_cpu (cpu) {
pr_debug("processing cpu %d\n", cpu);
@@ -1416,7 +1416,7 @@
if (!pmd_regs) {
pr_info("invalid CBE regs pointer for cbe_thermal\n");
-@@ -379,8 +379,8 @@
+@@ -379,8 +379,8 @@ static int __init thermal_init(void)
int rc = init_default_values();
if (rc == 0) {
@@ -1427,7 +1427,7 @@
}
return rc;
-@@ -389,8 +389,8 @@
+@@ -389,8 +389,8 @@ module_init(thermal_init);
static void __exit thermal_exit(void)
{
@@ -1440,7 +1440,7 @@
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
-@@ -519,31 +519,32 @@
+@@ -519,31 +519,32 @@ void spu_init_channels(struct spu *spu)
}
EXPORT_SYMBOL_GPL(spu_init_channels);
@@ -1479,7 +1479,7 @@
/* we're in trouble here, but try unwinding anyway */
if (rc) {
-@@ -552,7 +553,7 @@
+@@ -552,7 +553,7 @@ int spu_add_sysdev_attr_group(struct att
list_for_each_entry_continue_reverse(spu,
&spu_full_list, full_list)
@@ -1488,7 +1488,7 @@
break;
}
}
-@@ -561,45 +562,45 @@
+@@ -561,45 +562,45 @@ int spu_add_sysdev_attr_group(struct att
return rc;
}
@@ -1546,7 +1546,7 @@
return 0;
}
-@@ -635,7 +636,7 @@
+@@ -635,7 +636,7 @@ static int __init create_spu(void *data)
if (ret)
goto out_destroy;
@@ -1555,7 +1555,7 @@
if (ret)
goto out_free_irqs;
-@@ -692,10 +693,10 @@
+@@ -692,10 +693,10 @@ static unsigned long long spu_acct_time(
}
@@ -1569,7 +1569,7 @@
return sprintf(buf, "%s %llu %llu %llu %llu "
"%llu %llu %llu %llu %llu %llu %llu %llu\n",
-@@ -714,7 +715,7 @@
+@@ -714,7 +715,7 @@ static ssize_t spu_stat_show(struct sys_
spu->stats.libassist);
}
@@ -1578,7 +1578,7 @@
#ifdef CONFIG_KEXEC
-@@ -813,8 +814,8 @@
+@@ -813,8 +814,8 @@ static int __init init_spu_base(void)
if (!spu_management_ops)
goto out;
@@ -1589,7 +1589,7 @@
if (ret)
goto out;
-@@ -823,7 +824,7 @@
+@@ -823,7 +824,7 @@ static int __init init_spu_base(void)
if (ret < 0) {
printk(KERN_WARNING "%s: Error initializing spus\n",
__func__);
@@ -1598,7 +1598,7 @@
}
if (ret > 0)
-@@ -833,15 +834,15 @@
+@@ -833,15 +834,15 @@ static int __init init_spu_base(void)
xmon_register_spus(&spu_full_list);
crash_register_spus(&spu_full_list);
mutex_unlock(&spu_full_list_mutex);
@@ -1628,7 +1628,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/cputhreads.h>
-@@ -184,7 +184,7 @@
+@@ -184,7 +184,7 @@ static ssize_t get_best_energy_list(char
return s-page;
}
@@ -1637,7 +1637,7 @@
char *page, int activate)
{
int rc;
-@@ -207,26 +207,26 @@
+@@ -207,26 +207,26 @@ static ssize_t get_best_energy_data(stru
/* Wrapper functions */
@@ -1672,7 +1672,7 @@
{
return get_best_energy_data(dev, page, 0);
}
-@@ -241,48 +241,48 @@
+@@ -241,48 +241,48 @@ static ssize_t percpu_deactivate_hint_sh
* Per-cpu value of the hint
*/
@@ -1739,7 +1739,7 @@
if (err)
break;
}
-@@ -298,23 +298,20 @@
+@@ -298,23 +298,20 @@ static int __init pseries_energy_init(vo
static void __exit pseries_energy_cleanup(void)
{
int cpu;
@@ -1771,7 +1771,7 @@
}
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
-@@ -179,12 +179,12 @@
+@@ -179,12 +179,12 @@ static struct kobj_attribute cpm_idle_at
static void cpm_idle_config_sysfs(void)
{
@@ -1789,7 +1789,7 @@
printk(KERN_WARNING
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
-@@ -831,8 +831,8 @@
+@@ -831,8 +831,8 @@ int setup_profiling_timer(unsigned int m
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1800,7 +1800,7 @@
{
ssize_t count;
-@@ -842,8 +842,8 @@
+@@ -842,8 +842,8 @@ static ssize_t cpu_configure_show(struct
return count;
}
@@ -1811,7 +1811,7 @@
const char *buf, size_t count)
{
int cpu = dev->id;
-@@ -889,11 +889,11 @@
+@@ -889,11 +889,11 @@ out:
put_online_cpus();
return rc ? rc : count;
}
@@ -1826,7 +1826,7 @@
{
int cpu = dev->id;
ssize_t count;
-@@ -919,22 +919,22 @@
+@@ -919,22 +919,22 @@ static ssize_t cpu_polarization_show(str
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
@@ -1856,7 +1856,7 @@
NULL,
};
-@@ -942,8 +942,8 @@
+@@ -942,8 +942,8 @@ static struct attribute_group cpu_common
.attrs = cpu_common_attrs,
};
@@ -1867,7 +1867,7 @@
{
unsigned int capability;
int rc;
-@@ -953,10 +953,10 @@
+@@ -953,10 +953,10 @@ static ssize_t show_capability(struct sy
return rc;
return sprintf(buf, "%u\n", capability);
}
@@ -1881,7 +1881,7 @@
{
struct s390_idle_data *idle;
unsigned long long idle_count;
-@@ -976,10 +976,10 @@
+@@ -976,10 +976,10 @@ repeat:
goto repeat;
return sprintf(buf, "%llu\n", idle_count);
}
@@ -1895,7 +1895,7 @@
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
-@@ -1001,12 +1001,12 @@
+@@ -1001,12 +1001,12 @@ repeat:
goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
@@ -1912,16 +1912,16 @@
NULL,
};
-@@ -1019,7 +1019,7 @@
+@@ -1019,7 +1019,7 @@ static int __cpuinit smp_cpu_notify(stru
{
unsigned int cpu = (unsigned int)(long)hcpu;
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
+ struct device *s = &c->dev;
- struct s390_idle_data *idle;
int err = 0;
-@@ -1045,7 +1045,7 @@
+ switch (action) {
+@@ -1042,7 +1042,7 @@ static struct notifier_block __cpuinitda
static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -1930,7 +1930,7 @@
int rc;
c->hotpluggable = 1;
-@@ -1098,8 +1098,8 @@
+@@ -1095,8 +1095,8 @@ out:
return rc;
}
@@ -1941,7 +1941,7 @@
const char *buf,
size_t count)
{
-@@ -1108,11 +1108,11 @@
+@@ -1105,11 +1105,11 @@ static ssize_t __ref rescan_store(struct
rc = smp_rescan_cpus();
return rc ? rc : count;
}
@@ -1956,7 +1956,7 @@
char *buf)
{
ssize_t count;
-@@ -1123,8 +1123,8 @@
+@@ -1120,8 +1120,8 @@ static ssize_t dispatching_show(struct s
return count;
}
@@ -1967,7 +1967,7 @@
const char *buf,
size_t count)
{
-@@ -1148,7 +1148,7 @@
+@@ -1145,7 +1145,7 @@ out:
put_online_cpus();
return rc ? rc : count;
}
@@ -1976,7 +1976,7 @@
dispatching_store);
static int __init topology_init(void)
-@@ -1159,11 +1159,11 @@
+@@ -1156,11 +1156,11 @@ static int __init topology_init(void)
register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
@@ -1992,7 +1992,7 @@
for_each_present_cpu(cpu) {
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
-@@ -261,7 +261,7 @@
+@@ -261,7 +261,7 @@ void store_topology(struct sysinfo_15_1_
int arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
@@ -2001,7 +2001,7 @@
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
-@@ -273,8 +273,8 @@
+@@ -273,8 +273,8 @@ int arch_update_cpu_topology(void)
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
@@ -2023,7 +2023,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-@@ -337,9 +337,9 @@
+@@ -337,9 +337,9 @@ static struct kobj_type ktype_percpu_ent
.default_attrs = sq_sysfs_attrs,
};
@@ -2035,7 +2035,7 @@
struct kobject *kobj;
int error;
-@@ -348,25 +348,27 @@
+@@ -348,25 +348,27 @@ static int __devinit sq_sysdev_add(struc
return -ENOMEM;
kobj = sq_kobject[cpu];
@@ -2069,7 +2069,7 @@
};
static int __init sq_api_init(void)
-@@ -386,7 +388,7 @@
+@@ -386,7 +388,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_bitmap))
goto out;
@@ -2078,7 +2078,7 @@
if (unlikely(ret != 0))
goto out;
-@@ -401,7 +403,7 @@
+@@ -401,7 +403,7 @@ out:
static void __exit sq_api_exit(void)
{
@@ -2115,7 +2115,7 @@
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
-@@ -58,38 +58,38 @@
+@@ -58,38 +58,38 @@ SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
static struct attribute *mmu_stat_attrs[] = {
@@ -2186,7 +2186,7 @@
NULL,
};
-@@ -139,15 +139,15 @@
+@@ -139,15 +139,15 @@ static unsigned long write_mmustat_enabl
return sun4v_mmustat_conf(ra, &orig_ra);
}
@@ -2206,7 +2206,7 @@
size_t count)
{
unsigned long val, err;
-@@ -163,39 +163,39 @@
+@@ -163,39 +163,39 @@ static ssize_t store_mmustat_enable(stru
return count;
}
@@ -2255,7 +2255,7 @@
{ \
cpuinfo_sparc *c = &cpu_data(dev->id); \
return sprintf(buf, "%u\n", c->MEMBER); \
-@@ -209,14 +209,14 @@
+@@ -209,14 +209,14 @@ SHOW_CPUDATA_UINT_NAME(l1_icache_line_si
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
@@ -2278,7 +2278,7 @@
};
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-@@ -224,11 +224,11 @@
+@@ -224,11 +224,11 @@ static DEFINE_PER_CPU(struct cpu, cpu_de
static void register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -2292,7 +2292,7 @@
register_mmu_stats(s);
}
-@@ -237,12 +237,12 @@
+@@ -237,12 +237,12 @@ static void register_cpu_online(unsigned
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -2318,7 +2318,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
-@@ -32,55 +32,55 @@
+@@ -32,55 +32,55 @@ static ssize_t get_hv_confstr(char *page
return n;
}
@@ -2392,7 +2392,7 @@
HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER)
HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER)
-@@ -96,15 +96,15 @@
+@@ -96,15 +96,15 @@ HV_CONF_ATTR(mezz_description, HV_CONFST
HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
static struct attribute *board_attrs[] = {
@@ -2417,7 +2417,7 @@
NULL
};
-@@ -151,12 +151,11 @@
+@@ -151,12 +151,11 @@ hvconfig_bin_read(struct file *filp, str
static int __init create_sysfs_entries(void)
{
@@ -2431,7 +2431,7 @@
create_cpu_attr(chip_width);
create_cpu_attr(chip_height);
create_cpu_attr(chip_serial);
-@@ -164,7 +163,7 @@
+@@ -164,7 +163,7 @@ static int __init create_sysfs_entries(v
#define create_hv_attr(name) \
if (!err) \
@@ -2442,7 +2442,7 @@
create_hv_attr(config_version);
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
-@@ -149,7 +149,7 @@
+@@ -149,7 +149,7 @@ static inline void enable_p5_mce(void) {
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
@@ -2453,7 +2453,7 @@
* Maximum banks number.
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
-@@ -872,8 +872,7 @@
+@@ -872,8 +872,7 @@ static int __cpuinit detect_cache_attrib
#include <linux/kobject.h>
#include <linux/sysfs.h>
@@ -2463,7 +2463,7 @@
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
-@@ -1101,9 +1100,9 @@
+@@ -1101,9 +1100,9 @@ err_out:
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
@@ -2475,7 +2475,7 @@
unsigned long i, j;
struct _index_kobject *this_object;
struct _cpuid4_info *this_leaf;
-@@ -1115,7 +1114,7 @@
+@@ -1115,7 +1114,7 @@ static int __cpuinit cache_add_dev(struc
retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
@@ -2484,7 +2484,7 @@
if (retval < 0) {
cpuid4_cache_sysfs_exit(cpu);
return retval;
-@@ -1152,9 +1151,9 @@
+@@ -1152,9 +1151,9 @@ static int __cpuinit cache_add_dev(struc
return 0;
}
@@ -2496,7 +2496,7 @@
unsigned long i;
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
-@@ -1173,17 +1172,17 @@
+@@ -1173,17 +1172,17 @@ static int __cpuinit cacheinfo_cpu_callb
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -2518,7 +2518,7 @@
break;
}
return NOTIFY_OK;
-@@ -1202,9 +1201,9 @@
+@@ -1202,9 +1201,9 @@ static int __cpuinit cache_sysfs_init(vo
for_each_online_cpu(i) {
int err;
@@ -2538,7 +2538,7 @@
#include <asm/mce.h>
enum severity_level {
-@@ -17,7 +17,7 @@
+@@ -17,7 +17,7 @@ enum severity_level {
struct mce_bank {
u64 ctl; /* subevents to enable */
unsigned char init; /* initialise bank? */
@@ -2558,7 +2558,7 @@
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
-@@ -1778,7 +1778,7 @@
+@@ -1778,7 +1778,7 @@ static struct syscore_ops mce_syscore_op
};
/*
@@ -2567,7 +2567,7 @@
*/
static void mce_cpu_restart(void *data)
-@@ -1814,27 +1814,28 @@
+@@ -1814,27 +1814,28 @@ static void mce_enable_ce(void *all)
__mcheck_cpu_init_timer();
}
@@ -2601,7 +2601,7 @@
const char *buf, size_t size)
{
u64 new;
-@@ -1849,14 +1850,14 @@
+@@ -1849,14 +1850,14 @@ static ssize_t set_bank(struct sys_devic
}
static ssize_t
@@ -2618,7 +2618,7 @@
const char *buf, size_t siz)
{
char *p;
-@@ -1871,8 +1872,8 @@
+@@ -1871,8 +1872,8 @@ static ssize_t set_trigger(struct sys_de
return strlen(mce_helper) + !!p;
}
@@ -2629,7 +2629,7 @@
const char *buf, size_t size)
{
u64 new;
-@@ -1895,8 +1896,8 @@
+@@ -1895,8 +1896,8 @@ static ssize_t set_ignore_ce(struct sys_
return size;
}
@@ -2640,7 +2640,7 @@
const char *buf, size_t size)
{
u64 new;
-@@ -1918,108 +1919,107 @@
+@@ -1918,108 +1919,107 @@ static ssize_t set_cmci_disabled(struct
return size;
}
@@ -2794,7 +2794,7 @@
}
/* Make sure there are no machine checks on offlined CPUs. */
-@@ -2069,7 +2069,7 @@
+@@ -2069,7 +2069,7 @@ mce_cpu_callback(struct notifier_block *
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -2803,7 +2803,7 @@
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
break;
-@@ -2077,7 +2077,7 @@
+@@ -2077,7 +2077,7 @@ mce_cpu_callback(struct notifier_block *
case CPU_DEAD_FROZEN:
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
@@ -2812,7 +2812,7 @@
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
-@@ -2111,7 +2111,7 @@
+@@ -2111,7 +2111,7 @@ static __init void mce_init_banks(void)
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
@@ -2821,7 +2821,7 @@
sysfs_attr_init(&a->attr);
a->attr.name = b->attrname;
-@@ -2131,16 +2131,16 @@
+@@ -2131,16 +2131,16 @@ static __init int mcheck_init_device(voi
if (!mce_available(&boot_cpu_data))
return -EIO;
@@ -2851,7 +2851,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
-@@ -548,7 +547,7 @@
+@@ -581,7 +580,7 @@ static __cpuinit int threshold_create_ba
if (!b)
goto out;
@@ -2860,7 +2860,7 @@
b->kobj, name);
if (err)
goto out;
-@@ -571,7 +570,7 @@
+@@ -604,7 +603,7 @@ static __cpuinit int threshold_create_ba
goto out;
}
@@ -2869,7 +2869,7 @@
if (!b->kobj)
goto out_free;
-@@ -591,7 +590,7 @@
+@@ -624,7 +623,7 @@ static __cpuinit int threshold_create_ba
if (i == cpu)
continue;
@@ -2878,7 +2878,7 @@
b->kobj, name);
if (err)
goto out;
-@@ -669,7 +668,7 @@
+@@ -702,7 +701,7 @@ static void threshold_remove_bank(unsign
#ifdef CONFIG_SMP
/* sibling symlink */
if (shared_bank[bank] && b->blocks->cpu != cpu) {
@@ -2887,7 +2887,7 @@
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
-@@ -681,7 +680,7 @@
+@@ -714,7 +713,7 @@ static void threshold_remove_bank(unsign
if (i == cpu)
continue;
@@ -2906,7 +2906,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
-@@ -69,16 +68,16 @@
+@@ -69,16 +68,16 @@ static atomic_t therm_throt_en = ATOMIC_
static u32 lvtthmr_init __read_mostly;
#ifdef CONFIG_SYSFS
@@ -2930,7 +2930,7 @@
char *buf) \
{ \
unsigned int cpu = dev->id; \
-@@ -95,20 +94,20 @@
+@@ -95,20 +94,20 @@ static ssize_t therm_throt_sysdev_show_#
return ret; \
}
@@ -2960,7 +2960,7 @@
NULL
};
-@@ -223,36 +222,36 @@
+@@ -223,36 +222,36 @@ static int thresh_event_valid(int event)
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
@@ -3007,7 +3007,7 @@
}
/* Mutex protecting device creation against CPU hotplug: */
-@@ -265,16 +264,16 @@
+@@ -265,16 +264,16 @@ thermal_throttle_cpu_callback(struct not
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -3027,7 +3027,7 @@
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
-@@ -283,7 +282,7 @@
+@@ -283,7 +282,7 @@ thermal_throttle_cpu_callback(struct not
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mutex_lock(&therm_cpu_lock);
@@ -3036,7 +3036,7 @@
mutex_unlock(&therm_cpu_lock);
break;
}
-@@ -310,7 +309,7 @@
+@@ -310,7 +309,7 @@ static __init int thermal_throttle_init_
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
@@ -3047,7 +3047,7 @@
#ifdef CONFIG_HOTPLUG_CPU
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
-@@ -292,8 +292,8 @@
+@@ -292,8 +292,8 @@ static int reload_for_cpu(int cpu)
return err;
}
@@ -3058,7 +3058,7 @@
const char *buf, size_t size)
{
unsigned long val;
-@@ -318,30 +318,30 @@
+@@ -329,30 +329,30 @@ static ssize_t reload_store(struct sys_d
return ret;
}
@@ -3099,7 +3099,7 @@
NULL
};
-@@ -405,16 +405,16 @@
+@@ -416,16 +416,16 @@ static enum ucode_state microcode_update
return ustate;
}
@@ -3119,7 +3119,7 @@
if (err)
return err;
-@@ -424,22 +424,24 @@
+@@ -435,22 +435,24 @@ static int mc_sysdev_add(struct sys_devi
return err;
}
@@ -3150,7 +3150,7 @@
};
/**
-@@ -462,9 +464,9 @@
+@@ -473,9 +475,9 @@ static __cpuinit int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -3162,7 +3162,7 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
-@@ -472,13 +474,13 @@
+@@ -483,13 +485,13 @@ mc_cpu_callback(struct notifier_block *n
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
pr_debug("CPU%d added\n", cpu);
@@ -3178,7 +3178,7 @@
pr_debug("CPU%d removed\n", cpu);
break;
-@@ -523,7 +525,7 @@
+@@ -534,7 +536,7 @@ static int __init microcode_init(void)
get_online_cpus();
mutex_lock(&microcode_mutex);
@@ -3187,7 +3187,7 @@
mutex_unlock(&microcode_mutex);
put_online_cpus();
-@@ -533,7 +535,7 @@
+@@ -544,7 +546,7 @@ static int __init microcode_init(void)
error = microcode_dev_init();
if (error)
@@ -3196,7 +3196,7 @@
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
-@@ -543,11 +545,11 @@
+@@ -554,11 +556,11 @@ static int __init microcode_init(void)
return 0;
@@ -3210,7 +3210,7 @@
mutex_unlock(&microcode_mutex);
put_online_cpus();
-@@ -569,7 +571,7 @@
+@@ -580,7 +582,7 @@ static void __exit microcode_exit(void)
get_online_cpus();
mutex_lock(&microcode_mutex);
@@ -3221,7 +3221,7 @@
put_online_cpus();
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
-@@ -446,7 +446,7 @@
+@@ -446,7 +446,7 @@ static int __cpuinit acpi_processor_add(
{
struct acpi_processor *pr = NULL;
int result = 0;
@@ -3230,7 +3230,7 @@
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
-@@ -491,8 +491,8 @@
+@@ -491,8 +491,8 @@ static int __cpuinit acpi_processor_add(
per_cpu(processors, pr->id) = pr;
@@ -3318,7 +3318,7 @@
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
-@@ -60,44 +59,44 @@
+@@ -60,44 +59,44 @@ static ssize_t __ref store_online(struct
ret = count;
return ret;
}
@@ -3374,7 +3374,7 @@
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
-@@ -109,15 +108,15 @@
+@@ -109,15 +108,15 @@ static inline void register_cpu_control(
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
@@ -3393,7 +3393,7 @@
/*
* Might be reading other cpu's data based on which cpu read thread
-@@ -129,7 +128,7 @@
+@@ -129,7 +128,7 @@ static ssize_t show_crash_notes(struct s
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
@@ -3402,7 +3402,7 @@
#endif
/*
-@@ -137,12 +136,12 @@
+@@ -137,12 +136,12 @@ static SYSDEV_ATTR(crash_notes, 0400, sh
*/
struct cpu_attr {
@@ -3418,7 +3418,7 @@
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
-@@ -153,10 +152,10 @@
+@@ -153,10 +152,10 @@ static ssize_t show_cpus_attr(struct sys
return n;
}
@@ -3432,7 +3432,7 @@
static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
-@@ -166,19 +165,19 @@
+@@ -166,19 +165,19 @@ static struct cpu_attr cpu_attrs[] = {
/*
* Print values for NR_CPUS and offlined cpus
*/
@@ -3457,7 +3457,7 @@
{
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t offline;
-@@ -205,7 +204,7 @@
+@@ -205,7 +204,7 @@ static ssize_t print_cpus_offline(struct
n += snprintf(&buf[n], len - n, "\n");
return n;
}
@@ -3466,7 +3466,7 @@
/*
* register_cpu - Setup a sysfs device for a CPU.
-@@ -218,57 +217,66 @@
+@@ -218,57 +217,66 @@ static SYSDEV_CLASS_ATTR(offline, 0444,
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
@@ -3561,7 +3561,7 @@
-};
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
-@@ -317,12 +317,12 @@
+@@ -317,12 +317,12 @@ struct node node_devices[MAX_NUMNODES];
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
@@ -3576,7 +3576,7 @@
if (!obj)
return 0;
-@@ -339,12 +339,12 @@
+@@ -339,12 +339,12 @@ int register_cpu_under_node(unsigned int
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
@@ -3620,7 +3620,7 @@
{ \
unsigned int cpu = dev->id; \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
-@@ -65,16 +64,16 @@
+@@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, con
#ifdef arch_provides_topology_pointers
#define define_siblings_show_map(name) \
@@ -3641,7 +3641,7 @@
char *buf) \
{ \
unsigned int cpu = dev->id; \
-@@ -83,15 +82,15 @@
+@@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct
#else
#define define_siblings_show_map(name) \
@@ -3661,7 +3661,7 @@
char *buf) \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
-@@ -124,16 +123,16 @@
+@@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list,
#endif
static struct attribute *default_attrs[] = {
@@ -3687,7 +3687,7 @@
#endif
NULL
};
-@@ -146,16 +145,16 @@
+@@ -146,16 +145,16 @@ static struct attribute_group topology_a
/* Add/Remove cpu_topology interface for CPU device */
static int __cpuinit topology_add_dev(unsigned int cpu)
{
@@ -3710,7 +3710,7 @@
static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -679,7 +679,7 @@
+@@ -679,7 +679,7 @@ static struct kobj_type ktype_cpufreq =
*/
static int cpufreq_add_dev_policy(unsigned int cpu,
struct cpufreq_policy *policy,
@@ -3719,7 +3719,7 @@
{
int ret = 0;
#ifdef CONFIG_SMP
-@@ -728,7 +728,7 @@
+@@ -728,7 +728,7 @@ static int cpufreq_add_dev_policy(unsign
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_debug("CPU already managed, adding link\n");
@@ -3728,7 +3728,7 @@
&managed_policy->kobj,
"cpufreq");
if (ret)
-@@ -761,7 +761,7 @@
+@@ -761,7 +761,7 @@ static int cpufreq_add_dev_symlink(unsig
for_each_cpu(j, policy->cpus) {
struct cpufreq_policy *managed_policy;
@@ -3737,7 +3737,7 @@
if (j == cpu)
continue;
-@@ -770,8 +770,8 @@
+@@ -770,8 +770,8 @@ static int cpufreq_add_dev_symlink(unsig
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
@@ -3748,7 +3748,7 @@
"cpufreq");
if (ret) {
cpufreq_cpu_put(managed_policy);
-@@ -783,7 +783,7 @@
+@@ -783,7 +783,7 @@ static int cpufreq_add_dev_symlink(unsig
static int cpufreq_add_dev_interface(unsigned int cpu,
struct cpufreq_policy *policy,
@@ -3757,7 +3757,7 @@
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
-@@ -793,7 +793,7 @@
+@@ -793,7 +793,7 @@ static int cpufreq_add_dev_interface(uns
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
@@ -3766,7 +3766,7 @@
if (ret)
return ret;
-@@ -866,9 +866,9 @@
+@@ -866,9 +866,9 @@ err_out_kobj_put:
* with with cpu hotplugging and all hell will break loose. Tried to clean this
* mess up, but more thorough testing is needed. - Mathieu
*/
@@ -3778,7 +3778,7 @@
int ret = 0, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
-@@ -947,7 +947,7 @@
+@@ -947,7 +947,7 @@ static int cpufreq_add_dev(struct sys_de
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -3787,7 +3787,7 @@
if (ret) {
if (ret > 0)
/* This is a managed cpu, symlink created,
-@@ -956,7 +956,7 @@
+@@ -956,7 +956,7 @@ static int cpufreq_add_dev(struct sys_de
goto err_unlock_policy;
}
@@ -3796,7 +3796,7 @@
if (ret)
goto err_out_unregister;
-@@ -999,15 +999,15 @@
+@@ -999,15 +999,15 @@ module_out:
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
@@ -3815,7 +3815,7 @@
unsigned int j;
#endif
-@@ -1032,7 +1032,7 @@
+@@ -1032,7 +1032,7 @@ static int __cpufreq_remove_dev(struct s
pr_debug("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -3824,7 +3824,7 @@
cpufreq_cpu_put(data);
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
-@@ -1071,8 +1071,8 @@
+@@ -1071,8 +1071,8 @@ static int __cpufreq_remove_dev(struct s
strncpy(per_cpu(cpufreq_cpu_governor, j),
data->governor->name, CPUFREQ_NAME_LEN);
#endif
@@ -3835,7 +3835,7 @@
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
lock_policy_rwsem_write(cpu);
-@@ -1112,11 +1112,11 @@
+@@ -1112,11 +1112,11 @@ static int __cpufreq_remove_dev(struct s
if (unlikely(cpumask_weight(data->cpus) > 1)) {
/* first sibling now owns the new sysfs dir */
cpumask_clear_cpu(cpu, data->cpus);
@@ -3849,7 +3849,7 @@
}
#endif
-@@ -1128,9 +1128,9 @@
+@@ -1128,9 +1128,9 @@ static int __cpufreq_remove_dev(struct s
}
@@ -3861,7 +3861,7 @@
int retval;
if (cpu_is_offline(cpu))
-@@ -1139,7 +1139,7 @@
+@@ -1139,7 +1139,7 @@ static int cpufreq_remove_dev(struct sys
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
@@ -3870,7 +3870,7 @@
return retval;
}
-@@ -1271,9 +1271,11 @@
+@@ -1271,9 +1271,11 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
@@ -3885,7 +3885,7 @@
};
-@@ -1765,25 +1767,25 @@
+@@ -1765,25 +1767,25 @@ static int __cpuinit cpufreq_cpu_callbac
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -3917,7 +3917,7 @@
break;
}
}
-@@ -1830,8 +1832,7 @@
+@@ -1830,8 +1832,7 @@ int cpufreq_register_driver(struct cpufr
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -3927,7 +3927,7 @@
if (ret)
goto err_null_driver;
-@@ -1850,7 +1851,7 @@
+@@ -1850,7 +1851,7 @@ int cpufreq_register_driver(struct cpufr
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
driver_data->name);
@@ -3936,7 +3936,7 @@
}
}
-@@ -1858,9 +1859,8 @@
+@@ -1858,9 +1859,8 @@ int cpufreq_register_driver(struct cpufr
pr_debug("driver %s up and running\n", driver_data->name);
return 0;
@@ -3948,7 +3948,7 @@
err_null_driver:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
-@@ -1887,7 +1887,7 @@
+@@ -1887,7 +1887,7 @@ int cpufreq_unregister_driver(struct cpu
pr_debug("unregistering driver %s\n", driver->name);
@@ -3957,7 +3957,7 @@
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
-@@ -1907,8 +1907,7 @@
+@@ -1907,8 +1907,7 @@ static int __init cpufreq_core_init(void
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
@@ -3979,7 +3979,7 @@
#include <linux/cpufreq.h>
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
-@@ -291,10 +291,10 @@
+@@ -291,10 +291,10 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
@@ -3992,7 +3992,7 @@
return -EINVAL;
if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
-@@ -303,7 +303,7 @@
+@@ -303,7 +303,7 @@ static int __cpuidle_register_device(str
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
@@ -4001,7 +4001,7 @@
module_put(cpuidle_driver->owner);
return ret;
}
-@@ -344,7 +344,7 @@
+@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_devic
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
@@ -4010,7 +4010,7 @@
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
-@@ -354,7 +354,7 @@
+@@ -354,7 +354,7 @@ void cpuidle_unregister_device(struct cp
cpuidle_disable_device(dev);
@@ -4019,7 +4019,7 @@
list_del(&dev->device_list);
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
-@@ -411,7 +411,7 @@
+@@ -411,7 +411,7 @@ static int __init cpuidle_init(void)
if (cpuidle_disabled())
return -ENODEV;
@@ -4039,7 +4039,7 @@
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
-@@ -23,11 +23,11 @@
+@@ -23,11 +23,11 @@ extern void cpuidle_uninstall_idle_handl
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
@@ -4057,7 +4057,7 @@
#endif /* __DRIVER_CPUIDLE_H */
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
-@@ -22,8 +22,8 @@
+@@ -22,8 +22,8 @@ static int __init cpuidle_sysfs_setup(ch
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
@@ -4068,7 +4068,7 @@
char *buf)
{
ssize_t i = 0;
-@@ -42,8 +42,8 @@
+@@ -42,8 +42,8 @@ out:
return i;
}
@@ -4079,7 +4079,7 @@
char *buf)
{
ssize_t ret;
-@@ -59,8 +59,8 @@
+@@ -59,8 +59,8 @@ static ssize_t show_current_driver(struc
return ret;
}
@@ -4090,7 +4090,7 @@
char *buf)
{
ssize_t ret;
-@@ -75,8 +75,8 @@
+@@ -75,8 +75,8 @@ static ssize_t show_current_governor(str
return ret;
}
@@ -4101,7 +4101,7 @@
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
-@@ -109,50 +109,48 @@
+@@ -109,50 +109,48 @@ static ssize_t store_current_governor(st
return count;
}
@@ -4175,7 +4175,7 @@
}
struct cpuidle_attr {
-@@ -365,16 +363,16 @@
+@@ -365,16 +363,16 @@ void cpuidle_remove_state_sysfs(struct c
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
@@ -4196,7 +4196,7 @@
"cpuidle");
if (!error)
kobject_uevent(&dev->kobj, KOBJ_ADD);
-@@ -383,11 +381,11 @@
+@@ -383,11 +381,11 @@ int cpuidle_add_sysfs(struct sys_device
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
@@ -4222,7 +4222,7 @@
#include <linux/workqueue.h>
#include <asm/smp.h>
-@@ -31,14 +31,14 @@
+@@ -31,14 +31,14 @@ static struct work_struct sclp_cpu_chang
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
@@ -4278,7 +4278,7 @@
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
-@@ -160,7 +160,7 @@
+@@ -161,7 +161,7 @@ static inline void cpu_maps_update_done(
}
#endif /* CONFIG_SMP */
@@ -4289,7 +4289,7 @@
/* Stop CPUs going up and down. */
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -7949,54 +7949,52 @@
+@@ -8136,54 +8136,52 @@ static ssize_t sched_power_savings_store
}
#ifdef CONFIG_SCHED_MC
Modified: dists/sid/linux/debian/patches/features/all/rt/0227-workqueue-Fix-cpuhotplug-trainwreck.patch
==============================================================================
--- dists/sid/linux/debian/patches/features/all/rt/0227-workqueue-Fix-cpuhotplug-trainwreck.patch Thu Aug 2 13:06:18 2012 (r19303)
+++ dists/sid/linux/debian/patches/features/all/rt/0227-workqueue-Fix-cpuhotplug-trainwreck.patch Fri Aug 3 01:37:42 2012 (r19304)
@@ -27,22 +27,24 @@
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+[bwh: Adjust to apply on top of commit
+ 6575820221f7a4dd6eadecf7bf83cdd154335eda ('workqueue: perform cpu down
+ operations from low priority cpu_notifier()'), cherry-picked in 3.2.25]
---
include/linux/cpu.h | 6 +-
include/linux/workqueue.h | 5 +-
kernel/workqueue.c | 556 ++++++++++++---------------------------------
3 files changed, 152 insertions(+), 415 deletions(-)
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index c46ec3e..72e90bb 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -66,8 +66,10 @@ enum {
+@@ -73,9 +73,10 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
-- /* prepare workqueues for other notifiers */
-- CPU_PRI_WORKQUEUE = 5,
+- /* bring up workqueues before normal notifiers and down after */
+- CPU_PRI_WORKQUEUE_UP = 5,
+- CPU_PRI_WORKQUEUE_DOWN = -5,
+
+ CPU_PRI_WORKQUEUE_ACTIVE = 5, /* prepare workqueues for others */
+ CPU_PRI_NORMAL = 0,
@@ -50,8 +52,6 @@
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
-index e228ca9..3d8ac9d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -254,9 +254,10 @@ enum {
@@ -67,8 +67,6 @@
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 5d23c05b..8daede8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
@@ -87,9 +85,7 @@
- WORKER_REBIND = 1 << 5, /* mom is home, come back */
- WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
- WORKER_UNBOUND = 1 << 7, /* worker is unbound */
-+ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
-+ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
-
+-
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
- WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
-
@@ -99,6 +95,9 @@
- TRUSTEE_BUTCHER = 2, /* butcher workers */
- TRUSTEE_RELEASE = 3, /* release workers */
- TRUSTEE_DONE = 4, /* trustee is done */
++ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
++ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
++
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
@@ -131,7 +130,7 @@
} ____cacheline_aligned_in_smp;
/*
-@@ -974,13 +961,38 @@ static bool is_chained_work(struct workqueue_struct *wq)
+@@ -974,13 +961,38 @@ static bool is_chained_work(struct workq
return false;
}
@@ -173,7 +172,7 @@
unsigned long flags;
debug_work_activate(work);
-@@ -1026,27 +1038,32 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+@@ -1026,27 +1038,32 @@ static void __queue_work(unsigned int cp
spin_lock_irqsave(&gcwq->lock, flags);
}
@@ -223,7 +222,7 @@
}
/**
-@@ -1063,34 +1080,19 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+@@ -1063,34 +1080,19 @@ int queue_work(struct workqueue_struct *
{
int ret;
@@ -261,7 +260,7 @@
}
EXPORT_SYMBOL_GPL(queue_work_on);
-@@ -1136,6 +1138,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1136,6 +1138,8 @@ int queue_delayed_work_on(int cpu, struc
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
@@ -270,7 +269,7 @@
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
-@@ -1201,12 +1205,13 @@ static void worker_enter_idle(struct worker *worker)
+@@ -1201,12 +1205,13 @@ static void worker_enter_idle(struct wor
/* idle_list is LIFO */
list_add(&worker->entry, &gcwq->idle_list);
@@ -322,7 +321,7 @@
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
-@@ -1663,13 +1650,6 @@ static bool manage_workers(struct worker *worker)
+@@ -1663,13 +1650,6 @@ static bool manage_workers(struct worker
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
@@ -336,7 +335,7 @@
return ret;
}
-@@ -3209,171 +3189,71 @@ EXPORT_SYMBOL_GPL(work_busy);
+@@ -3209,366 +3189,42 @@ EXPORT_SYMBOL_GPL(work_busy);
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
@@ -381,8 +380,8 @@
- * | | ^
- * | CPU is back online v return workers |
- * ----------------> RELEASE --------------
- */
-
+- */
+-
-/**
- * trustee_wait_event_timeout - timed event wait for trustee
- * @cond: condition to wait for
@@ -411,15 +410,7 @@
- } \
- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
-})
-+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ unsigned int cpu = (unsigned long)hcpu;
-+ struct global_cwq *gcwq = get_gcwq(cpu);
-+ struct worker *uninitialized_var(new_worker);
-+ unsigned long flags;
-
+-
-/**
- * trustee_wait_event - event wait for trustee
- * @cond: condition to wait for
@@ -439,8 +430,7 @@
- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
- __ret1 < 0 ? -1 : 0; \
-})
-+ action &= ~CPU_TASKS_FROZEN;
-
+-
-static int __cpuinit trustee_thread(void *__gcwq)
-{
- struct global_cwq *gcwq = __gcwq;
@@ -449,18 +439,9 @@
- struct hlist_node *pos;
- long rc;
- int i;
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ BUG_ON(gcwq->first_idle);
-+ new_worker = create_worker(gcwq, false);
-+ if (!new_worker)
-+ return NOTIFY_BAD;
-+ }
-
+-
- BUG_ON(gcwq->cpu != smp_processor_id());
-+ /* some are called w/ irq disabled, don't disturb irq status */
-+ spin_lock_irqsave(&gcwq->lock, flags);
-
+-
- spin_lock_irq(&gcwq->lock);
- /*
- * Claim the manager position and make all workers rogue.
@@ -470,34 +451,15 @@
- BUG_ON(gcwq->cpu != smp_processor_id());
- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
- BUG_ON(rc < 0);
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ BUG_ON(gcwq->first_idle);
-+ gcwq->first_idle = new_worker;
-+ break;
-
+-
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
-+ case CPU_UP_CANCELED:
-+ destroy_worker(gcwq->first_idle);
-+ gcwq->first_idle = NULL;
-+ break;
-
+-
- list_for_each_entry(worker, &gcwq->idle_list, entry)
- worker->flags |= WORKER_ROGUE;
-+ case CPU_ONLINE:
-+ spin_unlock_irq(&gcwq->lock);
-+ kthread_bind(gcwq->first_idle->task, cpu);
-+ spin_lock_irq(&gcwq->lock);
-+ gcwq->flags |= GCWQ_MANAGE_WORKERS;
-+ start_worker(gcwq->first_idle);
-+ gcwq->first_idle = NULL;
-+ break;
-+ }
-
+-
- for_each_busy_worker(worker, i, pos, gcwq)
- worker->flags |= WORKER_ROGUE;
-+ spin_unlock_irqrestore(&gcwq->lock, flags);
-
+-
- /*
- * Call schedule() so that we cross rq->lock and thus can
- * guarantee sched callbacks see the rogue flag. This is
@@ -507,9 +469,7 @@
- spin_unlock_irq(&gcwq->lock);
- schedule();
- spin_lock_irq(&gcwq->lock);
-+ return notifier_from_errno(0);
-+}
-
+-
- /*
- * Sched callbacks are disabled now. Zap nr_running. After
- * this, nr_running stays zero and need_more_worker() and
@@ -517,18 +477,11 @@
- * not empty.
- */
- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
-+static void flush_gcwq(struct global_cwq *gcwq)
-+{
-+ struct work_struct *work, *nw;
-+ struct worker *worker, *n;
-+ LIST_HEAD(non_affine_works);
-
+-
- spin_unlock_irq(&gcwq->lock);
- del_timer_sync(&gcwq->idle_timer);
- spin_lock_irq(&gcwq->lock);
-+ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
-+ struct workqueue_struct *wq = get_work_cwq(work)->wq;
-
+- spin_lock_irq(&gcwq->lock);
+-
- /*
- * We're now in charge. Notify and proceed to drain. We need
- * to keep the gcwq running during the whole CPU down
@@ -537,10 +490,7 @@
- */
- gcwq->trustee_state = TRUSTEE_IN_CHARGE;
- wake_up_all(&gcwq->trustee_wait);
-+ if (wq->flags & WQ_NON_AFFINE)
-+ list_move(&work->entry, &non_affine_works);
-+ }
-
+-
- /*
- * The original cpu is in the process of dying and may go away
- * anytime now. When that happens, we and all workers would
@@ -554,28 +504,29 @@
- while (gcwq->nr_workers != gcwq->nr_idle ||
- gcwq->flags & GCWQ_FREEZING ||
- gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
-+ while (!list_empty(&gcwq->worklist)) {
- int nr_works = 0;
-
- list_for_each_entry(work, &gcwq->worklist, entry) {
-@@ -3387,200 +3267,55 @@ static int __cpuinit trustee_thread(void *__gcwq)
- wake_up_process(worker->task);
- }
-
-+ spin_unlock_irq(&gcwq->lock);
-+
- if (need_to_create_worker(gcwq)) {
+- int nr_works = 0;
+-
+- list_for_each_entry(work, &gcwq->worklist, entry) {
+- send_mayday(work);
+- nr_works++;
+- }
+-
+- list_for_each_entry(worker, &gcwq->idle_list, entry) {
+- if (!nr_works--)
+- break;
+- wake_up_process(worker->task);
+- }
+-
+- if (need_to_create_worker(gcwq)) {
- spin_unlock_irq(&gcwq->lock);
- worker = create_worker(gcwq, false);
- spin_lock_irq(&gcwq->lock);
- if (worker) {
- worker->flags |= WORKER_ROGUE;
-+ worker = create_worker(gcwq, true);
-+ if (worker)
- start_worker(worker);
+- start_worker(worker);
- }
- }
-
+- }
+-
- /* give a breather */
- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
- break;
@@ -601,14 +552,10 @@
- * currently scheduled works by scheduling the rebind_work.
- */
- WARN_ON(!list_empty(&gcwq->idle_list));
-+ wait_event_timeout(gcwq->idle_wait,
-+ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
-
+-
- for_each_busy_worker(worker, i, pos, gcwq) {
- struct work_struct *rebind_work = &worker->rebind_work;
-+ spin_lock_irq(&gcwq->lock);
-+ }
-
+-
- /*
- * Rebind_work may race with future cpu hotplug
- * operations. Use a separate flag to mark that
@@ -616,22 +563,18 @@
- */
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
-+ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
-
+-
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
-+ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
-+ destroy_worker(worker);
-
+-
- debug_work_activate(rebind_work);
- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
-+ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
-
+-
- /* relinquish manager role */
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
-
@@ -639,10 +582,10 @@
- gcwq->trustee = NULL;
- gcwq->trustee_state = TRUSTEE_DONE;
- wake_up_all(&gcwq->trustee_wait);
- spin_unlock_irq(&gcwq->lock);
+- spin_unlock_irq(&gcwq->lock);
- return 0;
-}
-
+-
-/**
- * wait_trustee_state - wait for trustee to enter the specified state
- * @gcwq: gcwq the trustee of interest belongs to
@@ -653,7 +596,7 @@
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by cpu_callback.
-- */
+ */
-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
@@ -665,30 +608,23 @@
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
-+ gcwq = get_gcwq(get_cpu());
-+ spin_lock_irq(&gcwq->lock);
-+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
-+ list_del_init(&work->entry);
-+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
- }
-+ spin_unlock_irq(&gcwq->lock);
-+ put_cpu();
- }
+- }
+-}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
- struct task_struct *new_trustee = NULL;
-- struct worker *uninitialized_var(new_worker);
-- unsigned long flags;
+ struct worker *uninitialized_var(new_worker);
+ unsigned long flags;
action &= ~CPU_TASKS_FROZEN;
-- switch (action) {
+ switch (action) {
- case CPU_DOWN_PREPARE:
- new_trustee = kthread_create(trustee_thread, gcwq,
- "workqueue_trustee/%d\n", cpu);
@@ -696,13 +632,14 @@
- return notifier_from_errno(PTR_ERR(new_trustee));
- kthread_bind(new_trustee, cpu);
- /* fall through */
-- case CPU_UP_PREPARE:
-- BUG_ON(gcwq->first_idle);
-- new_worker = create_worker(gcwq, false);
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ new_worker = create_worker(gcwq, false);
- if (!new_worker) {
- if (new_trustee)
- kthread_stop(new_trustee);
-- return NOTIFY_BAD;
++ if (!new_worker)
+ return NOTIFY_BAD;
- }
- break;
- case CPU_POST_DEAD:
@@ -723,12 +660,12 @@
- gcwq->flags |= GCWQ_DISASSOCIATED;
- default:
- goto out;
-- }
--
-- /* some are called w/ irq disabled, don't disturb irq status */
-- spin_lock_irqsave(&gcwq->lock, flags);
--
-- switch (action) {
+ }
+
+ /* some are called w/ irq disabled, don't disturb irq status */
+ spin_lock_irqsave(&gcwq->lock, flags);
+
+ switch (action) {
- case CPU_DOWN_PREPARE:
- /* initialize trustee and tell it to acquire the gcwq */
- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
@@ -737,26 +674,21 @@
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
- /* fall through */
-- case CPU_UP_PREPARE:
-- BUG_ON(gcwq->first_idle);
-- gcwq->first_idle = new_worker;
-- break;
-+ switch (action) {
-+ case CPU_DOWN_PREPARE:
-+ flush_gcwq(gcwq);
-+ break;
-+ }
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ gcwq->first_idle = new_worker;
+ break;
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
-- case CPU_UP_CANCELED:
-- destroy_worker(gcwq->first_idle);
-- gcwq->first_idle = NULL;
-- break;
+ case CPU_UP_CANCELED:
+ destroy_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
- case CPU_DOWN_FAILED:
-- case CPU_ONLINE:
+ case CPU_ONLINE:
- gcwq->flags &= ~GCWQ_DISASSOCIATED;
- if (gcwq->trustee_state != TRUSTEE_DONE) {
- gcwq->trustee_state = TRUSTEE_RELEASE;
@@ -769,32 +701,137 @@
- * Put the first_idle in and request a real manager to
- * take a look.
- */
-- spin_unlock_irq(&gcwq->lock);
-- kthread_bind(gcwq->first_idle->task, cpu);
-- spin_lock_irq(&gcwq->lock);
-- gcwq->flags |= GCWQ_MANAGE_WORKERS;
-- start_worker(gcwq->first_idle);
-- gcwq->first_idle = NULL;
-- break;
-- }
--
-- spin_unlock_irqrestore(&gcwq->lock, flags);
--
+ spin_unlock_irq(&gcwq->lock);
+ kthread_bind(gcwq->first_idle->task, cpu);
+ spin_lock_irq(&gcwq->lock);
+@@ -3580,43 +3236,87 @@ static int __devinit workqueue_cpu_callb
+
+ spin_unlock_irqrestore(&gcwq->lock, flags);
+
-out:
return notifier_from_errno(0);
}
-@@ -3777,7 +3512,8 @@ static int __init init_workqueues(void)
+-/*
+- * Workqueues should be brought up before normal priority CPU notifiers.
+- * This will be registered high priority CPU notifier.
+- */
+-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+- unsigned long action,
+- void *hcpu)
++static void flush_gcwq(struct global_cwq *gcwq)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
+- case CPU_UP_PREPARE:
+- case CPU_UP_CANCELED:
+- case CPU_DOWN_FAILED:
+- case CPU_ONLINE:
+- return workqueue_cpu_callback(nfb, action, hcpu);
++ struct work_struct *work, *nw;
++ struct worker *worker, *n;
++ LIST_HEAD(non_affine_works);
++
++ spin_lock_irq(&gcwq->lock);
++ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
++ struct workqueue_struct *wq = get_work_cwq(work)->wq;
++
++ if (wq->flags & WQ_NON_AFFINE)
++ list_move(&work->entry, &non_affine_works);
++ }
++
++ while (!list_empty(&gcwq->worklist)) {
++ int nr_works = 0;
++
++ list_for_each_entry(work, &gcwq->worklist, entry) {
++ send_mayday(work);
++ nr_works++;
++ }
++
++ list_for_each_entry(worker, &gcwq->idle_list, entry) {
++ if (!nr_works--)
++ break;
++ wake_up_process(worker->task);
++ }
++
++ spin_unlock_irq(&gcwq->lock);
++
++ if (need_to_create_worker(gcwq)) {
++ worker = create_worker(gcwq, true);
++ if (worker)
++ start_worker(worker);
++ }
++
++ wait_event_timeout(gcwq->idle_wait,
++ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
++
++ spin_lock_irq(&gcwq->lock);
+ }
+- return NOTIFY_OK;
++
++ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
++
++ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
++ destroy_worker(worker);
++
++ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
++
++ spin_unlock_irq(&gcwq->lock);
++
++ gcwq = get_gcwq(get_cpu());
++ spin_lock_irq(&gcwq->lock);
++ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
++ list_del_init(&work->entry);
++ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
++ }
++ spin_unlock_irq(&gcwq->lock);
++ put_cpu();
+ }
+
+-/*
+- * Workqueues should be brought down after normal priority CPU notifiers.
+- * This will be registered as low priority CPU notifier.
+- */
+ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+- unsigned long action,
+- void *hcpu)
++ unsigned long action,
++ void *hcpu)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
+- case CPU_DOWN_PREPARE:
+- case CPU_DYING:
+- case CPU_POST_DEAD:
+- return workqueue_cpu_callback(nfb, action, hcpu);
+- }
+- return NOTIFY_OK;
++ unsigned int cpu = (unsigned long)hcpu;
++ struct global_cwq *gcwq = get_gcwq(cpu);
++
++ action &= ~CPU_TASKS_FROZEN;
++
++ switch (action) {
++ case CPU_DOWN_PREPARE:
++ flush_gcwq(gcwq);
++ break;
++ }
++
++
++ return notifier_from_errno(0);
+ }
+
+ #ifdef CONFIG_SMP
+@@ -3812,8 +3512,8 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
-- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+- cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
+- cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE);
+ hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
-@@ -3800,9 +3536,7 @@ static int __init init_workqueues(void)
+@@ -3836,9 +3536,7 @@ static int __init init_workqueues(void)
(unsigned long)gcwq);
ida_init(&gcwq->worker_ida);
@@ -805,6 +842,3 @@
}
/* create the initial worker */
---
-1.7.10
-
Modified: dists/sid/linux/debian/patches/features/all/rt2x00-add-rt5372-chipset-support.patch
==============================================================================
--- dists/sid/linux/debian/patches/features/all/rt2x00-add-rt5372-chipset-support.patch Thu Aug 2 13:06:18 2012 (r19303)
+++ dists/sid/linux/debian/patches/features/all/rt2x00-add-rt5372-chipset-support.patch Fri Aug 3 01:37:42 2012 (r19304)
@@ -9,6 +9,7 @@
Acked-by: Ivo van Doorn <IvDoorn at gmail.com>
Signed-off-by: John W. Linville <linville at tuxdriver.com>
Signed-off-by: Jonathan Nieder <jrnieder at gmail.com>
+[bwh: Adjust context to apply after 3.2.25]
---
drivers/net/wireless/rt2x00/rt2800.h | 1 +
drivers/net/wireless/rt2x00/rt2800lib.c | 155 ++++++++++++++++++++++++++-----
@@ -17,8 +18,6 @@
drivers/net/wireless/rt2x00/rt2x00.h | 1 +
5 files changed, 148 insertions(+), 26 deletions(-)
-diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
-index 4778620347c4..9efdaafb11e5 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -68,6 +68,7 @@
@@ -29,11 +28,9 @@
#define RF5390 0x5390
/*
-diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
-index 1ff428ba060e..1896cbf912ad 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
-@@ -402,7 +402,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
+@@ -402,7 +402,8 @@ int rt2800_load_firmware(struct rt2x00_d
if (rt2x00_is_pci(rt2x00dev)) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
@@ -43,7 +40,7 @@
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
-@@ -1904,7 +1905,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
+@@ -1904,7 +1905,8 @@ static void rt2800_config_channel_rf53xx
r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
@@ -53,7 +50,7 @@
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
-@@ -1951,6 +1953,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+@@ -1951,6 +1953,7 @@ static void rt2800_config_channel(struct
else if (rt2x00_rf(rt2x00dev, RF3052))
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
else if (rt2x00_rf(rt2x00dev, RF5370) ||
@@ -61,7 +58,7 @@
rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
-@@ -1965,7 +1968,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+@@ -1965,7 +1968,8 @@ static void rt2800_config_channel(struct
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
@@ -71,7 +68,7 @@
if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
&rt2x00dev->cap_flags)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
-@@ -2495,7 +2499,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
+@@ -2495,7 +2499,8 @@ static u8 rt2800_get_default_vgc(struct
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
@@ -81,7 +78,7 @@
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
-@@ -2630,7 +2635,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
+@@ -2630,7 +2635,8 @@ static int rt2800_init_registers(struct
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -91,7 +88,7 @@
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
-@@ -3006,7 +3012,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+@@ -3006,7 +3012,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
@@ -101,7 +98,7 @@
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
-@@ -3014,19 +3021,22 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+@@ -3014,19 +3021,22 @@ static int rt2800_init_bbp(struct rt2x00
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT3572) ||
@@ -127,7 +124,7 @@
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
-@@ -3044,7 +3054,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+@@ -3044,7 +3054,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
@@ -137,7 +134,7 @@
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
-@@ -3056,64 +3067,88 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+@@ -3056,64 +3067,88 @@ static int rt2800_init_bbp(struct rt2x00
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -234,7 +231,7 @@
rt2800_bbp_read(rt2x00dev, 138, &value);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-@@ -3125,7 +3160,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+@@ -3125,7 +3160,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2800_bbp_write(rt2x00dev, 138, value);
}
@@ -244,7 +241,7 @@
int ant, div_mode;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
-@@ -3251,13 +3287,15 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+@@ -3251,13 +3287,15 @@ static int rt2800_init_rfcsr(struct rt2x
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -261,7 +258,7 @@
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
-@@ -3475,6 +3513,66 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+@@ -3475,6 +3513,66 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
@@ -328,7 +325,7 @@
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
-@@ -3542,7 +3640,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+@@ -3542,7 +3640,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
@@ -338,7 +335,7 @@
/*
* Set back to initial state
*/
-@@ -3570,7 +3669,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+@@ -3570,7 +3669,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
@@ -348,7 +345,7 @@
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
-@@ -3638,7 +3738,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+@@ -3638,7 +3738,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
@@ -358,7 +355,7 @@
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
-@@ -3922,7 +4023,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+@@ -3922,7 +4023,8 @@ int rt2800_init_eeprom(struct rt2x00_dev
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
@@ -368,7 +365,7 @@
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
-@@ -3938,7 +4040,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+@@ -3938,7 +4040,8 @@ int rt2800_init_eeprom(struct rt2x00_dev
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
@@ -378,7 +375,7 @@
ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
return -ENODEV;
}
-@@ -3955,6 +4058,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+@@ -3955,6 +4058,7 @@ int rt2800_init_eeprom(struct rt2x00_dev
case RF3052:
case RF3320:
case RF5370:
@@ -386,7 +383,7 @@
case RF5390:
break;
default:
-@@ -4261,6 +4365,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+@@ -4261,6 +4365,7 @@ int rt2800_probe_hw_mode(struct rt2x00_d
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -394,11 +391,9 @@
rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
-diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
-index 837b460d4055..bf0f83cf3738 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
-@@ -480,7 +480,8 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
+@@ -480,7 +480,8 @@ static int rt2800pci_init_registers(stru
if (rt2x00_is_pcie(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3572) ||
@@ -408,11 +403,9 @@
rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
-diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
-index 0ffa1119acd1..d241ac6d7477 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
-@@ -1119,12 +1119,26 @@ static struct usb_device_id rt2800usb_device_table[] = {
+@@ -1133,15 +1133,29 @@ static struct usb_device_id rt2800usb_de
{ USB_DEVICE(0x5a57, 0x0284) },
#endif
#ifdef CONFIG_RT2800USB_RT53XX
@@ -424,6 +417,9 @@
/* Azurewave */
{ USB_DEVICE(0x13d3, 0x3329) },
{ USB_DEVICE(0x13d3, 0x3365) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1c) },
+ { USB_DEVICE(0x2001, 0x3c1d) },
+ /* LG innotek */
+ { USB_DEVICE(0x043e, 0x7a22) },
+ /* Panasonic */
@@ -439,8 +435,6 @@
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
-diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
-index 99ff12d0c29d..845dce5c997a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -192,6 +192,7 @@ struct rt2x00_chip {
@@ -451,6 +445,3 @@
u16 rf;
u16 rev;
---
-1.7.10.2
-
Modified: dists/sid/linux/debian/patches/series
==============================================================================
--- dists/sid/linux/debian/patches/series Thu Aug 2 13:06:18 2012 (r19303)
+++ dists/sid/linux/debian/patches/series Fri Aug 3 01:37:42 2012 (r19304)
@@ -343,8 +343,6 @@
# Until next ABI bump
debian/driver-core-avoid-ABI-change-for-removal-of-__must_check.patch
-bugfix/all/udf-Improve-table-length-check-to-avoid-possible-underflow.patch
-
# nouveau update to support Fermi (NVC0+) acceleration
features/all/fermi-accel/drm-nouveau-ttm-always-do-buffer-moves-on-kernel-cha.patch
features/all/fermi-accel/drm-nouveau-remove-subchannel-names-from-places-wher.patch