[kernel] r18446 - in people/ukleinek/3.0-rt/linux-2.6/debian: . patches/features/all/rt patches/series

Uwe Kleine-König ukleinek-guest@alioth.debian.org
Sun Jan 1 10:40:32 UTC 2012


Author: ukleinek-guest
Date: Sun Jan  1 10:40:30 2012
New Revision: 18446

Log:
[amd64] Update rt featureset to 3.0.12-rt30

Added:
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch
      - copied, changed from r18445, people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.9-rt26.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx5-extra
Deleted:
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.9-rt26.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx2-extra
Modified:
   people/ukleinek/3.0-rt/linux-2.6/debian/changelog

Modified: people/ukleinek/3.0-rt/linux-2.6/debian/changelog
==============================================================================
--- people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sun Jan  1 10:40:19 2012	(r18445)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sun Jan  1 10:40:30 2012	(r18446)
@@ -1,3 +1,9 @@
+linux-2.6 (3.0.0-6ptx5) unstable; urgency=low
+
+  * [amd64] Update rt featureset to 3.0.12-rt30
+
+ -- Uwe Kleine-König <u.kleine-koenig@pengutronix.de>  Thu, 08 Dec 2011 09:28:18 +0100
+
 linux-2.6 (3.0.0-6ptx4) unstable; urgency=low
 
   * Add stable releases 3.0.11 and 3.0.12

Copied and modified: people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch (from r18445, people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.9-rt26.patch)
==============================================================================
--- people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.9-rt26.patch	Sun Jan  1 10:40:19 2012	(r18445, copy source)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch	Sun Jan  1 10:40:30 2012	(r18446)
@@ -5329,10 +5329,98 @@
  
  /* RED-PEN may want to readd seq locking, but then the variable should be
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 77c9d86..545c61b 100644
+index 77c9d86..3567c76 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -4900,6 +4900,13 @@ int kvm_arch_init(void *opaque)
+@@ -2283,6 +2283,13 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 	entry->flags = 0;
+ }
+ 
++static bool supported_xcr0_bit(unsigned bit)
++{
++	u64 mask = ((u64)1 << bit);
++
++	return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
++}
++
+ #define F(x) bit(X86_FEATURE_##x)
+ 
+ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+@@ -2393,6 +2400,8 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 		}
+ 		break;
+ 	}
++	case 9:
++		break;
+ 	case 0xb: {
+ 		int i, level_type;
+ 
+@@ -2410,16 +2419,17 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 		break;
+ 	}
+ 	case 0xd: {
+-		int i;
++		int idx, i;
+ 
+ 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+-		for (i = 1; *nent < maxnent && i < 64; ++i) {
+-			if (entry[i].eax == 0)
++		for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
++			do_cpuid_1_ent(&entry[i], function, idx);
++			if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
+ 				continue;
+-			do_cpuid_1_ent(&entry[i], function, i);
+ 			entry[i].flags |=
+ 			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ 			++*nent;
++			++i;
+ 		}
+ 		break;
+ 	}
+@@ -2451,6 +2461,24 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 		entry->ecx &= kvm_supported_word6_x86_features;
+ 		cpuid_mask(&entry->ecx, 6);
+ 		break;
++	case 0x80000008: {
++		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
++		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
++		unsigned phys_as = entry->eax & 0xff;
++
++		if (!g_phys_as)
++			g_phys_as = phys_as;
++		entry->eax = g_phys_as | (virt_as << 8);
++		entry->ebx = entry->edx = 0;
++		break;
++	}
++	case 0x80000019:
++		entry->ecx = entry->edx = 0;
++		break;
++	case 0x8000001a:
++		break;
++	case 0x8000001d:
++		break;
+ 	/*Add support for Centaur's CPUID instruction*/
+ 	case 0xC0000000:
+ 		/*Just support up to 0xC0000004 now*/
+@@ -2460,10 +2488,16 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 		entry->edx &= kvm_supported_word5_x86_features;
+ 		cpuid_mask(&entry->edx, 5);
+ 		break;
++	case 3: /* Processor serial number */
++	case 5: /* MONITOR/MWAIT */
++	case 6: /* Thermal management */
++	case 0xA: /* Architectural Performance Monitoring */
++	case 0x80000007: /* Advanced power management */
+ 	case 0xC0000002:
+ 	case 0xC0000003:
+ 	case 0xC0000004:
+-		/*Now nothing to do, reserved for the future*/
++	default:
++		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ 		break;
+ 	}
+ 
+@@ -4900,6 +4934,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
@@ -5580,6 +5668,483 @@
  }
  
  void blk_finish_plug(struct blk_plug *plug)
+diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
+index 73863d8..6c169a2 100644
+--- a/drivers/acpi/acpica/acglobal.h
++++ b/drivers/acpi/acpica/acglobal.h
+@@ -229,8 +229,8 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+  * Spinlocks are used for interfaces that can be possibly called at
+  * interrupt level
+  */
+-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
+-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
++extern raw_spinlock_t acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
++extern raw_spinlock_t acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+ 
+ /*****************************************************************************
+  *
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index 65c79ad..36e7e10 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -357,7 +357,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
+ 	 * Note: Not necessary to obtain the hardware lock, since the GPE
+ 	 * registers are owned by the gpe_lock.
+ 	 */
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Examine all GPE blocks attached to this interrupt level */
+ 
+@@ -440,7 +440,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
+ 
+       unlock_and_exit:
+ 
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return (int_status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
+index ca2c41a..60c47b9 100644
+--- a/drivers/acpi/acpica/evgpeblk.c
++++ b/drivers/acpi/acpica/evgpeblk.c
+@@ -95,7 +95,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ 
+ 	/* Install the new block at the end of the list with lock */
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 	if (gpe_xrupt_block->gpe_block_list_head) {
+ 		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
+ 		while (next_gpe_block->next) {
+@@ -109,7 +109,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ 	}
+ 
+ 	gpe_block->xrupt_block = gpe_xrupt_block;
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 
+       unlock_and_exit:
+ 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+@@ -156,7 +156,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
+ 	} else {
+ 		/* Remove the block on this interrupt with lock */
+ 
+-		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++		raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 		if (gpe_block->previous) {
+ 			gpe_block->previous->next = gpe_block->next;
+ 		} else {
+@@ -167,7 +167,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
+ 		if (gpe_block->next) {
+ 			gpe_block->next->previous = gpe_block->previous;
+ 		}
+-		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++		raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	}
+ 
+ 	acpi_current_gpe_count -= gpe_block->gpe_count;
+diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
+index 80a81d0..895b68ab 100644
+--- a/drivers/acpi/acpica/evgpeutil.c
++++ b/drivers/acpi/acpica/evgpeutil.c
+@@ -70,7 +70,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+ 
+ 	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Walk the interrupt level descriptor list */
+ 
+@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+ 	}
+ 
+       unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+@@ -237,7 +237,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+ 
+ 	/* Install new interrupt descriptor with spin lock */
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 	if (acpi_gbl_gpe_xrupt_list_head) {
+ 		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ 		while (next_gpe_xrupt->next) {
+@@ -249,7 +249,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+ 	} else {
+ 		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ 	}
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Install new interrupt handler if not SCI_INT */
+ 
+@@ -306,7 +306,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+ 
+ 	/* Unlink the interrupt block with lock */
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 	if (gpe_xrupt->previous) {
+ 		gpe_xrupt->previous->next = gpe_xrupt->next;
+ 	} else {
+@@ -318,7 +318,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+ 	if (gpe_xrupt->next) {
+ 		gpe_xrupt->next->previous = gpe_xrupt->previous;
+ 	}
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Free the block */
+ 
+diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
+index e114140..e849c10 100644
+--- a/drivers/acpi/acpica/evxface.c
++++ b/drivers/acpi/acpica/evxface.c
+@@ -750,7 +750,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+ 		goto unlock_and_exit;
+ 	}
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -798,14 +798,14 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+ 	    ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
+ 	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
+ 
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 
+ unlock_and_exit:
+ 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ 	return_ACPI_STATUS(status);
+ 
+ free_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	ACPI_FREE(handler);
+ 	goto unlock_and_exit;
+ }
+@@ -852,7 +852,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ 		return_ACPI_STATUS(status);
+ 	}
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -903,7 +903,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ 	ACPI_FREE(handler);
+ 
+ unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 
+ 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ 	return_ACPI_STATUS(status);
+diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
+index 52aaff3..ce07ebb 100644
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -121,7 +121,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -130,7 +130,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 		status = acpi_ev_add_gpe_reference(gpe_event_info);
+ 	}
+ 
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+@@ -158,7 +158,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -167,7 +167,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 		status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
+ 	}
+ 
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+@@ -214,7 +214,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
+ 		return_ACPI_STATUS(AE_BAD_PARAMETER);
+ 	}
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -270,7 +270,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
+ 	status = AE_OK;
+ 
+  unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
+@@ -300,7 +300,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 ac
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/*
+ 	 * Ensure that we have a valid GPE number and that this GPE is in
+@@ -346,7 +346,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 ac
+ 	}
+ 
+ unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+@@ -372,7 +372,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -385,7 +385,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+ 	status = acpi_hw_clear_gpe(gpe_event_info);
+ 
+       unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+@@ -415,7 +415,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+ 
+-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
+ 
+ 	/* Ensure that we have a valid GPE number */
+ 
+@@ -433,7 +433,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
+ 		*event_status |= ACPI_EVENT_FLAG_HANDLE;
+ 
+       unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index 55accb7..4772930 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -263,7 +263,7 @@ acpi_status acpi_hw_clear_acpi_status(void)
+ 			  ACPI_BITMASK_ALL_FIXED_STATUS,
+ 			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
+ 
+-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
+ 
+ 	/* Clear the fixed events in PM1 A/B */
+ 
+@@ -278,7 +278,7 @@ acpi_status acpi_hw_clear_acpi_status(void)
+ 	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
+ 
+       unlock_and_exit:
+-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index f75f81a..76159ba 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -386,7 +386,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+ 		return_ACPI_STATUS(AE_BAD_PARAMETER);
+ 	}
+ 
+-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++	raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
+ 
+ 	/*
+ 	 * At this point, we know that the parent register is one of the
+@@ -447,7 +447,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+ 
+ unlock_and_exit:
+ 
+-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++	raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
+index 7d797e2..420eecf 100644
+--- a/drivers/acpi/acpica/utmutex.c
++++ b/drivers/acpi/acpica/utmutex.c
+@@ -52,6 +52,9 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id);
+ 
+ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
+ 
++DEFINE_RAW_SPINLOCK(acpi_gbl_gpe_lock);
++DEFINE_RAW_SPINLOCK(acpi_gbl_hardware_lock);
++
+ /*******************************************************************************
+  *
+  * FUNCTION:    acpi_ut_mutex_initialize
+@@ -81,18 +84,6 @@ acpi_status acpi_ut_mutex_initialize(void)
+ 		}
+ 	}
+ 
+-	/* Create the spinlocks for use at interrupt level */
+-
+-	status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+-	if (ACPI_FAILURE (status)) {
+-		return_ACPI_STATUS (status);
+-	}
+-
+-	status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+-	if (ACPI_FAILURE (status)) {
+-		return_ACPI_STATUS (status);
+-	}
+-
+ 	/* Mutex for _OSI support */
+ 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
+ 	if (ACPI_FAILURE(status)) {
+@@ -132,13 +123,7 @@ void acpi_ut_mutex_terminate(void)
+ 
+ 	acpi_os_delete_mutex(acpi_gbl_osi_mutex);
+ 
+-	/* Delete the spinlocks */
+-
+-	acpi_os_delete_lock(acpi_gbl_gpe_lock);
+-	acpi_os_delete_lock(acpi_gbl_hardware_lock);
+-
+ 	/* Delete the reader/writer lock */
+-
+ 	acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock);
+ 	return_VOID;
+ }
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index b19a18d..5812e01 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -152,10 +152,10 @@ static int ec_transaction_done(struct acpi_ec *ec)
+ {
+ 	unsigned long flags;
+ 	int ret = 0;
+-	spin_lock_irqsave(&ec->curr_lock, flags);
++	raw_spin_lock_irqsave(&ec->curr_lock, flags);
+ 	if (!ec->curr || ec->curr->done)
+ 		ret = 1;
+-	spin_unlock_irqrestore(&ec->curr_lock, flags);
++	raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -169,7 +169,7 @@ static void start_transaction(struct acpi_ec *ec)
+ static void advance_transaction(struct acpi_ec *ec, u8 status)
+ {
+ 	unsigned long flags;
+-	spin_lock_irqsave(&ec->curr_lock, flags);
++	raw_spin_lock_irqsave(&ec->curr_lock, flags);
+ 	if (!ec->curr)
+ 		goto unlock;
+ 	if (ec->curr->wlen > ec->curr->wi) {
+@@ -194,7 +194,7 @@ err:
+ 	if (in_interrupt())
+ 		++ec->curr->irq_count;
+ unlock:
+-	spin_unlock_irqrestore(&ec->curr_lock, flags);
++	raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
+ }
+ 
+ static int acpi_ec_sync_query(struct acpi_ec *ec);
+@@ -232,9 +232,9 @@ static int ec_poll(struct acpi_ec *ec)
+ 		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
+ 			break;
+ 		pr_debug(PREFIX "controller reset, restart transaction\n");
+-		spin_lock_irqsave(&ec->curr_lock, flags);
++		raw_spin_lock_irqsave(&ec->curr_lock, flags);
+ 		start_transaction(ec);
+-		spin_unlock_irqrestore(&ec->curr_lock, flags);
++		raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
+ 	}
+ 	return -ETIME;
+ }
+@@ -247,17 +247,17 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ 	if (EC_FLAGS_MSI)
+ 		udelay(ACPI_EC_MSI_UDELAY);
+ 	/* start transaction */
+-	spin_lock_irqsave(&ec->curr_lock, tmp);
++	raw_spin_lock_irqsave(&ec->curr_lock, tmp);
+ 	/* following two actions should be kept atomic */
+ 	ec->curr = t;
+ 	start_transaction(ec);
+ 	if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
+ 		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+-	spin_unlock_irqrestore(&ec->curr_lock, tmp);
++	raw_spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ 	ret = ec_poll(ec);
+-	spin_lock_irqsave(&ec->curr_lock, tmp);
++	raw_spin_lock_irqsave(&ec->curr_lock, tmp);
+ 	ec->curr = NULL;
+-	spin_unlock_irqrestore(&ec->curr_lock, tmp);
++	raw_spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ 	return ret;
+ }
+ 
+@@ -678,7 +678,7 @@ static struct acpi_ec *make_acpi_ec(void)
+ 	mutex_init(&ec->lock);
+ 	init_waitqueue_head(&ec->wait);
+ 	INIT_LIST_HEAD(&ec->list);
+-	spin_lock_init(&ec->curr_lock);
++	raw_spin_lock_init(&ec->curr_lock);
+ 	return ec;
+ }
+ 
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index ca75b9c..68ed95f 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -62,7 +62,7 @@ struct acpi_ec {
+ 	wait_queue_head_t wait;
+ 	struct list_head list;
+ 	struct transaction *curr;
+-	spinlock_t curr_lock;
++	raw_spinlock_t curr_lock;
+ };
+ 
+ extern struct acpi_ec *first_ec;
 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
 index 431ab11..3e05314 100644
 --- a/drivers/acpi/processor_idle.c
@@ -6532,7 +7097,7 @@
  
  delay_and_out:
 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index e509147..777a79d 100644
+index cbb50d3..38a648f 100644
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
 @@ -1226,8 +1226,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -6556,7 +7121,7 @@
  }
  
  #ifdef CONFIG_MULTICORE_RAID456
-@@ -4756,6 +4758,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
+@@ -4764,6 +4766,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
  			break;
  		}
  		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
@@ -6577,7 +7142,7 @@
  		void		*scribble;   /* space for constructing buffer
  					      * lists and performing address
 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
-index 3546474..7254a4f 100644
+index 56c05ef..1704061 100644
 --- a/drivers/misc/Kconfig
 +++ b/drivers/misc/Kconfig
 @@ -82,6 +82,7 @@ config AB8500_PWM
@@ -8674,6 +9239,19 @@
  	return 0;
  }
  
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index fdaa42a..1a6cc67 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -441,7 +441,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
+ 	WARN_ON(!dev->block_ucfg_access);
+ 
+ 	dev->block_ucfg_access = 0;
+-	wake_up_all(&pci_ucfg_wait);
++	wake_up_all_locked(&pci_ucfg_wait);
+ 	raw_spin_unlock_irqrestore(&pci_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
 diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
 index 6dcc7e2..82dd604 100644
 --- a/drivers/pci/dmar.c
@@ -8858,7 +9436,7 @@
  }
  
 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
-index f02c34d..8c2564d 100644
+index f02c34d..bc05a51 100644
 --- a/drivers/pci/intel-iommu.c
 +++ b/drivers/pci/intel-iommu.c
 @@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
@@ -9021,6 +9599,24 @@
  	}
  
  	for_each_active_iommu(iommu, drhd)
+@@ -3569,6 +3569,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ 			found = 1;
+ 	}
+ 
++	spin_unlock_irqrestore(&device_domain_lock, flags);
++
+ 	if (found == 0) {
+ 		unsigned long tmp_flags;
+ 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+@@ -3585,8 +3587,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ 			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ 		}
+ 	}
+-
+-	spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
+ 
+ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
 index 3607faf..c0c1510 100644
 --- a/drivers/pci/intr_remapping.c
@@ -9475,10 +10071,10 @@
  EXPORT_SYMBOL(tty_flip_buffer_push);
  
 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
-index ef925d5..e5e5902 100644
+index a76c808..dfff19f 100644
 --- a/drivers/tty/tty_ldisc.c
 +++ b/drivers/tty/tty_ldisc.c
-@@ -70,7 +70,7 @@ static void put_ldisc(struct tty_ldisc *ld)
+@@ -71,7 +71,7 @@ static void put_ldisc(struct tty_ldisc *ld)
  	 * We really want an "atomic_dec_and_lock_irqsave()",
  	 * but we don't have it, so this does it by hand.
  	 */
@@ -9487,7 +10083,7 @@
  	if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
  		struct tty_ldisc_ops *ldo = ld->ops;
  
-@@ -81,7 +81,7 @@ static void put_ldisc(struct tty_ldisc *ld)
+@@ -82,7 +82,7 @@ static void put_ldisc(struct tty_ldisc *ld)
  		kfree(ld);
  		return;
  	}
@@ -10284,10 +10880,10 @@
  
  #endif /* __XFS_SUPPORT_MRLOCK_H__ */
 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
-index 347cae9..dd45aae 100644
+index 28de70b..01bb878 100644
 --- a/fs/xfs/linux-2.6/xfs_super.c
 +++ b/fs/xfs/linux-2.6/xfs_super.c
-@@ -1002,7 +1002,7 @@ xfs_fs_evict_inode(
+@@ -986,7 +986,7 @@ xfs_fs_evict_inode(
  	 * (and basically indicate what we are doing), we explicitly
  	 * re-init the iolock here.
  	 */
@@ -10706,7 +11302,7 @@
  	int		agaw; /* agaw of this iommu */
  	int		msagaw; /* max sagaw of this iommu */
 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
-index b9490bf..a62158f 100644
+index b9490bf..3142442 100644
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
 @@ -205,7 +205,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
@@ -10758,7 +11354,82 @@
  /* This is the worklist that queues up per-cpu softirq work.
   *
   * send_remote_sendirq() adds work to these lists, and
-@@ -626,6 +638,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+@@ -489,8 +501,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+      to be executed on some cpu at least once after this.
+    * If the tasklet is already scheduled, but its execution is still not
+      started, it will be executed only once.
+-   * If this tasklet is already running on another CPU (or schedule is called
+-     from tasklet itself), it is rescheduled for later.
++   * If this tasklet is already running on another CPU, it is rescheduled
++     for later.
++   * Schedule must not be called from the tasklet itself (a lockup occurs)
+    * Tasklet is strictly serialized wrt itself, but not
+      wrt another tasklets. If client needs some intertask synchronization,
+      he makes it with spinlocks.
+@@ -515,27 +528,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+ enum
+ {
+ 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
+-	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
++	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
++	TASKLET_STATE_PENDING	/* Tasklet is pending */
+ };
+ 
+-#ifdef CONFIG_SMP
++#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
++#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
++#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
++
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+ 
++static inline int tasklet_tryunlock(struct tasklet_struct *t)
++{
++	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
++}
++
+ static inline void tasklet_unlock(struct tasklet_struct *t)
+ {
+ 	smp_mb__before_clear_bit(); 
+ 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+ 
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+-}
++extern void tasklet_unlock_wait(struct tasklet_struct *t);
++
+ #else
+ #define tasklet_trylock(t) 1
++#define tasklet_tryunlock(t)	1
+ #define tasklet_unlock_wait(t) do { } while (0)
+ #define tasklet_unlock(t) do { } while (0)
+ #endif
+@@ -584,17 +606,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
+ 	smp_mb();
+ }
+ 
+-static inline void tasklet_enable(struct tasklet_struct *t)
+-{
+-	smp_mb__before_atomic_dec();
+-	atomic_dec(&t->count);
+-}
+-
+-static inline void tasklet_hi_enable(struct tasklet_struct *t)
+-{
+-	smp_mb__before_atomic_dec();
+-	atomic_dec(&t->count);
+-}
++extern  void tasklet_enable(struct tasklet_struct *t);
++extern  void tasklet_hi_enable(struct tasklet_struct *t);
+ 
+ extern void tasklet_kill(struct tasklet_struct *t);
+ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+@@ -626,6 +639,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
  	tasklet_kill(&ttimer->tasklet);
  }
  
@@ -14236,6 +14907,29 @@
  }
  
  static inline void count_vm_events(enum vm_event_item item, long delta)
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 3efc9f3..1e904b8 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
+ 			void *key);
+-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
++void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
+ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+ void __wake_up_bit(wait_queue_head_t *, void *, int);
+ int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
+ #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+ #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+ #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+-#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
++#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
++#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
+ 
+ #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+ #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
 index f584aba..eda8db8 100644
 --- a/include/linux/workqueue.h
@@ -15661,10 +16355,21 @@
  
  /**
 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
-index aa57d5d..e57f1b3 100644
+index b5f4742..d09e0f5 100644
 --- a/kernel/irq/spurious.c
 +++ b/kernel/irq/spurious.c
-@@ -339,6 +339,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ 	 */
+ 	action = desc->action;
+ 	if (!action || !(action->flags & IRQF_SHARED) ||
+-	    (action->flags & __IRQF_TIMER) || !action->next)
++	    (action->flags & __IRQF_TIMER) ||
++	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
++	    !action->next)
+ 		goto out;
+ 
+ 	/* Already running on another processor */
+@@ -339,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
  
  static int __init irqfixup_setup(char *str)
  {
@@ -15676,7 +16381,7 @@
  	irqfixup = 1;
  	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
  	printk(KERN_WARNING "This may impact system performance.\n");
-@@ -351,6 +356,11 @@ module_param(irqfixup, int, 0644);
+@@ -351,6 +358,11 @@ module_param(irqfixup, int, 0644);
  
  static int __init irqpoll_setup(char *str)
  {
@@ -18512,7 +19217,7 @@
  #endif
  
 diff --git a/kernel/sched.c b/kernel/sched.c
-index 063d7a4..640f740 100644
+index 063d7a4..63aeba0 100644
 --- a/kernel/sched.c
 +++ b/kernel/sched.c
 @@ -71,6 +71,7 @@
@@ -18937,6 +19642,18 @@
  		sub_preempt_count_notrace(PREEMPT_ACTIVE);
  
  		/*
+@@ -4487,9 +4642,9 @@ EXPORT_SYMBOL(__wake_up);
+ /*
+  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+  */
+-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
++void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+ {
+-	__wake_up_common(q, mode, 1, 0, NULL);
++	__wake_up_common(q, mode, nr, 0, NULL);
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_locked);
+ 
 @@ -4814,9 +4969,8 @@ long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  EXPORT_SYMBOL(sleep_on_timeout);
  
@@ -19901,10 +20618,14 @@
  	} else {
  		/*
 diff --git a/kernel/softirq.c b/kernel/softirq.c
-index fca82c3..026a283 100644
+index fca82c3..3489d06 100644
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -24,6 +24,7 @@
+@@ -21,9 +21,11 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
  #include <linux/ftrace.h>
  #include <linux/smp.h>
  #include <linux/tick.h>
@@ -19912,7 +20633,7 @@
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/irq.h>
-@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+@@ -61,6 +63,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  	"TASKLET", "SCHED", "HRTIMER", "RCU"
  };
  
@@ -19980,7 +20701,7 @@
  /*
   * we cannot loop indefinitely here to avoid userspace starvation,
   * but we also don't want to introduce a worst case 1/HZ latency
-@@ -76,6 +138,36 @@ static void wakeup_softirqd(void)
+@@ -76,6 +139,36 @@ static void wakeup_softirqd(void)
  		wake_up_process(tsk);
  }
  
@@ -20017,7 +20738,7 @@
  /*
   * preempt_count and SOFTIRQ_OFFSET usage:
   * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -206,7 +298,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
+@@ -206,7 +299,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  
  asmlinkage void __do_softirq(void)
  {
@@ -20025,7 +20746,7 @@
  	__u32 pending;
  	int max_restart = MAX_SOFTIRQ_RESTART;
  	int cpu;
-@@ -215,7 +306,7 @@ asmlinkage void __do_softirq(void)
+@@ -215,7 +307,7 @@ asmlinkage void __do_softirq(void)
  	account_system_vtime(current);
  
  	__local_bh_disable((unsigned long)__builtin_return_address(0),
@@ -20034,7 +20755,7 @@
  	lockdep_softirq_enter();
  
  	cpu = smp_processor_id();
-@@ -223,36 +314,7 @@ restart:
+@@ -223,36 +315,7 @@ restart:
  	/* Reset the pending bitmask before enabling irqs */
  	set_softirq_pending(0);
  
@@ -20072,7 +20793,7 @@
  
  	pending = local_softirq_pending();
  	if (pending && --max_restart)
-@@ -267,6 +329,26 @@ restart:
+@@ -267,6 +330,26 @@ restart:
  	__local_bh_enable(SOFTIRQ_OFFSET);
  }
  
@@ -20099,7 +20820,7 @@
  #ifndef __ARCH_HAS_DO_SOFTIRQ
  
  asmlinkage void do_softirq(void)
-@@ -289,6 +371,184 @@ asmlinkage void do_softirq(void)
+@@ -289,6 +372,184 @@ asmlinkage void do_softirq(void)
  
  #endif
  
@@ -20284,7 +21005,7 @@
  /*
   * Enter an interrupt context.
   */
-@@ -302,9 +562,9 @@ void irq_enter(void)
+@@ -302,9 +563,9 @@ void irq_enter(void)
  		 * Prevent raise_softirq from needlessly waking up ksoftirqd
  		 * here, as softirq will be serviced on return from interrupt.
  		 */
@@ -20296,7 +21017,7 @@
  	}
  
  	__irq_enter();
-@@ -313,6 +573,7 @@ void irq_enter(void)
+@@ -313,6 +574,7 @@ void irq_enter(void)
  #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
  static inline void invoke_softirq(void)
  {
@@ -20304,7 +21025,7 @@
  	if (!force_irqthreads)
  		__do_softirq();
  	else {
-@@ -321,10 +582,14 @@ static inline void invoke_softirq(void)
+@@ -321,10 +583,14 @@ static inline void invoke_softirq(void)
  		wakeup_softirqd();
  		__local_bh_enable(SOFTIRQ_OFFSET);
  	}
@@ -20319,7 +21040,7 @@
  	if (!force_irqthreads)
  		do_softirq();
  	else {
-@@ -333,6 +598,9 @@ static inline void invoke_softirq(void)
+@@ -333,6 +599,9 @@ static inline void invoke_softirq(void)
  		wakeup_softirqd();
  		__local_bh_enable(SOFTIRQ_OFFSET);
  	}
@@ -20329,7 +21050,7 @@
  }
  #endif
  
-@@ -353,7 +621,7 @@ void irq_exit(void)
+@@ -353,7 +622,7 @@ void irq_exit(void)
  	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
  		tick_nohz_stop_sched_tick(0);
  #endif
@@ -20338,8 +21059,275 @@
  }
  
  /*
-@@ -739,29 +1007,21 @@ void __init softirq_init(void)
+@@ -402,15 +671,45 @@ struct tasklet_head
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+ 
++static void inline
++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
++{
++	if (tasklet_trylock(t)) {
++again:
++		/* We may have been preempted before tasklet_trylock
++		 * and __tasklet_action may have already run.
++		 * So double check the sched bit while the takslet
++		 * is locked before adding it to the list.
++		 */
++		if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++			t->next = NULL;
++			*head->tail = t;
++			head->tail = &(t->next);
++			raise_softirq_irqoff(nr);
++			tasklet_unlock(t);
++		} else {
++			/* This is subtle. If we hit the corner case above
++			 * It is possible that we get preempted right here,
++			 * and another task has successfully called
++			 * tasklet_schedule(), then this function, and
++			 * failed on the trylock. Thus we must be sure
++			 * before releasing the tasklet lock, that the
++			 * SCHED_BIT is clear. Otherwise the tasklet
++			 * may get its SCHED_BIT set, but not added to the
++			 * list
++			 */
++			if (!tasklet_tryunlock(t))
++				goto again;
++		}
++	}
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	t->next = NULL;
+-	*__this_cpu_read(tasklet_vec.tail) = t;
+-	__this_cpu_write(tasklet_vec.tail, &(t->next));
+-	raise_softirq_irqoff(TASKLET_SOFTIRQ);
++	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -421,10 +720,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	t->next = NULL;
+-	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+-	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
+-	raise_softirq_irqoff(HI_SOFTIRQ);
++	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -432,50 +728,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
+ 
+ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ {
+-	BUG_ON(!irqs_disabled());
+-
+-	t->next = __this_cpu_read(tasklet_hi_vec.head);
+-	__this_cpu_write(tasklet_hi_vec.head, t);
+-	__raise_softirq_irqoff(HI_SOFTIRQ);
++	__tasklet_hi_schedule(t);
+ }
+ 
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
++ 
++void  tasklet_enable(struct tasklet_struct *t)
++{
++	if (!atomic_dec_and_test(&t->count))
++		return;
++	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++		tasklet_schedule(t);
++}
++ 
++EXPORT_SYMBOL(tasklet_enable);
  
+-static void tasklet_action(struct softirq_action *a)
++void  tasklet_hi_enable(struct tasklet_struct *t)
+ {
+-	struct tasklet_struct *list;
++	if (!atomic_dec_and_test(&t->count))
++		return;
++	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++		tasklet_hi_schedule(t);
++}
+ 
+-	local_irq_disable();
+-	list = __this_cpu_read(tasklet_vec.head);
+-	__this_cpu_write(tasklet_vec.head, NULL);
+-	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+-	local_irq_enable();
++EXPORT_SYMBOL(tasklet_hi_enable);
++
++static void
++__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
++{
++	int loops = 1000000;
+ 
+ 	while (list) {
+ 		struct tasklet_struct *t = list;
+ 
+ 		list = list->next;
+ 
+-		if (tasklet_trylock(t)) {
+-			if (!atomic_read(&t->count)) {
+-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+-					BUG();
+-				t->func(t->data);
+-				tasklet_unlock(t);
+-				continue;
+-			}
+-			tasklet_unlock(t);
++		/*
++		 * Should always succeed - after a tasklist got on the
++		 * list (after getting the SCHED bit set from 0 to 1),
++		 * nothing but the tasklet softirq it got queued to can
++		 * lock it:
++		 */
++		if (!tasklet_trylock(t)) {
++			WARN_ON(1);
++			continue;
+ 		}
+ 
+-		local_irq_disable();
+ 		t->next = NULL;
+-		*__this_cpu_read(tasklet_vec.tail) = t;
+-		__this_cpu_write(tasklet_vec.tail, &(t->next));
+-		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
+-		local_irq_enable();
++
++		/*
++		 * If we cannot handle the tasklet because it's disabled,
++		 * mark it as pending. tasklet_enable() will later
++		 * re-schedule the tasklet.
++		 */
++		if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++			/* implicit unlock: */
++			wmb();
++			t->state = TASKLET_STATEF_PENDING;
++			continue;
++		}
++
++		/*
++		 * After this point on the tasklet might be rescheduled
++		 * on another CPU, but it can only be added to another
++		 * CPU's tasklet list if we unlock the tasklet (which we
++		 * dont do yet).
++		 */
++		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++			WARN_ON(1);
++
++again:
++		t->func(t->data);
++
++		/*
++		 * Try to unlock the tasklet. We must use cmpxchg, because
++		 * another CPU might have scheduled or disabled the tasklet.
++		 * We only allow the STATE_RUN -> 0 transition here.
++		 */
++		while (!tasklet_tryunlock(t)) {
++			/*
++			 * If it got disabled meanwhile, bail out:
++			 */
++			if (atomic_read(&t->count))
++				goto out_disabled;
++			/*
++			 * If it got scheduled meanwhile, re-execute
++			 * the tasklet function:
++			 */
++			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++				goto again;
++			if (!--loops) {
++				printk("hm, tasklet state: %08lx\n", t->state);
++				WARN_ON(1);
++				tasklet_unlock(t);
++				break;
++			}
++		}
+ 	}
+ }
+ 
++static void tasklet_action(struct softirq_action *a)
++{
++	struct tasklet_struct *list;
++
++	local_irq_disable();
++	list = __get_cpu_var(tasklet_vec).head;
++	__get_cpu_var(tasklet_vec).head = NULL;
++	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
++	local_irq_enable();
++
++	__tasklet_action(a, list);
++}
++
+ static void tasklet_hi_action(struct softirq_action *a)
+ {
+ 	struct tasklet_struct *list;
+@@ -486,29 +851,7 @@ static void tasklet_hi_action(struct softirq_action *a)
+ 	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+ 	local_irq_enable();
+ 
+-	while (list) {
+-		struct tasklet_struct *t = list;
+-
+-		list = list->next;
+-
+-		if (tasklet_trylock(t)) {
+-			if (!atomic_read(&t->count)) {
+-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+-					BUG();
+-				t->func(t->data);
+-				tasklet_unlock(t);
+-				continue;
+-			}
+-			tasklet_unlock(t);
+-		}
+-
+-		local_irq_disable();
+-		t->next = NULL;
+-		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+-		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+-		__raise_softirq_irqoff(HI_SOFTIRQ);
+-		local_irq_enable();
+-	}
++	__tasklet_action(a, list);
+ }
+ 
+ 
+@@ -531,7 +874,7 @@ void tasklet_kill(struct tasklet_struct *t)
+ 
+ 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ 		do {
+-			yield();
++			msleep(1);
+ 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ 	}
+ 	tasklet_unlock_wait(t);
+@@ -737,31 +1080,40 @@ void __init softirq_init(void)
+ 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+ 
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++		/*
++		 * Hack for now to avoid this busy-loop:
++		 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++		msleep(1);
++#else
++		barrier();
++#endif
++	}
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
  static int run_ksoftirqd(void * __bind_cpu)
  {
 +	ksoftirqd_set_sched_params();
@@ -20374,7 +21362,7 @@
  			cond_resched();
  			preempt_disable();
  			rcu_note_context_switch((long)__bind_cpu);
-@@ -774,6 +1034,7 @@ static int run_ksoftirqd(void * __bind_cpu)
+@@ -774,6 +1126,7 @@ static int run_ksoftirqd(void * __bind_cpu)
  
  wait_to_die:
  	preempt_enable();
@@ -20382,7 +21370,7 @@
  	/* Wait for kthread_stop */
  	set_current_state(TASK_INTERRUPTIBLE);
  	while (!kthread_should_stop()) {
-@@ -850,9 +1111,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+@@ -850,9 +1203,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
  	int hotcpu = (unsigned long)hcpu;
  	struct task_struct *p;
  
@@ -20393,7 +21381,7 @@
  		p = kthread_create_on_node(run_ksoftirqd,
  					   hcpu,
  					   cpu_to_node(hotcpu),
-@@ -865,19 +1125,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+@@ -865,19 +1217,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
    		per_cpu(ksoftirqd, hotcpu) = p;
   		break;
  	case CPU_ONLINE:
@@ -24361,6 +25349,7 @@
 +void wq_worker_running(struct task_struct *task);
 +void wq_worker_sleeping(struct task_struct *task);
 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index dd373c8..383b565 100644
 --- a/lib/Kconfig.debug
 +++ b/lib/Kconfig.debug
 @@ -62,6 +62,28 @@ config MAGIC_SYSRQ
@@ -25744,7 +26733,7 @@
  static unsigned long total_usage;
  
 diff --git a/mm/slab.c b/mm/slab.c
-index d96e223..015cd76 100644
+index d96e223..5251b99 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -116,6 +116,7 @@
@@ -25755,25 +26744,73 @@
  
  #include	<asm/cacheflush.h>
  #include	<asm/tlbflush.h>
-@@ -620,6 +621,51 @@ int slab_is_available(void)
+@@ -593,6 +594,7 @@ static enum {
+ 	PARTIAL_AC,
+ 	PARTIAL_L3,
+ 	EARLY,
++	LATE,
+ 	FULL
+ } g_cpucache_up;
+ 
+@@ -604,6 +606,12 @@ int slab_is_available(void)
+ 	return g_cpucache_up >= EARLY;
+ }
+ 
++/*
++ * Guard access to the cache-chain.
++ */
++static DEFINE_MUTEX(cache_chain_mutex);
++static struct list_head cache_chain;
++
+ #ifdef CONFIG_LOCKDEP
+ 
+ /*
+@@ -620,70 +628,159 @@ int slab_is_available(void)
  static struct lock_class_key on_slab_l3_key;
  static struct lock_class_key on_slab_alc_key;
  
+-static void init_node_lock_keys(int q)
 +static struct lock_class_key debugobj_l3_key;
 +static struct lock_class_key debugobj_alc_key;
 +
 +static void slab_set_lock_classes(struct kmem_cache *cachep,
 +		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
 +		int q)
-+{
+ {
+-	struct cache_sizes *s = malloc_sizes;
 +	struct array_cache **alc;
 +	struct kmem_list3 *l3;
 +	int r;
-+
+ 
+-	if (g_cpucache_up != FULL)
 +	l3 = cachep->nodelists[q];
 +	if (!l3)
-+		return;
-+
+ 		return;
+ 
+-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+-		struct array_cache **alc;
+-		struct kmem_list3 *l3;
+-		int r;
+-
+-		l3 = s->cs_cachep->nodelists[q];
+-		if (!l3 || OFF_SLAB(s->cs_cachep))
+-			continue;
+-		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+-		alc = l3->alien;
+-		/*
+-		 * FIXME: This check for BAD_ALIEN_MAGIC
+-		 * should go away when common slab code is taught to
+-		 * work even without alien caches.
+-		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+-		 * for alloc_alien_cache,
+-		 */
+-		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+-			continue;
+-		for_each_node(r) {
+-			if (alc[r])
+-				lockdep_set_class(&alc[r]->lock,
+-					&on_slab_alc_key);
+-		}
 +	lockdep_set_class(&l3->list_lock, l3_key);
 +	alc = l3->alien;
 +	/*
@@ -25788,9 +26825,10 @@
 +	for_each_node(r) {
 +		if (alc[r])
 +			lockdep_set_class(&alc[r]->lock, alc_key);
-+	}
-+}
-+
+ 	}
+ }
+ 
+-static inline void init_lock_keys(void)
 +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 +{
 +	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
@@ -25804,77 +26842,77 @@
 +		slab_set_debugobj_lock_classes_node(cachep, node);
 +}
 +
- static void init_node_lock_keys(int q)
++static void init_lock_keys(struct kmem_cache *cachep, int node)
++{
++	struct kmem_list3 *l3;
++
++	if (g_cpucache_up < LATE)
++		return;
++
++	l3 = cachep->nodelists[node];
++	if (!l3 || OFF_SLAB(cachep))
++		return;
++
++	slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
++}
++
++static void init_node_lock_keys(int node)
++{
++	struct kmem_cache *cachep;
++
++	list_for_each_entry(cachep, &cache_chain, next)
++		init_lock_keys(cachep, node);
++}
++
++static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
  {
- 	struct cache_sizes *s = malloc_sizes;
-@@ -628,29 +674,14 @@ static void init_node_lock_keys(int q)
- 		return;
- 
- 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
--		struct array_cache **alc;
- 		struct kmem_list3 *l3;
--		int r;
+ 	int node;
  
- 		l3 = s->cs_cachep->nodelists[q];
- 		if (!l3 || OFF_SLAB(s->cs_cachep))
- 			continue;
--		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
--		alc = l3->alien;
--		/*
--		 * FIXME: This check for BAD_ALIEN_MAGIC
--		 * should go away when common slab code is taught to
--		 * work even without alien caches.
--		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
--		 * for alloc_alien_cache,
--		 */
--		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
--			continue;
--		for_each_node(r) {
--			if (alc[r])
--				lockdep_set_class(&alc[r]->lock,
--					&on_slab_alc_key);
--		}
-+
-+		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
-+				&on_slab_alc_key, q);
- 	}
+ 	for_each_node(node)
+-		init_node_lock_keys(node);
++		init_lock_keys(cachep, node);
  }
- 
-@@ -669,6 +700,14 @@ static void init_node_lock_keys(int q)
- static inline void init_lock_keys(void)
+ #else
+-static void init_node_lock_keys(int q)
++static void init_node_lock_keys(int node)
++{
++}
++
++static void init_cachep_lock_keys(struct kmem_cache *cachep)
  {
  }
-+
+ 
+-static inline void init_lock_keys(void)
 +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 +{
 +}
 +
 +static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-+{
-+}
+ {
+ }
  #endif
  
- /*
-@@ -678,12 +717,66 @@ static DEFINE_MUTEX(cache_chain_mutex);
- static struct list_head cache_chain;
- 
- static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
++static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 +static DEFINE_PER_CPU(struct list_head, slab_free_list);
 +static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
 +
 +#ifndef CONFIG_PREEMPT_RT_BASE
 +# define slab_on_each_cpu(func, cp)	on_each_cpu(func, cp, 1)
 +#else
-+/*
+ /*
+- * Guard access to the cache-chain.
 + * execute func() for all CPUs. On PREEMPT_RT we dont actually have
 + * to run on the remote CPUs - we only have to take their CPU-locks.
 + * (This is a rare operation, so cacheline bouncing is not an issue.)
-+ */
+  */
+-static DEFINE_MUTEX(cache_chain_mutex);
+-static struct list_head cache_chain;
 +static void
 +slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
 +{
 +	unsigned int i;
-+
+ 
+-static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 +	for_each_online_cpu(i)
 +		func(arg, i);
 +}
@@ -25922,7 +26960,7 @@
  static inline struct kmem_cache *__find_general_cachep(size_t size,
  							gfp_t gfpflags)
  {
-@@ -1021,9 +1114,10 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+@@ -1021,9 +1118,10 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
  	if (l3->alien) {
  		struct array_cache *ac = l3->alien[node];
  
@@ -25935,7 +26973,7 @@
  		}
  	}
  }
-@@ -1038,9 +1132,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
+@@ -1038,9 +1136,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
  	for_each_online_node(i) {
  		ac = alien[i];
  		if (ac) {
@@ -25947,7 +26985,7 @@
  		}
  	}
  }
-@@ -1119,11 +1213,11 @@ static int init_cache_nodelists_node(int node)
+@@ -1119,11 +1217,11 @@ static int init_cache_nodelists_node(int node)
  			cachep->nodelists[node] = l3;
  		}
  
@@ -25961,7 +26999,7 @@
  	}
  	return 0;
  }
-@@ -1148,7 +1242,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1148,7 +1246,7 @@ static void __cpuinit cpuup_canceled(long cpu)
  		if (!l3)
  			goto free_array_cache;
  
@@ -25970,7 +27008,7 @@
  
  		/* Free limit for this kmem_list3 */
  		l3->free_limit -= cachep->batchcount;
-@@ -1156,7 +1250,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1156,7 +1254,7 @@ static void __cpuinit cpuup_canceled(long cpu)
  			free_block(cachep, nc->entry, nc->avail, node);
  
  		if (!cpumask_empty(mask)) {
@@ -25979,7 +27017,7 @@
  			goto free_array_cache;
  		}
  
-@@ -1170,7 +1264,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1170,7 +1268,7 @@ static void __cpuinit cpuup_canceled(long cpu)
  		alien = l3->alien;
  		l3->alien = NULL;
  
@@ -25988,7 +27026,7 @@
  
  		kfree(shared);
  		if (alien) {
-@@ -1244,7 +1338,7 @@ static int __cpuinit cpuup_prepare(long cpu)
+@@ -1244,7 +1342,7 @@ static int __cpuinit cpuup_prepare(long cpu)
  		l3 = cachep->nodelists[node];
  		BUG_ON(!l3);
  
@@ -25997,7 +27035,7 @@
  		if (!l3->shared) {
  			/*
  			 * We are serialised from CPU_DEAD or
-@@ -1259,9 +1353,11 @@ static int __cpuinit cpuup_prepare(long cpu)
+@@ -1259,9 +1357,11 @@ static int __cpuinit cpuup_prepare(long cpu)
  			alien = NULL;
  		}
  #endif
@@ -26010,7 +27048,7 @@
  	}
  	init_node_lock_keys(node);
  
-@@ -1448,6 +1544,10 @@ void __init kmem_cache_init(void)
+@@ -1448,6 +1548,10 @@ void __init kmem_cache_init(void)
  	if (num_possible_nodes() == 1)
  		use_alien_caches = 0;
  
@@ -26021,17 +27059,22 @@
  	for (i = 0; i < NUM_INIT_LISTS; i++) {
  		kmem_list3_init(&initkmem_list3[i]);
  		if (i < MAX_NUMNODES)
-@@ -1625,6 +1725,9 @@ void __init kmem_cache_init_late(void)
+@@ -1625,19 +1729,20 @@ void __init kmem_cache_init_late(void)
  {
  	struct kmem_cache *cachep;
  
-+	/* Annotate slab for lockdep -- annotate the malloc caches */
-+	init_lock_keys();
++	g_cpucache_up = LATE;
 +
  	/* 6) resize the head arrays to their final sizes */
  	mutex_lock(&cache_chain_mutex);
- 	list_for_each_entry(cachep, &cache_chain, next)
-@@ -1635,9 +1738,6 @@ void __init kmem_cache_init_late(void)
+-	list_for_each_entry(cachep, &cache_chain, next)
++	list_for_each_entry(cachep, &cache_chain, next) {
++		init_cachep_lock_keys(cachep);
+ 		if (enable_cpucache(cachep, GFP_NOWAIT))
+ 			BUG();
++	}
+ 	mutex_unlock(&cache_chain_mutex);
+ 
  	/* Done! */
  	g_cpucache_up = FULL;
  
@@ -26041,7 +27084,7 @@
  	/*
  	 * Register a cpu startup notifier callback that initializes
  	 * cpu_cache_get for all new cpus
-@@ -1725,12 +1825,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+@@ -1725,12 +1830,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  /*
   * Interface to system's page release.
   */
@@ -26058,7 +27101,7 @@
  	kmemcheck_free_shadow(page, cachep->gfporder);
  
  	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-@@ -1746,7 +1848,13 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+@@ -1746,7 +1853,13 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
  	}
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += nr_freed;
@@ -26073,7 +27116,7 @@
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
-@@ -1754,7 +1862,7 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -1754,7 +1867,7 @@ static void kmem_rcu_free(struct rcu_head *head)
  	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
  	struct kmem_cache *cachep = slab_rcu->cachep;
  
@@ -26082,7 +27125,7 @@
  	if (OFF_SLAB(cachep))
  		kmem_cache_free(cachep->slabp_cache, slab_rcu);
  }
-@@ -1973,7 +2081,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
+@@ -1973,7 +2086,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
   * Before calling the slab must have been unlinked from the cache.  The
   * cache-lock is not held/needed.
   */
@@ -26092,7 +27135,7 @@
  {
  	void *addr = slabp->s_mem - slabp->colouroff;
  
-@@ -1986,7 +2095,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+@@ -1986,7 +2100,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
  		slab_rcu->addr = addr;
  		call_rcu(&slab_rcu->head, kmem_rcu_free);
  	} else {
@@ -26101,7 +27144,7 @@
  		if (OFF_SLAB(cachep))
  			kmem_cache_free(cachep->slabp_cache, slabp);
  	}
-@@ -2424,6 +2533,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2424,6 +2538,18 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  		goto oops;
  	}
  
@@ -26115,10 +27158,12 @@
 +		slab_set_debugobj_lock_classes(cachep);
 +	}
 +
++	init_cachep_lock_keys(cachep);
++
  	/* cache setup completed, link it into the list */
  	list_add(&cachep->next, &cache_chain);
  oops:
-@@ -2441,7 +2560,7 @@ EXPORT_SYMBOL(kmem_cache_create);
+@@ -2441,7 +2567,7 @@ EXPORT_SYMBOL(kmem_cache_create);
  #if DEBUG
  static void check_irq_off(void)
  {
@@ -26127,7 +27172,7 @@
  }
  
  static void check_irq_on(void)
-@@ -2476,26 +2595,43 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+@@ -2476,26 +2602,43 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  			struct array_cache *ac,
  			int force, int node);
  
@@ -26176,7 +27221,7 @@
  	check_irq_on();
  	for_each_online_node(node) {
  		l3 = cachep->nodelists[node];
-@@ -2526,10 +2662,10 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2526,10 +2669,10 @@ static int drain_freelist(struct kmem_cache *cache,
  	nr_freed = 0;
  	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
  
@@ -26189,7 +27234,7 @@
  			goto out;
  		}
  
-@@ -2543,8 +2679,8 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2543,8 +2686,8 @@ static int drain_freelist(struct kmem_cache *cache,
  		 * to the cache.
  		 */
  		l3->free_objects -= cache->num;
@@ -26200,7 +27245,7 @@
  		nr_freed++;
  	}
  out:
-@@ -2838,7 +2974,7 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2838,7 +2981,7 @@ static int cache_grow(struct kmem_cache *cachep,
  	offset *= cachep->colour_off;
  
  	if (local_flags & __GFP_WAIT)
@@ -26209,7 +27254,7 @@
  
  	/*
  	 * The test for missing atomic flag is performed here, rather than
-@@ -2868,7 +3004,7 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2868,7 +3011,7 @@ static int cache_grow(struct kmem_cache *cachep,
  	cache_init_objs(cachep, slabp);
  
  	if (local_flags & __GFP_WAIT)
@@ -26218,7 +27263,7 @@
  	check_irq_off();
  	spin_lock(&l3->list_lock);
  
-@@ -2879,10 +3015,10 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2879,10 +3022,10 @@ static int cache_grow(struct kmem_cache *cachep,
  	spin_unlock(&l3->list_lock);
  	return 1;
  opps1:
@@ -26231,7 +27276,7 @@
  	return 0;
  }
  
-@@ -3280,11 +3416,11 @@ retry:
+@@ -3280,11 +3423,11 @@ retry:
  		 * set and go into memory reserves if necessary.
  		 */
  		if (local_flags & __GFP_WAIT)
@@ -26245,7 +27290,7 @@
  		if (obj) {
  			/*
  			 * Insert into the appropriate per node queues
-@@ -3400,7 +3536,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+@@ -3400,7 +3543,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
  		return NULL;
  
  	cache_alloc_debugcheck_before(cachep, flags);
@@ -26254,7 +27299,7 @@
  
  	if (nodeid == -1)
  		nodeid = slab_node;
-@@ -3425,7 +3561,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+@@ -3425,7 +3568,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
  	/* ___cache_alloc_node can fall back to other nodes */
  	ptr = ____cache_alloc_node(cachep, flags, nodeid);
    out:
@@ -26263,7 +27308,7 @@
  	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
  	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
  				 flags);
-@@ -3485,9 +3621,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+@@ -3485,9 +3628,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
  		return NULL;
  
  	cache_alloc_debugcheck_before(cachep, flags);
@@ -26275,7 +27320,7 @@
  	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
  	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
  				 flags);
-@@ -3535,7 +3671,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+@@ -3535,7 +3678,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
  				 * a different cache, refer to comments before
  				 * alloc_slabmgmt.
  				 */
@@ -26284,7 +27329,7 @@
  			} else {
  				list_add(&slabp->list, &l3->slabs_free);
  			}
-@@ -3798,12 +3934,12 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+@@ -3798,12 +3941,12 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
  {
  	unsigned long flags;
  
@@ -26299,7 +27344,7 @@
  
  	trace_kmem_cache_free(_RET_IP_, objp);
  }
-@@ -3827,13 +3963,13 @@ void kfree(const void *objp)
+@@ -3827,13 +3970,13 @@ void kfree(const void *objp)
  
  	if (unlikely(ZERO_OR_NULL_PTR(objp)))
  		return;
@@ -26315,7 +27360,7 @@
  }
  EXPORT_SYMBOL(kfree);
  
-@@ -3876,7 +4012,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+@@ -3876,7 +4019,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
  		if (l3) {
  			struct array_cache *shared = l3->shared;
  
@@ -26324,7 +27369,7 @@
  
  			if (shared)
  				free_block(cachep, shared->entry,
-@@ -3889,7 +4025,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+@@ -3889,7 +4032,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
  			}
  			l3->free_limit = (1 + nr_cpus_node(node)) *
  					cachep->batchcount + cachep->num;
@@ -26334,7 +27379,7 @@
  			kfree(shared);
  			free_alien_cache(new_alien);
  			continue;
-@@ -3936,17 +4073,30 @@ struct ccupdate_struct {
+@@ -3936,17 +4080,30 @@ struct ccupdate_struct {
  	struct array_cache *new[NR_CPUS];
  };
  
@@ -26347,30 +27392,30 @@
 -	check_irq_off();
 -	old = cpu_cache_get(new->cachep);
 +	old = cpu_cache_get_on_cpu(new->cachep, cpu);
- 
--	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
--	new->new[smp_processor_id()] = old;
++
 +	new->cachep->array[cpu] = new->new[cpu];
 +	new->new[cpu] = old;
 +}
-+
+ 
+-	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
+-	new->new[smp_processor_id()] = old;
 +#ifndef CONFIG_PREEMPT_RT_BASE
 +static void do_ccupdate_local(void *info)
 +{
 +	__do_ccupdate_local(info, smp_processor_id());
-+}
+ }
 +#else
 +static void do_ccupdate_local(void *info, int cpu)
 +{
 +	spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
 +	__do_ccupdate_local(info, cpu);
 +	spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
- }
++}
 +#endif
  
  /* Always called with the cache_chain_mutex held */
  static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-@@ -3971,7 +4121,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3971,7 +4128,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  	}
  	new->cachep = cachep;
  
@@ -26379,7 +27424,7 @@
  
  	check_irq_on();
  	cachep->batchcount = batchcount;
-@@ -3982,9 +4132,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3982,9 +4139,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  		struct array_cache *ccold = new->new[i];
  		if (!ccold)
  			continue;
@@ -26393,7 +27438,7 @@
  		kfree(ccold);
  	}
  	kfree(new);
-@@ -4060,7 +4212,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+@@ -4060,7 +4219,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  	if (ac->touched && !force) {
  		ac->touched = 0;
  	} else {
@@ -26402,7 +27447,7 @@
  		if (ac->avail) {
  			tofree = force ? ac->avail : (ac->limit + 4) / 5;
  			if (tofree > ac->avail)
-@@ -4070,7 +4222,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+@@ -4070,7 +4229,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  			memmove(ac->entry, &(ac->entry[tofree]),
  				sizeof(void *) * ac->avail);
  		}
@@ -26411,7 +27456,7 @@
  	}
  }
  
-@@ -4209,7 +4361,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4209,7 +4368,7 @@ static int s_show(struct seq_file *m, void *p)
  			continue;
  
  		check_irq_on();
@@ -26420,7 +27465,7 @@
  
  		list_for_each_entry(slabp, &l3->slabs_full, list) {
  			if (slabp->inuse != cachep->num && !error)
-@@ -4234,7 +4386,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4234,7 +4393,7 @@ static int s_show(struct seq_file *m, void *p)
  		if (l3->shared)
  			shared_avail += l3->shared->avail;
  
@@ -26429,7 +27474,7 @@
  	}
  	num_slabs += active_slabs;
  	num_objs = num_slabs * cachep->num;
-@@ -4463,13 +4615,13 @@ static int leaks_show(struct seq_file *m, void *p)
+@@ -4463,13 +4622,13 @@ static int leaks_show(struct seq_file *m, void *p)
  			continue;
  
  		check_irq_on();
@@ -26978,10 +28023,10 @@
  		.data		= &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
  		.maxlen		= sizeof(int),
 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index 7fa8c6b..ea6b707 100644
+index 378bd67..ba6f0ac 100644
 --- a/net/mac80211/rx.c
 +++ b/net/mac80211/rx.c
-@@ -2853,7 +2853,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+@@ -2856,7 +2856,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
  	struct ieee80211_supported_band *sband;
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx5-extra
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx5-extra	Sun Jan  1 10:40:30 2012	(r18446)
@@ -0,0 +1 @@
++ features/all/rt/patch-3.0.12-rt30.patch featureset=rt


