[kernel] r18687 - in people/ukleinek/3.0-rt/linux-2.6/debian: . patches/features/all/rt patches/series

Uwe Kleine-König <ukleinek-guest@alioth.debian.org>
Sun Feb 12 11:58:30 UTC 2012


Author: ukleinek-guest
Date: Sun Feb 12 11:58:28 2012
New Revision: 18687

Log:
[amd64] Update rt featureset to 3.0.18-rt34

Added:
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.18-rt34.patch
      - copied, changed from r18686, people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7-extra
Deleted:
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch
   people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx5-extra
Modified:
   people/ukleinek/3.0-rt/linux-2.6/debian/changelog

Modified: people/ukleinek/3.0-rt/linux-2.6/debian/changelog
==============================================================================
--- people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sun Feb 12 11:58:18 2012	(r18686)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/changelog	Sun Feb 12 11:58:28 2012	(r18687)
@@ -2,6 +2,7 @@
 
   * Add stable releases 3.0.15 up to 3.0.19
     3.0.16 includes "oom: fix integer overflow of points in oom_badness"
+  * [amd64] Update rt featureset to 3.0.18-rt34
 
 -- Uwe Kleine-König <u.kleine-koenig@pengutronix.de>  Sun, 05 Feb 2012 11:03:12 +0100
 

Copied and modified: people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.18-rt34.patch (from r18686, people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch)
==============================================================================
--- people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.12-rt30.patch	Sun Feb 12 11:58:18 2012	(r18686, copy source)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/features/all/rt/patch-3.0.18-rt34.patch	Sun Feb 12 11:58:28 2012	(r18687)
@@ -285,7 +285,7 @@
 +
 +These data are also reset when the wakeup histogram is reset.
 diff --git a/MAINTAINERS b/MAINTAINERS
-index 34e2418..fed9c84 100644
+index de85391..7d8e486 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
 @@ -2911,6 +2911,15 @@ L:	linuxppc-dev@lists.ozlabs.org
@@ -415,7 +415,7 @@
  
  #ifdef CONFIG_ALPHA_LARGE_VMALLOC
 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 91c84cb..5c224f5 100644
+index 2456bad..0fd9288 100644
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
 @@ -29,6 +29,7 @@ config ARM
@@ -426,7 +426,7 @@
  	help
  	  The ARM series is a line of low-power-consumption RISC chip designs
  	  licensed by ARM Ltd and targeted at embedded applications and
-@@ -1524,7 +1525,7 @@ config HAVE_ARCH_PFN_VALID
+@@ -1536,7 +1537,7 @@ config HAVE_ARCH_PFN_VALID
  
  config HIGHMEM
  	bool "High Memory Support"
@@ -640,10 +640,10 @@
  		if (err) {
  			pr_warning("unable to request IRQ%d for ARM perf "
 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 5e1e541..fd9477d 100644
+index 74ae833..2012ffe 100644
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -209,9 +209,7 @@ void cpu_idle(void)
+@@ -212,9 +212,7 @@ void cpu_idle(void)
  		}
  		leds_event(led_idle_end);
  		tick_nohz_restart_sched_tick();
@@ -654,7 +654,7 @@
  	}
  }
  
-@@ -486,6 +484,31 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+@@ -489,6 +487,31 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  }
  
  #ifdef CONFIG_MMU
@@ -2883,10 +2883,10 @@
  }
  
 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 5b428e3..70e88b2 100644
+index ca2987d..a96989c 100644
 --- a/arch/powerpc/kernel/irq.c
 +++ b/arch/powerpc/kernel/irq.c
-@@ -449,6 +449,7 @@ void irq_ctx_init(void)
+@@ -446,6 +446,7 @@ void irq_ctx_init(void)
  	}
  }
  
@@ -2894,7 +2894,7 @@
  static inline void do_softirq_onstack(void)
  {
  	struct thread_info *curtp, *irqtp;
-@@ -478,7 +479,7 @@ void do_softirq(void)
+@@ -475,7 +476,7 @@ void do_softirq(void)
  
  	local_irq_restore(flags);
  }
@@ -3707,10 +3707,10 @@
  
  	/* Set sparc_cpu_model */
 diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
-index 3c5bb78..409cb28 100644
+index 4e7d3ff..5c9e2f2 100644
 --- a/arch/sparc/kernel/setup_64.c
 +++ b/arch/sparc/kernel/setup_64.c
-@@ -469,6 +469,12 @@ static void __init init_sparc64_elf_hwcap(void)
+@@ -479,6 +479,12 @@ static void __init init_sparc64_elf_hwcap(void)
  		popc_patch();
  }
  
@@ -3723,7 +3723,7 @@
  void __init setup_arch(char **cmdline_p)
  {
  	/* Initialize PROM console and command line. */
-@@ -480,7 +486,7 @@ void __init setup_arch(char **cmdline_p)
+@@ -490,7 +496,7 @@ void __init setup_arch(char **cmdline_p)
  #ifdef CONFIG_EARLYFB
  	if (btext_find_display())
  #endif
@@ -4056,10 +4056,10 @@
  
  int __acpi_acquire_global_lock(unsigned int *lock);
 diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
-index 67f87f2..8e41071 100644
+index 78a1eff..49ad773 100644
 --- a/arch/x86/include/asm/amd_nb.h
 +++ b/arch/x86/include/asm/amd_nb.h
-@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
+@@ -21,9 +21,15 @@ extern int amd_numa_init(void);
  extern int amd_get_subcaches(int);
  extern int amd_set_subcaches(int, int);
  
@@ -5059,7 +5059,7 @@
  #ifdef CONFIG_XEN
  zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
-index 6781765..91e83b8 100644
+index aa083d3..a9b21d6 100644
 --- a/arch/x86/kernel/hpet.c
 +++ b/arch/x86/kernel/hpet.c
 @@ -7,6 +7,7 @@
@@ -5448,7 +5448,7 @@
  		return;
  	}
 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index b499626..29dd703 100644
+index f4f29b1..71bd7d6 100644
 --- a/arch/x86/mm/highmem_32.c
 +++ b/arch/x86/mm/highmem_32.c
 @@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
@@ -5591,7 +5591,7 @@
  		return;
  	}
 diff --git a/block/blk-core.c b/block/blk-core.c
-index 847d04e..69ef3f7 100644
+index 35ae52d..4e662ed 100644
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
 @@ -236,7 +236,7 @@ EXPORT_SYMBOL(blk_delay_queue);
@@ -5616,7 +5616,7 @@
  	q->request_fn(q);
  }
  EXPORT_SYMBOL(__blk_run_queue);
-@@ -2669,11 +2673,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
+@@ -2661,11 +2665,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
  	 * this lock).
  	 */
  	if (from_schedule) {
@@ -5630,7 +5630,7 @@
  	}
  
  }
-@@ -2699,7 +2703,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
+@@ -2691,7 +2695,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
@@ -5638,7 +5638,7 @@
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
-@@ -2720,11 +2723,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -2712,11 +2715,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  	q = NULL;
  	depth = 0;
  
@@ -5650,7 +5650,7 @@
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
-@@ -2737,7 +2735,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -2729,7 +2727,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
@@ -5659,7 +5659,7 @@
  		}
  		/*
  		 * rq is already accounted, so use raw insert
-@@ -2755,8 +2753,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -2747,8 +2745,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);
@@ -6055,7 +6055,7 @@
  	return_VOID;
  }
 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index b19a18d..5812e01 100644
+index b19a18d..db0e6c3 100644
 --- a/drivers/acpi/ec.c
 +++ b/drivers/acpi/ec.c
 @@ -152,10 +152,10 @@ static int ec_transaction_done(struct acpi_ec *ec)
@@ -6089,6 +6089,15 @@
  }
  
  static int acpi_ec_sync_query(struct acpi_ec *ec);
+@@ -222,7 +222,7 @@ static int ec_poll(struct acpi_ec *ec)
+ 				if (ec_transaction_done(ec))
+ 					return 0;
+ 			} else {
+-				if (wait_event_timeout(ec->wait,
++				if (swait_event_timeout(ec->wait,
+ 						ec_transaction_done(ec),
+ 						msecs_to_jiffies(1)))
+ 					return 0;
 @@ -232,9 +232,9 @@ static int ec_poll(struct acpi_ec *ec)
  		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
  			break;
@@ -6123,9 +6132,30 @@
  	return ret;
  }
  
-@@ -678,7 +678,7 @@ static struct acpi_ec *make_acpi_ec(void)
+@@ -272,7 +272,7 @@ static int ec_wait_ibf0(struct acpi_ec *ec)
+ 	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
+ 	/* interrupt wait manually if GPE mode is not active */
+ 	while (time_before(jiffies, delay))
+-		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
++		if (swait_event_timeout(ec->wait, ec_check_ibf0(ec),
+ 					msecs_to_jiffies(1)))
+ 			return 0;
+ 	return -ETIME;
+@@ -612,7 +612,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ 	advance_transaction(ec, acpi_ec_read_status(ec));
+ 	if (ec_transaction_done(ec) &&
+ 	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+-		wake_up(&ec->wait);
++		swait_wake(&ec->wait);
+ 		ec_check_sci(ec, acpi_ec_read_status(ec));
+ 	}
+ 	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+@@ -676,9 +676,9 @@ static struct acpi_ec *make_acpi_ec(void)
+ 		return NULL;
+ 	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
  	mutex_init(&ec->lock);
- 	init_waitqueue_head(&ec->wait);
+-	init_waitqueue_head(&ec->wait);
++	init_swait_head(&ec->wait);
  	INIT_LIST_HEAD(&ec->list);
 -	spin_lock_init(&ec->curr_lock);
 +	raw_spin_lock_init(&ec->curr_lock);
@@ -6133,11 +6163,24 @@
  }
  
 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
-index ca75b9c..68ed95f 100644
+index ca75b9c..2519b6e 100644
 --- a/drivers/acpi/internal.h
 +++ b/drivers/acpi/internal.h
-@@ -62,7 +62,7 @@ struct acpi_ec {
- 	wait_queue_head_t wait;
+@@ -23,6 +23,8 @@
+ 
+ #define PREFIX "ACPI: "
+ 
++#include <linux/wait-simple.h>
++
+ int init_acpi_device_notify(void);
+ int acpi_scan_init(void);
+ int acpi_sysfs_init(void);
+@@ -59,10 +61,10 @@ struct acpi_ec {
+ 	unsigned long global_lock;
+ 	unsigned long flags;
+ 	struct mutex lock;
+-	wait_queue_head_t wait;
++	struct swait_head wait;
  	struct list_head list;
  	struct transaction *curr;
 -	spinlock_t curr_lock;
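(A note on the two ACPI hunks above, which are new relative to rt30: the EC driver's wait_queue_head_t becomes a struct swait_head, and its wait_event_timeout()/wake_up() calls become swait_event_timeout()/swait_wake(), using the simple-waitqueue primitives this patch adds in include/linux/wait-simple.h further down. Because the swait head is guarded by a raw spinlock, the wakeup in acpi_ec_gpe_handler() remains legal on -rt, where an ordinary waitqueue lock turns into a sleeping lock.)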
@@ -7097,7 +7140,7 @@
  
  delay_and_out:
 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index cbb50d3..38a648f 100644
+index 1f6c68d..8f6f5d7 100644
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
 @@ -1226,8 +1226,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -9215,7 +9258,7 @@
  }
  
 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
-index e9ff6f7..d0de6cc 100644
+index 1c0b799..2f0aa0f 100644
 --- a/drivers/oprofile/oprofilefs.c
 +++ b/drivers/oprofile/oprofilefs.c
 @@ -21,7 +21,7 @@
@@ -9227,7 +9270,7 @@
  
  static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
  {
-@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
+@@ -83,9 +83,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
  	if (copy_from_user(tmpbuf, buf, count))
  		return -EFAULT;
  
@@ -9236,7 +9279,7 @@
  	*val = simple_strtoul(tmpbuf, NULL, 0);
 -	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 +	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- 	return 0;
+ 	return count;
  }
  
 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
@@ -9436,10 +9479,10 @@
  }
  
 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
-index f02c34d..bc05a51 100644
+index 0ec8930..a24f171 100644
 --- a/drivers/pci/intel-iommu.c
 +++ b/drivers/pci/intel-iommu.c
-@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
+@@ -937,7 +937,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
  
  	addr = iommu->root_entry;
  
@@ -9448,7 +9491,7 @@
  	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
  
  	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
-@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
+@@ -946,7 +946,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, (sts & DMA_GSTS_RTPS), sts);
  
@@ -9457,7 +9500,7 @@
  }
  
  static void iommu_flush_write_buffer(struct intel_iommu *iommu)
-@@ -953,14 +953,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+@@ -957,14 +957,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
  	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
  		return;
  
@@ -9474,7 +9517,7 @@
  }
  
  /* return value determine if we need a write buffer flush */
-@@ -987,14 +987,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
+@@ -991,14 +991,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
  	}
  	val |= DMA_CCMD_ICC;
  
@@ -9491,7 +9534,7 @@
  }
  
  /* return value determine if we need a write buffer flush */
-@@ -1033,7 +1033,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+@@ -1037,7 +1037,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
  	if (cap_write_drain(iommu->cap))
  		val |= DMA_TLB_WRITE_DRAIN;
  
@@ -9500,7 +9543,7 @@
  	/* Note: Only uses first TLB reg currently */
  	if (val_iva)
  		dmar_writeq(iommu->reg + tlb_offset, val_iva);
-@@ -1043,7 +1043,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+@@ -1047,7 +1047,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
  	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
  		dmar_readq, (!(val & DMA_TLB_IVT)), val);
  
@@ -9509,7 +9552,7 @@
  
  	/* check IOTLB invalidation granularity */
  	if (DMA_TLB_IAIG(val) == 0)
-@@ -1159,7 +1159,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+@@ -1163,7 +1163,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
  	u32 pmen;
  	unsigned long flags;
  
@@ -9518,7 +9561,7 @@
  	pmen = readl(iommu->reg + DMAR_PMEN_REG);
  	pmen &= ~DMA_PMEN_EPM;
  	writel(pmen, iommu->reg + DMAR_PMEN_REG);
-@@ -1168,7 +1168,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+@@ -1172,7 +1172,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
  	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
  		readl, !(pmen & DMA_PMEN_PRS), pmen);
  
@@ -9527,7 +9570,7 @@
  }
  
  static int iommu_enable_translation(struct intel_iommu *iommu)
-@@ -1176,7 +1176,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
+@@ -1180,7 +1180,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
  	u32 sts;
  	unsigned long flags;
  
@@ -9536,7 +9579,7 @@
  	iommu->gcmd |= DMA_GCMD_TE;
  	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  
-@@ -1184,7 +1184,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
+@@ -1188,7 +1188,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, (sts & DMA_GSTS_TES), sts);
  
@@ -9545,7 +9588,7 @@
  	return 0;
  }
  
-@@ -1193,7 +1193,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
+@@ -1197,7 +1197,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
  	u32 sts;
  	unsigned long flag;
  
@@ -9554,7 +9597,7 @@
  	iommu->gcmd &= ~DMA_GCMD_TE;
  	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  
-@@ -1201,7 +1201,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
+@@ -1205,7 +1205,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, (!(sts & DMA_GSTS_TES)), sts);
  
@@ -9563,7 +9606,7 @@
  	return 0;
  }
  
-@@ -3321,7 +3321,7 @@ static int iommu_suspend(void)
+@@ -3325,7 +3325,7 @@ static int iommu_suspend(void)
  	for_each_active_iommu(iommu, drhd) {
  		iommu_disable_translation(iommu);
  
@@ -9572,7 +9615,7 @@
  
  		iommu->iommu_state[SR_DMAR_FECTL_REG] =
  			readl(iommu->reg + DMAR_FECTL_REG);
-@@ -3332,7 +3332,7 @@ static int iommu_suspend(void)
+@@ -3336,7 +3336,7 @@ static int iommu_suspend(void)
  		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
  			readl(iommu->reg + DMAR_FEUADDR_REG);
  
@@ -9581,7 +9624,7 @@
  	}
  	return 0;
  
-@@ -3359,7 +3359,7 @@ static void iommu_resume(void)
+@@ -3363,7 +3363,7 @@ static void iommu_resume(void)
  
  	for_each_active_iommu(iommu, drhd) {
  
@@ -9590,7 +9633,7 @@
  
  		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
  			iommu->reg + DMAR_FECTL_REG);
-@@ -3370,7 +3370,7 @@ static void iommu_resume(void)
+@@ -3374,7 +3374,7 @@ static void iommu_resume(void)
  		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
  			iommu->reg + DMAR_FEUADDR_REG);
  
@@ -9599,7 +9642,7 @@
  	}
  
  	for_each_active_iommu(iommu, drhd)
-@@ -3569,6 +3569,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
+@@ -3573,6 +3573,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
  			found = 1;
  	}
  
@@ -9608,7 +9651,7 @@
  	if (found == 0) {
  		unsigned long tmp_flags;
  		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
-@@ -3585,8 +3587,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
+@@ -3589,8 +3591,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
  			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
  		}
  	}
@@ -9793,7 +9836,7 @@
  
  int __init intr_remapping_supported(void)
 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
-index 8885b3e..3e5b58c 100644
+index f829adc..3e5b58c 100644
 --- a/drivers/scsi/fcoe/fcoe.c
 +++ b/drivers/scsi/fcoe/fcoe.c
 @@ -1113,7 +1113,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
@@ -9829,15 +9872,7 @@
  
  	return rc;
  }
-@@ -1561,6 +1561,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
- 	stats->InvalidCRCCount++;
- 	if (stats->InvalidCRCCount < 5)
- 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-+	put_cpu();
- 	return -EINVAL;
- }
- 
-@@ -1605,7 +1606,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+@@ -1606,7 +1606,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
  	 */
  	hp = (struct fcoe_hdr *) skb_network_header(skb);
  
@@ -9846,7 +9881,7 @@
  	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
  		if (stats->ErrorFrames < 5)
  			printk(KERN_WARNING "fcoe: FCoE version "
-@@ -1637,13 +1638,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+@@ -1638,13 +1638,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
  		goto drop;
  
  	if (!fcoe_filter_frames(lport, fp)) {
@@ -10093,10 +10128,10 @@
  }
  
 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 39ea00b..cec6dfd 100644
+index 691d212..24b40ac 100644
 --- a/drivers/usb/core/hcd.c
 +++ b/drivers/usb/core/hcd.c
-@@ -2120,7 +2120,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
+@@ -2119,7 +2119,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
  	 * when the first handler doesn't use it.  So let's just
  	 * assume it's never used.
  	 */
@@ -10105,7 +10140,7 @@
  
  	if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
  		rc = IRQ_NONE;
-@@ -2133,7 +2133,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
+@@ -2132,7 +2132,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
  		rc = IRQ_HANDLED;
  	}
  
@@ -10128,10 +10163,10 @@
  
  /**
 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
-index f9cf3f0..5fc952d 100644
+index 23107e2..43ae966 100644
 --- a/drivers/usb/host/ohci-hcd.c
 +++ b/drivers/usb/host/ohci-hcd.c
-@@ -833,9 +833,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+@@ -830,9 +830,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
  	}
  
  	if (ints & OHCI_INTR_WDH) {
@@ -10518,10 +10553,10 @@
  	arch_pick_mmap_layout(mm);
  	if (old_mm) {
 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 1265904..ddf500d 100644
+index af09060..6adc30a 100644
 --- a/fs/ext4/inode.c
 +++ b/fs/ext4/inode.c
-@@ -5872,7 +5872,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -5875,7 +5875,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
  	 * get i_mutex because we are already holding mmap_sem.
  	 */
@@ -10530,7 +10565,7 @@
  	size = i_size_read(inode);
  	if (page->mapping != mapping || size <= page_offset(page)
  	    || !PageUptodate(page)) {
-@@ -5884,7 +5884,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -5887,7 +5887,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  	lock_page(page);
  	wait_on_page_writeback(page);
  	if (PageMappedToDisk(page)) {
@@ -10539,7 +10574,7 @@
  		return VM_FAULT_LOCKED;
  	}
  
-@@ -5902,7 +5902,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -5905,7 +5905,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  	if (page_has_buffers(page)) {
  		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
  					ext4_bh_unmapped)) {
@@ -10548,7 +10583,7 @@
  			return VM_FAULT_LOCKED;
  		}
  	}
-@@ -5931,11 +5931,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -5934,11 +5934,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  	 */
  	lock_page(page);
  	wait_on_page_writeback(page);
@@ -10622,7 +10657,7 @@
  
  	mapping->a_ops = &empty_aops;
 diff --git a/fs/namespace.c b/fs/namespace.c
-index 537dd96..480df48 100644
+index edc1c4a..472caea 100644
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
 @@ -341,8 +341,14 @@ int mnt_want_write(struct vfsmount *mnt)
@@ -10880,10 +10915,10 @@
  
  #endif /* __XFS_SUPPORT_MRLOCK_H__ */
 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
-index 28de70b..01bb878 100644
+index e6ac98c..523c451 100644
 --- a/fs/xfs/linux-2.6/xfs_super.c
 +++ b/fs/xfs/linux-2.6/xfs_super.c
-@@ -986,7 +986,7 @@ xfs_fs_evict_inode(
+@@ -966,7 +966,7 @@ xfs_fs_evict_inode(
  	 * (and basically indicate what we are doing), we explicitly
  	 * re-init the iolock here.
  	 */
@@ -11106,7 +11141,7 @@
  /* These aren't inline functions due to a GCC bug. */
  #define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
 diff --git a/include/linux/fs.h b/include/linux/fs.h
-index b5b9792..3a023ec 100644
+index 7b17db7..88175ce 100644
 --- a/include/linux/fs.h
 +++ b/include/linux/fs.h
 @@ -777,7 +777,7 @@ struct inode {
@@ -11818,10 +11853,10 @@
  
  struct kretprobe_instance {
 diff --git a/include/linux/lglock.h b/include/linux/lglock.h
-index f549056..433f12d 100644
+index 87f402c..d8acbcc 100644
 --- a/include/linux/lglock.h
 +++ b/include/linux/lglock.h
-@@ -70,6 +70,9 @@
+@@ -71,6 +71,9 @@
   extern void name##_global_lock_online(void);				\
   extern void name##_global_unlock_online(void);				\
  
@@ -11830,8 +11865,8 @@
 +
  #define DEFINE_LGLOCK(name)						\
  									\
-  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
-@@ -169,4 +172,101 @@
+  DEFINE_SPINLOCK(name##_cpu_lock);					\
+@@ -197,4 +200,101 @@
  	preempt_enable();						\
   }									\
   EXPORT_SYMBOL(name##_global_unlock);
@@ -14907,6 +14942,164 @@
  }
  
  static inline void count_vm_events(enum vm_event_item item, long delta)
+diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
+new file mode 100644
+index 0000000..de69d8a
+--- /dev/null
++++ b/include/linux/wait-simple.h
+@@ -0,0 +1,152 @@
++#ifndef _LINUX_WAIT_SIMPLE_H
++#define _LINUX_WAIT_SIMPLE_H
++
++#include <linux/spinlock.h>
++#include <linux/list.h>
++
++#include <asm/current.h>
++
++struct swaiter {
++	struct task_struct	*task;
++	struct list_head	node;
++};
++
++#define DEFINE_SWAITER(name)					\
++	struct swaiter name = {					\
++		.task	= current,				\
++		.node	= LIST_HEAD_INIT((name).node),		\
++	}
++
++struct swait_head {
++	raw_spinlock_t		lock;
++	struct list_head	list;
++};
++
++#define DEFINE_SWAIT_HEAD(name)					\
++	struct swait_head name = {				\
++		.lock	= __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
++		.list	= LIST_HEAD_INIT((name).list),		\
++	}
++
++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
++
++#define init_swait_head(swh)					\
++	do {							\
++		static struct lock_class_key __key;		\
++								\
++		__init_swait_head((swh), &__key);		\
++	} while (0)
++
++/*
++ * Waiter functions
++ */
++static inline bool swaiter_enqueued(struct swaiter *w)
++{
++	return w->task != NULL;
++}
++
++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
++extern void swait_finish(struct swait_head *head, struct swaiter *w);
++
++/*
++ * Adds w to head->list. Must be called with head->lock locked.
++ */
++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
++{
++	list_add(&w->node, &head->list);
++}
++
++/*
++ * Removes w from head->list. Must be called with head->lock locked.
++ */
++static inline void __swait_dequeue(struct swaiter *w)
++{
++	list_del_init(&w->node);
++}
++
++/*
++ * Wakeup functions
++ */
++extern void __swait_wake(struct swait_head *head, unsigned int state);
++
++static inline void swait_wake(struct swait_head *head)
++{
++	__swait_wake(head, TASK_NORMAL);
++}
++
++/*
++ * Event API
++ */
++
++#define __swait_event(wq, condition)					\
++do {									\
++	DEFINE_SWAITER(__wait);						\
++									\
++	for (;;) {							\
++		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
++		if (condition)						\
++			break;						\
++		schedule();						\
++	}								\
++	swait_finish(&wq, &__wait);					\
++} while (0)
++
++/**
++ * swait_event - sleep until a condition gets true
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ */
++#define swait_event(wq, condition)					\
++do {									\
++	if (condition)							\
++		break;							\
++	__swait_event(wq, condition);					\
++} while (0)
++
++#define __swait_event_timeout(wq, condition, ret)			\
++do {									\
++	DEFINE_SWAITER(__wait);						\
++									\
++	for (;;) {							\
++		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
++		if (condition)						\
++			break;						\
++		ret = schedule_timeout(ret);				\
++		if (!ret)						\
++			break;						\
++	}								\
++	swait_finish(&wq, &__wait);					\
++} while (0)
++
++/**
++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * The function returns 0 if the @timeout elapsed, and the remaining
++ * jiffies if the condition evaluated to true before the timeout elapsed.
++ */
++#define swait_event_timeout(wq, condition, timeout)			\
++({									\
++	long __ret = timeout;						\
++	if (!(condition))						\
++		__swait_event_timeout(wq, condition, __ret);		\
++	__ret;								\
++})
++
++#endif
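
(For readers following the swait additions, here is a minimal usage sketch of the API this header introduces. The demo_* names and the demo_done flag are invented for illustration; only DEFINE_SWAIT_HEAD(), swait_wake() and swait_event_timeout() come from wait-simple.h above, and the pattern mirrors the ACPI EC conversion earlier in this patch.)

#include <linux/jiffies.h>
#include <linux/wait-simple.h>

static DEFINE_SWAIT_HEAD(demo_wait);	/* statically initialized head */
static int demo_done;			/* condition the sleeper waits on */

/* Producer side, e.g. an interrupt handler. Per the kernel-doc above,
 * the flag must be set before the wakeup. swait_wake() protects the
 * waiter list with a raw spinlock, so it stays usable on -rt from
 * contexts where an ordinary waitqueue lock would have to sleep. */
static void demo_complete(void)
{
	demo_done = 1;
	swait_wake(&demo_wait);
}

/* Consumer side, process context. Sleeps TASK_UNINTERRUPTIBLE until
 * demo_done is set or one second elapses; returns the remaining
 * jiffies, or 0 on timeout, like wait_event_timeout(). */
static long demo_wait_for_event(void)
{
	return swait_event_timeout(demo_wait, demo_done,
				   msecs_to_jiffies(1000));
}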
 diff --git a/include/linux/wait.h b/include/linux/wait.h
 index 3efc9f3..1e904b8 100644
 --- a/include/linux/wait.h
@@ -15342,10 +15535,10 @@
  endchoice
  
 diff --git a/kernel/Makefile b/kernel/Makefile
-index 2d64cfc..11949f1 100644
+index 2d64cfc..6a9558b 100644
 --- a/kernel/Makefile
 +++ b/kernel/Makefile
-@@ -7,7 +7,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
+@@ -7,10 +7,10 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
  	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
  	    signal.o sys.o kmod.o workqueue.o pid.o \
  	    rcupdate.o extable.o params.o posix-timers.o \
@@ -15353,7 +15546,11 @@
 +	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
  	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
  	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
- 	    async.o range.o jump_label.o
+-	    async.o range.o jump_label.o
++	    async.o range.o wait-simple.o jump_label.o
+ obj-y += groups.o
+ 
+ ifdef CONFIG_FUNCTION_TRACER
 @@ -29,7 +29,10 @@ obj-$(CONFIG_PROFILING) += profile.o
  obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
  obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -15374,7 +15571,7 @@
  obj-$(CONFIG_SMP) += smp.o
  ifneq ($(CONFIG_SMP),y)
 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 2731d11..2d7503c 100644
+index 2efce77..feb47ec 100644
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
 @@ -263,7 +263,7 @@ list_for_each_entry(_root, &roots, root_list)
@@ -15386,7 +15583,7 @@
  static void cgroup_release_agent(struct work_struct *work);
  static DECLARE_WORK(release_agent_work, cgroup_release_agent);
  static void check_for_release(struct cgroup *cgrp);
-@@ -4010,11 +4010,11 @@ again:
+@@ -4005,11 +4005,11 @@ again:
  	finish_wait(&cgroup_rmdir_waitq, &wait);
  	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
  
@@ -15400,7 +15597,7 @@
  
  	cgroup_lock_hierarchy(cgrp->root);
  	/* delete this cgroup from parent->children */
-@@ -4667,13 +4667,13 @@ static void check_for_release(struct cgroup *cgrp)
+@@ -4662,13 +4662,13 @@ static void check_for_release(struct cgroup *cgrp)
  		 * already queued for a userspace notification, queue
  		 * it now */
  		int need_schedule_work = 0;
@@ -15416,7 +15613,7 @@
  		if (need_schedule_work)
  			schedule_work(&release_agent_work);
  	}
-@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -4720,7 +4720,7 @@ static void cgroup_release_agent(struct work_struct *work)
  {
  	BUG_ON(work != &release_agent_work);
  	mutex_lock(&cgroup_mutex);
@@ -15425,7 +15622,7 @@
  	while (!list_empty(&release_list)) {
  		char *argv[3], *envp[3];
  		int i;
-@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work)
  						    struct cgroup,
  						    release_list);
  		list_del_init(&cgrp->release_list);
@@ -15434,7 +15631,7 @@
  		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  		if (!pathbuf)
  			goto continue_free;
-@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -4759,9 +4759,9 @@ static void cgroup_release_agent(struct work_struct *work)
   continue_free:
  		kfree(pathbuf);
  		kfree(agentbuf);
@@ -15447,10 +15644,10 @@
  }
  
 diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 12b7458..13066a3 100644
+index aa39dd7..b2de274 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -57,6 +57,104 @@ static struct {
+@@ -58,6 +58,104 @@ static struct {
  	.refcount = 0,
  };
  
@@ -15555,7 +15752,7 @@
  void get_online_cpus(void)
  {
  	might_sleep();
-@@ -210,13 +308,14 @@ static int __ref take_cpu_down(void *_param)
+@@ -211,13 +309,14 @@ static int __ref take_cpu_down(void *_param)
  /* Requires cpu_add_remove_lock to be held */
  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
  {
@@ -15571,7 +15768,7 @@
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -224,7 +323,26 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+@@ -225,7 +324,26 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
  	if (!cpu_online(cpu))
  		return -EINVAL;
  
@@ -15598,7 +15795,7 @@
  
  	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
  	if (err) {
-@@ -263,6 +381,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+@@ -264,6 +382,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
  	check_for_tasks(cpu);
  
  out_release:
@@ -15684,7 +15881,7 @@
  		irq_work_queue(&handle->event->pending);
  	} else
 diff --git a/kernel/exit.c b/kernel/exit.c
-index f2b321b..7e8cbe8 100644
+index 303bed2..d9a8d35 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
 @@ -142,7 +142,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -15777,10 +15974,10 @@
  	p->lockdep_depth = 0; /* no locks held yet */
  	p->curr_chain_key = 0;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 8b6da25..30b238c 100644
+index 6487e4c..24ae478 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1410,6 +1410,16 @@ retry_private:
+@@ -1422,6 +1422,16 @@ retry_private:
  				requeue_pi_wake_futex(this, &key2, hb2);
  				drop_count++;
  				continue;
@@ -15797,7 +15994,7 @@
  			} else if (ret) {
  				/* -EDEADLK */
  				this->pi_state = NULL;
-@@ -2254,7 +2264,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2266,7 +2276,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct rt_mutex_waiter rt_waiter;
  	struct rt_mutex *pi_mutex = NULL;
@@ -15806,7 +16003,7 @@
  	union futex_key key2 = FUTEX_KEY_INIT;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -2276,8 +2286,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2288,8 +2298,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -15816,7 +16013,7 @@
  
  	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
  	if (unlikely(ret != 0))
-@@ -2298,20 +2307,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2310,20 +2319,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
  	futex_wait_queue_me(hb, &q, to);
  
@@ -15883,7 +16080,7 @@
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -2320,9 +2364,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2332,9 +2376,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -15896,7 +16093,7 @@
  		}
  	} else {
  		/*
-@@ -2335,7 +2380,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2347,7 +2392,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
  		debug_rt_mutex_free_waiter(&rt_waiter);
  
@@ -15907,7 +16104,7 @@
  		 * Fixup the pi_state owner and possibly acquire the lock if we
  		 * haven't already.
 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index a9205e3..af89591 100644
+index 2043c08..bb07742 100644
 --- a/kernel/hrtimer.c
 +++ b/kernel/hrtimer.c
 @@ -49,6 +49,7 @@
@@ -16014,7 +16211,7 @@
  /*
   * enqueue_hrtimer - internal function to (re)start a timer
   *
-@@ -888,6 +931,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
+@@ -889,6 +932,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
  	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
  		goto out;
  
@@ -16026,7 +16223,7 @@
  	next_timer = timerqueue_getnext(&base->active);
  	timerqueue_del(&base->active, &timer->node);
  	if (&timer->node == next_timer) {
-@@ -983,8 +1031,20 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+@@ -985,8 +1033,20 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
  	 *
  	 * XXX send_remote_softirq() ?
  	 */
@@ -16049,7 +16246,7 @@
  
  	unlock_hrtimer_base(timer, &flags);
  
-@@ -1070,7 +1130,7 @@ int hrtimer_cancel(struct hrtimer *timer)
+@@ -1072,7 +1132,7 @@ int hrtimer_cancel(struct hrtimer *timer)
  
  		if (ret >= 0)
  			return ret;
@@ -16058,7 +16255,7 @@
  	}
  }
  EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1149,6 +1209,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+@@ -1151,6 +1211,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  
  	base = hrtimer_clockid_to_base(clock_id);
  	timer->base = &cpu_base->clock_base[base];
@@ -16066,7 +16263,7 @@
  	timerqueue_init(&timer->node);
  
  #ifdef CONFIG_TIMER_STATS
-@@ -1232,6 +1293,122 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+@@ -1234,6 +1295,122 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
  	timer->state &= ~HRTIMER_STATE_CALLBACK;
  }
  
@@ -16189,7 +16386,7 @@
  #ifdef CONFIG_HIGH_RES_TIMERS
  
  /*
-@@ -1242,7 +1419,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+@@ -1244,7 +1421,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	ktime_t expires_next, now, entry_time, delta;
@@ -16198,7 +16395,7 @@
  
  	BUG_ON(!cpu_base->hres_active);
  	cpu_base->nr_events++;
-@@ -1278,6 +1455,14 @@ retry:
+@@ -1280,6 +1457,14 @@ retry:
  
  			timer = container_of(node, struct hrtimer, node);
  
@@ -16213,7 +16410,7 @@
  			/*
  			 * The immediate goal for using the softexpires is
  			 * minimizing wakeups, not running timers at the
-@@ -1301,7 +1486,10 @@ retry:
+@@ -1303,7 +1488,10 @@ retry:
  				break;
  			}
  
@@ -16225,7 +16422,7 @@
  		}
  	}
  
-@@ -1316,6 +1504,10 @@ retry:
+@@ -1318,6 +1506,10 @@ retry:
  	if (expires_next.tv64 == KTIME_MAX ||
  	    !tick_program_event(expires_next, 0)) {
  		cpu_base->hang_detected = 0;
@@ -16236,7 +16433,7 @@
  		return;
  	}
  
-@@ -1391,17 +1583,17 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1393,17 +1585,17 @@ void hrtimer_peek_ahead_timers(void)
  	local_irq_restore(flags);
  }
  
@@ -16259,7 +16456,7 @@
  /*
   * Called from timer softirq every jiffy, expire hrtimers:
   *
-@@ -1434,7 +1626,7 @@ void hrtimer_run_queues(void)
+@@ -1436,7 +1628,7 @@ void hrtimer_run_queues(void)
  	struct timerqueue_node *node;
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	struct hrtimer_clock_base *base;
@@ -16268,7 +16465,7 @@
  
  	if (hrtimer_hres_active())
  		return;
-@@ -1459,10 +1651,16 @@ void hrtimer_run_queues(void)
+@@ -1461,10 +1653,16 @@ void hrtimer_run_queues(void)
  					hrtimer_get_expires_tv64(timer))
  				break;
  
@@ -16286,7 +16483,7 @@
  }
  
  /*
-@@ -1484,6 +1682,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
+@@ -1486,6 +1684,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
  {
  	sl->timer.function = hrtimer_wakeup;
@@ -16294,7 +16491,7 @@
  	sl->task = task;
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1622,9 +1821,13 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
+@@ -1624,9 +1823,13 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
  		cpu_base->clock_base[i].cpu_base = cpu_base;
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -16308,7 +16505,7 @@
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
-@@ -1737,9 +1940,7 @@ void __init hrtimers_init(void)
+@@ -1739,9 +1942,7 @@ void __init hrtimers_init(void)
  	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
  			  (void *)(long)smp_processor_id());
  	register_cpu_notifier(&hrtimers_nb);
@@ -16335,7 +16532,7 @@
  	if (!noirqdebug)
  		note_interrupt(irq, desc, retval);
 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 0a7840ae..d624790 100644
+index a1aadab..e1bd49d 100644
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
 @@ -18,6 +18,7 @@
@@ -16355,10 +16552,10 @@
  
  /**
 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
-index b5f4742..d09e0f5 100644
+index dc813a9..d09e0f5 100644
 --- a/kernel/irq/spurious.c
 +++ b/kernel/irq/spurious.c
-@@ -339,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
  
  static int __init irqfixup_setup(char *str)
  {
@@ -16370,7 +16567,7 @@
  	irqfixup = 1;
  	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
  	printk(KERN_WARNING "This may impact system performance.\n");
-@@ -351,6 +358,11 @@ module_param(irqfixup, int, 0644);
+@@ -353,6 +358,11 @@ module_param(irqfixup, int, 0644);
  
  static int __init irqpoll_setup(char *str)
  {
@@ -16410,7 +16607,7 @@
  		}
  		expires = timeval_to_ktime(value->it_value);
 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
-index 7798181..3553e90 100644
+index e0f0bdd..bb14394 100644
 --- a/kernel/kprobes.c
 +++ b/kernel/kprobes.c
 @@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
@@ -19206,7 +19403,7 @@
  #endif
  
 diff --git a/kernel/sched.c b/kernel/sched.c
-index 063d7a4..63aeba0 100644
+index 063d7a4..e5ef7a8 100644
 --- a/kernel/sched.c
 +++ b/kernel/sched.c
 @@ -71,6 +71,7 @@
@@ -19293,7 +19490,7 @@
  }
  
  /*
-@@ -2678,8 +2686,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2678,8 +2686,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  
  	smp_wmb();
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -19305,8 +19502,10 @@
 +		 * if the wakeup condition is true.
 +		 */
 +		if (!(wake_flags & WF_LOCK_SLEEPER)) {
-+			if (p->saved_state & state)
++			if (p->saved_state & state) {
 +				p->saved_state = TASK_RUNNING;
++				success = 1;
++			}
 +		}
  		goto out;
 +	}
@@ -19320,7 +19519,7 @@
  
  	success = 1; /* we're going to change ->state */
  	cpu = task_cpu(p);
-@@ -2735,40 +2760,6 @@ out:
+@@ -2735,40 +2762,6 @@ out:
  }
  
  /**
@@ -19361,7 +19560,7 @@
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -2785,6 +2776,18 @@ int wake_up_process(struct task_struct *p)
+@@ -2785,6 +2778,18 @@ int wake_up_process(struct task_struct *p)
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -19380,7 +19579,7 @@
  int wake_up_state(struct task_struct *p, unsigned int state)
  {
  	return try_to_wake_up(p, state, 0);
-@@ -3060,8 +3063,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+@@ -3060,8 +3065,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
  	finish_lock_switch(rq, prev);
  
  	fire_sched_in_preempt_notifiers(current);
@@ -19394,7 +19593,7 @@
  	if (unlikely(prev_state == TASK_DEAD)) {
  		/*
  		 * Remove function-return probe instances associated with this
-@@ -4182,6 +4189,126 @@ static inline void schedule_debug(struct task_struct *prev)
+@@ -4182,6 +4191,126 @@ static inline void schedule_debug(struct task_struct *prev)
  	schedstat_inc(this_rq(), sched_count);
  }
  
@@ -19521,7 +19720,7 @@
  static void put_prev_task(struct rq *rq, struct task_struct *prev)
  {
  	if (prev->on_rq || rq->skip_clock_update < 0)
-@@ -4241,6 +4368,8 @@ need_resched:
+@@ -4241,6 +4370,8 @@ need_resched:
  
  	raw_spin_lock_irq(&rq->lock);
  
@@ -19530,7 +19729,7 @@
  	switch_count = &prev->nivcsw;
  	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  		if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -4248,19 +4377,6 @@ need_resched:
+@@ -4248,19 +4379,6 @@ need_resched:
  		} else {
  			deactivate_task(rq, prev, DEQUEUE_SLEEP);
  			prev->on_rq = 0;
@@ -19550,7 +19749,7 @@
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -4294,15 +4410,23 @@ need_resched:
+@@ -4294,15 +4412,23 @@ need_resched:
  
  	post_schedule(rq);
  
@@ -19576,7 +19775,7 @@
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -4311,15 +4435,37 @@ static inline void sched_submit_work(struct task_struct *tsk)
+@@ -4311,15 +4437,37 @@ static inline void sched_submit_work(struct task_struct *tsk)
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -19614,7 +19813,7 @@
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  
  static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-@@ -4391,7 +4537,16 @@ asmlinkage void __sched notrace preempt_schedule(void)
+@@ -4391,7 +4539,16 @@ asmlinkage void __sched notrace preempt_schedule(void)
  
  	do {
  		add_preempt_count_notrace(PREEMPT_ACTIVE);
@@ -19631,7 +19830,7 @@
  		sub_preempt_count_notrace(PREEMPT_ACTIVE);
  
  		/*
-@@ -4487,9 +4642,9 @@ EXPORT_SYMBOL(__wake_up);
+@@ -4487,9 +4644,9 @@ EXPORT_SYMBOL(__wake_up);
  /*
   * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
   */
@@ -19643,7 +19842,7 @@
  }
  EXPORT_SYMBOL_GPL(__wake_up_locked);
  
-@@ -4814,9 +4969,8 @@ long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+@@ -4814,9 +4971,8 @@ long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  EXPORT_SYMBOL(sleep_on_timeout);
  
  #ifdef CONFIG_RT_MUTEXES
@@ -19654,7 +19853,7 @@
   * @p: task
   * @prio: prio value (kernel-internal form)
   *
-@@ -4825,7 +4979,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -4825,7 +4981,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
   *
   * Used by the rt_mutex code to implement priority inheritance logic.
   */
@@ -19663,7 +19862,7 @@
  {
  	int oldprio, on_rq, running;
  	struct rq *rq;
-@@ -4835,6 +4989,24 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -4835,6 +4991,24 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  
  	rq = __task_rq_lock(p);
  
@@ -19688,7 +19887,7 @@
  	trace_sched_pi_setprio(p, prio);
  	oldprio = p->prio;
  	prev_class = p->sched_class;
-@@ -4858,9 +5030,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -4858,9 +5032,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
  
  	check_class_changed(rq, p, prev_class, oldprio);
@@ -19699,7 +19898,7 @@
  #endif
  
  void set_user_nice(struct task_struct *p, long nice)
-@@ -4995,7 +5167,13 @@ EXPORT_SYMBOL(task_nice);
+@@ -4995,7 +5169,13 @@ EXPORT_SYMBOL(task_nice);
   */
  int idle_cpu(int cpu)
  {
@@ -19714,7 +19913,7 @@
  }
  
  /**
-@@ -5529,7 +5707,7 @@ SYSCALL_DEFINE0(sched_yield)
+@@ -5529,7 +5709,7 @@ SYSCALL_DEFINE0(sched_yield)
  	__release(rq->lock);
  	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  	do_raw_spin_unlock(&rq->lock);
@@ -19723,7 +19922,7 @@
  
  	schedule();
  
-@@ -5543,9 +5721,17 @@ static inline int should_resched(void)
+@@ -5543,9 +5723,17 @@ static inline int should_resched(void)
  
  static void __cond_resched(void)
  {
@@ -19744,7 +19943,7 @@
  }
  
  int __sched _cond_resched(void)
-@@ -5586,6 +5772,7 @@ int __cond_resched_lock(spinlock_t *lock)
+@@ -5586,6 +5774,7 @@ int __cond_resched_lock(spinlock_t *lock)
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -19752,7 +19951,7 @@
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -5599,6 +5786,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5599,6 +5788,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
@@ -19760,7 +19959,7 @@
  
  /**
   * yield - yield the current processor to other threads.
-@@ -5845,7 +6033,7 @@ void show_state_filter(unsigned long state_filter)
+@@ -5845,7 +6035,7 @@ void show_state_filter(unsigned long state_filter)
  	printk(KERN_INFO
  		"  task                        PC stack   pid father\n");
  #endif
@@ -19769,7 +19968,7 @@
  	do_each_thread(g, p) {
  		/*
  		 * reset the NMI-timeout, listing all files on a slow
-@@ -5861,7 +6049,7 @@ void show_state_filter(unsigned long state_filter)
+@@ -5861,7 +6051,7 @@ void show_state_filter(unsigned long state_filter)
  #ifdef CONFIG_SCHED_DEBUG
  	sysrq_sched_debug_show();
  #endif
@@ -19778,7 +19977,7 @@
  	/*
  	 * Only show locks if all tasks are dumped:
  	 */
-@@ -5922,6 +6110,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
+@@ -5922,6 +6112,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  	 */
  	idle->sched_class = &idle_sched_class;
  	ftrace_graph_init_idle_task(idle, cpu);
@@ -19788,7 +19987,7 @@
  }
  
  /*
-@@ -5983,12 +6174,12 @@ static inline void sched_init_granularity(void)
+@@ -5983,12 +6176,12 @@ static inline void sched_init_granularity(void)
  #ifdef CONFIG_SMP
  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -19805,7 +20004,7 @@
  }
  
  /*
-@@ -6039,7 +6230,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+@@ -6039,7 +6232,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
  	do_set_cpus_allowed(p, new_mask);
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -19814,7 +20013,7 @@
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -6086,7 +6277,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+@@ -6086,7 +6279,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  	if (task_cpu(p) != src_cpu)
  		goto done;
  	/* Affinity changed (again). */
@@ -19823,7 +20022,7 @@
  		goto fail;
  
  	/*
-@@ -6128,6 +6319,8 @@ static int migration_cpu_stop(void *data)
+@@ -6128,6 +6321,8 @@ static int migration_cpu_stop(void *data)
  
  #ifdef CONFIG_HOTPLUG_CPU
  
@@ -19832,7 +20031,7 @@
  /*
   * Ensures that the idle task is using init_mm right before its cpu goes
   * offline.
-@@ -6140,7 +6333,12 @@ void idle_task_exit(void)
+@@ -6140,7 +6335,12 @@ void idle_task_exit(void)
  
  	if (mm != &init_mm)
  		switch_mm(mm, &init_mm, current);
@@ -19846,7 +20045,7 @@
  }
  
  /*
-@@ -6458,6 +6656,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+@@ -6458,6 +6658,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  		migrate_nr_uninterruptible(rq);
  		calc_global_load_remove(rq);
  		break;
@@ -19859,7 +20058,7 @@
  #endif
  	}
  
-@@ -8175,7 +8379,8 @@ void __init sched_init(void)
+@@ -8175,7 +8381,8 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
@@ -21631,7 +21830,7 @@
  	}
  
 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
-index e0980f0..cf52fda 100644
+index 8b27006..b9bf9d0 100644
 --- a/kernel/time/clocksource.c
 +++ b/kernel/time/clocksource.c
 @@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
@@ -21887,7 +22086,7 @@
  
  	/* Get the next period (per cpu) */
 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 342408c..bdbf452 100644
+index 5f45831..d040f93 100644
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
 @@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw(void)
@@ -21900,7 +22099,7 @@
  
  /*
   * The current time
-@@ -361,7 +360,7 @@ int do_settimeofday(const struct timespec *tv)
+@@ -365,7 +364,7 @@ int do_settimeofday(const struct timespec *tv)
  	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
  		return -EINVAL;
  
@@ -21909,7 +22108,7 @@
  
  	timekeeping_forward_now();
  
-@@ -377,7 +376,7 @@ int do_settimeofday(const struct timespec *tv)
+@@ -381,7 +380,7 @@ int do_settimeofday(const struct timespec *tv)
  	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
  				timekeeper.mult);
  
@@ -21918,7 +22117,7 @@
  
  	/* signal hrtimers about time change */
  	clock_was_set();
-@@ -401,7 +400,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+@@ -405,7 +404,7 @@ int timekeeping_inject_offset(struct timespec *ts)
  	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
  		return -EINVAL;
  
@@ -21927,7 +22126,7 @@
  
  	timekeeping_forward_now();
  
-@@ -414,7 +413,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+@@ -418,7 +417,7 @@ int timekeeping_inject_offset(struct timespec *ts)
  	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
  				timekeeper.mult);
  
@@ -21936,7 +22135,7 @@
  
  	/* signal hrtimers about time change */
  	clock_was_set();
-@@ -568,7 +567,7 @@ void __init timekeeping_init(void)
+@@ -572,7 +571,7 @@ void __init timekeeping_init(void)
  	read_persistent_clock(&now);
  	read_boot_clock(&boot);
  
@@ -21945,7 +22144,7 @@
  
  	ntp_init();
  
-@@ -589,7 +588,7 @@ void __init timekeeping_init(void)
+@@ -593,7 +592,7 @@ void __init timekeeping_init(void)
  				-boot.tv_sec, -boot.tv_nsec);
  	total_sleep_time.tv_sec = 0;
  	total_sleep_time.tv_nsec = 0;
@@ -21954,7 +22153,7 @@
  }
  
  /* time in seconds when suspend began */
-@@ -630,7 +629,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
+@@ -634,7 +633,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
  	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
  		return;
  
@@ -21963,7 +22162,7 @@
  	timekeeping_forward_now();
  
  	__timekeeping_inject_sleeptime(delta);
-@@ -640,7 +639,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
+@@ -644,7 +643,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
  	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
  				timekeeper.mult);
  
@@ -21972,7 +22171,7 @@
  
  	/* signal hrtimers about time change */
  	clock_was_set();
-@@ -663,7 +662,7 @@ static void timekeeping_resume(void)
+@@ -667,7 +666,7 @@ static void timekeeping_resume(void)
  
  	clocksource_resume();
  
@@ -21981,7 +22180,7 @@
  
  	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
  		ts = timespec_sub(ts, timekeeping_suspend_time);
-@@ -673,7 +672,7 @@ static void timekeeping_resume(void)
+@@ -677,7 +676,7 @@ static void timekeeping_resume(void)
  	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
  	timekeeper.ntp_error = 0;
  	timekeeping_suspended = 0;
@@ -21990,7 +22189,7 @@
  
  	touch_softlockup_watchdog();
  
-@@ -689,10 +688,10 @@ static int timekeeping_suspend(void)
+@@ -693,10 +692,10 @@ static int timekeeping_suspend(void)
  
  	read_persistent_clock(&timekeeping_suspend_time);
  
@@ -22003,7 +22202,7 @@
  
  	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
  	clocksource_suspend();
-@@ -1121,7 +1120,7 @@ ktime_t ktime_get_monotonic_offset(void)
+@@ -1125,7 +1124,7 @@ ktime_t ktime_get_monotonic_offset(void)
   */
  void xtime_update(unsigned long ticks)
  {
@@ -22434,107 +22633,6 @@
  obj-$(CONFIG_NOP_TRACER) += trace_nop.o
  obj-$(CONFIG_STACK_TRACER) += trace_stack.o
  obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index ef9271b..1eef6cf 100644
---- a/kernel/trace/ftrace.c
-+++ b/kernel/trace/ftrace.c
-@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
- 	return NULL;
- }
- 
-+static void
-+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
-+static void
-+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
-+
- static int
--ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
-+ftrace_hash_move(struct ftrace_ops *ops, int enable,
-+		 struct ftrace_hash **dst, struct ftrace_hash *src)
- {
- 	struct ftrace_func_entry *entry;
- 	struct hlist_node *tp, *tn;
-@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
- 	unsigned long key;
- 	int size = src->count;
- 	int bits = 0;
-+	int ret;
- 	int i;
- 
- 	/*
-+	 * Remove the current set, update the hash and add
-+	 * them back.
-+	 */
-+	ftrace_hash_rec_disable(ops, enable);
-+
-+	/*
- 	 * If the new source is empty, just free dst and assign it
- 	 * the empty_hash.
- 	 */
-@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
- 	if (bits > FTRACE_HASH_MAX_BITS)
- 		bits = FTRACE_HASH_MAX_BITS;
- 
-+	ret = -ENOMEM;
- 	new_hash = alloc_ftrace_hash(bits);
- 	if (!new_hash)
--		return -ENOMEM;
-+		goto out;
- 
- 	size = 1 << src->size_bits;
- 	for (i = 0; i < size; i++) {
-@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
- 	rcu_assign_pointer(*dst, new_hash);
- 	free_ftrace_hash_rcu(old_hash);
- 
--	return 0;
-+	ret = 0;
-+ out:
-+	/*
-+	 * Enable regardless of ret:
-+	 *  On success, we enable the new hash.
-+	 *  On failure, we re-enable the original hash.
-+	 */
-+	ftrace_hash_rec_enable(ops, enable);
-+
-+	return ret;
- }
- 
- /*
-@@ -2877,7 +2900,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
- 		ftrace_match_records(hash, buf, len);
- 
- 	mutex_lock(&ftrace_lock);
--	ret = ftrace_hash_move(orig_hash, hash);
-+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- 	mutex_unlock(&ftrace_lock);
- 
- 	mutex_unlock(&ftrace_regex_lock);
-@@ -3060,18 +3083,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
- 			orig_hash = &iter->ops->notrace_hash;
- 
- 		mutex_lock(&ftrace_lock);
--		/*
--		 * Remove the current set, update the hash and add
--		 * them back.
--		 */
--		ftrace_hash_rec_disable(iter->ops, filter_hash);
--		ret = ftrace_hash_move(orig_hash, iter->hash);
--		if (!ret) {
--			ftrace_hash_rec_enable(iter->ops, filter_hash);
--			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
--			    && ftrace_enabled)
--				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
--		}
-+		ret = ftrace_hash_move(iter->ops, filter_hash,
-+				       orig_hash, iter->hash);
-+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-+		    && ftrace_enabled)
-+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-+
- 		mutex_unlock(&ftrace_lock);
- 	}
- 	free_ftrace_hash(iter->hash);
 diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
 new file mode 100644
 index 0000000..9d49fcb
@@ -24217,7 +24315,7 @@
  
  void ftrace(struct trace_array *tr,
 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 3e2a7c9..c71844c 100644
+index 2d04936..cf95828 100644
 --- a/kernel/trace/trace_events.c
 +++ b/kernel/trace/trace_events.c
 @@ -116,7 +116,8 @@ static int trace_define_common_fields(void)
@@ -24380,6 +24478,75 @@
  }
  
  struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c
+new file mode 100644
+index 0000000..35cc000
+--- /dev/null
++++ b/kernel/wait-simple.c
+@@ -0,0 +1,63 @@
++/*
++ * Simple waitqueues without fancy flags and callbacks
++ *
++ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
++ *
++ * Based on kernel/wait.c
++ *
++ * For licencing details see kernel-base/COPYING
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/wait-simple.h>
++
++void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
++{
++	raw_spin_lock_init(&head->lock);
++	lockdep_set_class(&head->lock, key);
++	INIT_LIST_HEAD(&head->list);
++}
++EXPORT_SYMBOL_GPL(__init_swait_head);
++
++void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
++{
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&head->lock, flags);
++	w->task = current;
++	__swait_enqueue(head, w);
++	set_current_state(state);
++	raw_spin_unlock_irqrestore(&head->lock, flags);
++}
++EXPORT_SYMBOL_GPL(swait_prepare);
++
++void swait_finish(struct swait_head *head, struct swaiter *w)
++{
++	unsigned long flags;
++
++	__set_current_state(TASK_RUNNING);
++	if (w->task) {
++		raw_spin_lock_irqsave(&head->lock, flags);
++		__swait_dequeue(w);
++		raw_spin_unlock_irqrestore(&head->lock, flags);
++	}
++}
++EXPORT_SYMBOL_GPL(swait_finish);
++
++void __swait_wake(struct swait_head *head, unsigned int state)
++{
++	struct swaiter *curr, *next;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&head->lock, flags);
++
++	list_for_each_entry_safe(curr, next, &head->list, node) {
++		if (wake_up_state(curr->task, state)) {
++			__swait_dequeue(curr);
++			curr->task = NULL;
++		}
++	}
++
++	raw_spin_unlock_irqrestore(&head->lock, flags);
++}
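
(Two implementation details worth noting in wait-simple.c above: __swait_wake() dequeues every waiter it successfully wakes and clears w->task, which is why swait_finish() only takes the head lock when the waiter is still enqueued; and the head lock is a raw_spinlock_t, which is what makes wakeups safe from hard-irq context such as the EC GPE handler converted earlier in this patch.)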
 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
 index 3d0c56a..84e3cf1 100644
 --- a/kernel/watchdog.c
@@ -26268,10 +26435,10 @@
  	help
  	  Transparent Hugepages allows the kernel to use huge pages and
 diff --git a/mm/filemap.c b/mm/filemap.c
-index a8251a8..be1b637 100644
+index 3c981ba..acd34ec 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
-@@ -2040,7 +2040,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+@@ -2023,7 +2023,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
  	char *kaddr;
  	size_t copied;
  
@@ -26388,7 +26555,7 @@
  
  	if (active_mm != mm)
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 0f50cdb..d892db7 100644
+index 947a7e9..3950dab 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -57,6 +57,7 @@
@@ -26681,7 +26848,7 @@
  
  		page = get_page_from_freelist(gfp_mask, nodemask,
  				order, zonelist, high_zoneidx,
-@@ -3685,14 +3744,16 @@ static int __zone_pcp_update(void *data)
+@@ -3691,14 +3750,16 @@ static int __zone_pcp_update(void *data)
  	for_each_possible_cpu(cpu) {
  		struct per_cpu_pageset *pset;
  		struct per_cpu_pages *pcp;
@@ -26701,7 +26868,7 @@
  	}
  	return 0;
  }
-@@ -5004,6 +5065,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -5010,6 +5071,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
  void __init page_alloc_init(void)
  {
  	hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -27612,7 +27779,7 @@
  
  	return 0;
 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 65d5fd2..95c34be 100644
+index 43b44db..87c9357 100644
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
 @@ -789,7 +789,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
@@ -27981,10 +28148,10 @@
  }
  
 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 75ef66f..7c6c3a8 100644
+index 4845bfe..52d37a7 100644
 --- a/net/ipv4/route.c
 +++ b/net/ipv4/route.c
-@@ -242,7 +242,7 @@ struct rt_hash_bucket {
+@@ -246,7 +246,7 @@ struct rt_hash_bucket {
  };
  
  #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \

Added: people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7-extra
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ people/ukleinek/3.0-rt/linux-2.6/debian/patches/series/6ptx7-extra	Sun Feb 12 11:58:28 2012	(r18687)
@@ -0,0 +1 @@
++ features/all/rt/patch-3.0.18-rt34.patch featureset=rt
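
(For the series file syntax: a "+ <patch>" line in the Debian kernel patch system applies the named patch, and the featureset=rt qualifier restricts it to builds of the rt featureset, so only the rt flavour picks up patch-3.0.18-rt34.patch.)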


