[kernel] r12114 - in dists/sid/linux-2.6/debian: . patches/bugfix patches/series

Dann Frazier dannf at alioth.debian.org
Wed Aug 20 23:42:40 UTC 2008


Author: dannf
Date: Wed Aug 20 23:42:39 2008
New Revision: 12114

Log:
* [ia64] Fix boot-time hang w/ PRINTK_TIME by ensuring that cpu0 can access
  per-cpu vars in early boot
* delay calls to sched_clock() until after sched_clock_init() to prevent
  inaccurate printk timings on ia64 and presumably other architectures

Added:
   dists/sid/linux-2.6/debian/patches/bugfix/delay-sched_clock.patch
   dists/sid/linux-2.6/debian/patches/bugfix/ia64-cpu0-early-access-per-cpu-vars.patch
Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/series/4

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	(original)
+++ dists/sid/linux-2.6/debian/changelog	Wed Aug 20 23:42:39 2008
@@ -1,5 +1,6 @@
 linux-2.6 (2.6.26-4) UNRELEASED; urgency=low
 
+  [ maximilian attems ]
   * x86: Reset ACPI_PROCFS_POWER for Lenny as buggy apps depend on it.
     (closes: #495541)
   * x86: ACPI: Fix thermal shutdowns
@@ -74,7 +75,13 @@
     - ipv6: Fix ip6_xmit to send fragments if ipfragok is true
     - x86: amd opteron TOM2 mask val fix
 
- -- maximilian attems <maks at debian.org>  Tue, 19 Aug 2008 10:53:45 +0200
+  [ dann frazier ]
+  * [ia64] Fix boot-time hang w/ PRINTK_TIME by ensuring that cpu0 can access
+    per-cpu vars in early boot
+  * delay calls to sched_clock() until after sched_clock_init() to prevent
+    inaccurate printk timings on ia64 and presumably other architectures
+
+ -- dann frazier <dannf at debian.org>  Wed, 20 Aug 2008 16:58:30 -0600
 
 linux-2.6 (2.6.26-3) unstable; urgency=low
 

Added: dists/sid/linux-2.6/debian/patches/bugfix/delay-sched_clock.patch
==============================================================================
--- (empty file)
+++ dists/sid/linux-2.6/debian/patches/bugfix/delay-sched_clock.patch	Wed Aug 20 23:42:39 2008
@@ -0,0 +1,92 @@
+commit c1955a3d4762e7a9bf84035eb3c4886a900f0d15
+Author: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Date:   Mon Aug 11 08:59:03 2008 +0200
+
+    sched_clock: delay using sched_clock()
+    
+    Some arches can't handle sched_clock() being called too early - delay
+    this until sched_clock_init() has been called.
+    
+    Reported-by: Bill Gatliff <bgat at billgatliff.com>
+    Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+    Tested-by: Nishanth Aravamudan <nacc at us.ibm.com>
+    CC: Russell King - ARM Linux <linux at arm.linux.org.uk>
+    Signed-off-by: Ingo Molnar <mingo at elte.hu>
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/include/linux/sched.h linux-source-2.6.26/include/linux/sched.h
+--- linux-source-2.6.26.orig/include/linux/sched.h	2008-07-13 15:51:29.000000000 -0600
++++ linux-source-2.6.26/include/linux/sched.h	2008-08-19 23:59:36.000000000 -0600
+@@ -1552,16 +1552,10 @@ static inline int set_cpus_allowed(struc
+ 
+ extern unsigned long long sched_clock(void);
+ 
+-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+-static inline void sched_clock_init(void)
+-{
+-}
+-
+-static inline u64 sched_clock_cpu(int cpu)
+-{
+-	return sched_clock();
+-}
++extern void sched_clock_init(void);
++extern u64 sched_clock_cpu(int cpu);
+ 
++#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ static inline void sched_clock_tick(void)
+ {
+ }
+@@ -1574,8 +1568,6 @@ static inline void sched_clock_idle_wake
+ {
+ }
+ #else
+-extern void sched_clock_init(void);
+-extern u64 sched_clock_cpu(int cpu);
+ extern void sched_clock_tick(void);
+ extern void sched_clock_idle_sleep_event(void);
+ extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+diff -urpN linux-source-2.6.26.orig/kernel/sched_clock.c linux-source-2.6.26/kernel/sched_clock.c
+--- linux-source-2.6.26.orig/kernel/sched_clock.c	2008-07-13 15:51:29.000000000 -0600
++++ linux-source-2.6.26/kernel/sched_clock.c	2008-08-19 23:59:36.000000000 -0600
+@@ -30,6 +30,8 @@
+ #include <linux/module.h>
+ 
+ 
++static __read_mostly int sched_clock_running;
++
+ #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ 
+ struct sched_clock_data {
+@@ -59,8 +61,6 @@ static inline struct sched_clock_data *c
+ 	return &per_cpu(sched_clock_data, cpu);
+ }
+ 
+-static __read_mostly int sched_clock_running;
+-
+ void sched_clock_init(void)
+ {
+ 	u64 ktime_now = ktime_to_ns(ktime_get());
+@@ -233,6 +233,21 @@ void sched_clock_idle_wakeup_event(u64 d
+ }
+ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
+ 
++#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
++
++void sched_clock_init(void)
++{
++	sched_clock_running = 1;
++}
++
++u64 sched_clock_cpu(int cpu)
++{
++	if (unlikely(!sched_clock_running))
++		return 0;
++
++	return sched_clock();
++}
++
+ #endif
+ 
+ /*
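
In effect, sched_clock_init() and sched_clock_cpu() now exist in both configurations, and sched_clock_cpu() is gated on the sched_clock_running flag that sched_clock_init() sets, so a CONFIG_PRINTK_TIME timestamp taken before the scheduler clock is initialised ends up as 0 rather than whatever an uninitialised sched_clock() would have returned. A minimal user-space sketch of that guard pattern (the names mirror the kernel's, but the clock source and the main() driver are illustrative stand-ins, not kernel code):

#include <stdio.h>
#include <time.h>

static int sched_clock_running;     /* stays 0 until sched_clock_init() runs */

/* stand-in for the architecture's raw clock; the kernel's sched_clock()
 * is whatever the arch provides and may not work in very early boot */
static unsigned long long sched_clock(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void sched_clock_init(void)
{
    sched_clock_running = 1;
}

/* the guard the patch adds for the !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK case:
 * report 0 until the clock has been initialised */
static unsigned long long sched_clock_cpu(int cpu)
{
    (void)cpu;
    if (!sched_clock_running)
        return 0;
    return sched_clock();
}

int main(void)
{
    printf("before init: %llu\n", sched_clock_cpu(0));   /* prints 0 */
    sched_clock_init();
    printf("after init:  %llu\n", sched_clock_cpu(0));   /* real nanoseconds */
    return 0;
}

The kernel makes the same trade-off: early printk lines simply carry a zero timestamp rather than calling into a sched_clock() that is not yet usable.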

Added: dists/sid/linux-2.6/debian/patches/bugfix/ia64-cpu0-early-access-per-cpu-vars.patch
==============================================================================
--- (empty file)
+++ dists/sid/linux-2.6/debian/patches/bugfix/ia64-cpu0-early-access-per-cpu-vars.patch	Wed Aug 20 23:42:39 2008
@@ -0,0 +1,167 @@
+commit 10617bbe84628eb18ab5f723d3ba35005adde143
+Author: Tony Luck <tony.luck at intel.com>
+Date:   Tue Aug 12 10:34:20 2008 -0700
+
+    [IA64] Ensure cpu0 can access per-cpu variables in early boot code
+    
+    ia64 handles per-cpu variables a little differently from other architectures
+    in that it maps the physical memory allocated for each cpu at a constant
+    virtual address (0xffffffffffff0000). This mapping is not enabled until
+    the architecture specific cpu_init() function is run, which causes problems
+    since some generic code is run before this point. In particular when
+    CONFIG_PRINTK_TIME is enabled, the boot cpu will trap on the access to
+    per-cpu memory at the first printk() call so the boot will fail without
+    the kernel printing anything to the console.
+    
+    Fix this by allocating percpu memory for cpu0 in the kernel data section
+    and doing all initialization to enable percpu access in head.S before
+    calling any generic code.
+    
+    Other cpus must take care not to access per-cpu variables too early, but
+    their code path from start_secondary() to cpu_init() is all in arch/ia64
+    
+    Signed-off-by: Tony Luck <tony.luck at intel.com>
+
+diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
+index 41c7129..8bdea8e 100644
+--- a/arch/ia64/kernel/head.S
++++ b/arch/ia64/kernel/head.S
+@@ -359,7 +359,31 @@ start_ap:
+ 	mov ar.rsc=0		// place RSE in enforced lazy mode
+ 	;;
+ 	loadrs			// clear the dirty partition
+-	mov IA64_KR(PER_CPU_DATA)=r0	// clear physical per-CPU base
++	movl r19=__phys_per_cpu_start
++	mov r18=PERCPU_PAGE_SIZE
++	;;
++#ifndef CONFIG_SMP
++	add r19=r19,r18
++	;;
++#else
++(isAP)	br.few 2f
++	mov r20=r19
++	sub r19=r19,r18
++	;;
++	shr.u r18=r18,3
++1:
++	ld8 r21=[r20],8;;
++	st8[r19]=r21,8
++	adds r18=-1,r18;;
++	cmp4.lt p7,p6=0,r18
++(p7)	br.cond.dptk.few 1b
++2:
++#endif
++	tpa r19=r19
++	;;
++	.pred.rel.mutex isBP,isAP
++(isBP)	mov IA64_KR(PER_CPU_DATA)=r19	// per-CPU base for cpu0
++(isAP)	mov IA64_KR(PER_CPU_DATA)=r0	// clear physical per-CPU base
+ 	;;
+ 	mov ar.bspstore=r2	// establish the new RSE stack
+ 	;;
+diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
+index 593279f..c27d5b2 100644
+--- a/arch/ia64/kernel/setup.c
++++ b/arch/ia64/kernel/setup.c
+@@ -927,17 +927,19 @@ cpu_init (void)
+ 	if (smp_processor_id() == 0) {
+ 		cpu_set(0, per_cpu(cpu_sibling_map, 0));
+ 		cpu_set(0, cpu_core_map[0]);
++	} else {
++		/*
++		 * Set ar.k3 so that assembly code in MCA handler can compute
++		 * physical addresses of per cpu variables with a simple:
++		 *   phys = ar.k3 + &per_cpu_var
++		 * and the alt-dtlb-miss handler can set per-cpu mapping into
++		 * the TLB when needed. head.S already did this for cpu0.
++		 */
++		ia64_set_kr(IA64_KR_PER_CPU_DATA,
++			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
+ 	}
+ #endif
+ 
+-	/*
+-	 * We set ar.k3 so that assembly code in MCA handler can compute
+-	 * physical addresses of per cpu variables with a simple:
+-	 *   phys = ar.k3 + &per_cpu_var
+-	 */
+-	ia64_set_kr(IA64_KR_PER_CPU_DATA,
+-		    ia64_tpa(cpu_data) - (long) __per_cpu_start);
+-
+ 	get_max_cacheline_size();
+ 
+ 	/*
+diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
+index 03f1a99..b39853a 100644
+--- a/arch/ia64/kernel/smpboot.c
++++ b/arch/ia64/kernel/smpboot.c
+@@ -467,7 +467,9 @@ start_secondary (void *unused)
+ {
+ 	/* Early console may use I/O ports */
+ 	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
++#ifndef CONFIG_PRINTK_TIME
+ 	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
++#endif
+ 	efi_map_pal_code();
+ 	cpu_init();
+ 	preempt_disable();
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index 5a77206..de71da8 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -215,6 +215,9 @@ SECTIONS
+   /* Per-cpu data: */
+   percpu : { } :percpu
+   . = ALIGN(PERCPU_PAGE_SIZE);
++#ifdef	CONFIG_SMP
++  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
++#endif
+   __phys_per_cpu_start = .;
+   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
+ 	{
+diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
+index 798bf98..e566ff4 100644
+--- a/arch/ia64/mm/contig.c
++++ b/arch/ia64/mm/contig.c
+@@ -163,8 +163,14 @@ per_cpu_init (void)
+ 	 * get_zeroed_page().
+ 	 */
+ 	if (first_time) {
++		void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
++
+ 		first_time=0;
+-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
++
++		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
++		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
++
++		for (cpu = 1; cpu < NR_CPUS; cpu++) {
+ 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+ 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+ 			cpu_data += PERCPU_PAGE_SIZE;
+@@ -177,7 +183,7 @@ per_cpu_init (void)
+ static inline void
+ alloc_per_cpu_data(void)
+ {
+-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
++	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+ 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ }
+ #else
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index d83125e..78026aa 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -143,7 +143,11 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
+ 	int cpu;
+ 
+ 	for_each_possible_early_cpu(cpu) {
+-		if (node == node_cpuid[cpu].nid) {
++		if (cpu == 0) {
++			void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
++			__per_cpu_offset[cpu] = (char*)cpu0_data -
++				__per_cpu_start;
++		} else if (node == node_cpuid[cpu].nid) {
+ 			memcpy(__va(cpu_data), __phys_per_cpu_start,
+ 			       __per_cpu_end - __per_cpu_start);
+ 			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
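
The contig.c and discontig.c hunks share one idea: cpu0's per-cpu area is now the statically reserved PERCPU_PAGE_SIZE block sitting just below __phys_per_cpu_start (populated by the new head.S code), and __per_cpu_offset[0] is computed from it, so the boot cpu can reach per-cpu variables before cpu_init() installs the per-cpu TLB mapping. The offset arithmetic itself is the generic Linux scheme: a per-cpu variable for cpu N lives at its link-time address plus __per_cpu_offset[N]. A simplified user-space sketch of that arithmetic (every symbol below is a mock stand-in for the real linker symbols and kernel macros, for illustration only):

#include <stdio.h>
#include <string.h>

#define NR_CPUS          4
#define PERCPU_AREA_SIZE 64

/* mock .data.percpu "template" copy linked into the image
 * (stand-in for the __per_cpu_start..__per_cpu_end region) */
static char per_cpu_template[PERCPU_AREA_SIZE];
static char *__per_cpu_start = per_cpu_template;

/* one backing copy per cpu; for cpu0 this plays the role of the block
 * the patch reserves in the data section below __phys_per_cpu_start */
static char per_cpu_area[NR_CPUS][PERCPU_AREA_SIZE];
static long __per_cpu_offset[NR_CPUS];

/* a "per-cpu variable": its template instance sits at offset 0 */
static int *template_counter = (int *)per_cpu_template;

/* per_cpu(counter, cpu): template address + that cpu's offset */
static int *per_cpu_counter(int cpu)
{
    return (int *)((char *)template_counter + __per_cpu_offset[cpu]);
}

int main(void)
{
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        /* copy the template and record the offset, as per_cpu_init() does */
        memcpy(per_cpu_area[cpu], __per_cpu_start, PERCPU_AREA_SIZE);
        __per_cpu_offset[cpu] = per_cpu_area[cpu] - __per_cpu_start;
        *per_cpu_counter(cpu) = cpu * 10;
    }

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d counter = %d\n", cpu, *per_cpu_counter(cpu));

    return 0;
}

The kernel versions layer physical/virtual conversions (ia64_tpa(), __va()) on top of this, but the cpu0 special case boils down to pointing the offset at the reserved block instead of at boot-allocated memory.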

Modified: dists/sid/linux-2.6/debian/patches/series/4
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/4	(original)
+++ dists/sid/linux-2.6/debian/patches/series/4	Wed Aug 20 23:42:39 2008
@@ -1,3 +1,5 @@
 + bugfix/all/acpi-fix-thermal-shutdowns-x60.patch
 - bugfix/x86-amd-opteron-tom2-mask-val-fix.patch
 + bugfix/all/stable/2.6.26.3.patch
++ bugfix/delay-sched_clock.patch
++ bugfix/ia64-cpu0-early-access-per-cpu-vars.patch
