[kernel] r12304 - in dists/etch/linux-2.6/debian: . patches/bugfix patches/bugfix/all/vserver patches/series

Dann Frazier dannf at alioth.debian.org
Mon Oct 13 05:40:54 UTC 2008


Author: dannf
Date: Mon Oct 13 05:40:53 2008
New Revision: 12304

Log:
merge in 22etch[1-3]

Added:
   dists/etch/linux-2.6/debian/patches/bugfix/all/vserver/proc-link-security.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/all/vserver/proc-link-security.patch
   dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
   dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user.patch
   dists/etch/linux-2.6/debian/patches/bugfix/check-privileges-before-setting-mount-propagation.patch
   dists/etch/linux-2.6/debian/patches/bugfix/cifs-fix-compiler-warning.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/cifs-fix-compiler-warning.patch
   dists/etch/linux-2.6/debian/patches/bugfix/dccp-change-l-r-must-have-at-least-one-byte-in-the-dccpsf_val-field.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/dccp-change-l-r-must-have-at-least-one-byte-in-the-dccpsf_val-field.patch
   dists/etch/linux-2.6/debian/patches/bugfix/dio-zero-struct-dio-with-kzalloc-instead-of-manually.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/dio-zero-struct-dio-with-kzalloc-instead-of-manually.patch
   dists/etch/linux-2.6/debian/patches/bugfix/esp-iv-in-linear-part-of-skb.patch
   dists/etch/linux-2.6/debian/patches/bugfix/lockless-helpers-for-remove_suid.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/lockless-helpers-for-remove_suid.patch
   dists/etch/linux-2.6/debian/patches/bugfix/netfilter-nf_nat_snmp_basic-fix-range-check.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/netfilter-nf_nat_snmp_basic-fix-range-check.patch
   dists/etch/linux-2.6/debian/patches/bugfix/open-allows-sgid-in-sgid-directory.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/open-allows-sgid-in-sgid-directory.patch
   dists/etch/linux-2.6/debian/patches/bugfix/prevent-ptrace-padding-area-readwrite-in-32bit-mode.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/prevent-ptrace-padding-area-readwrite-in-32bit-mode.patch
   dists/etch/linux-2.6/debian/patches/bugfix/remove-SUID-when-splicing-into-an-inode.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/remove-SUID-when-splicing-into-an-inode.patch
   dists/etch/linux-2.6/debian/patches/bugfix/sctp-make-sure-n-sizeof-does-not-overflow.patch
   dists/etch/linux-2.6/debian/patches/bugfix/sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
   dists/etch/linux-2.6/debian/patches/bugfix/splice-fix-bad-unlock_page-in-error-case.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/splice-fix-bad-unlock_page-in-error-case.patch
   dists/etch/linux-2.6/debian/patches/bugfix/tty-fix-for-tty-operations-bugs.patch
   dists/etch/linux-2.6/debian/patches/bugfix/vfs-fix-lookup-on-deleted-directory.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/vfs-fix-lookup-on-deleted-directory.patch
   dists/etch/linux-2.6/debian/patches/bugfix/vmsplice-security.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/vmsplice-security.patch
   dists/etch/linux-2.6/debian/patches/bugfix/wan-sbni_ioctl-cap-checks.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/wan-sbni_ioctl-cap-checks.patch
   dists/etch/linux-2.6/debian/patches/bugfix/x86-add-copy_user_handle_tail.patch
   dists/etch/linux-2.6/debian/patches/bugfix/x86-fix-copy_user.patch
   dists/etch/linux-2.6/debian/patches/bugfix/x86-wrong-register-was-used-in-align-macro.patch
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/bugfix/x86-wrong-register-was-used-in-align-macro.patch
   dists/etch/linux-2.6/debian/patches/series/18etch1
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/series/18etch1
   dists/etch/linux-2.6/debian/patches/series/18etch1-extra
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/series/18etch1-extra
   dists/etch/linux-2.6/debian/patches/series/22etch1
   dists/etch/linux-2.6/debian/patches/series/22etch2
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/series/22etch2
   dists/etch/linux-2.6/debian/patches/series/22etch3
      - copied unchanged from r12303, /releases/linux-2.6/2.6.18.dfsg.1-22etch3/debian/patches/series/22etch3
Modified:
   dists/etch/linux-2.6/debian/changelog

Modified: dists/etch/linux-2.6/debian/changelog
==============================================================================
--- dists/etch/linux-2.6/debian/changelog	(original)
+++ dists/etch/linux-2.6/debian/changelog	Mon Oct 13 05:40:53 2008
@@ -1,4 +1,4 @@
-linux-2.6 (2.6.18.dfsg.1-23) UNRELEASED; urgency=high
+linux-2.6 (2.6.18.dfsg.1-23) stable; urgency=high
 
   [ Ian Campbell ]
   * Fix DMA crash under Xen when no IOMMU is present (closes: #445987)
@@ -6,7 +6,75 @@
   [ dann frazier ]
   * [xfs] Fix attr2 corruption with btree data extents (closes: #498309)
 
- -- dann frazier <dannf at debian.org>  Mon, 08 Sep 2008 17:49:40 -0600
+ -- dann frazier <dannf at debian.org>  Sun, 12 Oct 2008 23:23:27 -0600
+
+linux-2.6 (2.6.18.dfsg.1-22etch3) stable-security; urgency=high
+
+  * bugfix/dccp-change-l-r-must-have-at-least-one-byte-in-the-dccpsf_val-field.patch
+    Fix integer overflow in dccp_setsockopt_change()
+    See CVE-2008-3276
+  * bugfix/dio-zero-struct-dio-with-kzalloc-instead-of-manually.patch
+    Fix oops caused by uninitialized field in struct dio
+    See CVE-2007-6716
+  * bugfix/wan-sbni_ioctl-cap-checks.patch
+    Add missing capability checks in sbni_ioctl
+    See CVE-2008-3525
+  * bugfix/open-allows-sgid-in-sgid-directory.patch
+    Prevent open() creating file with wrong permissions
+    See CVE-2008-4210
+  * bugfix/splice-fix-bad-unlock_page-in-error-case.patch
+    Don't attempt to unlock a page if add_to_page_cache_lru fails
+    See CVE-2008-4302
+  * bugfix/remove-SUID-when-splicing-into-an-inode.patch
+    Remove SUID when splicing into an inode
+    See CVE-2008-3833
+  * bugfix/prevent-ptrace-padding-area-readwrite-in-32bit-mode.patch
+    [S390] prevent ptrace padding area read/write in 31-bit mode
+    See CVE-2008-1514
+
+ -- dann frazier <dannf at debian.org>  Thu, 09 Oct 2008 09:02:40 -0600
+
+linux-2.6 (2.6.18.dfsg.1-22etch2) stable-security; urgency=high
+
+  * bugfix/x86-wrong-register-was-used-in-align-macro.patch
+    Fix regression introduced upstream by the fix for CVE-2008-0598
+  * bugfix/cifs-fix-compiler-warning.patch,
+    bugfix/netfilter-nf_nat_snmp_basic-fix-range-check.patch
+    Fix regressions introduced upstream by the fixes for CVE-2008-1673
+  * bugfix/sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
+    Fix possible information leak in seq_oss_synth.c
+    See CVE-2008-3272
+  * bugfix/vfs-fix-lookup-on-deleted-directory.patch
+    Fix potential memory leak in lookup path
+    See CVE-2008-3275
+
+ -- dann frazier <dannf at debian.org>  Mon, 18 Aug 2008 01:43:55 -0600
+
+linux-2.6 (2.6.18.dfsg.1-22etch1) stable-security; urgency=high
+
+  * bugfix/sctp-make-sure-n-sizeof-does-not-overflow.patch
+    [SECURITY] Fix potential overflow condition in
+    sctp_getsockopt_local_addrs_old
+    See CVE-2008-2826
+  * bugfix/esp-iv-in-linear-part-of-skb.patch
+    [SECURITY] Avoid tripping BUG() in IPsec code when the first fragment
+    of an ESP packet does not contain the entire ESP header and IV
+    See CVE-2007-6282
+  * bugfix/amd64-fix-zeroing-on-exception-in-copy_user.patch
+    [SECURITY] [amd64] Fix potential information leak when a copy
+    operation fails by properly zeroing out destination memory
+    See CVE-2008-2729
+  * bugfix/tty-fix-for-tty-operations-bugs.patch
+    [SECURITY] Fix issues with tty operation handling in various drivers
+    See CVE-2008-2812
+  * bugfix/check-privileges-before-setting-mount-propagation.patch
+    [SECURITY] Check CAP_SYS_ADMIN when changing mountpoint type
+    See CVE-2008-2931
+  * bugfix/x86-fix-copy_user.patch
+    [SECURITY][amd64] Fix information leak in the copy_user routine, see #490910.
+    See CVE-2008-0598
+
+ -- dann frazier <dannf at debian.org>  Sun, 27 Jul 2008 16:06:38 -0600
 
 linux-2.6 (2.6.18.dfsg.1-22) stable; urgency=high
 

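For context on the 22etch1-22etch3 entries above: several of them (dccp,
sctp) close the same class of hole, where a length or count taken from
userspace is used before it is range-checked. The sketch below is a
self-contained userspace illustration of that pattern, not the actual
dccp_setsockopt_change() code (the real fix lives in the patch referenced
above and is not reproduced here); the struct name and sizes are invented
for the example.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical variable-length option, shaped loosely like the dccp
 * "change" sockopt: a declared value length followed by the bytes. */
struct opt_change {
	uint8_t feat;
	uint8_t len;		/* number of bytes claimed in val[] */
	uint8_t val[16];
};

/* The checks mirror the class of fix above: the declared length must
 * cover at least one byte and must fit the storage actually supplied. */
static int validate_change(const struct opt_change *opt, size_t optlen)
{
	if (optlen < sizeof(*opt))
		return -EINVAL;		/* short buffer from userspace */
	if (opt->len < 1)
		return -EINVAL;		/* "at least one byte" rule */
	if (opt->len > sizeof(opt->val))
		return -EINVAL;		/* claims more than was supplied */
	return 0;
}

int main(void)
{
	struct opt_change ok = { .feat = 1, .len = 4, .val = { 1, 2, 3, 4 } };
	struct opt_change bad = { .feat = 1, .len = 0 };	/* empty value */

	printf("ok:  %d\n", validate_change(&ok, sizeof(ok)));
	printf("bad: %d\n", validate_change(&bad, sizeof(bad)));
	return 0;
}
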
Added: dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user-pre.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,798 @@
+commit 8d379dad8f1670d233ac67b76b1c5a42ad3714a3
+Author: Jan Beulich <jbeulich at novell.com>
+Date:   Tue Sep 26 10:52:32 2006 +0200
+
+    [PATCH] annotate arch/x86_64/lib/*.S
+    
+    Add unwind annotations to arch/x86_64/lib/*.S, and also use the macros
+    provided by linux/linkage.h where-ever possible.
+    
+    Some of the alternative instructions handling needed to be adjusted so
+    that the replacement code would also have valid unwind information.
+    
+    Signed-off-by: Jan Beulich <jbeulich at novell.com>
+    Signed-off-by: Andi Kleen <ak at suse.de>
+
+diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
+index 1f81b79..9a10a78 100644
+--- a/arch/x86_64/lib/clear_page.S
++++ b/arch/x86_64/lib/clear_page.S
+@@ -1,10 +1,22 @@
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+  * Zero a page. 	
+  * rdi	page
+  */			
+-	.globl clear_page
+-	.p2align 4
+-clear_page:
++	ALIGN
++clear_page_c:
++	CFI_STARTPROC
++	movl $4096/8,%ecx
++	xorl %eax,%eax
++	rep stosq
++	ret
++	CFI_ENDPROC
++ENDPROC(clear_page)
++
++ENTRY(clear_page)
++	CFI_STARTPROC
+ 	xorl   %eax,%eax
+ 	movl   $4096/64,%ecx
+ 	.p2align 4
+@@ -23,28 +35,25 @@ clear_page:
+ 	jnz	.Lloop
+ 	nop
+ 	ret
+-clear_page_end:
++	CFI_ENDPROC
++.Lclear_page_end:
++ENDPROC(clear_page)
+ 
+ 	/* Some CPUs run faster using the string instructions.
+ 	   It is also a lot simpler. Use this when possible */
+ 
+ #include <asm/cpufeature.h>
+ 
++	.section .altinstr_replacement,"ax"
++1:	.byte 0xeb					/* jmp <disp8> */
++	.byte (clear_page_c - clear_page) - (2f - 1b)	/* offset */
++2:
++	.previous
+ 	.section .altinstructions,"a"
+ 	.align 8
+-	.quad  clear_page
+-	.quad  clear_page_c
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  clear_page_end-clear_page
+-	.byte  clear_page_c_end-clear_page_c
+-	.previous
+-
+-	.section .altinstr_replacement,"ax"
+-clear_page_c:
+-	movl $4096/8,%ecx
+-	xorl %eax,%eax
+-	rep 
+-	stosq
+-	ret
+-clear_page_c_end:
++	.quad clear_page
++	.quad 1b
++	.byte X86_FEATURE_REP_GOOD
++	.byte .Lclear_page_end - clear_page
++	.byte 2b - 1b
+ 	.previous
+diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
+index 8fa19d9..0ebb03b 100644
+--- a/arch/x86_64/lib/copy_page.S
++++ b/arch/x86_64/lib/copy_page.S
+@@ -1,17 +1,33 @@
+ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
+ 	
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
++	ALIGN
++copy_page_c:
++	CFI_STARTPROC
++	movl $4096/8,%ecx
++	rep movsq
++	ret
++	CFI_ENDPROC
++ENDPROC(copy_page_c)
++
+ /* Don't use streaming store because it's better when the target
+    ends up in cache. */
+ 	    
+ /* Could vary the prefetch distance based on SMP/UP */
+ 
+-	.globl copy_page
+-	.p2align 4
+-copy_page:
++ENTRY(copy_page)
++	CFI_STARTPROC
+ 	subq	$3*8,%rsp
++	CFI_ADJUST_CFA_OFFSET 3*8
+ 	movq	%rbx,(%rsp)
++	CFI_REL_OFFSET rbx, 0
+ 	movq	%r12,1*8(%rsp)
++	CFI_REL_OFFSET r12, 1*8
+ 	movq	%r13,2*8(%rsp)
++	CFI_REL_OFFSET r13, 2*8
+ 
+ 	movl	$(4096/64)-5,%ecx
+ 	.p2align 4
+@@ -72,30 +88,33 @@ copy_page:
+ 	jnz	.Loop2
+ 
+ 	movq	(%rsp),%rbx
++	CFI_RESTORE rbx
+ 	movq	1*8(%rsp),%r12
++	CFI_RESTORE r12
+ 	movq	2*8(%rsp),%r13
++	CFI_RESTORE r13
+ 	addq	$3*8,%rsp
++	CFI_ADJUST_CFA_OFFSET -3*8
+ 	ret
++.Lcopy_page_end:
++	CFI_ENDPROC
++ENDPROC(copy_page)
+ 
+ 	/* Some CPUs run faster using the string copy instructions.
+ 	   It is also a lot simpler. Use this when possible */
+ 
+ #include <asm/cpufeature.h>
+ 
++	.section .altinstr_replacement,"ax"
++1:	.byte 0xeb					/* jmp <disp8> */
++	.byte (copy_page_c - copy_page) - (2f - 1b)	/* offset */
++2:
++	.previous
+ 	.section .altinstructions,"a"
+ 	.align 8
+-	.quad  copy_page
+-	.quad  copy_page_c
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  copy_page_c_end-copy_page_c
+-	.byte  copy_page_c_end-copy_page_c
+-	.previous
+-
+-	.section .altinstr_replacement,"ax"
+-copy_page_c:
+-	movl $4096/8,%ecx
+-	rep 
+-	movsq 
+-	ret
+-copy_page_c_end:
++	.quad copy_page
++	.quad 1b
++	.byte X86_FEATURE_REP_GOOD
++	.byte .Lcopy_page_end - copy_page
++	.byte 2b - 1b
+ 	.previous
+diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
+index f64569b..962f3a6 100644
+--- a/arch/x86_64/lib/copy_user.S
++++ b/arch/x86_64/lib/copy_user.S
+@@ -4,6 +4,9 @@
+  * Functions to copy from and to user space.		
+  */		 
+ 
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ #define FIX_ALIGNMENT 1
+ 
+ 	#include <asm/current.h>
+@@ -12,9 +15,8 @@
+ 	#include <asm/cpufeature.h>
+ 
+ /* Standard copy_to_user with segment limit checking */		
+-	.globl copy_to_user
+-	.p2align 4	
+-copy_to_user:
++ENTRY(copy_to_user)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%rax)
+ 	movq %rdi,%rcx
+ 	addq %rdx,%rcx
+@@ -25,9 +27,11 @@ copy_to_user:
+ 	.byte 0xe9	/* 32bit jump */
+ 	.long .Lcug-1f
+ 1:
++	CFI_ENDPROC
++ENDPROC(copy_to_user)
+ 
+ 	.section .altinstr_replacement,"ax"
+-3:	.byte 0xe9			/* replacement jmp with 8 bit immediate */
++3:	.byte 0xe9			/* replacement jmp with 32 bit immediate */
+ 	.long copy_user_generic_c-1b	/* offset */
+ 	.previous
+ 	.section .altinstructions,"a"
+@@ -40,9 +44,8 @@ copy_to_user:
+ 	.previous
+ 
+ /* Standard copy_from_user with segment limit checking */	
+-	.globl copy_from_user
+-	.p2align 4	
+-copy_from_user:
++ENTRY(copy_from_user)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%rax)
+ 	movq %rsi,%rcx
+ 	addq %rdx,%rcx
+@@ -50,10 +53,13 @@ copy_from_user:
+ 	cmpq threadinfo_addr_limit(%rax),%rcx
+ 	jae  bad_from_user
+ 	/* FALL THROUGH to copy_user_generic */
++	CFI_ENDPROC
++ENDPROC(copy_from_user)
+ 	
+ 	.section .fixup,"ax"
+ 	/* must zero dest */
+ bad_from_user:
++	CFI_STARTPROC
+ 	movl %edx,%ecx
+ 	xorl %eax,%eax
+ 	rep
+@@ -61,6 +67,8 @@ bad_from_user:
+ bad_to_user:
+ 	movl	%edx,%eax
+ 	ret
++	CFI_ENDPROC
++END(bad_from_user)
+ 	.previous
+ 	
+ 		
+@@ -75,9 +83,8 @@ bad_to_user:
+  * Output:		
+  * eax uncopied bytes or 0 if successful.
+  */
+-	.globl copy_user_generic
+-	.p2align 4
+-copy_user_generic:
++ENTRY(copy_user_generic)
++	CFI_STARTPROC
+ 	.byte 0x66,0x66,0x90	/* 5 byte nop for replacement jump */
+ 	.byte 0x66,0x90
+ 1:
+@@ -95,6 +102,8 @@ copy_user_generic:
+ 	.previous
+ .Lcug:
+ 	pushq %rbx
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rbx, 0
+ 	xorl %eax,%eax		/*zero for the exception handler */
+ 
+ #ifdef FIX_ALIGNMENT
+@@ -168,9 +177,13 @@ copy_user_generic:
+ 	decl %ecx
+ 	jnz .Lloop_1
+ 
++	CFI_REMEMBER_STATE
+ .Lende:
+ 	popq %rbx
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE rbx
+ 	ret
++	CFI_RESTORE_STATE
+ 
+ #ifdef FIX_ALIGNMENT
+ 	/* align destination */
+@@ -261,6 +274,9 @@ copy_user_generic:
+ .Le_zero:
+ 	movq %rdx,%rax
+ 	jmp .Lende
++	CFI_ENDPROC
++ENDPROC(copy_user_generic)
++
+ 
+ 	/* Some CPUs run faster using the string copy instructions.
+ 	   This is also a lot simpler. Use them when possible.
+@@ -282,6 +298,7 @@ copy_user_generic:
+   * this please consider this.
+    */
+ copy_user_generic_c:
++	CFI_STARTPROC
+ 	movl %edx,%ecx
+ 	shrl $3,%ecx
+ 	andl $7,%edx	
+@@ -294,6 +311,8 @@ copy_user_generic_c:
+ 	ret
+ 3:	lea (%rdx,%rcx,8),%rax
+ 	ret
++	CFI_ENDPROC
++END(copy_user_generic_c)
+ 
+ 	.section __ex_table,"a"
+ 	.quad 1b,3b
+diff --git a/arch/x86_64/lib/csum-copy.S b/arch/x86_64/lib/csum-copy.S
+index 72fd55e..f0dba36 100644
+--- a/arch/x86_64/lib/csum-copy.S
++++ b/arch/x86_64/lib/csum-copy.S
+@@ -5,8 +5,9 @@
+  * License.  See the file COPYING in the main directory of this archive
+  * for more details. No warranty for anything given at all.
+  */
+- 	#include <linux/linkage.h>
+-	#include <asm/errno.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/errno.h>
+ 
+ /*
+  * Checksum copy with exception handling.
+@@ -53,19 +54,24 @@
+ 	.endm
+ 	
+ 				
+-	.globl csum_partial_copy_generic
+-	.p2align 4
+-csum_partial_copy_generic:
++ENTRY(csum_partial_copy_generic)
++	CFI_STARTPROC
+ 	cmpl	 $3*64,%edx
+ 	jle	 .Lignore
+ 
+ .Lignore:		
+ 	subq  $7*8,%rsp
++	CFI_ADJUST_CFA_OFFSET 7*8
+ 	movq  %rbx,2*8(%rsp)
++	CFI_REL_OFFSET rbx, 2*8
+ 	movq  %r12,3*8(%rsp)
++	CFI_REL_OFFSET r12, 3*8
+ 	movq  %r14,4*8(%rsp)
++	CFI_REL_OFFSET r14, 4*8
+ 	movq  %r13,5*8(%rsp)
++	CFI_REL_OFFSET r13, 5*8
+ 	movq  %rbp,6*8(%rsp)
++	CFI_REL_OFFSET rbp, 6*8
+ 
+ 	movq  %r8,(%rsp)
+ 	movq  %r9,1*8(%rsp)
+@@ -208,14 +214,22 @@ csum_partial_copy_generic:
+ 	addl %ebx,%eax
+ 	adcl %r9d,%eax		/* carry */
+ 			
++	CFI_REMEMBER_STATE
+ .Lende:
+ 	movq 2*8(%rsp),%rbx
++	CFI_RESTORE rbx
+ 	movq 3*8(%rsp),%r12
++	CFI_RESTORE r12
+ 	movq 4*8(%rsp),%r14
++	CFI_RESTORE r14
+ 	movq 5*8(%rsp),%r13
++	CFI_RESTORE r13
+ 	movq 6*8(%rsp),%rbp
++	CFI_RESTORE rbp
+ 	addq $7*8,%rsp
++	CFI_ADJUST_CFA_OFFSET -7*8
+ 	ret
++	CFI_RESTORE_STATE
+ 
+ 	/* Exception handlers. Very simple, zeroing is done in the wrappers */
+ .Lbad_source:
+@@ -231,3 +245,5 @@ csum_partial_copy_generic:
+ 	jz   .Lende	
+ 	movl $-EFAULT,(%rax)
+ 	jmp .Lende
++	CFI_ENDPROC
++ENDPROC(csum_partial_copy_generic)
+diff --git a/arch/x86_64/lib/getuser.S b/arch/x86_64/lib/getuser.S
+index 3844d5e..5448876 100644
+--- a/arch/x86_64/lib/getuser.S
++++ b/arch/x86_64/lib/getuser.S
+@@ -27,25 +27,26 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/dwarf2.h>
+ #include <asm/page.h>
+ #include <asm/errno.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ 
+ 	.text
+-	.p2align 4
+-.globl __get_user_1
+-__get_user_1:	
++ENTRY(__get_user_1)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	cmpq threadinfo_addr_limit(%r8),%rcx
+ 	jae bad_get_user
+ 1:	movzb (%rcx),%edx
+ 	xorl %eax,%eax
+ 	ret
++	CFI_ENDPROC
++ENDPROC(__get_user_1)
+ 
+-	.p2align 4
+-.globl __get_user_2
+-__get_user_2:
++ENTRY(__get_user_2)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $1,%rcx
+ 	jc 20f
+@@ -57,10 +58,11 @@ __get_user_2:
+ 	ret
+ 20:	decq    %rcx
+ 	jmp	bad_get_user
++	CFI_ENDPROC
++ENDPROC(__get_user_2)
+ 
+-	.p2align 4
+-.globl __get_user_4
+-__get_user_4:
++ENTRY(__get_user_4)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $3,%rcx
+ 	jc 30f
+@@ -72,10 +74,11 @@ __get_user_4:
+ 	ret
+ 30:	subq $3,%rcx
+ 	jmp bad_get_user
++	CFI_ENDPROC
++ENDPROC(__get_user_4)
+ 
+-	.p2align 4
+-.globl __get_user_8
+-__get_user_8:
++ENTRY(__get_user_8)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $7,%rcx
+ 	jc 40f
+@@ -87,11 +90,16 @@ __get_user_8:
+ 	ret
+ 40:	subq $7,%rcx
+ 	jmp bad_get_user
++	CFI_ENDPROC
++ENDPROC(__get_user_8)
+ 
+ bad_get_user:
++	CFI_STARTPROC
+ 	xorl %edx,%edx
+ 	movq $(-EFAULT),%rax
+ 	ret
++	CFI_ENDPROC
++END(bad_get_user)
+ 
+ .section __ex_table,"a"
+ 	.quad 1b,bad_get_user
+diff --git a/arch/x86_64/lib/iomap_copy.S b/arch/x86_64/lib/iomap_copy.S
+index 8bbade5..05a95e7 100644
+--- a/arch/x86_64/lib/iomap_copy.S
++++ b/arch/x86_64/lib/iomap_copy.S
+@@ -15,12 +15,16 @@
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+  */
+ 
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+  * override generic version in lib/iomap_copy.c
+  */
+- 	.globl __iowrite32_copy
+-	.p2align 4
+-__iowrite32_copy:
++ENTRY(__iowrite32_copy)
++	CFI_STARTPROC
+ 	movl %edx,%ecx
+ 	rep movsd
+ 	ret
++	CFI_ENDPROC
++ENDPROC(__iowrite32_copy)
+diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
+index 5554948..967b22f 100644
+--- a/arch/x86_64/lib/memcpy.S
++++ b/arch/x86_64/lib/memcpy.S
+@@ -1,6 +1,10 @@
+ /* Copyright 2002 Andi Kleen */
+ 	
+-	#include <asm/cpufeature.h>		
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/cpufeature.h>
++
+ /*
+  * memcpy - Copy a memory block.
+  *
+@@ -13,12 +17,26 @@
+  * rax original destination
+  */	
+ 
+- 	.globl __memcpy
+-	.globl memcpy
+-	.p2align 4
+-__memcpy:
+-memcpy:		
++	ALIGN
++memcpy_c:
++	CFI_STARTPROC
++	movq %rdi,%rax
++	movl %edx,%ecx
++	shrl $3,%ecx
++	andl $7,%edx
++	rep movsq
++	movl %edx,%ecx
++	rep movsb
++	ret
++	CFI_ENDPROC
++ENDPROC(memcpy_c)
++
++ENTRY(__memcpy)
++ENTRY(memcpy)
++	CFI_STARTPROC
+ 	pushq %rbx
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rbx, 0
+ 	movq %rdi,%rax
+ 
+ 	movl %edx,%ecx
+@@ -86,36 +104,27 @@ memcpy:
+ 
+ .Lende:
+ 	popq %rbx
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE rbx
+ 	ret
+ .Lfinal:
++	CFI_ENDPROC
++ENDPROC(memcpy)
++ENDPROC(__memcpy)
+ 
+ 	/* Some CPUs run faster using the string copy instructions.
+ 	   It is also a lot simpler. Use this when possible */
+ 
++	.section .altinstr_replacement,"ax"
++1:	.byte 0xeb				/* jmp <disp8> */
++	.byte (memcpy_c - memcpy) - (2f - 1b)	/* offset */
++2:
++	.previous
+ 	.section .altinstructions,"a"
+ 	.align 8
+-	.quad  memcpy
+-	.quad  memcpy_c
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  .Lfinal-memcpy
+-	.byte  memcpy_c_end-memcpy_c
+-	.previous
+-
+-	.section .altinstr_replacement,"ax"
+- /* rdi	destination
+-  * rsi source
+-  * rdx count
+-  */
+-memcpy_c:
+-	movq %rdi,%rax
+-	movl %edx,%ecx
+-	shrl $3,%ecx
+-	andl $7,%edx	
+-	rep 
+-	movsq 
+-	movl %edx,%ecx
+-	rep
+-	movsb
+-	ret
+-memcpy_c_end:
++	.quad memcpy
++	.quad 1b
++	.byte X86_FEATURE_REP_GOOD
++	.byte .Lfinal - memcpy
++	.byte 2b - 1b
+ 	.previous
+diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
+index ad397f2..09ed1f6 100644
+--- a/arch/x86_64/lib/memset.S
++++ b/arch/x86_64/lib/memset.S
+@@ -1,4 +1,9 @@
+ /* Copyright 2002 Andi Kleen, SuSE Labs */
++
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+  * ISO C memset - set a memory block to a byte value.
+  *	
+@@ -8,11 +13,29 @@
+  * 
+  * rax   original destination
+  */	
+- 	.globl __memset
+-	.globl memset
+-	.p2align 4
+-memset:	
+-__memset:
++	ALIGN
++memset_c:
++	CFI_STARTPROC
++	movq %rdi,%r9
++	movl %edx,%r8d
++	andl $7,%r8d
++	movl %edx,%ecx
++	shrl $3,%ecx
++	/* expand byte value  */
++	movzbl %sil,%esi
++	movabs $0x0101010101010101,%rax
++	mulq %rsi		/* with rax, clobbers rdx */
++	rep stosq
++	movl %r8d,%ecx
++	rep stosb
++	movq %r9,%rax
++	ret
++	CFI_ENDPROC
++ENDPROC(memset_c)
++
++ENTRY(memset)
++ENTRY(__memset)
++	CFI_STARTPROC
+ 	movq %rdi,%r10
+ 	movq %rdx,%r11
+ 
+@@ -25,6 +48,7 @@ __memset:
+ 	movl  %edi,%r9d
+ 	andl  $7,%r9d
+ 	jnz  .Lbad_alignment
++	CFI_REMEMBER_STATE
+ .Lafter_bad_alignment:
+ 
+ 	movl %r11d,%ecx
+@@ -75,6 +99,7 @@ __memset:
+ 	movq	%r10,%rax
+ 	ret
+ 
++	CFI_RESTORE_STATE
+ .Lbad_alignment:
+ 	cmpq $7,%r11
+ 	jbe	.Lhandle_7
+@@ -84,42 +109,26 @@ __memset:
+ 	addq %r8,%rdi
+ 	subq %r8,%r11
+ 	jmp .Lafter_bad_alignment
++.Lfinal:
++	CFI_ENDPROC
++ENDPROC(memset)
++ENDPROC(__memset)
+ 
+ 	/* Some CPUs run faster using the string instructions.
+ 	   It is also a lot simpler. Use this when possible */
+ 
+ #include <asm/cpufeature.h>
+ 
++	.section .altinstr_replacement,"ax"
++1:	.byte 0xeb				/* jmp <disp8> */
++	.byte (memset_c - memset) - (2f - 1b)	/* offset */
++2:
++	.previous
+ 	.section .altinstructions,"a"
+ 	.align 8
+-	.quad  memset
+-	.quad  memset_c
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  memset_c_end-memset_c
+-	.byte  memset_c_end-memset_c
+-	.previous
+-
+-	.section .altinstr_replacement,"ax"
+- /* rdi	destination
+-  * rsi value
+-  * rdx count
+-  */
+-memset_c:
+-	movq %rdi,%r9
+-	movl %edx,%r8d
+-	andl $7,%r8d		
+-	movl %edx,%ecx
+-	shrl $3,%ecx		
+-	/* expand byte value  */
+-	movzbl %sil,%esi
+-	movabs $0x0101010101010101,%rax
+-	mulq   %rsi		/* with rax, clobbers rdx */
+-	rep
+-	stosq	
+-	movl %r8d,%ecx
+-	rep
+-	stosb
+-	movq %r9,%rax
+-	ret
+-memset_c_end:
++	.quad memset
++	.quad 1b
++	.byte X86_FEATURE_REP_GOOD
++	.byte .Lfinal - memset
++	.byte 2b - 1b
+ 	.previous
+diff --git a/arch/x86_64/lib/putuser.S b/arch/x86_64/lib/putuser.S
+index 7f55939..4989f5a 100644
+--- a/arch/x86_64/lib/putuser.S
++++ b/arch/x86_64/lib/putuser.S
+@@ -25,25 +25,26 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/dwarf2.h>
+ #include <asm/page.h>
+ #include <asm/errno.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ 
+ 	.text
+-	.p2align 4
+-.globl __put_user_1
+-__put_user_1:
++ENTRY(__put_user_1)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	cmpq threadinfo_addr_limit(%r8),%rcx
+ 	jae bad_put_user
+ 1:	movb %dl,(%rcx)
+ 	xorl %eax,%eax
+ 	ret
++	CFI_ENDPROC
++ENDPROC(__put_user_1)
+ 
+-	.p2align 4
+-.globl __put_user_2
+-__put_user_2:
++ENTRY(__put_user_2)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $1,%rcx
+ 	jc 20f
+@@ -55,10 +56,11 @@ __put_user_2:
+ 	ret
+ 20:	decq %rcx
+ 	jmp bad_put_user
++	CFI_ENDPROC
++ENDPROC(__put_user_2)
+ 
+-	.p2align 4
+-.globl __put_user_4
+-__put_user_4:
++ENTRY(__put_user_4)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $3,%rcx
+ 	jc 30f
+@@ -70,10 +72,11 @@ __put_user_4:
+ 	ret
+ 30:	subq $3,%rcx
+ 	jmp bad_put_user
++	CFI_ENDPROC
++ENDPROC(__put_user_4)
+ 
+-	.p2align 4
+-.globl __put_user_8
+-__put_user_8:
++ENTRY(__put_user_8)
++	CFI_STARTPROC
+ 	GET_THREAD_INFO(%r8)
+ 	addq $7,%rcx
+ 	jc 40f
+@@ -85,10 +88,15 @@ __put_user_8:
+ 	ret
+ 40:	subq $7,%rcx
+ 	jmp bad_put_user
++	CFI_ENDPROC
++ENDPROC(__put_user_8)
+ 
+ bad_put_user:
++	CFI_STARTPROC
+ 	movq $(-EFAULT),%rax
+ 	ret
++	CFI_ENDPROC
++END(bad_put_user)
+ 
+ .section __ex_table,"a"
+ 	.quad 1b,bad_put_user

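A note on what the patch above is doing: most hunks rework hand-rolled
.altinstructions records so that the replacement code also carries valid
unwind information. The alternatives mechanism itself — select the
"rep"-string variant at boot when the CPU advertises
X86_FEATURE_REP_GOOD — can be modelled in plain C with a function
pointer; the kernel actually patches the instruction stream in place, so
take this only as a simplified sketch.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void *copy_unrolled(void *dst, const void *src, size_t n)
{
	/* stand-in for the cache-line unrolled copy loop */
	return memcpy(dst, src, n);
}

static void *copy_rep_string(void *dst, const void *src, size_t n)
{
	/* stand-in for the "rep movsq; rep movsb" variant */
	return memcpy(dst, src, n);
}

static void *(*copy_page_impl)(void *, const void *, size_t) = copy_unrolled;

/* Analogue of applying an .altinstructions record: if the feature bit
 * is set, redirect callers to the replacement implementation. */
static void apply_alternatives(int cpu_has_rep_good)
{
	if (cpu_has_rep_good)
		copy_page_impl = copy_rep_string;
}

int main(void)
{
	char src[64] = "alternative instructions demo";
	char dst[64];

	apply_alternatives(1);		/* pretend CPUID reported REP_GOOD */
	copy_page_impl(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
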
Added: dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/amd64-fix-zeroing-on-exception-in-copy_user.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,272 @@
+commit 3022d734a54cbd2b65eea9a024564821101b4a9a
+Author: Andi Kleen <ak at suse.de>
+Date:   Tue Sep 26 10:52:39 2006 +0200
+
+    [PATCH] Fix zeroing on exception in copy_*_user
+    
+    - Don't zero for __copy_from_user_inatomic following i386.
+    This will prevent spurious zeros for parallel file system writers when
+    one does a exception
+    - The string instruction version didn't zero the output on
+    exception. Oops.
+    
+    Also I cleaned up the code a bit while I was at it and added a minor
+    optimization to the string instruction path.
+    
+    Signed-off-by: Andi Kleen <ak at suse.de>
+
+diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
+index 370952c..c3454af 100644
+--- a/arch/x86_64/kernel/x8664_ksyms.c
++++ b/arch/x86_64/kernel/x8664_ksyms.c
+@@ -29,6 +29,7 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic);
+ EXPORT_SYMBOL(copy_from_user);
+ EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(__copy_from_user_inatomic);
+ 
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
+index 962f3a6..70bebd3 100644
+--- a/arch/x86_64/lib/copy_user.S
++++ b/arch/x86_64/lib/copy_user.S
+@@ -9,10 +9,29 @@
+ 
+ #define FIX_ALIGNMENT 1
+ 
+-	#include <asm/current.h>
+-	#include <asm/asm-offsets.h>
+-	#include <asm/thread_info.h>
+-	#include <asm/cpufeature.h>
++#include <asm/current.h>
++#include <asm/asm-offsets.h>
++#include <asm/thread_info.h>
++#include <asm/cpufeature.h>
++
++	.macro ALTERNATIVE_JUMP feature,orig,alt
++0:
++	.byte 0xe9	/* 32bit jump */
++	.long \orig-1f	/* by default jump to orig */
++1:
++	.section .altinstr_replacement,"ax"
++2:	.byte 0xe9	             /* near jump with 32bit immediate */
++	.long \alt-1b /* offset */   /* or alternatively to alt */
++	.previous
++	.section .altinstructions,"a"
++	.align 8
++	.quad  0b
++	.quad  2b
++	.byte  \feature		     /* when feature is set */
++	.byte  5
++	.byte  5
++	.previous
++	.endm
+ 
+ /* Standard copy_to_user with segment limit checking */		
+ ENTRY(copy_to_user)
+@@ -23,25 +42,21 @@ ENTRY(copy_to_user)
+ 	jc  bad_to_user
+ 	cmpq threadinfo_addr_limit(%rax),%rcx
+ 	jae bad_to_user
+-2:
+-	.byte 0xe9	/* 32bit jump */
+-	.long .Lcug-1f
+-1:
++	xorl %eax,%eax	/* clear zero flag */
++	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+-ENDPROC(copy_to_user)
+ 
+-	.section .altinstr_replacement,"ax"
+-3:	.byte 0xe9			/* replacement jmp with 32 bit immediate */
+-	.long copy_user_generic_c-1b	/* offset */
+-	.previous
+-	.section .altinstructions,"a"
+-	.align 8
+-	.quad  2b
+-	.quad  3b
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  5
+-	.byte  5
+-	.previous
++ENTRY(copy_user_generic)
++	CFI_STARTPROC
++	movl $1,%ecx	/* set zero flag */
++	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
++	CFI_ENDPROC
++
++ENTRY(__copy_from_user_inatomic)
++	CFI_STARTPROC
++	xorl %ecx,%ecx	/* clear zero flag */
++	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
++	CFI_ENDPROC
+ 
+ /* Standard copy_from_user with segment limit checking */	
+ ENTRY(copy_from_user)
+@@ -52,7 +67,8 @@ ENTRY(copy_from_user)
+ 	jc  bad_from_user
+ 	cmpq threadinfo_addr_limit(%rax),%rcx
+ 	jae  bad_from_user
+-	/* FALL THROUGH to copy_user_generic */
++	movl $1,%ecx	/* set zero flag */
++	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+ ENDPROC(copy_from_user)
+ 	
+@@ -73,37 +89,26 @@ END(bad_from_user)
+ 	
+ 		
+ /*
+- * copy_user_generic - memory copy with exception handling.
++ * copy_user_generic_unrolled - memory copy with exception handling.
++ * This version is for CPUs like P4 that don't have efficient micro code for rep movsq
+  * 	
+  * Input:	
+  * rdi destination
+  * rsi source
+  * rdx count
++ * ecx zero flag -- if true zero destination on error
+  *
+  * Output:		
+  * eax uncopied bytes or 0 if successful.
+  */
+-ENTRY(copy_user_generic)
++ENTRY(copy_user_generic_unrolled)
+ 	CFI_STARTPROC
+-	.byte 0x66,0x66,0x90	/* 5 byte nop for replacement jump */
+-	.byte 0x66,0x90
+-1:
+-	.section .altinstr_replacement,"ax"
+-2:	.byte 0xe9	             /* near jump with 32bit immediate */
+-	.long copy_user_generic_c-1b /* offset */
+-	.previous
+-	.section .altinstructions,"a"
+-	.align 8
+-	.quad  copy_user_generic
+-	.quad  2b
+-	.byte  X86_FEATURE_REP_GOOD
+-	.byte  5
+-	.byte  5
+-	.previous
+-.Lcug:
+ 	pushq %rbx
+ 	CFI_ADJUST_CFA_OFFSET 8
+ 	CFI_REL_OFFSET rbx, 0
++	pushq %rcx
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rcx, 0
+ 	xorl %eax,%eax		/*zero for the exception handler */
+ 
+ #ifdef FIX_ALIGNMENT
+@@ -179,6 +184,9 @@ ENTRY(copy_user_generic)
+ 
+ 	CFI_REMEMBER_STATE
+ .Lende:
++	popq %rcx
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE rcx
+ 	popq %rbx
+ 	CFI_ADJUST_CFA_OFFSET -8
+ 	CFI_RESTORE rbx
+@@ -265,6 +273,8 @@ ENTRY(copy_user_generic)
+ 	addl %ecx,%edx
+ 	/* edx: bytes to zero, rdi: dest, eax:zero */
+ .Lzero_rest:
++	cmpl $0,(%rsp)
++	jz   .Le_zero
+ 	movq %rdx,%rcx
+ .Le_byte:
+ 	xorl %eax,%eax
+@@ -286,6 +296,7 @@ ENDPROC(copy_user_generic)
+  /* rdi	destination
+   * rsi source
+   * rdx count
++  * ecx zero flag
+   *
+   * Output:
+   * eax uncopied bytes or 0 if successfull.
+@@ -296,25 +307,48 @@ ENDPROC(copy_user_generic)
+   * And more would be dangerous because both Intel and AMD have
+   * errata with rep movsq > 4GB. If someone feels the need to fix
+   * this please consider this.
+-   */
+-copy_user_generic_c:
++  */
++ENTRY(copy_user_generic_string)
+ 	CFI_STARTPROC
++	movl %ecx,%r8d		/* save zero flag */
+ 	movl %edx,%ecx
+ 	shrl $3,%ecx
+ 	andl $7,%edx	
++	jz   10f
+ 1:	rep 
+ 	movsq 
+ 	movl %edx,%ecx
+ 2:	rep
+ 	movsb
+-4:	movl %ecx,%eax
++9:	movl %ecx,%eax
+ 	ret
+-3:	lea (%rdx,%rcx,8),%rax
++
++	/* multiple of 8 byte */
++10:	rep
++	movsq
++	xor %eax,%eax
+ 	ret
++
++	/* exception handling */
++3:      lea (%rdx,%rcx,8),%rax	/* exception on quad loop */
++	jmp 6f
++5:	movl %ecx,%eax		/* exception on byte loop */
++	/* eax: left over bytes */
++6:	testl %r8d,%r8d		/* zero flag set? */
++	jz 7f
++	movl %eax,%ecx		/* initialize x86 loop counter */
++	push %rax
++	xorl %eax,%eax
++8:	rep
++	stosb 			/* zero the rest */
++11:	pop %rax
++7:	ret
+ 	CFI_ENDPROC
+ END(copy_user_generic_c)
+ 
+ 	.section __ex_table,"a"
+ 	.quad 1b,3b
+-	.quad 2b,4b
++	.quad 2b,5b
++	.quad 8b,11b
++	.quad 10b,3b
+ 	.previous
+diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
+index 1e1fa00..bc68120 100644
+--- a/include/asm-x86_64/uaccess.h
++++ b/include/asm-x86_64/uaccess.h
+@@ -238,6 +238,7 @@ do {									\
+ 
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ extern unsigned long copy_user_generic(void *to, const void *from, unsigned len); 
++extern unsigned long copy_user_generic_dontzero(void *to, const void *from, unsigned len);
+ 
+ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len); 
+ extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len); 
+@@ -303,7 +304,6 @@ static __always_inline int __copy_to_user(void __user *dst, const void *src, uns
+ 	}
+ }	
+ 
+-
+ static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ { 
+        int ret = 0;
+@@ -352,7 +352,7 @@ long strlen_user(const char __user *str);
+ unsigned long clear_user(void __user *mem, unsigned long len);
+ unsigned long __clear_user(void __user *mem, unsigned long len);
+ 
+-#define __copy_to_user_inatomic __copy_to_user
+-#define __copy_from_user_inatomic __copy_from_user
++extern long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
++#define __copy_to_user_inatomic copy_user_generic
+ 
+ #endif /* __X86_64_UACCESS_H */

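The behavioural point of the patch above is the new "zero flag": on a
faulting copy, copy_from_user() must zero the uncopied tail of the
kernel buffer so stale kernel memory never reaches the caller, while
__copy_from_user_inatomic() skips the zeroing because its callers retry.
A userspace model of those semantics — the simulated fault and the
function itself are illustrative, not kernel API:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Copy up to a simulated fault point; if zerorest is set, clear the
 * rest of the destination instead of leaving stale bytes behind. */
static size_t copy_with_zero_flag(char *to, const char *from, size_t len,
				  size_t fault_at, int zerorest)
{
	size_t copied = len < fault_at ? len : fault_at;

	memcpy(to, from, copied);
	if (zerorest)
		memset(to + copied, 0, len - copied);	/* no stale data */
	return len - copied;		/* uncopied byte count */
}

int main(void)
{
	char kbuf[8];
	size_t left;

	memset(kbuf, 0xAA, sizeof(kbuf));	/* stale "kernel" data */
	left = copy_with_zero_flag(kbuf, "abcdefgh", 8, 4, 1);
	printf("uncopied=%zu tail=%02x\n", left, (unsigned char)kbuf[7]);
	return 0;
}
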
Added: dists/etch/linux-2.6/debian/patches/bugfix/check-privileges-before-setting-mount-propagation.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/check-privileges-before-setting-mount-propagation.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,28 @@
+commit ee6f958291e2a768fd727e7a67badfff0b67711a
+Author: Miklos Szeredi <mszeredi at suse.cz>
+Date:   Tue May 8 00:30:40 2007 -0700
+
+    check privileges before setting mount propagation
+    
+    There's a missing check for CAP_SYS_ADMIN in do_change_type().
+    
+    Signed-off-by: Miklos Szeredi <mszeredi at suse.cz>
+    Cc: Al Viro <viro at zeniv.linux.org.uk>
+    Cc: Christoph Hellwig <hch at lst.de>
+    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 72bb106..b696e3a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -886,6 +886,9 @@ static int do_change_type(struct nameidata *nd, int flag)
+ 	int recurse = flag & MS_REC;
+ 	int type = flag & ~MS_REC;
+ 
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
+ 	if (nd->dentry != nd->mnt->mnt_root)
+ 		return -EINVAL;
+ 

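The fix above is a three-line guard. The shape of the pattern, with a
boolean standing in for the kernel's capable(CAP_SYS_ADMIN) — an
intentional simplification for illustration:

#include <errno.h>
#include <stdio.h>

static int capable_sys_admin;		/* stand-in for capable() */

static int do_change_type_model(int is_mnt_root)
{
	if (!capable_sys_admin)		/* the missing CAP_SYS_ADMIN check */
		return -EPERM;
	if (!is_mnt_root)
		return -EINVAL;
	/* ... flip MS_SHARED/MS_PRIVATE/MS_SLAVE propagation here ... */
	return 0;
}

int main(void)
{
	printf("unprivileged: %d\n", do_change_type_model(1));	/* -EPERM */
	capable_sys_admin = 1;
	printf("privileged:   %d\n", do_change_type_model(1));	/* 0 */
	return 0;
}
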
Added: dists/etch/linux-2.6/debian/patches/bugfix/esp-iv-in-linear-part-of-skb.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/esp-iv-in-linear-part-of-skb.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,48 @@
+From: Thomas Graf <tgraf at suug.ch>
+Date: Thu, 27 Mar 2008 23:08:03 +0000 (-0700)
+Subject: (CVE-2007-6282) [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due ...
+X-Git-Url: http://kernel.ubuntu.com/git?p=ubuntu-security%2Fubuntu-dapper.git;a=commitdiff_plain;h=3f83e11fbd494f5e40db1a7bbd2b780118b712a1
+
+(CVE-2007-6282) [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access
+
+[linux-2.6: 920fc941a9617f95ccb283037fe6f8a38d95bb69]
+
+ESP does not account for the IV size when calling pskb_may_pull() to
+ensure everything it accesses directly is within the linear part of a
+potential fragment. This results in a BUG() being triggered when the
+both the IPv4 and IPv6 ESP stack is fed with an skb where the first
+fragment ends between the end of the esp header and the end of the IV.
+
+This bug was found by Dirk Nehring <dnehring at gmx.net> .
+
+Signed-off-by: Thomas Graf <tgraf at suug.ch>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Kees Cook <kees.cook at canonical.com>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.18.orig/net/ipv4/esp4.c linux-source-2.6.18/net/ipv4/esp4.c
+--- linux-source-2.6.18.orig/net/ipv4/esp4.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/esp4.c	2008-06-24 22:47:45.000000000 -0600
+@@ -147,7 +147,7 @@ static int esp_input(struct xfrm_state *
+ 	struct scatterlist *sg;
+ 	int padlen;
+ 
+-	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
++	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen))
+ 		goto out;
+ 
+ 	if (elen <= 0 || (elen & (blksize-1)))
+diff -urpN linux-source-2.6.18.orig/net/ipv6/esp6.c linux-source-2.6.18/net/ipv6/esp6.c
+--- linux-source-2.6.18.orig/net/ipv6/esp6.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv6/esp6.c	2008-06-24 22:47:45.000000000 -0600
+@@ -143,7 +143,7 @@ static int esp6_input(struct xfrm_state 
+ 	int nfrags;
+ 	int ret = 0;
+ 
+-	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
++	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}

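The one-line change in each hunk above extends the pskb_may_pull() bound
from the ESP header alone to header plus IV. In plain C the invariant
looks like the sketch below, where an explicit length check stands in
for pskb_may_pull() and the header layout is abbreviated — illustrative
only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Abbreviated ESP header: SPI and sequence number, then the IV. */
struct esp_hdr {
	uint32_t spi;
	uint32_t seq;
};

/* Refuse to parse unless header *and* IV are within the bytes present,
 * which is the invariant the patch restores for fragmented packets. */
static int parse_esp(const uint8_t *pkt, size_t linear_len, size_t ivlen)
{
	struct esp_hdr esp;

	if (linear_len < sizeof(esp) + ivlen)
		return -1;	/* first fragment ends inside the IV */
	memcpy(&esp, pkt, sizeof(esp));
	printf("spi=%u iv[0]=%u\n", esp.spi, pkt[sizeof(esp)]);
	return 0;
}

int main(void)
{
	uint8_t pkt[24] = { 0 };

	printf("full:  %d\n", parse_esp(pkt, sizeof(pkt), 8));
	printf("short: %d\n", parse_esp(pkt, 10, 8));
	return 0;
}
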
Added: dists/etch/linux-2.6/debian/patches/bugfix/sctp-make-sure-n-sizeof-does-not-overflow.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/sctp-make-sure-n-sizeof-does-not-overflow.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,29 @@
+commit 735ce972fbc8a65fb17788debd7bbe7b4383cc62
+Author: David S. Miller <davem at davemloft.net>
+Date:   Fri Jun 20 22:04:34 2008 -0700
+
+    sctp: Make sure N * sizeof(union sctp_addr) does not overflow.
+    
+    As noticed by Gabriel Campana, the kmalloc() length arg
+    passed in by sctp_getsockopt_local_addrs_old() can overflow
+    if ->addr_num is large enough.
+    
+    Therefore, enforce an appropriate limit.
+    
+    Signed-off-by: David S. Miller <davem at davemloft.net>
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e7e3baf..0dbcde6 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4401,7 +4401,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
+ 	if (copy_from_user(&getaddrs, optval, len))
+ 		return -EFAULT;
+ 
+-	if (getaddrs.addr_num <= 0) return -EINVAL;
++	if (getaddrs.addr_num <= 0 ||
++	    getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
++		return -EINVAL;
+ 	/*
+ 	 *  For UDP-style sockets, id specifies the association to query.
+ 	 *  If the id field is set to the value '0' then the locally bound

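The arithmetic bug fixed above: getaddrs.addr_num is multiplied by
sizeof(union sctp_addr) to size a kmalloc(), so a large count wraps the
product and yields an undersized buffer. A self-contained rendering of
the bound the patch enforces (the struct size here is invented for the
example):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for union sctp_addr; only its size matters here. */
struct addr {
	char storage[28];
};

/* Reject the count before multiplying — the same INT_MAX/sizeof bound
 * the patch adds — so count * sizeof(struct addr) cannot wrap. */
static void *alloc_addrs(int addr_num)
{
	if (addr_num <= 0 ||
	    addr_num >= (int)(INT_MAX / sizeof(struct addr)))
		return NULL;			/* -EINVAL in the kernel */
	return malloc((size_t)addr_num * sizeof(struct addr));
}

int main(void)
{
	void *p = alloc_addrs(16);

	printf("sane:     %s\n", p ? "ok" : "rejected");
	printf("overflow: %s\n", alloc_addrs(INT_MAX / 4) ? "ok" : "rejected");
	free(p);
	return 0;
}
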
Added: dists/etch/linux-2.6/debian/patches/bugfix/tty-fix-for-tty-operations-bugs.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/tty-fix-for-tty-operations-bugs.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,183 @@
+From alan at lxorguk.ukuu.org.uk Fri Jun 27 07:39:26 2008
+From: Alan Cox <alan at lxorguk.ukuu.org.uk>
+Date: Fri, 27 Jun 2008 15:21:55 +0100
+Subject: TTY: fix for tty operations bugs
+To: greg at kroah.com
+Message-ID: <20080627152155.50f0ebae at lxorguk.ukuu.org.uk>
+
+From: Alan Cox <alan at lxorguk.ukuu.org.uk>
+
+This is fixed with the recent tty operations rewrite in mainline in a
+different way, this is a selective backport of the relevant portions to
+the -stable tree.
+
+Signed-off-by: Alan Cox <alan at redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.18.orig/drivers/net/hamradio/6pack.c linux-source-2.6.18/drivers/net/hamradio/6pack.c
+--- linux-source-2.6.18.orig/drivers/net/hamradio/6pack.c	2008-06-16 16:25:20.000000000 -0600
++++ linux-source-2.6.18/drivers/net/hamradio/6pack.c	2008-07-02 02:45:08.000000000 -0600
+@@ -601,6 +601,8 @@ static int sixpack_open(struct tty_struc
+ 
+ 	if (!capable(CAP_NET_ADMIN))
+ 		return -EPERM;
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
+ 
+ 	dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
+ 	if (!dev) {
+diff -urpN linux-source-2.6.18.orig/drivers/net/hamradio/mkiss.c linux-source-2.6.18/drivers/net/hamradio/mkiss.c
+--- linux-source-2.6.18.orig/drivers/net/hamradio/mkiss.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/hamradio/mkiss.c	2008-07-02 02:45:08.000000000 -0600
+@@ -530,6 +530,7 @@ static void ax_encaps(struct net_device 
+ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct mkiss *ax = netdev_priv(dev);
++	int cib = 0;
+ 
+ 	if (!netif_running(dev))  {
+ 		printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
+@@ -545,10 +546,11 @@ static int ax_xmit(struct sk_buff *skb, 
+ 			/* 20 sec timeout not reached */
+ 			return 1;
+ 		}
++		if (ax->tty->driver->chars_in_buffer)
++			cib = ax->tty->driver->chars_in_buffer(ax->tty);
+ 
+ 		printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
+-		       (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ?
+-		       "bad line quality" : "driver error");
++		     cib || ax->xleft ? "bad line quality" : "driver error");
+ 
+ 		ax->xleft = 0;
+ 		clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
+@@ -736,6 +738,8 @@ static int mkiss_open(struct tty_struct 
+ 
+ 	if (!capable(CAP_NET_ADMIN))
+ 		return -EPERM;
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
+ 
+ 	dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
+ 	if (!dev) {
+diff -urpN linux-source-2.6.18.orig/drivers/net/irda/irtty-sir.c linux-source-2.6.18/drivers/net/irda/irtty-sir.c
+--- linux-source-2.6.18.orig/drivers/net/irda/irtty-sir.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/irda/irtty-sir.c	2008-07-02 02:45:08.000000000 -0600
+@@ -64,7 +64,9 @@ static int irtty_chars_in_buffer(struct 
+ 	IRDA_ASSERT(priv != NULL, return -1;);
+ 	IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+ 
+-	return priv->tty->driver->chars_in_buffer(priv->tty);
++	if (priv->tty->driver->chars_in_buffer)
++		return priv->tty->driver->chars_in_buffer(priv->tty);
++	return 0;
+ }
+ 
+ /* Wait (sleep) until underlaying hardware finished transmission
+diff -urpN linux-source-2.6.18.orig/drivers/net/ppp_async.c linux-source-2.6.18/drivers/net/ppp_async.c
+--- linux-source-2.6.18.orig/drivers/net/ppp_async.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/ppp_async.c	2008-07-02 02:45:08.000000000 -0600
+@@ -158,6 +158,9 @@ ppp_asynctty_open(struct tty_struct *tty
+ 	struct asyncppp *ap;
+ 	int err;
+ 
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
++
+ 	err = -ENOMEM;
+ 	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ 	if (ap == 0)
+diff -urpN linux-source-2.6.18.orig/drivers/net/ppp_synctty.c linux-source-2.6.18/drivers/net/ppp_synctty.c
+--- linux-source-2.6.18.orig/drivers/net/ppp_synctty.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/ppp_synctty.c	2008-07-02 02:49:36.000000000 -0600
+@@ -207,6 +207,9 @@ ppp_sync_open(struct tty_struct *tty)
+ 	struct syncppp *ap;
+ 	int err;
+ 
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
++
+ 	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (ap == 0)
+diff -urpN linux-source-2.6.18.orig/drivers/net/slip.c linux-source-2.6.18/drivers/net/slip.c
+--- linux-source-2.6.18.orig/drivers/net/slip.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/slip.c	2008-07-02 02:48:57.000000000 -0600
+@@ -463,9 +463,14 @@ static void sl_tx_timeout(struct net_dev
+ 			/* 20 sec timeout not reached */
+ 			goto out;
+ 		}
+-		printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+-		       (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+-		       "bad line quality" : "driver error");
++		{
++			int cib = 0;
++			if (sl->tty->driver->chars_in_buffer)
++				cib = sl->tty->driver->chars_in_buffer(sl->tty);
++			printk(KERN_WARNING "%s: transmit timed out, %s?\n",
++				dev->name, (cib || sl->xleft) ?
++				       "bad line quality" : "driver error");
++		}
+ 		sl->xleft = 0;
+ 		sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ 		sl_unlock(sl);
+@@ -836,6 +841,8 @@ static int slip_open(struct tty_struct *
+ 
+ 	if(!capable(CAP_NET_ADMIN))
+ 		return -EPERM;
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
+ 		
+ 	/* RTnetlink lock is misused here to serialize concurrent
+ 	   opens of slip channels. There are better ways, but it is
+diff -urpN linux-source-2.6.18.orig/drivers/net/wan/x25_asy.c linux-source-2.6.18/drivers/net/wan/x25_asy.c
+--- linux-source-2.6.18.orig/drivers/net/wan/x25_asy.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/wan/x25_asy.c	2008-07-02 02:45:08.000000000 -0600
+@@ -283,6 +283,10 @@ static void x25_asy_write_wakeup(struct 
+ static void x25_asy_timeout(struct net_device *dev)
+ {
+ 	struct x25_asy *sl = (struct x25_asy*)(dev->priv);
++	int cib = 0;
++
++	if (sl->tty->driver->chars_in_buffer)
++		cib = sl->tty->driver->chars_in_buffer(sl->tty);
+ 
+ 	spin_lock(&sl->lock);
+ 	if (netif_queue_stopped(dev)) {
+@@ -290,8 +294,7 @@ static void x25_asy_timeout(struct net_d
+ 		 *      14 Oct 1994 Dmitry Gorodchanin.
+ 		 */
+ 		printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+-		       (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+-		       "bad line quality" : "driver error");
++		       (cib || sl->xleft) ? "bad line quality" : "driver error");
+ 		sl->xleft = 0;
+ 		sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ 		x25_asy_unlock(sl);
+@@ -561,6 +564,9 @@ static int x25_asy_open_tty(struct tty_s
+ 		return -EEXIST;
+ 	}
+ 
++	if (!tty->driver->write)
++		return -EOPNOTSUPP;
++
+ 	/* OK.  Find a free X.25 channel to use. */
+ 	if ((sl = x25_asy_alloc()) == NULL) {
+ 		return -ENFILE;
+diff -urpN linux-source-2.6.18.orig/drivers/net/wireless/strip.c linux-source-2.6.18/drivers/net/wireless/strip.c
+--- linux-source-2.6.18.orig/drivers/net/wireless/strip.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/wireless/strip.c	2008-07-02 02:45:08.000000000 -0600
+@@ -801,7 +801,8 @@ static void set_baud(struct tty_struct *
+ 	struct termios old_termios = *(tty->termios);
+ 	tty->termios->c_cflag &= ~CBAUD;	/* Clear the old baud setting */
+ 	tty->termios->c_cflag |= baudcode;	/* Set the new baud setting */
+-	tty->driver->set_termios(tty, &old_termios);
++	if (tty->driver->set_termios)
++		tty->driver->set_termios(tty, &old_termios);
+ }
+ 
+ /*

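Every hunk above has the same shape: a line discipline was calling
tty->driver->write, ->chars_in_buffer or ->set_termios without checking
that the underlying driver actually provides them. The pattern, reduced
to a toy ops table (names are illustrative, not struct tty_driver):

#include <stddef.h>
#include <stdio.h>

/* Toy ops table with optional members, like the tty driver methods. */
struct ops {
	int (*write)(const char *buf, int len);
	int (*chars_in_buffer)(void);
};

/* Attach refuses drivers that cannot transmit at all ... */
static int attach(const struct ops *o)
{
	if (!o->write)
		return -1;	/* -EOPNOTSUPP in the patches above */
	return 0;
}

/* ... while a missing query method just degrades to a default. */
static int pending(const struct ops *o)
{
	return o->chars_in_buffer ? o->chars_in_buffer() : 0;
}

int main(void)
{
	struct ops bare = { NULL, NULL };	/* provides neither method */

	printf("attach=%d pending=%d\n", attach(&bare), pending(&bare));
	return 0;
}
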
Added: dists/etch/linux-2.6/debian/patches/bugfix/x86-add-copy_user_handle_tail.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/x86-add-copy_user_handle_tail.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,56 @@
+commit 1129585a08baf58582c0da91e572cb29e3179acf
+Author: Vitaly Mayatskikh <v.mayatskih at gmail.com>
+Date:   Wed Jul 2 15:48:21 2008 +0200
+
+    x86: introduce copy_user_handle_tail() routine
+    
+    Introduce generic C routine for handling necessary tail operations after
+    protection fault in copy_*_user on x86.
+    
+    Signed-off-by: Vitaly Mayatskikh <v.mayatskih at gmail.com>
+    Acked-by: Linus Torvalds <torvalds at linux-foundation.org>
+    Signed-off-by: Ingo Molnar <mingo at elte.hu>
+
+Backported to Debian's 2.6.18 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/lib/usercopy.c linux-source-2.6.18/arch/x86_64/lib/usercopy.c
+--- linux-source-2.6.18.orig/arch/x86_64/lib/usercopy.c	2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/lib/usercopy.c	2008-07-16 02:39:08.000000000 -0600
+@@ -164,3 +164,26 @@ unsigned long copy_in_user(void __user *
+ }
+ EXPORT_SYMBOL(copy_in_user);
+ 
++/*
++ * Try to copy last bytes and clear the rest if needed.
++ * Since protection fault in copy_from/to_user is not a normal situation,
++ * it is not necessary to optimize tail handling.
++ */
++unsigned long
++copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
++{
++	char c;
++	unsigned zero_len;
++
++	for (; len; --len) {
++		if (__get_user_nocheck(c, from++, sizeof(char)))
++			break;
++		if (__put_user_nocheck(c, to++, sizeof(char)))
++			break;
++	}
++
++	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
++		if (__put_user_nocheck(c, to++, sizeof(char)))
++			break;
++	return len;
++}
+diff -urpN linux-source-2.6.18.orig/include/asm-x86_64/uaccess.h linux-source-2.6.18/include/asm-x86_64/uaccess.h
+--- linux-source-2.6.18.orig/include/asm-x86_64/uaccess.h	2008-07-16 00:01:24.000000000 -0600
++++ linux-source-2.6.18/include/asm-x86_64/uaccess.h	2008-07-16 02:39:15.000000000 -0600
+@@ -355,4 +355,7 @@ unsigned long __clear_user(void __user *
+ extern long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+ #define __copy_to_user_inatomic copy_user_generic
+ 
++unsigned long
++copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++
+ #endif /* __X86_64_UACCESS_H */

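Since copy_user_handle_tail() above is plain C, its contract is easy to
exercise outside the kernel: copy byte-by-byte until the first fault,
optionally zero whatever is left, and return the uncopied count. In the
harness below a counter simulates the protection fault and ordinary
loads/stores replace __get_user_nocheck()/__put_user_nocheck() — an
assumption made purely so the sketch runs in userspace:

#include <stdio.h>
#include <string.h>

/* Userspace rendering of the tail handler's logic. */
static unsigned long handle_tail(char *to, const char *from, unsigned len,
				 unsigned zerorest, unsigned fault_after)
{
	unsigned copied = 0;
	unsigned zero_len;

	for (; len; --len) {
		if (copied == fault_after)	/* simulated fault */
			break;
		*to++ = *from++;
		copied++;
	}
	for (zero_len = len; zerorest && zero_len; --zero_len)
		*to++ = 0;		/* clear what could not be copied */
	return len;			/* uncopied bytes, like the original */
}

int main(void)
{
	char dst[8];
	unsigned long left;

	memset(dst, 'X', sizeof(dst));
	left = handle_tail(dst, "abcdefgh", 8, 1, 3);
	printf("left=%lu first=%c last=%d\n", left, dst[0], dst[7]);
	return 0;
}
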
Added: dists/etch/linux-2.6/debian/patches/bugfix/x86-fix-copy_user.patch
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/bugfix/x86-fix-copy_user.patch	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,537 @@
+commit ad2fc2cd925300b8127cf682f5a1c7511ae9dd27
+Author: Vitaly Mayatskikh <v.mayatskih at gmail.com>
+Date:   Wed Jul 2 15:53:13 2008 +0200
+
+    x86: fix copy_user on x86
+    
+    Switch copy_user_generic_string(), copy_user_generic_unrolled() and
+    __copy_user_nocache() from custom tail handlers to generic
+    copy_user_tail_handle().
+    
+    Signed-off-by: Vitaly Mayatskikh <v.mayatskih at gmail.com>
+    Acked-by: Linus Torvalds <torvalds at linux-foundation.org>
+    Signed-off-by: Ingo Molnar <mingo at elte.hu>
+
+Backported to Debian's 2.6.18 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S linux-source-2.6.18/arch/x86_64/lib/copy_user.S
+--- linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S	2008-07-15 23:01:24.000000000 -0700
++++ linux-source-2.6.18/arch/x86_64/lib/copy_user.S	2008-07-15 23:33:23.000000000 -0700
+@@ -1,8 +1,10 @@
+-/* Copyright 2002 Andi Kleen, SuSE Labs.
++/*
++ * Copyright 2008 Vitaly Mayatskikh <vmayatsk at redhat.com>
++ * Copyright 2002 Andi Kleen, SuSE Labs.
+  * Subject to the GNU Public License v2.
+- * 
+- * Functions to copy from and to user space.		
+- */		 
++ *
++ * Functions to copy from and to user space.
++ */
+ 
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+@@ -20,60 +22,88 @@
+ 	.long \orig-1f	/* by default jump to orig */
+ 1:
+ 	.section .altinstr_replacement,"ax"
+-2:	.byte 0xe9	             /* near jump with 32bit immediate */
++2:	.byte 0xe9			/* near jump with 32bit immediate */
+ 	.long \alt-1b /* offset */   /* or alternatively to alt */
+ 	.previous
+ 	.section .altinstructions,"a"
+ 	.align 8
+ 	.quad  0b
+ 	.quad  2b
+-	.byte  \feature		     /* when feature is set */
++	.byte  \feature			/* when feature is set */
+ 	.byte  5
+ 	.byte  5
+ 	.previous
+ 	.endm
+ 
+-/* Standard copy_to_user with segment limit checking */		
++	.macro ALIGN_DESTINATION
++#ifdef FIX_ALIGNMENT
++	/* check for bad alignment of destination */
++	movl %edi,%ecx
++	andl $7,%ecx
++	jz 102f				/* already aligned */
++	subl $8,%ecx
++	negl %ecx
++	subl %ecx,%edx
++100:	movb (%rsi),%al
++101:	movb %al,(%rdi)
++	incq %rsi
++	incq %rdi
++	decl %ecx
++	jnz 100b
++102:
++	.section .fixup,"ax"
++103:	addl %r8d,%edx			/* ecx is zerorest also */
++	jmp copy_user_handle_tail
++	.previous
++
++	.section __ex_table,"a"
++	.align 8
++	.quad 100b,103b
++	.quad 101b,103b
++	.previous
++#endif
++	.endm
++
++/* Standard copy_to_user with segment limit checking */
+ ENTRY(copy_to_user)
+ 	CFI_STARTPROC
+ 	GET_THREAD_INFO(%rax)
+ 	movq %rdi,%rcx
+ 	addq %rdx,%rcx
+-	jc  bad_to_user
+-	cmpq threadinfo_addr_limit(%rax),%rcx
++	jc bad_to_user
++	cmpq threadinfo_addr_limit(%rax),%rcx
+ 	jae bad_to_user
+-	xorl %eax,%eax	/* clear zero flag */
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+ 
+-ENTRY(copy_user_generic)
++/* Standard copy_from_user with segment limit checking */
++ENTRY(copy_from_user)
+ 	CFI_STARTPROC
+-	movl $1,%ecx	/* set zero flag */
++	GET_THREAD_INFO(%rax)
++	movq %rsi,%rcx
++	addq %rdx,%rcx
++	jc bad_from_user
++	cmpq threadinfo_addr_limit(%rax),%rcx
++	jae bad_from_user
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
++ENDPROC(copy_from_user)
+ 
+-ENTRY(__copy_from_user_inatomic)
++ENTRY(copy_user_generic)
+ 	CFI_STARTPROC
+-	xorl %ecx,%ecx	/* clear zero flag */
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
++ENDPROC(copy_user_generic)
+ 
+-/* Standard copy_from_user with segment limit checking */	
+-ENTRY(copy_from_user)
++ENTRY(__copy_from_user_inatomic)
+ 	CFI_STARTPROC
+-	GET_THREAD_INFO(%rax)
+-	movq %rsi,%rcx
+-	addq %rdx,%rcx
+-	jc  bad_from_user
+-	cmpq threadinfo_addr_limit(%rax),%rcx
+-	jae  bad_from_user
+-	movl $1,%ecx	/* set zero flag */
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+-ENDPROC(copy_from_user)
+-	
++ENDPROC(__copy_from_user_inatomic)
++
+ 	.section .fixup,"ax"
+ 	/* must zero dest */
++ENTRY(bad_from_user)
+ bad_from_user:
+ 	CFI_STARTPROC
+ 	movl %edx,%ecx
+@@ -81,274 +111,158 @@ bad_from_user:
+ 	rep
+ 	stosb
+ bad_to_user:
+-	movl	%edx,%eax
++	movl %edx,%eax
+ 	ret
+ 	CFI_ENDPROC
+-END(bad_from_user)
++ENDPROC(bad_from_user)
+ 	.previous
+-	
+-		
++
+ /*
+  * copy_user_generic_unrolled - memory copy with exception handling.
+- * This version is for CPUs like P4 that don't have efficient micro code for rep movsq
+- * 	
+- * Input:	
++ * This version is for CPUs like P4 that don't have efficient micro
++ * code for rep movsq
++ *
++ * Input:
+  * rdi destination
+  * rsi source
+  * rdx count
+- * ecx zero flag -- if true zero destination on error
+  *
+- * Output:		
+- * eax uncopied bytes or 0 if successful.
++ * Output:
++ * eax uncopied bytes or 0 if successfull.
+  */
+ ENTRY(copy_user_generic_unrolled)
+ 	CFI_STARTPROC
+-	pushq %rbx
+-	CFI_ADJUST_CFA_OFFSET 8
+-	CFI_REL_OFFSET rbx, 0
+-	pushq %rcx
+-	CFI_ADJUST_CFA_OFFSET 8
+-	CFI_REL_OFFSET rcx, 0
+-	xorl %eax,%eax		/*zero for the exception handler */
+-
+-#ifdef FIX_ALIGNMENT
+-	/* check for bad alignment of destination */
+-	movl %edi,%ecx
+-	andl $7,%ecx
+-	jnz  .Lbad_alignment
+-.Lafter_bad_alignment:
+-#endif
+-
+-	movq %rdx,%rcx
+-
+-	movl $64,%ebx
+-	shrq $6,%rdx
+-	decq %rdx
+-	js   .Lhandle_tail
+-
+-	.p2align 4
+-.Lloop:
+-.Ls1:	movq (%rsi),%r11
+-.Ls2:	movq 1*8(%rsi),%r8
+-.Ls3:	movq 2*8(%rsi),%r9
+-.Ls4:	movq 3*8(%rsi),%r10
+-.Ld1:	movq %r11,(%rdi)
+-.Ld2:	movq %r8,1*8(%rdi)
+-.Ld3:	movq %r9,2*8(%rdi)
+-.Ld4:	movq %r10,3*8(%rdi)
+-
+-.Ls5:	movq 4*8(%rsi),%r11
+-.Ls6:	movq 5*8(%rsi),%r8
+-.Ls7:	movq 6*8(%rsi),%r9
+-.Ls8:	movq 7*8(%rsi),%r10
+-.Ld5:	movq %r11,4*8(%rdi)
+-.Ld6:	movq %r8,5*8(%rdi)
+-.Ld7:	movq %r9,6*8(%rdi)
+-.Ld8:	movq %r10,7*8(%rdi)
+-
+-	decq %rdx
+-
++	cmpl $8,%edx
++	jb 20f		/* less then 8 bytes, go to byte copy loop */
++	ALIGN_DESTINATION
++	movl %edx,%ecx
++	andl $63,%edx
++	shrl $6,%ecx
++	jz 17f
++1:	movq (%rsi),%r8
++2:	movq 1*8(%rsi),%r9
++3:	movq 2*8(%rsi),%r10
++4:	movq 3*8(%rsi),%r11
++5:	movq %r8,(%rdi)
++6:	movq %r9,1*8(%rdi)
++7:	movq %r10,2*8(%rdi)
++8:	movq %r11,3*8(%rdi)
++9:	movq 4*8(%rsi),%r8
++10:	movq 5*8(%rsi),%r9
++11:	movq 6*8(%rsi),%r10
++12:	movq 7*8(%rsi),%r11
++13:	movq %r8,4*8(%rdi)
++14:	movq %r9,5*8(%rdi)
++15:	movq %r10,6*8(%rdi)
++16:	movq %r11,7*8(%rdi)
+ 	leaq 64(%rsi),%rsi
+ 	leaq 64(%rdi),%rdi
+-
+-	jns  .Lloop
+-
+-	.p2align 4
+-.Lhandle_tail:
+-	movl %ecx,%edx
+-	andl $63,%ecx
+-	shrl $3,%ecx
+-	jz   .Lhandle_7
+-	movl $8,%ebx
+-	.p2align 4
+-.Lloop_8:
+-.Ls9:	movq (%rsi),%r8
+-.Ld9:	movq %r8,(%rdi)
+ 	decl %ecx
+-	leaq 8(%rdi),%rdi
++	jnz 1b
++17:	movl %edx,%ecx
++	andl $7,%edx
++	shrl $3,%ecx
++	jz 20f
++18:	movq (%rsi),%r8
++19:	movq %r8,(%rdi)
+ 	leaq 8(%rsi),%rsi
+-	jnz .Lloop_8
+-
+-.Lhandle_7:
++	leaq 8(%rdi),%rdi
++	decl %ecx
++	jnz 18b
++20:	andl %edx,%edx
++	jz 23f
+ 	movl %edx,%ecx
+-	andl $7,%ecx
+-	jz   .Lende
+-	.p2align 4
+-.Lloop_1:
+-.Ls10:	movb (%rsi),%bl
+-.Ld10:	movb %bl,(%rdi)
+-	incq %rdi
++21:	movb (%rsi),%al
++22:	movb %al,(%rdi)
+ 	incq %rsi
++	incq %rdi
+ 	decl %ecx
+-	jnz .Lloop_1
+-
+-	CFI_REMEMBER_STATE
+-.Lende:
+-	popq %rcx
+-	CFI_ADJUST_CFA_OFFSET -8
+-	CFI_RESTORE rcx
+-	popq %rbx
+-	CFI_ADJUST_CFA_OFFSET -8
+-	CFI_RESTORE rbx
++	jnz 21b
++23:	xor %eax,%eax
+ 	ret
+-	CFI_RESTORE_STATE
+ 
+-#ifdef FIX_ALIGNMENT
+-	/* align destination */
+-	.p2align 4
+-.Lbad_alignment:
+-	movl $8,%r9d
+-	subl %ecx,%r9d
+-	movl %r9d,%ecx
+-	cmpq %r9,%rdx
+-	jz   .Lhandle_7
+-	js   .Lhandle_7
+-.Lalign_1:
+-.Ls11:	movb (%rsi),%bl
+-.Ld11:	movb %bl,(%rdi)
+-	incq %rsi
+-	incq %rdi
+-	decl %ecx
+-	jnz .Lalign_1
+-	subq %r9,%rdx
+-	jmp .Lafter_bad_alignment
+-#endif
++	.section .fixup,"ax"
++30:	shll $6,%ecx
++	addl %ecx,%edx
++	jmp 60f
++40:	lea (%rdx,%rcx,8),%rdx
++	jmp 60f
++50:	movl %ecx,%edx
++60:	jmp copy_user_handle_tail /* ecx is zerorest also */
++	.previous
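These fixup entries turn the loop counters at fault time back into a
byte count: a fault in the 64-byte loop (labels 1-16) leaves %ecx
holding the blocks not yet completed and %edx the sub-block tail, and
the quadword (18/19) and byte (21/22) loops are analogous. The count is
pessimistic, since a partially copied block is counted whole, but that
is safe because copy_user_handle_tail (sketched after the patch series
list below) re-copies byte by byte; nonzero %ecx also doubles as its
zerorest argument. The arithmetic, as illustrative C:

	/* 30: shll $6,%ecx; addl %ecx,%edx -- fault in labels 1-16 */
	unsigned rem_unrolled(unsigned blocks_left, unsigned tail)
	{
		return (blocks_left << 6) + tail;
	}

	/* 40: lea (%rdx,%rcx,8),%rdx -- fault in labels 18/19 */
	unsigned rem_quad(unsigned tail, unsigned quads_left)
	{
		return tail + quads_left * 8;
	}

	/* 50: movl %ecx,%edx -- fault in labels 21/22 */
	unsigned rem_byte(unsigned bytes_left)
	{
		return bytes_left;
	}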
+ 
+-	/* table sorted by exception address */
+ 	.section __ex_table,"a"
+ 	.align 8
+-	.quad .Ls1,.Ls1e
+-	.quad .Ls2,.Ls2e
+-	.quad .Ls3,.Ls3e
+-	.quad .Ls4,.Ls4e
+-	.quad .Ld1,.Ls1e
+-	.quad .Ld2,.Ls2e
+-	.quad .Ld3,.Ls3e
+-	.quad .Ld4,.Ls4e
+-	.quad .Ls5,.Ls5e
+-	.quad .Ls6,.Ls6e
+-	.quad .Ls7,.Ls7e
+-	.quad .Ls8,.Ls8e
+-	.quad .Ld5,.Ls5e
+-	.quad .Ld6,.Ls6e
+-	.quad .Ld7,.Ls7e
+-	.quad .Ld8,.Ls8e
+-	.quad .Ls9,.Le_quad
+-	.quad .Ld9,.Le_quad
+-	.quad .Ls10,.Le_byte
+-	.quad .Ld10,.Le_byte
+-#ifdef FIX_ALIGNMENT
+-	.quad .Ls11,.Lzero_rest
+-	.quad .Ld11,.Lzero_rest
+-#endif
+-	.quad .Le5,.Le_zero
++	.quad 1b,30b
++	.quad 2b,30b
++	.quad 3b,30b
++	.quad 4b,30b
++	.quad 5b,30b
++	.quad 6b,30b
++	.quad 7b,30b
++	.quad 8b,30b
++	.quad 9b,30b
++	.quad 10b,30b
++	.quad 11b,30b
++	.quad 12b,30b
++	.quad 13b,30b
++	.quad 14b,30b
++	.quad 15b,30b
++	.quad 16b,30b
++	.quad 18b,40b
++	.quad 19b,40b
++	.quad 21b,50b
++	.quad 22b,50b
+ 	.previous
+-
+-	/* compute 64-offset for main loop. 8 bytes accuracy with error on the
+-	   pessimistic side. this is gross. it would be better to fix the
+-	   interface. */
+-	/* eax: zero, ebx: 64 */
+-.Ls1e: 	addl $8,%eax
+-.Ls2e: 	addl $8,%eax
+-.Ls3e: 	addl $8,%eax
+-.Ls4e: 	addl $8,%eax
+-.Ls5e: 	addl $8,%eax
+-.Ls6e: 	addl $8,%eax
+-.Ls7e: 	addl $8,%eax
+-.Ls8e: 	addl $8,%eax
+-	addq %rbx,%rdi	/* +64 */
+-	subq %rax,%rdi  /* correct destination with computed offset */
+-
+-	shlq $6,%rdx	/* loop counter * 64 (stride length) */
+-	addq %rax,%rdx	/* add offset to loopcnt */
+-	andl $63,%ecx	/* remaining bytes */
+-	addq %rcx,%rdx	/* add them */
+-	jmp .Lzero_rest
+-
+-	/* exception on quad word loop in tail handling */
+-	/* ecx:	loopcnt/8, %edx: length, rdi: correct */
+-.Le_quad:
+-	shll $3,%ecx
+-	andl $7,%edx
+-	addl %ecx,%edx
+-	/* edx: bytes to zero, rdi: dest, eax:zero */
+-.Lzero_rest:
+-	cmpl $0,(%rsp)
+-	jz   .Le_zero
+-	movq %rdx,%rcx
+-.Le_byte:
+-	xorl %eax,%eax
+-.Le5:	rep
+-	stosb
+-	/* when there is another exception while zeroing the rest just return */
+-.Le_zero:
+-	movq %rdx,%rax
+-	jmp .Lende
+ 	CFI_ENDPROC
+-ENDPROC(copy_user_generic)
++ENDPROC(copy_user_generic_unrolled)
+ 
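Stripped of its fault labels, the rewritten unrolled copy is a plain
three-stage loop: align the destination, move 64-byte blocks as eight
quadword pairs, then mop up with quadword and byte loops. A C sketch
assuming no fault occurs (the function name is made up):

	/* Loop structure of copy_user_generic_unrolled, faults ignored. */
	static unsigned long copy_unrolled_sketch(char *dst, const char *src,
						  unsigned len)
	{
		unsigned i, blocks, quads;

		if (len >= 8) {
			/* ALIGN_DESTINATION: byte copy up to an
			 * 8-byte boundary of dst */
			while ((unsigned long)dst & 7) {
				*dst++ = *src++;
				len--;
			}
			blocks = len >> 6;		/* labels 1-16 */
			len &= 63;
			while (blocks--) {
				for (i = 0; i < 8; i++)	/* 8 movq pairs */
					((unsigned long *)dst)[i] =
						((const unsigned long *)src)[i];
				dst += 64;
				src += 64;
			}
			quads = len >> 3;		/* labels 18/19 */
			len &= 7;
			while (quads--) {
				*(unsigned long *)dst =
					*(const unsigned long *)src;
				dst += 8;
				src += 8;
			}
		}
		while (len--)				/* labels 21/22 */
			*dst++ = *src++;
		return 0;				/* %eax: 0 = success */
	}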
+-
+-	/* Some CPUs run faster using the string copy instructions.
+-	   This is also a lot simpler. Use them when possible.
+-	   Patch in jmps to this code instead of copying it fully
+-	   to avoid unwanted aliasing in the exception tables. */
+-
+- /* rdi	destination
+-  * rsi source
+-  * rdx count
+-  * ecx zero flag
+-  *
+-  * Output:
+-  * eax uncopied bytes or 0 if successfull.
+-  *
+-  * Only 4GB of copy is supported. This shouldn't be a problem
+-  * because the kernel normally only writes from/to page sized chunks
+-  * even if user space passed a longer buffer.
+-  * And more would be dangerous because both Intel and AMD have
+-  * errata with rep movsq > 4GB. If someone feels the need to fix
+-  * this please consider this.
+-  */
++/* Some CPUs run faster using the string copy instructions.
++ * This is also a lot simpler. Use them when possible.
++ *
++ * Only 4GB of copy is supported. This shouldn't be a problem
++ * because the kernel normally only writes from/to page sized chunks
++ * even if user space passed a longer buffer.
++ * And more would be dangerous because both Intel and AMD have
++ * errata with rep movsq > 4GB. Anyone who needs to lift this
++ * limit should take those errata into account.
++ *
++ * Input:
++ * rdi destination
++ * rsi source
++ * rdx count
++ *
++ * Output:
++ * eax uncopied bytes or 0 if successful.
++ */
+ ENTRY(copy_user_generic_string)
+ 	CFI_STARTPROC
+-	movl %ecx,%r8d		/* save zero flag */
++	andl %edx,%edx
++	jz 4f
++	cmpl $8,%edx
++	jb 2f		/* less than 8 bytes, go to byte copy loop */
++	ALIGN_DESTINATION
+ 	movl %edx,%ecx
+ 	shrl $3,%ecx
+-	andl $7,%edx	
+-	jz   10f
+-1:	rep 
+-	movsq 
+-	movl %edx,%ecx
+-2:	rep
+-	movsb
+-9:	movl %ecx,%eax
+-	ret
+-
+-	/* multiple of 8 byte */
+-10:	rep
++	andl $7,%edx
++1:	rep
+ 	movsq
+-	xor %eax,%eax
++2:	movl %edx,%ecx
++3:	rep
++	movsb
++4:	xorl %eax,%eax
+ 	ret
+ 
+-	/* exception handling */
+-3:      lea (%rdx,%rcx,8),%rax	/* exception on quad loop */
+-	jmp 6f
+-5:	movl %ecx,%eax		/* exception on byte loop */
+-	/* eax: left over bytes */
+-6:	testl %r8d,%r8d		/* zero flag set? */
+-	jz 7f
+-	movl %eax,%ecx		/* initialize x86 loop counter */
+-	push %rax
+-	xorl %eax,%eax
+-8:	rep
+-	stosb 			/* zero the rest */
+-11:	pop %rax
+-7:	ret
+-	CFI_ENDPROC
+-END(copy_user_generic_c)
++	.section .fixup,"ax"
++11:	lea (%rdx,%rcx,8),%rcx
++12:	movl %ecx,%edx		/* ecx is zerorest also */
++	jmp copy_user_handle_tail
++	.previous
+ 
+ 	.section __ex_table,"a"
+-	.quad 1b,3b
+-	.quad 2b,5b
+-	.quad 8b,11b
+-	.quad 10b,3b
++	.align 8
++	.quad 1b,11b
++	.quad 3b,12b
+ 	.previous
++	CFI_ENDPROC
++ENDPROC(copy_user_generic_string)
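The string variant implements the same contract with rep movs doing the
work: len/8 quadwords via rep movsq, then len%8 bytes via rep movsb. On
a fault, fixup 11 folds the outstanding quadword count back into bytes
(lea (%rdx,%rcx,8),%rcx) and fixup 12 hands the total to
copy_user_handle_tail, with nonzero %ecx again serving as zerorest. A C
sketch with fault handling omitted (the function name is made up):

	/* Loop structure of copy_user_generic_string, faults ignored. */
	static unsigned long copy_string_sketch(char *dst, const char *src,
						unsigned len)
	{
		if (!len)
			return 0;		/* andl %edx,%edx; jz 4f */
		if (len >= 8) {
			/* ALIGN_DESTINATION, then 1: rep movsq */
			while ((unsigned long)dst & 7) {
				*dst++ = *src++;
				len--;
			}
			while (len >= 8) {
				*(unsigned long *)dst =
					*(const unsigned long *)src;
				dst += 8;
				src += 8;
				len -= 8;
			}
		}
		while (len--)			/* 3: rep movsb */
			*dst++ = *src++;
		return 0;			/* 4: %eax = 0 */
	}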

Added: dists/etch/linux-2.6/debian/patches/series/22etch1
==============================================================================
--- (empty file)
+++ dists/etch/linux-2.6/debian/patches/series/22etch1	Mon Oct 13 05:40:53 2008
@@ -0,0 +1,8 @@
++ bugfix/sctp-make-sure-n-sizeof-does-not-overflow.patch
++ bugfix/esp-iv-in-linear-part-of-skb.patch
++ bugfix/amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
++ bugfix/amd64-fix-zeroing-on-exception-in-copy_user.patch
++ bugfix/tty-fix-for-tty-operations-bugs.patch
++ bugfix/check-privileges-before-setting-mount-propagation.patch
++ bugfix/x86-add-copy_user_handle_tail.patch
++ bugfix/x86-fix-copy_user.patch
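Both fixup paths in the copy_user rewrite above converge on
copy_user_handle_tail, which this diff does not contain; it is added by
x86-add-copy_user_handle_tail.patch in the series just listed.
Upstream, that helper is roughly the following (a sketch based on the
mainline fix, not necessarily the exact backport):

	/* Byte-wise recovery copy: retries the faulted region one byte
	 * at a time, zeroes whatever still cannot be copied when
	 * zerorest is set, and returns the number of uncopied bytes. */
	unsigned long
	copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
	{
		char c;
		unsigned zero_len;

		for (; len; --len) {
			if (__get_user(c, from++))
				break;
			if (__put_user(c, to++))
				break;
		}
		for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
			if (__put_user(c, to++))
				break;
		return len;
	}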
