[kernel] r19813 - in dists/squeeze/linux-2.6: . debian debian/patches/bugfix/all debian/patches/bugfix/x86 debian/patches/debian debian/patches/features/all/openvz debian/patches/features/all/xen debian/patches/series
Ben Hutchings
benh at alioth.debian.org
Thu Feb 14 14:03:33 UTC 2013
Author: benh
Date: Thu Feb 14 14:03:33 2013
New Revision: 19813
Log:
Merge changes from squeeze-security up to 2.6.32-46squeeze1
Added:
dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch
dists/squeeze/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch
dists/squeeze/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch
dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch
dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch
dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch
dists/squeeze/linux-2.6/debian/patches/series/46squeeze1
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1
dists/squeeze/linux-2.6/debian/patches/series/46squeeze1-extra
- copied unchanged from r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1-extra
Modified:
dists/squeeze/linux-2.6/ (props changed)
dists/squeeze/linux-2.6/debian/changelog
dists/squeeze/linux-2.6/debian/patches/features/all/openvz/openvz.patch
Modified: dists/squeeze/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze/linux-2.6/debian/changelog Thu Feb 14 09:51:30 2013 (r19812)
+++ dists/squeeze/linux-2.6/debian/changelog Thu Feb 14 14:03:33 2013 (r19813)
@@ -110,6 +110,23 @@
-- Ben Hutchings <ben at decadent.org.uk> Sat, 19 Jan 2013 05:00:07 +0000
+linux-2.6 (2.6.32-46squeeze1) stable-security; urgency=high
+
+ * kmod: make __request_module() killable (CVE-2012-4398)
+ * net: fix divide by zero in tcp algorithm illinois (CVE-2012-4565)
+ * exec: do not leave bprm->interp on stack (CVE-2012-4530)
+ * exec: use -ELOOP for max recursion depth (CVE-2012-4530)
+ * ext4: Fix max file size and logical block counting of extent format file
+ (CVE-2011-2695)
+ * net: sk_add_backlog() take rmem_alloc into account (CVE-2010-4805)
+ * ipv6: discard overlapping fragment (CVE-2012-4444)
+ * x86/msr: Add capabilities check (CVE-2013-0268)
+ * xen: netback: shutdown the ring if it contains garbage (CVE-2013-0216)
+ * xen: netback: correct netbk_tx_err() to handle wrap around (CVE-2013-0217)
+ * xen: don't assume %ds is usable in xen_iret for 32-bit PVOPS (CVE-2013-0228)
+
+ -- dann frazier <dannf at debian.org> Thu, 14 Feb 2013 00:52:26 -0800
+
linux-2.6 (2.6.32-46) stable; urgency=high
[ Bastian Blank ]
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch)
@@ -0,0 +1,113 @@
+commit b66c5984017533316fd1951770302649baf1aa33
+Author: Kees Cook <keescook at chromium.org>
+Date: Thu Dec 20 15:05:16 2012 -0800
+
+ exec: do not leave bprm->interp on stack
+
+ If a series of scripts are executed, each triggering module loading via
+ unprintable bytes in the script header, kernel stack contents can leak
+ into the command line.
+
+ Normally execution of binfmt_script and binfmt_misc happens recursively.
+ However, when modules are enabled, and unprintable bytes exist in the
+ bprm->buf, execution will restart after attempting to load matching
+ binfmt modules. Unfortunately, the logic in binfmt_script and
+ binfmt_misc does not expect to get restarted. They leave bprm->interp
+ pointing to their local stack. This means on restart bprm->interp is
+ left pointing into unused stack memory which can then be copied into the
+ userspace argv areas.
+
+ After additional study, it seems that both recursion and restart remains
+ the desirable way to handle exec with scripts, misc, and modules. As
+ such, we need to protect the changes to interp.
+
+ This changes the logic to require allocation for any changes to the
+ bprm->interp. To avoid adding a new kmalloc to every exec, the default
+ value is left as-is. Only when passing through binfmt_script or
+ binfmt_misc does an allocation take place.
+
+ For a proof of concept, see DoTest.sh from:
+
+ http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/
+
+ Signed-off-by: Kees Cook <keescook at chromium.org>
+ Cc: halfdog <me at halfdog.net>
+ Cc: P J P <ppandit at redhat.com>
+ Cc: Alexander Viro <viro at zeniv.linux.org.uk>
+ Cc: <stable at vger.kernel.org>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 42b60b0..fb93997 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -176,7 +176,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto _error;
+ bprm->argc ++;
+
+- bprm->interp = iname; /* for binfmt_script */
++ /* Update interp in case binfmt_script needs it. */
++ retval = bprm_change_interp(iname, bprm);
++ if (retval < 0)
++ goto _error;
+
+ interp_file = open_exec (iname);
+ retval = PTR_ERR (interp_file);
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 0834350..356568c 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ retval = copy_strings_kernel(1, &i_name, bprm);
+ if (retval) return retval;
+ bprm->argc++;
+- bprm->interp = interp;
++ retval = bprm_change_interp(interp, bprm);
++ if (retval < 0)
++ return retval;
+
+ /*
+ * OK, now restart the process with the interpreter's dentry.
+diff --git a/fs/exec.c b/fs/exec.c
+index 5cfac92..47bb117 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1109,9 +1109,24 @@ void free_bprm(struct linux_binprm *bprm)
+ mutex_unlock(&current->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
++ /* If a binfmt changed the interp, free it. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
+ kfree(bprm);
+ }
+
++int bprm_change_interp(char *interp, struct linux_binprm *bprm)
++{
++ /* If a binfmt changed the interp, free it first. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
++ bprm->interp = kstrdup(interp, GFP_KERNEL);
++ if (!bprm->interp)
++ return -ENOMEM;
++ return 0;
++}
++EXPORT_SYMBOL(bprm_change_interp);
++
+ /*
+ * install the new credentials for this executable
+ */
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index c64c497..ff17462 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -120,6 +120,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
+ unsigned long stack_top,
+ int executable_stack);
+ extern int bprm_mm_init(struct linux_binprm *bprm);
++extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+ extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+ extern int prepare_bprm_creds(struct linux_binprm *bprm);
+ extern void install_exec_creds(struct linux_binprm *bprm);
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch)
@@ -0,0 +1,137 @@
+commit d740269867021faf4ce38a449353d2b986c34a67
+Author: Kees Cook <keescook at chromium.org>
+Date: Mon Dec 17 16:03:20 2012 -0800
+
+ exec: use -ELOOP for max recursion depth
+
+ To avoid an explosion of request_module calls on a chain of abusive
+ scripts, fail maximum recursion with -ELOOP instead of -ENOEXEC. As soon
+ as maximum recursion depth is hit, the error will fail all the way back
+ up the chain, aborting immediately.
+
+ This also has the side-effect of stopping the user's shell from attempting
+ to reexecute the top-level file as a shell script. As seen in the
+ dash source:
+
+ if (cmd != path_bshell && errno == ENOEXEC) {
+ *argv-- = cmd;
+ *argv = cmd = path_bshell;
+ goto repeat;
+ }
+
+ The above logic was designed for running scripts automatically that lacked
+ the "#!" header, not to re-try failed recursion. On a legitimate -ENOEXEC,
+ things continue to behave as the shell expects.
+
+ Additionally, when tracking recursion, the binfmt handlers should not be
+ involved. The recursion being tracked is the depth of calls through
+ search_binary_handler(), so that function should be exclusively responsible
+ for tracking the depth.
+
+ Signed-off-by: Kees Cook <keescook at chromium.org>
+ Cc: halfdog <me at halfdog.net>
+ Cc: P J P <ppandit at redhat.com>
+ Cc: Alexander Viro <viro at zeniv.linux.org.uk>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
+index 32fb00b..416dcae 100644
+--- a/fs/binfmt_em86.c
++++ b/fs/binfmt_em86.c
+@@ -43,7 +43,6 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
+ return -ENOEXEC;
+ }
+
+- bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index fb93997..258c5ca 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -116,10 +116,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (!enabled)
+ goto _ret;
+
+- retval = -ENOEXEC;
+- if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
+- goto _ret;
+-
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+@@ -200,8 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (retval < 0)
+ goto _error;
+
+- bprm->recursion_depth++;
+-
+ retval = search_binary_handler (bprm, regs);
+ if (retval < 0)
+ goto _error;
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 356568c..4fe6b8a 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ char interp[BINPRM_BUF_SIZE];
+ int retval;
+
+- if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
+- (bprm->recursion_depth > BINPRM_MAX_RECURSION))
++ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
+ return -ENOEXEC;
+ /*
+ * This section does the #! interpretation.
+ * Sorta complicated, but hopefully it will work. -TYT
+ */
+
+- bprm->recursion_depth++;
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/exec.c b/fs/exec.c
+index 47bb117..fd1efbe 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1286,6 +1286,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ int try,retval;
+ struct linux_binfmt *fmt;
+
++ /* This allows 4 levels of binfmt rewrites before failing hard. */
++ if (depth > 5)
++ return -ELOOP;
++
+ retval = security_bprm_check(bprm);
+ if (retval)
+ return retval;
+@@ -1307,12 +1311,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ if (!try_module_get(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
++ bprm->recursion_depth = depth + 1;
+ retval = fn(bprm, regs);
+- /*
+- * Restore the depth counter to its starting value
+- * in this call, so we don't have to rely on every
+- * load_binary function to restore it on return.
+- */
+ bprm->recursion_depth = depth;
+ if (retval >= 0) {
+ if (depth == 0)
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index ff17462..c0de775 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -69,8 +69,6 @@ extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ #define BINPRM_FLAGS_EXECFD_BIT 1
+ #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+
+-#define BINPRM_MAX_RECURSION 4
+-
+ /*
+ * This structure defines the functions that are used to load the binary formats that
+ * linux accepts.
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch)
@@ -0,0 +1,254 @@
+commit f17722f917b2f21497deb6edc62fb1683daa08e6
+Author: Lukas Czerner <lczerner at redhat.com>
+Date: Mon Jun 6 00:05:17 2011 -0400
+
+ ext4: Fix max file size and logical block counting of extent format file
+
+ Kazuya Mio reported that he was able to hit BUG_ON(next == lblock)
+ in ext4_ext_put_gap_in_cache() while creating a sparse file in extent
+ format and fill the tail of file up to its end. We will hit the BUG_ON
+ when we write the last block (2^32-1) into the sparse file.
+
+ The root cause of the problem lies in the fact that we specifically set
+ s_maxbytes so that block at s_maxbytes fit into on-disk extent format,
+ which is 32 bit long. However, we are not storing start and end block
+ number, but rather start block number and length in blocks. It means
+ that in order to cover extent from 0 to EXT_MAX_BLOCK we need
+ EXT_MAX_BLOCK+1 to fit into len (because we counting block 0 as well) -
+ and it does not.
+
+ The only way to fix it without changing the meaning of the struct
+ ext4_extent members is, as Kazuya Mio suggested, to lower s_maxbytes
+ by one fs block so we can cover the whole extent we can get by the
+ on-disk extent format.
+
+ Also in many places EXT_MAX_BLOCK is used as length instead of maximum
+ logical block number as the name suggests, it is all a bit messy. So
+ this commit renames it to EXT_MAX_BLOCKS and change its usage in some
+ places to actually be maximum number of blocks in the extent.
+
+ The bug which this commit fixes can be reproduced as follows:
+
+ dd if=/dev/zero of=/mnt/mp1/file bs=<blocksize> count=1 seek=$((2**32-2))
+ sync
+ dd if=/dev/zero of=/mnt/mp1/file bs=<blocksize> count=1 seek=$((2**32-1))
+
+ Reported-by: Kazuya Mio <k-mio at sx.jp.nec.com>
+ Signed-off-by: Lukas Czerner <lczerner at redhat.com>
+ Signed-off-by: "Theodore Ts'o" <tytso at mit.edu>
+ [dannf: Applied the backport from RHEL6 to Debian's 2.6.32]
+
+diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
+index bdb6ce7..24fa647 100644
+--- a/fs/ext4/ext4_extents.h
++++ b/fs/ext4/ext4_extents.h
+@@ -137,8 +137,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
+ #define EXT_BREAK 1
+ #define EXT_REPEAT 2
+
+-/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */
+-#define EXT_MAX_BLOCK 0xffffffff
++/*
++ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
++ * __le32.
++ */
++#define EXT_MAX_BLOCKS 0xffffffff
+
+ /*
+ * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 0b39af1..bb478a8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1329,7 +1329,7 @@ got_index:
+
+ /*
+ * ext4_ext_next_allocated_block:
+- * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
++ * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
+ * NOTE: it considers block number from index entry as
+ * allocated block. Thus, index entries have to be consistent
+ * with leaves.
+@@ -1343,7 +1343,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
+ depth = path->p_depth;
+
+ if (depth == 0 && path->p_ext == NULL)
+- return EXT_MAX_BLOCK;
++ return EXT_MAX_BLOCKS;
+
+ while (depth >= 0) {
+ if (depth == path->p_depth) {
+@@ -1360,12 +1360,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
+ depth--;
+ }
+
+- return EXT_MAX_BLOCK;
++ return EXT_MAX_BLOCKS;
+ }
+
+ /*
+ * ext4_ext_next_leaf_block:
+- * returns first allocated block from next leaf or EXT_MAX_BLOCK
++ * returns first allocated block from next leaf or EXT_MAX_BLOCKS
+ */
+ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
+ struct ext4_ext_path *path)
+@@ -1377,7 +1377,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
+
+ /* zero-tree has no leaf blocks at all */
+ if (depth == 0)
+- return EXT_MAX_BLOCK;
++ return EXT_MAX_BLOCKS;
+
+ /* go to index block */
+ depth--;
+@@ -1390,7 +1390,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
+ depth--;
+ }
+
+- return EXT_MAX_BLOCK;
++ return EXT_MAX_BLOCKS;
+ }
+
+ /*
+@@ -1570,13 +1570,13 @@ unsigned int ext4_ext_check_overlap(struct inode *inode,
+ */
+ if (b2 < b1) {
+ b2 = ext4_ext_next_allocated_block(path);
+- if (b2 == EXT_MAX_BLOCK)
++ if (b2 == EXT_MAX_BLOCKS)
+ goto out;
+ }
+
+ /* check for wrap through zero on extent logical start block*/
+ if (b1 + len1 < b1) {
+- len1 = EXT_MAX_BLOCK - b1;
++ len1 = EXT_MAX_BLOCKS - b1;
+ newext->ee_len = cpu_to_le16(len1);
+ ret = 1;
+ }
+@@ -1652,7 +1652,7 @@ repeat:
+ fex = EXT_LAST_EXTENT(eh);
+ next = ext4_ext_next_leaf_block(inode, path);
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
+- && next != EXT_MAX_BLOCK) {
++ && next != EXT_MAX_BLOCKS) {
+ ext_debug("next leaf block - %d\n", next);
+ BUG_ON(npath != NULL);
+ npath = ext4_ext_find_extent(inode, next, NULL);
+@@ -1770,7 +1770,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
+ BUG_ON(func == NULL);
+ BUG_ON(inode == NULL);
+
+- while (block < last && block != EXT_MAX_BLOCK) {
++ while (block < last && block != EXT_MAX_BLOCKS) {
+ num = last - block;
+ /* find extent for this block */
+ down_read(&EXT4_I(inode)->i_data_sem);
+@@ -1898,7 +1898,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
+ if (ex == NULL) {
+ /* there is no extent yet, so gap is [0;-] */
+ lblock = 0;
+- len = EXT_MAX_BLOCK;
++ len = EXT_MAX_BLOCKS;
+ ext_debug("cache gap(whole file):");
+ } else if (block < le32_to_cpu(ex->ee_block)) {
+ lblock = block;
+@@ -2143,8 +2143,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ path[depth].p_ext = ex;
+
+ a = ex_ee_block > start ? ex_ee_block : start;
+- b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
+- ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
++ b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCKS ?
++ ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCKS;
+
+ ext_debug(" border %u:%u\n", a, b);
+
+@@ -3673,15 +3673,14 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
+
+ /*
+- * If this extent reaches EXT_MAX_BLOCK, it must be last.
++ * If this extent reaches EXT_MAX_BLOCKS, it must be last.
+ *
+- * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
++ * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCKS,
+ * this also indicates no more allocated blocks.
+ *
+- * XXX this might miss a single-block extent at EXT_MAX_BLOCK
+ */
+- if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
+- newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
++ if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCKS ||
++ newex->ec_block + newex->ec_len == EXT_MAX_BLOCKS) {
+ loff_t size = i_size_read(inode);
+ loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
+
+@@ -3762,8 +3761,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+
+ start_blk = start >> inode->i_sb->s_blocksize_bits;
+ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
+- if (last_blk >= EXT_MAX_BLOCK)
+- last_blk = EXT_MAX_BLOCK-1;
++ if (last_blk >= EXT_MAX_BLOCKS)
++ last_blk = EXT_MAX_BLOCKS-1;
+ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
+
+ /*
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index a73ed78..fe81390 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -1001,12 +1001,12 @@ mext_check_arguments(struct inode *orig_inode,
+ return -EINVAL;
+ }
+
+- if ((orig_start > EXT_MAX_BLOCK) ||
+- (donor_start > EXT_MAX_BLOCK) ||
+- (*len > EXT_MAX_BLOCK) ||
+- (orig_start + *len > EXT_MAX_BLOCK)) {
++ if ((orig_start >= EXT_MAX_BLOCKS) ||
++ (donor_start >= EXT_MAX_BLOCKS) ||
++ (*len > EXT_MAX_BLOCKS) ||
++ (orig_start + *len >= EXT_MAX_BLOCKS)) {
+ ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
+- "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK,
++ "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 15018a8..e184335 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1975,6 +1975,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
+ * so that won't be a limiting factor.
+ *
++ * However there is other limiting factor. We do store extents in the form
++ * of starting block and length, hence the resulting length of the extent
++ * covering maximum file size must fit into on-disk format containers as
++ * well. Given that length is always by 1 unit bigger than max unit (because
++ * we count 0 as well) we have to lower the s_maxbytes by one fs block.
++ *
+ * Note, this does *not* consider any metadata overhead for vfs i_blocks.
+ */
+ static loff_t ext4_max_size(int blkbits, int has_huge_files)
+@@ -1996,10 +2002,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
+ upper_limit <<= blkbits;
+ }
+
+- /* 32-bit extent-start container, ee_block */
+- res = 1LL << 32;
++ /*
++ * 32-bit extent-start container, ee_block. We lower the maxbytes
++ * by one fs block, so ee_len can cover the extent of maximum file
++ * size
++ */
++ res = (1LL << 32) - 1;
+ res <<= blkbits;
+- res -= 1;
+
+ /* Sanity check against vm- & vfs- imposed limits */
+ if (res > upper_limit)
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-discard-overlapping-fragment.patch)
@@ -0,0 +1,116 @@
+commit 70789d7052239992824628db8133de08dc78e593
+Author: Nicolas Dichtel <nicolas.dichtel at 6wind.com>
+Date: Fri Sep 3 05:13:05 2010 +0000
+
+ ipv6: discard overlapping fragment
+
+ RFC5722 prohibits reassembling fragments when some data overlaps.
+
+ Bug spotted by Zhang Zuotao <zuotao.zhang at 6wind.com>.
+
+ Signed-off-by: Nicolas Dichtel <nicolas.dichtel at 6wind.com>
+ Signed-off-by: David S. Miller <davem at davemloft.net>
+ [dannf: backported to Debian's 2.6.32]
+
+diff -urpN a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+--- a/net/ipv6/reassembly.c 2013-02-13 22:04:16.582911820 -0800
++++ b/net/ipv6/reassembly.c 2013-02-13 22:15:23.714382174 -0800
+@@ -148,16 +148,6 @@ int ip6_frag_match(struct inet_frag_queu
+ }
+ EXPORT_SYMBOL(ip6_frag_match);
+
+-/* Memory Tracking Functions. */
+-static inline void frag_kfree_skb(struct netns_frags *nf,
+- struct sk_buff *skb, int *work)
+-{
+- if (work)
+- *work -= skb->truesize;
+- atomic_sub(skb->truesize, &nf->mem);
+- kfree_skb(skb);
+-}
+-
+ void ip6_frag_init(struct inet_frag_queue *q, void *a)
+ {
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+@@ -348,58 +338,22 @@ static int ip6_frag_queue(struct frag_qu
+ prev = next;
+ }
+
+- /* We found where to put this one. Check for overlap with
+- * preceding fragment, and, if needed, align things so that
+- * any overlaps are eliminated.
++ /* RFC5722, Section 4:
++ * When reassembling an IPv6 datagram, if
++ * one or more its constituent fragments is determined to be an
++ * overlapping fragment, the entire datagram (and any constituent
++ * fragments, including those not yet received) MUST be silently
++ * discarded.
+ */
+- if (prev) {
+- int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
+-
+- if (i > 0) {
+- offset += i;
+- if (end <= offset)
+- goto err;
+- if (!pskb_pull(skb, i))
+- goto err;
+- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+- skb->ip_summed = CHECKSUM_NONE;
+- }
+- }
+
+- /* Look for overlap with succeeding segments.
+- * If we can merge fragments, do it.
+- */
+- while (next && FRAG6_CB(next)->offset < end) {
+- int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
+-
+- if (i < next->len) {
+- /* Eat head of the next overlapped fragment
+- * and leave the loop. The next ones cannot overlap.
+- */
+- if (!pskb_pull(next, i))
+- goto err;
+- FRAG6_CB(next)->offset += i; /* next fragment */
+- fq->q.meat -= i;
+- if (next->ip_summed != CHECKSUM_UNNECESSARY)
+- next->ip_summed = CHECKSUM_NONE;
+- break;
+- } else {
+- struct sk_buff *free_it = next;
+-
+- /* Old fragment is completely overridden with
+- * new one drop it.
+- */
+- next = next->next;
+-
+- if (prev)
+- prev->next = next;
+- else
+- fq->q.fragments = next;
+-
+- fq->q.meat -= free_it->len;
+- frag_kfree_skb(fq->q.net, free_it, NULL);
+- }
+- }
++ /* Check for overlap with preceding fragment. */
++ if (prev &&
++ (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
++ goto discard_fq;
++
++ /* Look for overlap with succeeding segment. */
++ if (next && FRAG6_CB(next)->offset < end)
++ goto discard_fq;
+
+ FRAG6_CB(skb)->offset = offset;
+
+@@ -436,6 +390,8 @@ static int ip6_frag_queue(struct frag_qu
+ write_unlock(&ip6_frags.lock);
+ return -1;
+
++discard_fq:
++ fq_kill(fq);
+ err:
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASMFAILS);
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-introduce-call_modprobe-helper.patch)
@@ -0,0 +1,64 @@
+commit 3e63a93b987685f02421e18b2aa452d20553a88b
+Author: Oleg Nesterov <oleg at redhat.com>
+Date: Fri Mar 23 15:02:49 2012 -0700
+
+ kmod: introduce call_modprobe() helper
+
+ No functional changes. Move the call_usermodehelper code from
+ __request_module() into the new simple helper, call_modprobe().
+
+ Signed-off-by: Oleg Nesterov <oleg at redhat.com>
+ Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
+ Cc: Rusty Russell <rusty at rustcorp.com.au>
+ Cc: Tejun Heo <tj at kernel.org>
+ Cc: David Rientjes <rientjes at google.com>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 8ed592b..09e10c3 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -50,6 +50,18 @@ static struct workqueue_struct *khelper_wq;
+ */
+ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
+
++static int call_modprobe(char *module_name, int wait)
++{
++ static char *envp[] = { "HOME=/",
++ "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
++ NULL };
++
++ char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
++
++ return call_usermodehelper(modprobe_path, argv, envp, wait);
++}
++
+ /**
+ * __request_module - try to load a kernel module
+ * @wait: wait (or not) for the operation to complete
+@@ -71,11 +83,6 @@ int __request_module(bool wait, const char *fmt, ...)
+ char module_name[MODULE_NAME_LEN];
+ unsigned int max_modprobes;
+ int ret;
+- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
+- static char *envp[] = { "HOME=/",
+- "TERM=linux",
+- "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+- NULL };
+ static atomic_t kmod_concurrent = ATOMIC_INIT(0);
+ #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
+ static int kmod_loop_msg;
+@@ -118,8 +125,8 @@ int __request_module(bool wait, const char *fmt, ...)
+
+ trace_module_request(module_name, wait, _RET_IP_);
+
+- ret = call_usermodehelper(modprobe_path, argv, envp,
+- wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
++ ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
++
+ atomic_dec(&kmod_concurrent);
+ return ret;
+ }
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/kmod-make-__request_module-killable.patch)
@@ -0,0 +1,85 @@
+commit 1cc684ab75123efe7ff446eb821d44375ba8fa30
+Author: Oleg Nesterov <oleg at redhat.com>
+Date: Fri Mar 23 15:02:50 2012 -0700
+
+ kmod: make __request_module() killable
+
+ As Tetsuo Handa pointed out, request_module() can stress the system
+ while the oom-killed caller sleeps in TASK_UNINTERRUPTIBLE.
+
+ The task T uses "almost all" memory, then it does something which
+ triggers request_module(). Say, it can simply call sys_socket(). This
+ in turn needs more memory and leads to OOM. oom-killer correctly
+ chooses T and kills it, but this can't help because it sleeps in
+ TASK_UNINTERRUPTIBLE and after that oom-killer becomes "disabled" by the
+ TIF_MEMDIE task T.
+
+ Make __request_module() killable. The only necessary change is that
+ call_modprobe() should kmalloc argv and module_name, they can't live in
+ the stack if we use UMH_KILLABLE. This memory is freed via
+ call_usermodehelper_freeinfo()->cleanup.
+
+ Reported-by: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
+ Signed-off-by: Oleg Nesterov <oleg at redhat.com>
+ Cc: Rusty Russell <rusty at rustcorp.com.au>
+ Cc: Tejun Heo <tj at kernel.org>
+ Cc: David Rientjes <rientjes at google.com>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf, bwh: backported to Debian's 2.6.32]
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 09e10c3..553ce09 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -50,16 +50,48 @@ static struct workqueue_struct *khelper_wq;
+ */
+ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
+
++static void free_modprobe_argv(char **argv, char **envp)
++{
++ kfree(argv[3]); /* check call_modprobe() */
++ kfree(argv);
++}
++
+ static int call_modprobe(char *module_name, int wait)
+ {
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL };
++ struct subprocess_info *info;
++
++ char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++ if (!argv)
++ goto out;
+
+- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
++ module_name = kstrdup(module_name, GFP_KERNEL);
++ if (!module_name)
++ goto free_argv;
+
+- return call_usermodehelper(modprobe_path, argv, envp, wait);
++ argv[0] = modprobe_path;
++ argv[1] = "-q";
++ argv[2] = "--";
++ argv[3] = module_name; /* check free_modprobe_argv() */
++ argv[4] = NULL;
++
++ info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
++ if (!info)
++ goto free_module_name;
++
++ call_usermodehelper_setcleanup(info, free_modprobe_argv);
++
++ return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
++
++free_module_name:
++ kfree(module_name);
++free_argv:
++ kfree(argv);
++out:
++ return -ENOMEM;
+ }
+
+ /**
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch)
@@ -0,0 +1,61 @@
+commit 8f363b77ee4fbf7c3bbcf5ec2c5ca482d396d664
+Author: Jesper Dangaard Brouer <brouer at redhat.com>
+Date: Wed Oct 31 02:45:32 2012 +0000
+
+ net: fix divide by zero in tcp algorithm illinois
+
+ Reading TCP stats when using TCP Illinois congestion control algorithm
+ can cause a divide by zero kernel oops.
+
+ The division by zero occur in tcp_illinois_info() at:
+ do_div(t, ca->cnt_rtt);
+ where ca->cnt_rtt can become zero (when rtt_reset is called)
+
+ Steps to Reproduce:
+ 1. Register tcp_illinois:
+ # sysctl -w net.ipv4.tcp_congestion_control=illinois
+ 2. Monitor internal TCP information via command "ss -i"
+ # watch -d ss -i
+ 3. Establish new TCP conn to machine
+
+ Either it fails at the initial conn, or else it needs to wait
+ for a loss or a reset.
+
+ This is only related to reading stats. The function avg_delay() also
+ performs the same divide, but is guarded with a (ca->cnt_rtt > 0) at its
+ calling point in update_params(). Thus, simply fix tcp_illinois_info().
+
+ Function tcp_illinois_info() / get_info() is called without
+ socket lock. Thus, eliminate any race condition on ca->cnt_rtt
+ by using a local stack variable. Simply reuse info.tcpv_rttcnt,
+ as its already set to ca->cnt_rtt.
+ Function avg_delay() is not affected by this race condition, as
+ its called with the socket lock.
+
+ Cc: Petr Matousek <pmatouse at redhat.com>
+ Signed-off-by: Jesper Dangaard Brouer <brouer at redhat.com>
+ Acked-by: Eric Dumazet <edumazet at google.com>
+ Acked-by: Stephen Hemminger <shemminger at vyatta.com>
+ Signed-off-by: David S. Miller <davem at davemloft.net>
+
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index 813b43a..834857f 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
+ .tcpv_rttcnt = ca->cnt_rtt,
+ .tcpv_minrtt = ca->base_rtt,
+ };
+- u64 t = ca->sum_rtt;
+
+- do_div(t, ca->cnt_rtt);
+- info.tcpv_rtt = t;
++ if (info.tcpv_rttcnt > 0) {
++ u64 t = ca->sum_rtt;
+
++ do_div(t, info.tcpv_rttcnt);
++ info.tcpv_rtt = t;
++ }
+ nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ }
+ }
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch)
@@ -0,0 +1,136 @@
+commit c377411f2494a931ff7facdbb3a6839b1266bcf6
+Author: Eric Dumazet <eric.dumazet at gmail.com>
+Date: Tue Apr 27 15:13:20 2010 -0700
+
+ net: sk_add_backlog() take rmem_alloc into account
+
+ Current socket backlog limit is not enough to really stop DDOS attacks,
+ because user thread spend many time to process a full backlog each
+ round, and user might crazy spin on socket lock.
+
+ We should add backlog size and receive_queue size (aka rmem_alloc) to
+ pace writers, and let user run without being slow down too much.
+
+ Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
+ stress situations.
+
+ Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
+ receiver can now process ~200.000 pps (instead of ~100 pps before the
+ patch) on a 8 core machine.
+
+ Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
+ Signed-off-by: David S. Miller <davem at davemloft.net>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d04a1ab..1f6d6aa 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -243,7 +243,6 @@ struct sock {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ int len;
+- int limit;
+ } sk_backlog;
+ wait_queue_head_t *sk_sleep;
+ struct dst_entry *sk_dst_cache;
+@@ -575,10 +574,20 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ skb->next = NULL;
+ }
+
++/*
++ * Take into account size of receive queue and backlog queue
++ */
++static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
++{
++ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
++
++ return qsize + skb->truesize > sk->sk_rcvbuf;
++}
++
+ /* The per-socket spinlock must be held here. */
+ static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+ {
+- if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
++ if (sk_rcvqueues_full(sk, skb))
+ return -ENOBUFS;
+
+ sk_add_backlog(sk, skb);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 273e1e9..755a614 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -323,6 +323,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+
+ skb->dev = NULL;
+
++ if (sk_rcvqueues_full(sk, skb)) {
++ atomic_inc(&sk->sk_drops);
++ goto discard_and_relse;
++ }
+ if (nested)
+ bh_lock_sock_nested(sk);
+ else
+@@ -1863,7 +1867,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ sk->sk_allocation = GFP_KERNEL;
+ sk->sk_rcvbuf = sysctl_rmem_default;
+ sk->sk_sndbuf = sysctl_wmem_default;
+- sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
+ sk->sk_state = TCP_CLOSE;
+ sk_set_socket(sk, sock);
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f66a23b..571da81 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1183,6 +1183,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
++
++ if (sk_rcvqueues_full(sk, skb))
++ goto drop;
++
+ rc = 0;
+
+ bh_lock_sock(sk);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 4ae5ee3..7d2e94e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -480,6 +480,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
+ bh_unlock_sock(sk2);
+ }
+ }
++ if (sk_rcvqueues_full(sk, skb)) {
++ kfree_skb(skb);
++ goto out;
++ }
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+@@ -602,6 +606,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+
+ /* deliver */
+
++ if (sk_rcvqueues_full(sk, skb)) {
++ sock_put(sk);
++ goto discard;
++ }
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 374dfe5..3a95fcb 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3719,9 +3719,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
+ SCTP_DBG_OBJCNT_INC(sock);
+ percpu_counter_inc(&sctp_sockets_allocated);
+
+- /* Set socket backlog limit. */
+- sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+-
+ local_bh_disable();
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ local_bh_enable();
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch)
@@ -0,0 +1,31 @@
+commit 5b9bd473e3b8a8c6c4ae99be475e6e9b27568555
+Author: Oleg Nesterov <oleg at redhat.com>
+Date: Fri Mar 23 15:02:49 2012 -0700
+
+ usermodehelper: ____call_usermodehelper() doesn't need do_exit()
+
+    Minor cleanup.  ____call_usermodehelper() can simply return, no need to
+    call do_exit() explicitly.
+
+ Signed-off-by: Oleg Nesterov <oleg at redhat.com>
+ Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
+ Cc: Rusty Russell <rusty at rustcorp.com.au>
+ Cc: Tejun Heo <tj at kernel.org>
+ Cc: David Rientjes <rientjes at google.com>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: adjusted to apply to Debian's 2.6.32]
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 0c775dc..8ed592b 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -190,7 +190,7 @@ static int ____call_usermodehelper(void *data)
+
+ /* Exec failed? */
+ sub_info->retval = retval;
+- do_exit(0);
++ return 0;
+ }
+
+ void call_usermodehelper_freeinfo(struct subprocess_info *info)
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch)
@@ -0,0 +1,102 @@
+commit d0bd587a80960d7ba7e0c8396e154028c9045c54
+Author: Oleg Nesterov <oleg at redhat.com>
+Date: Fri Mar 23 15:02:47 2012 -0700
+
+ usermodehelper: implement UMH_KILLABLE
+
+ Implement UMH_KILLABLE, should be used along with UMH_WAIT_EXEC/PROC.
+ The caller must ensure that subprocess_info->path/etc can not go away
+ until call_usermodehelper_freeinfo().
+
+ call_usermodehelper_exec(UMH_KILLABLE) does
+ wait_for_completion_killable. If it fails, it uses
+ xchg(&sub_info->complete, NULL) to serialize with umh_complete() which
+    does the same xchg() to access sub_info->complete.
+
+ If call_usermodehelper_exec wins, it can safely return. umh_complete()
+ should get NULL and call call_usermodehelper_freeinfo().
+
+ Otherwise we know that umh_complete() was already called, in this case
+ call_usermodehelper_exec() falls back to wait_for_completion() which
+ should succeed "very soon".
+
+ Note: UMH_NO_WAIT == -1 but it obviously should not be used with
+    UMH_KILLABLE.  We delay the necessary cleanup to simplify the back
+ porting.
+
+ Signed-off-by: Oleg Nesterov <oleg at redhat.com>
+ Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
+ Cc: Rusty Russell <rusty at rustcorp.com.au>
+ Cc: Tejun Heo <tj at kernel.org>
+ Cc: David Rientjes <rientjes at google.com>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index 384ca8b..4b2026f 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -64,6 +64,8 @@ enum umh_wait {
+ UMH_WAIT_PROC = 1, /* wait for the process to complete */
+ };
+
++#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
++
+ /* Actually execute the sub-process */
+ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index c6059c4..0c775dc 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -205,7 +205,15 @@ EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
+ static void umh_complete(struct subprocess_info *sub_info)
+ {
+- complete(sub_info->complete);
++ struct completion *comp = xchg(&sub_info->complete, NULL);
++ /*
++ * See call_usermodehelper_exec(). If xchg() returns NULL
++ * we own sub_info, the UMH_KILLABLE caller has gone away.
++ */
++ if (comp)
++ complete(comp);
++ else
++ call_usermodehelper_freeinfo(sub_info);
+ }
+
+ /* Keventd can't block, but this (a child) can. */
+@@ -261,6 +269,9 @@ static void __call_usermodehelper(struct work_struct *work)
+
+ BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+
++ if (wait != UMH_NO_WAIT)
++ wait &= ~UMH_KILLABLE;
++
+ /* CLONE_VFORK: wait until the usermode helper has execve'd
+ * successfully We need the data structures to stay around
+ * until that is done. */
+@@ -494,9 +505,21 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ queue_work(khelper_wq, &sub_info->work);
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
++
++ if (wait & UMH_KILLABLE) {
++ retval = wait_for_completion_killable(&done);
++ if (!retval)
++ goto wait_done;
++
++ /* umh_complete() will see NULL and free sub_info */
++ if (xchg(&sub_info->complete, NULL))
++ goto unlock;
++ /* fallthrough, umh_complete() was already called */
++ }
++
+ wait_for_completion(&done);
++wait_done:
+ retval = sub_info->retval;
+-
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ unlock:
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/usermodehelper-introduce-umh_complete.patch)
@@ -0,0 +1,52 @@
+commit b3449922502f5a161ee2b5022a33aec8472fbf18
+Author: Oleg Nesterov <oleg at redhat.com>
+Date: Fri Mar 23 15:02:47 2012 -0700
+
+ usermodehelper: introduce umh_complete(sub_info)
+
+ Preparation. Add the new trivial helper, umh_complete(). Currently it
+ simply does complete(sub_info->complete).
+
+ Signed-off-by: Oleg Nesterov <oleg at redhat.com>
+ Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
+ Cc: Rusty Russell <rusty at rustcorp.com.au>
+ Cc: Tejun Heo <tj at kernel.org>
+ Cc: David Rientjes <rientjes at google.com>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+ [dannf: Adjusted to apply to Debian's 2.6.32]
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index d206078..c6059c4 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -203,6 +203,11 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ }
+ EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
++static void umh_complete(struct subprocess_info *sub_info)
++{
++ complete(sub_info->complete);
++}
++
+ /* Keventd can't block, but this (a child) can. */
+ static int wait_for_helper(void *data)
+ {
+@@ -242,7 +247,7 @@ static int wait_for_helper(void *data)
+ if (sub_info->wait == UMH_NO_WAIT)
+ call_usermodehelper_freeinfo(sub_info);
+ else
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ return 0;
+ }
+
+@@ -277,7 +282,7 @@ static void __call_usermodehelper(struct work_struct *work)
+ /* FALLTHROUGH */
+
+ case UMH_WAIT_EXEC:
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ }
+ }
+
Copied: dists/squeeze/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/msr-add-capabilities-check.patch)
@@ -0,0 +1,48 @@
+commit c903f0456bc69176912dee6dd25c6a66ee1aed00
+Author: Alan Cox <alan at linux.intel.com>
+Date: Thu Nov 15 13:06:22 2012 +0000
+
+ x86/msr: Add capabilities check
+
+ At the moment the MSR driver only relies upon file system
+ checks. This means that anything as root with any capability set
+ can write to MSRs. Historically that wasn't very interesting but
+ on modern processors the MSRs are such that writing to them
+    provides several ways to execute arbitrary code in kernel space.
+ Sample code and documentation on doing this is circulating and
+ MSR attacks are used on Windows 64bit rootkits already.
+
+ In the Linux case you still need to be able to open the device
+ file so the impact is fairly limited and reduces the security of
+ some capability and security model based systems down towards
+ that of a generic "root owns the box" setup.
+
+ Therefore they should require CAP_SYS_RAWIO to prevent an
+ elevation of capabilities. The impact of this is fairly minimal
+ on most setups because they don't have heavy use of
+ capabilities. Those using SELinux, SMACK or AppArmor rules might
+ want to consider if their rulesets on the MSR driver could be
+ tighter.
+
+ Signed-off-by: Alan Cox <alan at linux.intel.com>
+ Cc: Linus Torvalds <torvalds at linux-foundation.org>
+ Cc: Andrew Morton <akpm at linux-foundation.org>
+ Cc: Peter Zijlstra <a.p.zijlstra at chello.nl>
+ Cc: Horses <stable at kernel.org>
+ Signed-off-by: Ingo Molnar <mingo at kernel.org>
+ [dannf: backported to Debian's 2.6.32]
+
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 5eaeb5e..63a053b 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -176,6 +176,9 @@ static int msr_open(struct inode *inode, struct file *file)
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ int ret = 0;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ lock_kernel();
+ cpu = iminor(file->f_path.dentry->d_inode);
+
Copied: dists/squeeze/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch)
@@ -0,0 +1,74 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 9 Sep 2010 03:46:50 +0100
+Subject: [PATCH 8/8] net: Avoid ABI change from limit for socket backlog
+
+Move the new fields to the end of struct sock and hide them from genksyms.
+---
+ include/net/sock.h | 10 ++++++----
+ net/core/sock.c | 6 +++---
+ net/sctp/socket.c | 2 +-
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+ [dannf: Adjusted to apply on top of bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch]
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 1f6d6aa..e5a0d8c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -242,7 +242,6 @@ struct sock {
+ struct {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+- int len;
+ } sk_backlog;
+ wait_queue_head_t *sk_sleep;
+ struct dst_entry *sk_dst_cache;
+@@ -302,6 +301,9 @@ struct sock {
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
+ void (*sk_destruct)(struct sock *sk);
++#ifndef __GENKSYMS__
++ int sk_backlog_len;
++#endif
+ };
+
+ /*
+@@ -579,7 +581,7 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ */
+ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+ {
+- unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
++ unsigned int qsize = sk->sk_backlog_len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize + skb->truesize > sk->sk_rcvbuf;
+ }
+@@ -591,7 +593,7 @@ static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+ return -ENOBUFS;
+
+ sk_add_backlog(sk, skb);
+- sk->sk_backlog.len += skb->truesize;
++ sk->sk_backlog_len += skb->truesize;
+ return 0;
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 755a614..188a326 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1122,7 +1122,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+ sock_lock_init(newsk);
+ bh_lock_sock(newsk);
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+- newsk->sk_backlog.len = 0;
++ newsk->sk_backlog_len = 0;
+
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ /*
+@@ -1534,7 +1534,7 @@ static void __release_sock(struct sock *sk)
+ * Doing the zeroing here guarantee we can not loop forever
+ * while a wild producer attempts to flood us.
+ */
+- sk->sk_backlog.len = 0;
++ sk->sk_backlog_len = 0;
+ }
+
+ /**
Modified: dists/squeeze/linux-2.6/debian/patches/features/all/openvz/openvz.patch
==============================================================================
--- dists/squeeze/linux-2.6/debian/patches/features/all/openvz/openvz.patch Thu Feb 14 09:51:30 2013 (r19812)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/openvz/openvz.patch Thu Feb 14 14:03:33 2013 (r19813)
@@ -38944,9 +38944,9 @@
/*
* This structure really needs to be cleaned up.
* Most of it is for TCP, and not used by any of
-@@ -301,6 +303,8 @@ struct sock {
- int sk_backlog_len;
- int sk_backlog_limit;
+@@ -304,6 +306,8 @@ struct sock {
+ #ifndef __GENKSYMS__
+ int sk_backlog_len;
#endif
+ struct sock_beancounter sk_bc;
+ struct ve_struct *owner_env;
Copied: dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch)
@@ -0,0 +1,244 @@
+netback: shutdown the ring if it contains garbage
+
+A buggy or malicious frontend should not be able to confuse netback.
+If we spot anything which is not as it should be then shutdown the
+device and don't try to continue with the ring in a potentially
+hostile state. Well behaved and non-hostile frontends will not be
+penalised.
+
+As well as making the existing checks for such errors fatal also add a
+new check that ensures that there isn't an insane number of requests
+on the ring (i.e. more than would fit in the ring). If the ring
+contains garbage then previously it was possible to loop over this
+insane number, getting an error each time and therefore not generating
+any more pending requests and therefore not exiting the loop in
+xen_netbk_tx_build_gops for an extended period.
+
+Also turn various netdev_dbg calls which now precipitate a fatal error
+into netdev_err, they are rate limited because the device is shutdown
+afterwards.
+
+This fixes at least one known DoS/softlockup of the backend domain.
+
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Signed-off-by: Jan Beulich <JBeulich at suse.com>
+[dannf: backported to Debian's 2.6.32]
+
+diff -urpN a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+--- a/drivers/xen/netback/common.h 2013-02-14 00:22:26.000000000 -0700
++++ b/drivers/xen/netback/common.h 2013-02-14 00:29:04.000000000 -0700
+@@ -221,6 +221,9 @@ int netif_be_start_xmit(struct sk_buff *
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+ irqreturn_t netif_be_int(int irq, void *dev_id);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xen_netif *netif);
++
+ static inline int netbk_can_queue(struct net_device *dev)
+ {
+ struct xen_netif *netif = netdev_priv(dev);
+diff -urpN a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+--- a/drivers/xen/netback/interface.c 2013-02-14 00:22:26.000000000 -0700
++++ b/drivers/xen/netback/interface.c 2013-02-14 00:39:19.000000000 -0700
+@@ -442,17 +442,21 @@ err_rx:
+ return err;
+ }
+
++void xenvif_carrier_off(struct xen_netif *netif)
++{
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++}
++
+ void netif_disconnect(struct xen_netif *netif)
+ {
+- if (netback_carrier_ok(netif)) {
+- rtnl_lock();
+- netback_carrier_off(netif);
+- netif_carrier_off(netif->dev); /* discard queued packets */
+- if (netif_running(netif->dev))
+- __netif_down(netif);
+- rtnl_unlock();
+- netif_put(netif);
+- }
++ if (netback_carrier_ok(netif))
++ xenvif_carrier_off(netif);
+
+ atomic_dec(&netif->refcnt);
+ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
+diff -urpN a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+--- a/drivers/xen/netback/netback.c 2013-02-14 00:22:26.000000000 -0700
++++ b/drivers/xen/netback/netback.c 2013-02-14 00:38:04.000000000 -0700
+@@ -986,6 +986,14 @@ static void netbk_tx_err(struct xen_neti
+ netif_put(netif);
+ }
+
++static void netbk_fatal_tx_err(struct xen_netif *netif)
++{
++ printk(KERN_ERR "%s: fatal error; disabling device\n",
++ netif->dev->name);
++ xenvif_carrier_off(netif);
++ netif_put(netif);
++}
++
+ static int netbk_count_requests(struct xen_netif *netif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp, int work_to_do)
+@@ -998,19 +1006,25 @@ static int netbk_count_requests(struct x
+
+ do {
+ if (frags >= work_to_do) {
+- DPRINTK("Need more frags\n");
++ printk(KERN_ERR "%s: Need more frags\n",
++ netif->dev->name);
++ netbk_fatal_tx_err(netif);
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- DPRINTK("Too many frags\n");
++ printk(KERN_ERR "%s: Too many frags\n",
++ netif->dev->name);
++ netbk_fatal_tx_err(netif);
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- DPRINTK("Frags galore\n");
++ printk(KERN_ERR "%s: Frag is bigger than frame.\n",
++ netif->dev->name);
++ netbk_fatal_tx_err(netif);
+ return -frags;
+ }
+
+@@ -1018,8 +1032,9 @@ static int netbk_count_requests(struct x
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- DPRINTK("txp->offset: %x, size: %u\n",
+- txp->offset, txp->size);
++ printk(KERN_ERR "%s: txp->offset: %x, size: %u\n",
++ netif->dev->name, txp->offset, txp->size);
++ netbk_fatal_tx_err(netif);
+ return -frags;
+ }
+ } while ((txp++)->flags & NETTXF_more_data);
+@@ -1178,7 +1193,9 @@ int netbk_get_extras(struct xen_netif *n
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- DPRINTK("Missing extra info\n");
++ printk(KERN_ERR "%s: Missing extra info\n",
++ netif->dev->name);
++ netbk_fatal_tx_err(netif);
+ return -EBADR;
+ }
+
+@@ -1187,7 +1204,9 @@ int netbk_get_extras(struct xen_netif *n
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ netif->tx.req_cons = ++cons;
+- DPRINTK("Invalid extra type: %d\n", extra.type);
++ printk(KERN_ERR "%s: Invalid extra type: %d\n",
++ netif->dev->name, extra.type);
++ netbk_fatal_tx_err(netif);
+ return -EINVAL;
+ }
+
+@@ -1198,16 +1217,21 @@ int netbk_get_extras(struct xen_netif *n
+ return work_to_do;
+ }
+
+-static int netbk_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso)
++static int netbk_set_skb_gso(struct xen_netif *netif, struct sk_buff *skb,
++ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- DPRINTK("GSO size must not be zero.\n");
++ printk(KERN_ERR "%s: GSO size must not be zero.\n",
++ netif->dev->name);
++ netbk_fatal_tx_err(netif);
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ printk(KERN_ERR "%s: Bad GSO type %d.\n",
++ netif->dev->name, gso->u.gso.type);
++ netbk_fatal_tx_err(netif);
+ return -EINVAL;
+ }
+
+@@ -1317,6 +1341,16 @@ static unsigned net_tx_build_mops(struct
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
++ if (netif->tx.sring->req_prod - netif->tx.req_cons >
++ NET_TX_RING_SIZE) {
++ printk(KERN_ERR "%s: Impossible number of requests. "
++ "req_prod %u, req_cons %u, size %lu\n",
++ netif->dev->name, netif->tx.sring->req_prod,
++ netif->tx.req_cons, NET_TX_RING_SIZE);
++ netbk_fatal_tx_err(netif);
++ continue;
++ }
++
+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
+ if (!work_to_do) {
+ netif_put(netif);
+@@ -1344,17 +1378,14 @@ static unsigned net_tx_build_mops(struct
+ work_to_do = netbk_get_extras(netif, extras,
+ work_to_do);
+ idx = netif->tx.req_cons;
+- if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(netif, &txreq, idx);
++ if (unlikely(work_to_do < 0))
+ continue;
+- }
+ }
+
+ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
+- if (unlikely(ret < 0)) {
+- netbk_tx_err(netif, &txreq, idx - ret);
++ if (unlikely(ret < 0))
+ continue;
+- }
++
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1365,10 +1396,10 @@ static unsigned net_tx_build_mops(struct
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
+- txreq.offset, txreq.size,
+- (txreq.offset &~PAGE_MASK) + txreq.size);
+- netbk_tx_err(netif, &txreq, idx);
++ printk(KERN_ERR "%s: txreq.offset: %x, size: %u, end: %lu\n",
++ netif->dev->name, txreq.offset, txreq.size,
++ (txreq.offset & ~PAGE_MASK) + txreq.size);
++ netbk_fatal_tx_err(netif);
+ continue;
+ }
+
+@@ -1394,9 +1425,9 @@ static unsigned net_tx_build_mops(struct
+ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+- if (netbk_set_skb_gso(skb, gso)) {
++ if (netbk_set_skb_gso(netif, skb, gso)) {
++ /* Failure in netbk_set_skb_gso is fatal. */
+ kfree_skb(skb);
+- netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+ }
Copied: dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch)
@@ -0,0 +1,16 @@
+netback: correct netbk_tx_err() to handle wrap around
+
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Signed-off-by: Jan Beulich <JBeulich at suse.com>
+
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1011,7 +1011,7 @@ static void netbk_tx_err(netif_t *netif,
+
+ do {
+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
+- if (cons >= end)
++ if (cons == end)
+ break;
+ txp = RING_GET_REQUEST(&netif->tx, cons++);
+ } while (1);
Copied: dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch (from r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch)
@@ -0,0 +1,125 @@
+From 98e089c5f23b36db415d0a4c3854e969c7a4ecfa Mon Sep 17 00:00:00 2001
+From: Jan Beulich <JBeulich at suse.com>
+Date: Thu, 24 Jan 2013 13:11:10 +0000
+Subject: [PATCH] x86/xen: don't assume %ds is usable in xen_iret for 32-bit
+ PVOPS.
+
+This fixes CVE-2013-0228 / XSA-42
+
+Drew Jones, while working on CVE-2013-0190, found that an unprivileged guest user
+in a 32bit PV guest can crash the guest with a panic like this:
+
+-------------
+general protection fault: 0000 [#1] SMP
+last sysfs file: /sys/devices/vbd-51712/block/xvda/dev
+Modules linked in: sunrpc ipt_REJECT nf_conntrack_ipv4 nf_defrag_ipv4
+iptable_filter ip_tables ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6
+xt_state nf_conntrack ip6table_filter ip6_tables ipv6 xen_netfront ext4
+mbcache jbd2 xen_blkfront dm_mirror dm_region_hash dm_log dm_mod [last
+unloaded: scsi_wait_scan]
+
+Pid: 1250, comm: r Not tainted 2.6.32-356.el6.i686 #1
+EIP: 0061:[<c0407462>] EFLAGS: 00010086 CPU: 0
+EIP is at xen_iret+0x12/0x2b
+EAX: eb8d0000 EBX: 00000001 ECX: 08049860 EDX: 00000010
+ESI: 00000000 EDI: 003d0f00 EBP: b77f8388 ESP: eb8d1fe0
+ DS: 0000 ES: 007b FS: 0000 GS: 00e0 SS: 0069
+Process r (pid: 1250, ti=eb8d0000 task=c2953550 task.ti=eb8d0000)
+Stack:
+ 00000000 0027f416 00000073 00000206 b77f8364 0000007b 00000000 00000000
+Call Trace:
+Code: c3 8b 44 24 18 81 4c 24 38 00 02 00 00 8d 64 24 30 e9 03 00 00 00
+8d 76 00 f7 44 24 08 00 00 02 80 75 33 50 b8 00 e0 ff ff 21 e0 <8b> 40
+10 8b 04 85 a0 f6 ab c0 8b 80 0c b0 b3 c0 f6 44 24 0d 02
+EIP: [<c0407462>] xen_iret+0x12/0x2b SS:ESP 0069:eb8d1fe0
+general protection fault: 0000 [#2]
+---[ end trace ab0d29a492dcd330 ]---
+Kernel panic - not syncing: Fatal exception
+Pid: 1250, comm: r Tainted: G D ---------------
+2.6.32-356.el6.i686 #1
+Call Trace:
+ [<c08476df>] ? panic+0x6e/0x122
+ [<c084b63c>] ? oops_end+0xbc/0xd0
+ [<c084b260>] ? do_general_protection+0x0/0x210
+ [<c084a9b7>] ? error_code+0x73/
+-------------
+
+Petr says: "
+ I've analysed the bug and I think that xen_iret() cannot cope with
+ mangled DS, in this case zeroed out (null selector/descriptor) by either
+ xen_failsafe_callback() or RESTORE_REGS because the corresponding LDT
+ entry was invalidated by the reproducer. "
+
+Jan took a look at the preliminary patch and came up a fix that solves
+this problem:
+
+"This code gets called after all registers other than those handled by
+IRET got already restored, hence a null selector in %ds or a non-null
+one that got loaded from a code or read-only data descriptor would
+cause a kernel mode fault (with the potential of crashing the kernel
+as a whole, if panic_on_oops is set)."
+
+The way to fix this is to realize that we can only rely on the
+registers that IRET restores. The two that are guaranteed are the
+%cs and %ss as they are always fixed GDT selectors. Also they are
+inaccessible from user mode - so they cannot be altered. This is
+the approach taken in this patch.
+
+Another alternative option suggested by Jan would be to rely on
+the subtle realization that using the %ebp or %esp relative references uses
+the %ss segment. In which case we could switch from using %eax to %ebp and
+would not need the %ss over-rides. That would also require one extra
+instruction to compensate for the one place where the register is used
+as scaled index. However Andrew pointed out that is too subtle and if
+further work was to be done in this code-path it could escape folks attention
+and lead to accidents.
+
+Reviewed-by: Petr Matousek <pmatouse at redhat.com>
+Reported-by: Petr Matousek <pmatouse at redhat.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3 at citrix.com>
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+[dannf: backported to Debian's 2.6.32]
+
+diff -urpN a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+--- a/arch/x86/xen/xen-asm_32.S 2013-02-14 00:50:24.000000000 -0700
++++ b/arch/x86/xen/xen-asm_32.S 2013-02-14 01:06:13.000000000 -0700
+@@ -88,11 +88,11 @@ ENTRY(xen_iret)
+ */
+ #ifdef CONFIG_SMP
+ GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov per_cpu__xen_vcpu(%eax), %eax
++ movl %ss:TI_cpu(%eax), %eax
++ movl %ss:__per_cpu_offset(,%eax,4), %eax
++ mov %ss:per_cpu__xen_vcpu(%eax), %eax
+ #else
+- movl per_cpu__xen_vcpu, %eax
++ movl %ss:per_cpu__xen_vcpu, %eax
+ #endif
+
+ /* check IF state we're restoring */
+@@ -105,11 +105,11 @@ ENTRY(xen_iret)
+ * resuming the code, so we don't have to be worried about
+ * being preempted to another CPU.
+ */
+- setz XEN_vcpu_info_mask(%eax)
++ setz %ss:XEN_vcpu_info_mask(%eax)
+ xen_iret_start_crit:
+
+ /* check for unmasked and pending */
+- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+
+ /*
+ * If there's something pending, mask events again so we can
+@@ -117,7 +117,7 @@ xen_iret_start_crit:
+ * touch XEN_vcpu_info_mask.
+ */
+ jne 1f
+- movb $1, XEN_vcpu_info_mask(%eax)
++ movb $1, %ss:XEN_vcpu_info_mask(%eax)
+
+ 1: popl %eax
+
Copied: dists/squeeze/linux-2.6/debian/patches/series/46squeeze1 (from r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/series/46squeeze1 Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1)
@@ -0,0 +1,14 @@
++ bugfix/all/usermodehelper-introduce-umh_complete.patch
++ bugfix/all/usermodehelper-implement-UMH_KILLABLE.patch
++ bugfix/all/usermodehelper-____call_usermodehelper-doesnt-need-do_exit.patch
++ bugfix/all/kmod-introduce-call_modprobe-helper.patch
++ bugfix/all/kmod-make-__request_module-killable.patch
++ bugfix/all/net-fix-divide-by-zero-in-tcp-algorithm-illinois.patch
++ bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch
++ bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
++ bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch
+- debian/net-Avoid-ABI-change-from-limit-for-socket-backlog.patch
++ bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
++ debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch
++ bugfix/all/ipv6-discard-overlapping-fragment.patch
++ bugfix/x86/msr-add-capabilities-check.patch
Copied: dists/squeeze/linux-2.6/debian/patches/series/46squeeze1-extra (from r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1-extra)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/series/46squeeze1-extra Thu Feb 14 14:03:33 2013 (r19813, copy of r19812, dists/squeeze-security/linux-2.6/debian/patches/series/46squeeze1-extra)
@@ -0,0 +1,3 @@
++ features/all/xen/xsa39-classic-0001-xen-netback-garbage-ring.patch featureset=xen
++ features/all/xen/xsa39-classic-0002-xen-netback-wrap-around.patch featureset=xen
++ features/all/xen/xsa42-pvops-0001-x86-xen-don-t-assume-ds-is-usable-in-xen_iret-for-32.patch featureset=xen
More information about the Kernel-svn-changes
mailing list