[kernel] r22299 - in dists/squeeze-security/linux-2.6/debian: . patches/bugfix/all patches/bugfix/x86 patches/series

Ben Hutchings benh at moszumanska.debian.org
Thu Jan 29 00:59:17 UTC 2015


Author: benh
Date: Thu Jan 29 00:59:17 2015
New Revision: 22299

Log:
Add fixes for CVE-2014-9420, CVE-2014-9584, CVE-2014-9585

Added:
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-infinite-looping-over-ce-entries.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-unchecked-printing-of-er-records.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch
Modified:
   dists/squeeze-security/linux-2.6/debian/changelog
   dists/squeeze-security/linux-2.6/debian/patches/series/48squeeze11

Modified: dists/squeeze-security/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/changelog	Wed Jan 28 23:57:12 2015	(r22298)
+++ dists/squeeze-security/linux-2.6/debian/changelog	Thu Jan 29 00:59:17 2015	(r22299)
@@ -6,6 +6,9 @@
     (CVE-2014-8134)
   * netfilter: conntrack: disable generic tracking for known protocols
     (CVE-2014-8160)
+  * isofs: Fix infinite looping over CE entries (CVE-2014-9420)
+  * isofs: Fix unchecked printing of ER records (CVE-2014-9584)
+  * [amd64] vdso: Fix the vdso address randomization algorithm (CVE-2014-9585)
 
  -- Ben Hutchings <ben at decadent.org.uk>  Wed, 28 Jan 2015 22:33:05 +0000
 

Added: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-infinite-looping-over-ce-entries.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-infinite-looping-over-ce-entries.patch	Thu Jan 29 00:59:17 2015	(r22299)
@@ -0,0 +1,52 @@
+From: Jan Kara <jack at suse.cz>
+Date: Mon, 15 Dec 2014 14:22:46 +0100
+Subject: isofs: Fix infinite looping over CE entries
+Origin: https://git.kernel.org/linus/f54e18f1b831c92f6512d2eedb224cd63d607d3d
+
+Rock Ridge extensions define so-called Continuation Entries (CE), which
+record where additional Rock Ridge data is located. A corrupted isofs
+image can contain an arbitrarily long chain of these, including one that
+forms a loop, causing the kernel to enter an infinite loop when
+traversing these entries.
+
+Limit the traversal to 32 entries, which should provide more than enough
+space to store all the Rock Ridge data.
+
+Reported-by: P J P <ppandit at redhat.com>
+CC: stable at vger.kernel.org
+Signed-off-by: Jan Kara <jack at suse.cz>
+---
+ fs/isofs/rock.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index f488bba..bb63254 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -31,6 +31,7 @@ struct rock_state {
+ 	int cont_size;
+ 	int cont_extent;
+ 	int cont_offset;
++	int cont_loops;
+ 	struct inode *inode;
+ };
+ 
+@@ -74,6 +75,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
+ 	rs->inode = inode;
+ }
+ 
++/* Maximum number of Rock Ridge continuation entries */
++#define RR_MAX_CE_ENTRIES 32
++
+ /*
+  * Returns 0 if the caller should continue scanning, 1 if the scan must end
+  * and -ve on error.
+@@ -106,6 +110,8 @@ static int rock_continue(struct rock_state *rs)
+ 			goto out;
+ 		}
+ 		ret = -EIO;
++		if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
++			goto out;
+ 		bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
+ 		if (bh) {
+ 			memcpy(rs->buffer, bh->b_data + rs->cont_offset,
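
For context, the fix above is simply a bounded walk over the continuation chain: count CE entries as they are followed and give up once a fixed cap is reached, so a crafted chain that points back at itself cannot keep the kernel looping. A minimal stand-alone C sketch of that pattern (struct ce_entry, walk_ce_chain() and the next pointer are hypothetical illustrations, not kernel interfaces; only the 32-entry cap is taken from the patch above):

#define RR_MAX_CE_ENTRIES 32	/* cap taken from the patch above */

struct ce_entry {
	struct ce_entry *next;	/* stand-in for the on-disk CE reference */
};

static int walk_ce_chain(struct ce_entry *ce)
{
	int loops = 0;

	while (ce) {
		if (++loops >= RR_MAX_CE_ENTRIES)
			return -1;	/* chain too long or circular: treat as corrupt */
		/* ...process this entry's Rock Ridge data here... */
		ce = ce->next;
	}
	return 0;
}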

Added: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-unchecked-printing-of-er-records.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/isofs-fix-unchecked-printing-of-er-records.patch	Thu Jan 29 00:59:17 2015	(r22299)
@@ -0,0 +1,30 @@
+From: Jan Kara <jack at suse.cz>
+Date: Thu, 18 Dec 2014 17:26:10 +0100
+Subject: isofs: Fix unchecked printing of ER records
+Origin: https://git.kernel.org/linus/4e2024624e678f0ebb916e6192bd23c1f9fdf696
+
+We didn't check the length of Rock Ridge ER records before printing them.
+Thus a corrupted isofs image can cause us to access and print memory
+beyond the buffer, with obvious consequences.
+
+Reported-and-tested-by: Carl Henrik Lunde <chlunde at ping.uio.no>
+CC: stable at vger.kernel.org
+Signed-off-by: Jan Kara <jack at suse.cz>
+---
+ fs/isofs/rock.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index bb63254..735d752 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -363,6 +363,9 @@ repeat:
+ 			rs.cont_size = isonum_733(rr->u.CE.size);
+ 			break;
+ 		case SIG('E', 'R'):
++			/* Invalid length of ER tag id? */
++			if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
++				goto out;
+ 			ISOFS_SB(inode->i_sb)->s_rock = 1;
+ 			printk(KERN_DEBUG "ISO 9660 Extensions: ");
+ 			{
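
For context, the added check follows the usual pattern for parsing variable-length on-disk records: validate that the declared field length fits inside the enclosing record before touching the bytes. A small stand-alone sketch of that pattern (struct er_record and print_er_id() are hypothetical stand-ins, not the kernel's struct rock_ridge; only the shape of the offsetof-based comparison mirrors the patch above):

#include <stddef.h>
#include <stdio.h>

struct er_record {
	unsigned char len;	/* total length of the record */
	unsigned char len_id;	/* declared length of the id field */
	char data[];		/* id bytes follow */
};

static int print_er_id(const struct er_record *er)
{
	/* Reject records whose id would extend past the record itself. */
	if (er->len_id + offsetof(struct er_record, data) > er->len)
		return -1;
	printf("%.*s\n", er->len_id, er->data);
	return 0;
}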

Added: dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch	Thu Jan 29 00:59:17 2015	(r22299)
@@ -0,0 +1,109 @@
+From: Andy Lutomirski <luto at amacapital.net>
+Date: Fri, 19 Dec 2014 16:04:11 -0800
+Subject: x86_64, vdso: Fix the vdso address randomization algorithm
+Origin: https://git.kernel.org/linus/394f56fe480140877304d342dec46d50dc823d46
+
+The theory behind vdso randomization is that it's mapped at a random
+offset above the top of the stack.  To avoid wasting a page of
+memory for an extra page table, the vdso isn't supposed to extend
+past the lowest PMD into which it can fit.  Other than that, the
+address should be a uniformly distributed address that meets all of
+the alignment requirements.
+
+The current algorithm is buggy: the vdso has about a 50% probability
+of being at the very end of a PMD.  The current algorithm also has a
+decent chance of failing outright due to incorrect handling of the
+case where the top of the stack is near the top of its PMD.
+
+This fixes the implementation.  The paxtest estimate of vdso
+"randomisation" improves from 11 bits to 18 bits.  (Disclaimer: I
+don't know what the paxtest code is actually calculating.)
+
+It's worth noting that this algorithm is inherently biased: the vdso
+is more likely to end up near the end of its PMD than near the
+beginning.  Ideally we would either nix the PMD sharing requirement
+or jointly randomize the vdso and the stack to reduce the bias.
+
+In the meantime, this is a considerable improvement with basically
+no risk of compatibility issues, since the allowed outputs of the
+algorithm are unchanged.
+
+As an easy test, doing this:
+
+for i in `seq 10000`
+  do grep -P vdso /proc/self/maps |cut -d- -f1
+done |sort |uniq -d
+
+used to produce lots of output (1445 lines on my most recent run).
+A tiny subset looks like this:
+
+7fffdfffe000
+7fffe01fe000
+7fffe05fe000
+7fffe07fe000
+7fffe09fe000
+7fffe0bfe000
+7fffe0dfe000
+
+Note the suspicious fe000 endings.  With the fix, I get a much more
+palatable 76 repeated addresses.
+
+Reviewed-by: Kees Cook <keescook at chromium.org>
+Cc: stable at vger.kernel.org
+Signed-off-by: Andy Lutomirski <luto at amacapital.net>
+[bwh: Backported to 2.6.32:
+ - The whole file is only built for x86_64; adjust context and comment for this
+ - We don't have align_vdso_addr()]
+---
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -77,23 +77,39 @@ subsys_initcall(init_vdso);
+ 
+ struct linux_binprm;
+ 
+-/* Put the vdso above the (randomized) stack with another randomized offset.
+-   This way there is no hole in the middle of address space.
+-   To save memory make sure it is still in the same PTE as the stack top.
+-   This doesn't give that many random bits */
++/*
++ * Put the vdso above the (randomized) stack with another randomized
++ * offset.  This way there is no hole in the middle of address space.
++ * To save memory make sure it is still in the same PTE as the stack
++ * top.  This doesn't give that many random bits.
++ *
++ * Note that this algorithm is imperfect: the distribution of the vdso
++ * start address within a PMD is biased toward the end.
++ */
+ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ {
+ 	unsigned long addr, end;
+ 	unsigned offset;
+-	end = (start + PMD_SIZE - 1) & PMD_MASK;
++
++	/*
++	 * Round up the start address.  It can start out unaligned as a result
++	 * of stack start randomization.
++	 */
++	start = PAGE_ALIGN(start);
++
++	/* Round the lowest possible end address up to a PMD boundary. */
++	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+ 	if (end >= TASK_SIZE_MAX)
+ 		end = TASK_SIZE_MAX;
+ 	end -= len;
+-	/* This loses some more bits than a modulo, but is cheaper */
+-	offset = get_random_int() & (PTRS_PER_PTE - 1);
+-	addr = start + (offset << PAGE_SHIFT);
+-	if (addr >= end)
+-		addr = end;
++
++	if (end > start) {
++		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
++		addr = start + (offset << PAGE_SHIFT);
++	} else {
++		addr = start;
++	}
++
+ 	return addr;
+ }
+ 
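
For context, a small userspace simulation of the two selection schemes illustrates the bias described in the commit message. All constants below (4 KiB pages, 2 MiB PMDs, a two-page vdso, the base address, the range of simulated stack tops) are illustrative, and rand() stands in for get_random_int(); only the arithmetic of old_pick() and new_pick() mirrors the before/after code above.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PTE	512

/* Old scheme: mask to a full PMD worth of page offsets, then clamp to end. */
static unsigned long old_pick(unsigned long start, unsigned long len)
{
	unsigned long end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;
	unsigned long offset = (unsigned long)(rand() & (PTRS_PER_PTE - 1));
	unsigned long addr = start + (offset << PAGE_SHIFT);

	return addr >= end ? end : addr;
}

/* New scheme: draw uniformly from the page-aligned slots that actually fit. */
static unsigned long new_pick(unsigned long start, unsigned long len)
{
	unsigned long end = ((start + len + PMD_SIZE - 1) & PMD_MASK) - len;
	unsigned long slots = ((end - start) >> PAGE_SHIFT) + 1;

	return start + ((unsigned long)(rand() % slots) << PAGE_SHIFT);
}

int main(void)
{
	unsigned long base = 0x7fffe0000000UL;		/* arbitrary PMD-aligned base */
	unsigned long len = 2 * PAGE_SIZE;		/* pretend vdso size */
	unsigned long last = base + PMD_SIZE - len;	/* last slot before the PMD boundary */
	int i, n = 100000, old_hits = 0, new_hits = 0;

	for (i = 0; i < n; i++) {
		/* random, page-aligned, non-PMD-aligned "stack top" inside the PMD */
		unsigned long start = base +
			((unsigned long)(1 + rand() % 499) << PAGE_SHIFT);

		old_hits += old_pick(start, len) == last;
		new_hits += new_pick(start, len) == last;
	}
	printf("old: %.1f%% of runs at the last slot, new: %.1f%%\n",
	       100.0 * old_hits / n, 100.0 * new_hits / n);
	return 0;
}

Run repeatedly, the old scheme should land on the last slot below the PMD boundary in roughly half the runs, consistent with the ~50% figure quoted above, while the new scheme spreads its picks evenly across the valid range.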

Modified: dists/squeeze-security/linux-2.6/debian/patches/series/48squeeze11
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/series/48squeeze11	Wed Jan 28 23:57:12 2015	(r22298)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/48squeeze11	Thu Jan 29 00:59:17 2015	(r22299)
@@ -2,3 +2,6 @@
 + bugfix/x86/x86-tls-validate-tls-entries-to-protect-espfix.patch
 + bugfix/x86/x86-kvm-clear-paravirt_enabled-on-kvm-guests-for-espfix32-s-benefit.patch
 + bugfix/all/netfilter-conntrack-disable-generic-tracking-for-kno.patch
++ bugfix/all/isofs-fix-infinite-looping-over-ce-entries.patch
++ bugfix/all/isofs-fix-unchecked-printing-of-er-records.patch
++ bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch


