[kernel] r22304 - in dists/wheezy-security/linux/debian: . patches patches/bugfix/all patches/bugfix/x86

Ben Hutchings benh at moszumanska.debian.org
Thu Jan 29 04:55:49 UTC 2015


Author: benh
Date: Thu Jan 29 04:55:48 2015
New Revision: 22304

Log:
Add fixes for CVE-2014-8160, CVE-2014-9585, CVE-2015-0239 and a related bug with no CVE ID

Added:
   dists/wheezy-security/linux/debian/patches/bugfix/all/netfilter-conntrack-disable-generic-tracking-for-kno.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-emulator-reject-sysenter-in-compatibility-mo.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-sysenter-emulation-is-broken.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch
Modified:
   dists/wheezy-security/linux/debian/changelog
   dists/wheezy-security/linux/debian/patches/series

Modified: dists/wheezy-security/linux/debian/changelog
==============================================================================
--- dists/wheezy-security/linux/debian/changelog	Thu Jan 29 04:51:00 2015	(r22303)
+++ dists/wheezy-security/linux/debian/changelog	Thu Jan 29 04:55:48 2015	(r22304)
@@ -7,6 +7,12 @@
     - prefix module autoloading with "crypto-"
     - include crypto- module prefix in template
     - add missing crypto module aliases
+  * netfilter: conntrack: disable generic tracking for known protocols
+    (CVE-2014-8160)
+  * [amd64] vdso: Fix the vdso address randomization algorithm (CVE-2014-9585)
+  * [x86] KVM: x86 emulator: reject SYSENTER in compatibility mode on AMD
+    guests
+  * [x86] KVM: SYSENTER emulation is broken (CVE-2015-0239)
 
  -- Ben Hutchings <ben at decadent.org.uk>  Thu, 29 Jan 2015 04:02:31 +0000
 

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/netfilter-conntrack-disable-generic-tracking-for-kno.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/netfilter-conntrack-disable-generic-tracking-for-kno.patch	Thu Jan 29 04:55:48 2015	(r22304)
@@ -0,0 +1,88 @@
+From: Florian Westphal <fw at strlen.de>
+Date: Fri, 26 Sep 2014 11:35:42 +0200
+Subject: netfilter: conntrack: disable generic tracking for known protocols
+Origin: https://git.kernel.org/linus/db29a9508a9246e77087c5531e45b2c88ec6988b
+
+Given following iptables ruleset:
+
+-P FORWARD DROP
+-A FORWARD -p sctp --dport 9 -j ACCEPT
+-A FORWARD -p tcp --dport 80 -j ACCEPT
+-A FORWARD -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
+
+One would assume that this allows SCTP on port 9 and TCP on port 80.
+Unfortunately, if the SCTP conntrack module is not loaded, this allows
+*all* SCTP communication to pass through, i.e. it behaves as if
+-p sctp -j ACCEPT had been added, which we consider a security issue.
+
+This is because on the first SCTP packet on port 9, we create a dummy
+"generic l4" conntrack entry without any port information (since
+conntrack doesn't know how to extract this information).
+
+All subsequent packets of that unknown protocol will then be in
+established state, since they fall back to proto_generic and match the
+'generic' entry.
+
+Our originally proposed version [1] completely disabled generic protocol
+tracking, but Jozsef suggested disabling it only for protocols that have
+a more suitable helper available. We therefore mitigate the issue only
+for the conntrack protocol helpers known in-tree, so that NAT and
+direction information is still preserved for all other protocols.
+
+ [1] http://www.spinics.net/lists/netfilter-devel/msg33430.html
+
+Joint work with Daniel Borkmann.
+
+Signed-off-by: Florian Westphal <fw at strlen.de>
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Acked-by: Jozsef Kadlecsik <kadlec at blackhole.kfki.hu>
+Signed-off-by: Pablo Neira Ayuso <pablo at netfilter.org>
+[bwh: Backported to 2.6.32: adjust context]
+---
+ net/netfilter/nf_conntrack_proto_generic.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
+index d25f293..957c1db 100644
+--- a/net/netfilter/nf_conntrack_proto_generic.c
++++ b/net/netfilter/nf_conntrack_proto_generic.c
+@@ -14,6 +14,30 @@
+ 
+ static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+ 
++static bool nf_generic_should_process(u8 proto)
++{
++	switch (proto) {
++#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
++	case IPPROTO_SCTP:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
++	case IPPROTO_DCCP:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
++	case IPPROTO_GRE:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
++	case IPPROTO_UDPLITE:
++		return false;
++#endif
++	default:
++		return true;
++	}
++}
++
+ static bool generic_pkt_to_tuple(const struct sk_buff *skb,
+ 				 unsigned int dataoff,
+ 				 struct nf_conntrack_tuple *tuple)
+@@ -56,7 +80,7 @@ static int generic_packet(struct nf_conn *ct,
+ static bool new(struct nf_conn *ct, const struct sk_buff *skb,
+ 		unsigned int dataoff)
+ {
+-	return true;
++	return nf_generic_should_process(nf_ct_protonum(ct));
+ }
+ 
+ #ifdef CONFIG_SYSCTL
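
A standalone C sketch of the decision added above: generic conntrack handling is refused for protocols that have a dedicated in-tree helper, so an unloaded helper can no longer leave behind a catch-all "established" entry. This is an illustration only, not kernel code; the kernel's CONFIG_NF_CT_PROTO_*_MODULE conditionals are replaced by plain booleans, and the helper name generic_should_process() is made up for the example.

/* Userspace sketch of the nf_generic_should_process() idea; the booleans
 * stand in for the kernel's CONFIG_NF_CT_PROTO_*_MODULE options. */
#include <stdbool.h>
#include <stdio.h>
#include <netinet/in.h>         /* IPPROTO_SCTP, IPPROTO_DCCP, IPPROTO_GRE, ... */

/* Pretend these reflect which dedicated conntrack helpers exist in-tree. */
static const bool have_sctp_helper    = true;
static const bool have_dccp_helper    = true;
static const bool have_gre_helper     = true;
static const bool have_udplite_helper = true;

/* Return true if the generic tracker may create a conntrack entry. */
static bool generic_should_process(unsigned char proto)
{
        switch (proto) {
        case IPPROTO_SCTP:
                return !have_sctp_helper;
        case IPPROTO_DCCP:
                return !have_dccp_helper;
        case IPPROTO_GRE:
                return !have_gre_helper;
        case IPPROTO_UDPLITE:
                return !have_udplite_helper;
        default:
                return true;    /* truly unknown protocols keep generic tracking */
        }
}

int main(void)
{
        printf("SCTP      -> %s\n", generic_should_process(IPPROTO_SCTP) ? "generic" : "refused");
        printf("GRE       -> %s\n", generic_should_process(IPPROTO_GRE)  ? "generic" : "refused");
        printf("proto 253 -> %s\n", generic_should_process(253)          ? "generic" : "refused");
        return 0;
}

With the patch applied, SCTP/DCCP/GRE/UDP-Lite flows are only tracked when their dedicated conntrack module is loaded; all other protocols keep generic tracking, so NAT and direction information is still preserved for them.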

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-emulator-reject-sysenter-in-compatibility-mo.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-emulator-reject-sysenter-in-compatibility-mo.patch	Thu Jan 29 04:55:48 2015	(r22304)
@@ -0,0 +1,51 @@
+From: Avi Kivity <avi at redhat.com>
+Date: Wed, 1 Feb 2012 12:23:21 +0200
+Subject: KVM: x86 emulator: reject SYSENTER in compatibility mode on AMD
+ guests
+Origin: https://git.kernel.org/linus/1a18a69b762374c423305772500f36eb8984ca52
+
+If the guest thinks it is running on an AMD CPU, it will not have prepared
+the SYSENTER MSRs, and executing SYSENTER in compatibility mode will fail.
+
+Detect this condition and #UD instead, like the spec says.
+
+Signed-off-by: Avi Kivity <avi at redhat.com>
+---
+ arch/x86/kvm/emulate.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1952,6 +1952,17 @@ setup_syscalls_segments(struct x86_emula
+ 	ss->p = 1;
+ }
+ 
++static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
++{
++	u32 eax, ebx, ecx, edx;
++
++	eax = ecx = 0;
++	return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
++		&& ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
++		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
++		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
++}
++
+ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
+ {
+ 	struct x86_emulate_ops *ops = ctxt->ops;
+@@ -2068,6 +2079,14 @@ static int em_sysenter(struct x86_emulat
+ 	if (ctxt->mode == X86EMUL_MODE_REAL)
+ 		return emulate_gp(ctxt, 0);
+ 
++	/*
++	 * Not recognized on AMD in compat mode (but is recognized in legacy
++	 * mode).
++	 */
++	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
++	    && !vendor_intel(ctxt))
++		return emulate_ud(ctxt);
++
+ 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
+ 	* Therefore, we inject an #UD.
+ 	*/
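
The vendor_intel() helper added above only compares the CPUID leaf 0 vendor registers against the "GenuineIntel" signature, which leaf 0 returns split across EBX, EDX and ECX. A minimal userspace equivalent, assuming GCC/Clang's <cpuid.h> on an x86 host; it merely illustrates the check the emulator performs against the guest's CPUID, it is not the emulator code:

/* Userspace illustration of the GenuineIntel check. */
#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>      /* __get_cpuid(), provided by GCC/Clang on x86 */

static bool vendor_intel(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                return false;
        return ebx == 0x756e6547        /* "Genu" */
            && edx == 0x49656e69        /* "ineI" */
            && ecx == 0x6c65746e;       /* "ntel" */
}

int main(void)
{
        printf("GenuineIntel: %s\n", vendor_intel() ? "yes" : "no");
        return 0;
}

When the guest's CPUID does not report GenuineIntel, the emulator now injects #UD for SYSENTER executed in compatibility mode, matching the behaviour of real AMD hardware.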

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-sysenter-emulation-is-broken.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-x86-sysenter-emulation-is-broken.patch	Thu Jan 29 04:55:48 2015	(r22304)
@@ -0,0 +1,76 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 1 Jan 2015 23:11:11 +0200
+Subject: KVM: x86: SYSENTER emulation is broken
+Origin: https://git.kernel.org/linus/f3747379accba8e95d70cec0eae0582c8c182050
+
+SYSENTER emulation is broken in several ways:
+1. It misses the case of 16-bit code segments completely (CVE-2015-0239).
+2. MSR_IA32_SYSENTER_CS is checked in 64-bit mode incorrectly (bits 0 and 1 can
+   still be set without causing #GP).
+3. MSR_IA32_SYSENTER_EIP and MSR_IA32_SYSENTER_ESP are not masked in
+   legacy-mode.
+4. There is some unneeded code.
+
+Fix it.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ arch/x86/kvm/emulate.c | 27 ++++++++-------------------
+ 1 file changed, 8 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2083,7 +2083,7 @@ static int em_sysenter(struct x86_emulat
+ 	 * Not recognized on AMD in compat mode (but is recognized in legacy
+ 	 * mode).
+ 	 */
+-	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
++	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
+ 	    && !vendor_intel(ctxt))
+ 		return emulate_ud(ctxt);
+ 
+@@ -2096,23 +2096,13 @@ static int em_sysenter(struct x86_emulat
+ 	setup_syscalls_segments(ctxt, &cs, &ss);
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+-	switch (ctxt->mode) {
+-	case X86EMUL_MODE_PROT32:
+-		if ((msr_data & 0xfffc) == 0x0)
+-			return emulate_gp(ctxt, 0);
+-		break;
+-	case X86EMUL_MODE_PROT64:
+-		if (msr_data == 0x0)
+-			return emulate_gp(ctxt, 0);
+-		break;
+-	}
++	if ((msr_data & 0xfffc) == 0x0)
++		return emulate_gp(ctxt, 0);
+ 
+ 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+-	cs_sel = (u16)msr_data;
+-	cs_sel &= ~SELECTOR_RPL_MASK;
++	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ 	ss_sel = cs_sel + 8;
+-	ss_sel &= ~SELECTOR_RPL_MASK;
+-	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
++	if (efer & EFER_LMA) {
+ 		cs.d = 0;
+ 		cs.l = 1;
+ 	}
+@@ -2121,10 +2111,11 @@ static int em_sysenter(struct x86_emulat
+ 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
+-	ctxt->_eip = msr_data;
++	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+-	ctxt->regs[VCPU_REGS_RSP] = msr_data;
++	ctxt->regs[VCPU_REGS_RSP] = (efer & EFER_LMA) ? msr_data :
++							(u32)msr_data;
+ 
+ 	return X86EMUL_CONTINUE;
+ }
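
The net effect of the rewritten em_sysenter() logic can be condensed into a few lines of ordinary C. The sketch below is an illustration only, with made-up MSR contents; SELECTOR_RPL_MASK is the low two selector bits. It shows the now-unconditional null-selector #GP check, the CS/SS selector derivation, and the truncation of EIP/ESP to 32 bits when EFER.LMA is clear:

/* Illustrative model of the fixed SYSENTER selector and masking logic
 * (not the emulator itself); the MSR values below are made up. */
#include <stdint.h>
#include <stdio.h>

#define SELECTOR_RPL_MASK 0x3u          /* low two bits of a selector = RPL */

int main(void)
{
        uint64_t msr_sysenter_cs  = 0x10;                       /* example MSR contents */
        uint64_t msr_sysenter_eip = 0xffffffff81000000ull;
        uint64_t msr_sysenter_esp = 0xffffffff82000000ull;
        int long_mode_active = 0;                               /* EFER.LMA clear */

        /* #GP(0) if the selector field is null, regardless of CPU mode. */
        if ((msr_sysenter_cs & 0xfffc) == 0) {
                puts("#GP(0): SYSENTER with a null MSR_IA32_SYSENTER_CS");
                return 1;
        }

        uint16_t cs_sel = (uint16_t)msr_sysenter_cs & ~SELECTOR_RPL_MASK;
        uint16_t ss_sel = cs_sel + 8;

        /* Outside long mode, the target EIP and ESP are truncated to 32 bits. */
        uint64_t eip = long_mode_active ? msr_sysenter_eip : (uint32_t)msr_sysenter_eip;
        uint64_t esp = long_mode_active ? msr_sysenter_esp : (uint32_t)msr_sysenter_esp;

        printf("cs=%#x ss=%#x eip=%#llx esp=%#llx\n",
               (unsigned)cs_sel, (unsigned)ss_sel,
               (unsigned long long)eip, (unsigned long long)esp);
        return 0;
}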

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch	Thu Jan 29 04:55:48 2015	(r22304)
@@ -0,0 +1,117 @@
+From: Andy Lutomirski <luto at amacapital.net>
+Date: Fri, 19 Dec 2014 16:04:11 -0800
+Subject: x86_64, vdso: Fix the vdso address randomization algorithm
+Origin: https://git.kernel.org/linus/394f56fe480140877304d342dec46d50dc823d46
+
+The theory behind vdso randomization is that it's mapped at a random
+offset above the top of the stack.  To avoid wasting a page of
+memory for an extra page table, the vdso isn't supposed to extend
+past the lowest PMD into which it can fit.  Other than that, the
+address should be a uniformly distributed address that meets all of
+the alignment requirements.
+
+The current algorithm is buggy: the vdso has about a 50% probability
+of being at the very end of a PMD.  The current algorithm also has a
+decent chance of failing outright due to incorrect handling of the
+case where the top of the stack is near the top of its PMD.
+
+This fixes the implementation.  The paxtest estimate of vdso
+"randomisation" improves from 11 bits to 18 bits.  (Disclaimer: I
+don't know what the paxtest code is actually calculating.)
+
+It's worth noting that this algorithm is inherently biased: the vdso
+is more likely to end up near the end of its PMD than near the
+beginning.  Ideally we would either nix the PMD sharing requirement
+or jointly randomize the vdso and the stack to reduce the bias.
+
+In the mean time, this is a considerable improvement with basically
+no risk of compatibility issues, since the allowed outputs of the
+algorithm are unchanged.
+
+As an easy test, doing this:
+
+for i in `seq 10000`
+  do grep -P vdso /proc/self/maps |cut -d- -f1
+done |sort |uniq -d
+
+used to produce lots of output (1445 lines on my most recent run).
+A tiny subset looks like this:
+
+7fffdfffe000
+7fffe01fe000
+7fffe05fe000
+7fffe07fe000
+7fffe09fe000
+7fffe0bfe000
+7fffe0dfe000
+
+Note the suspicious fe000 endings.  With the fix, I get a much more
+palatable 76 repeated addresses.
+
+Reviewed-by: Kees Cook <keescook at chromium.org>
+Cc: stable at vger.kernel.org
+Signed-off-by: Andy Lutomirski <luto at amacapital.net>
+[bwh: Backported to 2.6.32:
+ - Adjust context
+ - The whole file is only built for x86_64; adjust comment for this]
+---
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -72,30 +72,43 @@ subsys_initcall(init_vdso);
+ 
+ struct linux_binprm;
+ 
+-/* Put the vdso above the (randomized) stack with another randomized offset.
+-   This way there is no hole in the middle of address space.
+-   To save memory make sure it is still in the same PTE as the stack top.
+-   This doesn't give that many random bits */
++/*
++ * Put the vdso above the (randomized) stack with another randomized
++ * offset.  This way there is no hole in the middle of address space.
++ * To save memory make sure it is still in the same PTE as the stack
++ * top.  This doesn't give that many random bits.
++ *
++ * Note that this algorithm is imperfect: the distribution of the vdso
++ * start address within a PMD is biased toward the end.
++ */
+ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ {
+ 	unsigned long addr, end;
+ 	unsigned offset;
+-	end = (start + PMD_SIZE - 1) & PMD_MASK;
++
++	/*
++	 * Round up the start address.  It can start out unaligned as a result
++	 * of stack start randomization.
++	 */
++	start = PAGE_ALIGN(start);
++
++	/* Round the lowest possible end address up to a PMD boundary. */
++	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+ 	if (end >= TASK_SIZE_MAX)
+ 		end = TASK_SIZE_MAX;
+ 	end -= len;
+-	/* This loses some more bits than a modulo, but is cheaper */
+-	offset = get_random_int() & (PTRS_PER_PTE - 1);
+-	addr = start + (offset << PAGE_SHIFT);
+-	if (addr >= end)
+-		addr = end;
++
++	if (end > start) {
++		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
++		addr = start + (offset << PAGE_SHIFT);
++	} else {
++		addr = start;
++	}
+ 
+ 	/*
+-	 * page-align it here so that get_unmapped_area doesn't
+-	 * align it wrongfully again to the next page. addr can come in 4K
+-	 * unaligned here as a result of stack start randomization.
++	 * Forcibly align the final address in case we have a hardware
++	 * issue that requires alignment for performance reasons.
+ 	 */
+-	addr = PAGE_ALIGN(addr);
+ 	addr = align_addr(addr, NULL, ALIGN_VDSO);
+ 
+ 	return addr;
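
The roughly 50% end-of-PMD bias described in the commit message is easy to reproduce with a quick userspace simulation of the old vdso_addr() arithmetic. The constants (4 KiB pages, 2 MiB PMDs, 512 PTEs per table) and the page-granular stack-top model below are assumptions made for illustration; this is not the kernel function itself:

/* Rough simulation of the old vdso placement, counting how often the vdso
 * ends up at the last usable address below its PMD boundary. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1ULL << PAGE_SHIFT)
#define PMD_SIZE        (1ULL << 21)            /* 2 MiB */
#define PMD_MASK        (~(PMD_SIZE - 1))
#define PTRS_PER_PTE    512
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static uint64_t old_vdso_addr(uint64_t start, uint64_t len)
{
        uint64_t end = (start + PMD_SIZE - 1) & PMD_MASK;
        uint64_t addr;
        unsigned int offset;

        end -= len;
        offset = rand() & (PTRS_PER_PTE - 1);   /* 0..511 pages above the stack top */
        addr = start + ((uint64_t)offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;                     /* clamp: this is the biased step */
        return PAGE_ALIGN(addr);
}

int main(void)
{
        const uint64_t len = 2 * PAGE_SIZE;     /* assume a two-page vdso */
        const unsigned long trials = 100000;
        unsigned long at_end = 0;

        srand(1);
        for (unsigned long i = 0; i < trials; i++) {
                /* Model a randomized stack top somewhere inside a PMD. */
                uint64_t start = 0x7fff00000000ULL +
                                 ((uint64_t)(rand() % PTRS_PER_PTE) << PAGE_SHIFT);
                uint64_t end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;

                if (old_vdso_addr(start, len) == end)
                        at_end++;
        }
        printf("old algorithm: %.1f%% of runs land on the last usable address\n",
               100.0 * at_end / trials);
        return 0;
}

With the new code, the offset is instead drawn uniformly from every page-aligned slot between the page-aligned start and the highest address that still fits below the PMD boundary, so no single address dominates (though, as the commit message notes, a residual bias toward the end of the PMD remains).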

Modified: dists/wheezy-security/linux/debian/patches/series
==============================================================================
--- dists/wheezy-security/linux/debian/patches/series	Thu Jan 29 04:51:00 2015	(r22303)
+++ dists/wheezy-security/linux/debian/patches/series	Thu Jan 29 04:55:48 2015	(r22304)
@@ -1157,3 +1157,7 @@
 bugfix/all/crypto-prefix-module-autoloading-with-crypto.patch
 bugfix/all/crypto-include-crypto-module-prefix-in-template.patch
 bugfix/all/crypto-add-missing-crypto-module-aliases.patch
+bugfix/all/netfilter-conntrack-disable-generic-tracking-for-kno.patch
+bugfix/x86/x86_64-vdso-fix-the-vdso-address-randomization-algor.patch
+bugfix/x86/kvm-x86-emulator-reject-sysenter-in-compatibility-mo.patch
+bugfix/x86/kvm-x86-sysenter-emulation-is-broken.patch


