[kernel] r22011 - in dists/wheezy-security/linux/debian: . patches patches/bugfix/all patches/bugfix/x86 patches/debian

Ben Hutchings benh at moszumanska.debian.org
Fri Oct 31 16:37:44 UTC 2014


Author: benh
Date: Fri Oct 31 16:37:44 2014
New Revision: 22011

Log:
Changes for 3.2.63-2+deb7u1

Added:
   dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch
   dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
   dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
   dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
   dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
   dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch
   dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch
   dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
Modified:
   dists/wheezy-security/linux/debian/changelog
   dists/wheezy-security/linux/debian/patches/series

Modified: dists/wheezy-security/linux/debian/changelog
==============================================================================
--- dists/wheezy-security/linux/debian/changelog	Fri Oct 31 15:37:32 2014	(r22010)
+++ dists/wheezy-security/linux/debian/changelog	Fri Oct 31 16:37:44 2014	(r22011)
@@ -1,3 +1,29 @@
+linux (3.2.63-2+deb7u1) wheezy-security; urgency=high
+
+  * drivers/net,ipv6: Fix virtio/IPv6 regression in 3.2.63:
+    - ipv6: reuse ip6_frag_id from ip6_ufo_append_data (Closes: #766195)
+      (CVE-2014-7207)
+    - drivers/net: Disable UFO through virtio
+    - drivers/net,ipv6: Select IPv6 fragment idents for virtio UFO packets
+  * [x86] KVM: Check non-canonical addresses upon WRMSR (CVE-2014-3610)
+  * [x86] KVM: Improve thread safety in pit (CVE-2014-3611)
+  * [x86] KVM: nEPT: Nested INVEPT (CVE-2014-3645)
+  * [x86] kvm: vmx: handle invvpid vm exit gracefully (CVE-2014-3646)
+  * [x86] KVM: emulator: Use opcode::execute for CALL
+  * [x86] KVM: Fix wrong masking on relative jump/call
+  * [x86] KVM: Emulator fixes for eip canonical checks on near branches
+    (CVE-2014-3647)
+  * [x86] KVM: use new CS.RPL as CPL during task switch
+  * [x86] KVM: Handle errors when RIP is set during far jumps (CVE-2014-3647)
+  * net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
+    (CVE-2014-3673)
+  * net: sctp: fix panic on duplicate ASCONF chunks (CVE-2014-3687)
+  * net: sctp: fix remote memory pressure from excessive queueing
+    (CVE-2014-3688)
+  * [x86] kvm,vmx: Preserve CR4 across VM entry (CVE-2014-3690)
+
+ -- Ben Hutchings <ben at decadent.org.uk>  Wed, 29 Oct 2014 23:35:20 +0000
+
 linux (3.2.63-2) wheezy; urgency=medium
 
   * [s390*] Ignore ABI change in lowcore structure (fixes FTBFS)

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-disable-ufo-through-virtio.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,176 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 23 Oct 2014 00:15:30 +0100
+Subject: drivers/net: Disable UFO through virtio
+Forwarded: http://patchwork.ozlabs.org/patch/403358/
+
+IPv6 does not allow fragmentation by routers, so there is no
+fragmentation ID in the fixed header.  UFO for IPv6 requires the ID to
+be passed separately, but there is no provision for this in the virtio
+net protocol.
+
+Until recently our software implementation of UFO/IPv6 generated a new
+ID, but this was a bug.  Now we will use ID=0 for any UFO/IPv6 packet
+passed through a tap, which is even worse.
+
+Unfortunately there is no distinction between UFO/IPv4 and v6
+features, so disable UFO on taps and virtio_net completely until we
+have a proper solution.
+
+We cannot depend on VM managers respecting the tap feature flags, so
+keep accepting UFO packets but log a warning the first time we do
+this.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Fixes: 916e4cf46d02 ("ipv6: reuse ip6_frag_id from ip6_ufo_append_data")
+---
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -576,6 +576,8 @@ static int macvtap_skb_from_vnet_hdr(str
+ 			gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
++				     current->comm);
+ 			gso_type = SKB_GSO_UDP;
+ 			break;
+ 		default:
+@@ -621,8 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-		else if (sinfo->gso_type & SKB_GSO_UDP)
+-			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -960,7 +960,7 @@ static long macvtap_ioctl(struct file *f
+ 	case TUNSETOFFLOAD:
+ 		/* let the user check for future flags */
+ 		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+-			    TUN_F_TSO_ECN | TUN_F_UFO))
++			    TUN_F_TSO_ECN))
+ 			return -EINVAL;
+ 
+ 		/* TODO: only accept frames with the features that
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -125,7 +125,7 @@ struct tun_struct {
+ 	struct net_device	*dev;
+ 	u32			set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+-			  NETIF_F_TSO6|NETIF_F_UFO)
++			  NETIF_F_TSO6)
+ 	struct fasync_struct	*fasync;
+ 
+ 	struct tap_filter       txflt;
+@@ -705,8 +705,17 @@ static ssize_t tun_get_user(struct tun_s
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++		{
++			static bool warned;
++			if (!warned) {
++				warned = true;
++				netdev_warn(tun->dev,
++					    "%s: using disabled UFO feature; please fix this program\n",
++					    current->comm);
++			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			break;
++		}
+ 		default:
+ 			tun->dev->stats.rx_frame_errors++;
+ 			kfree_skb(skb);
+@@ -792,8 +801,6 @@ static ssize_t tun_put_user(struct tun_s
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-			else if (sinfo->gso_type & SKB_GSO_UDP)
+-				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 			else {
+ 				pr_err("unexpected GSO type: "
+ 				       "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1217,11 +1224,6 @@ static int set_offload(struct tun_struct
+ 				features |= NETIF_F_TSO6;
+ 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+ 		}
+-
+-		if (arg & TUN_F_UFO) {
+-			features |= NETIF_F_UFO;
+-			arg &= ~TUN_F_UFO;
+-		}
+ 	}
+ 
+ 	/* This gives the user a way to test for new features in future by
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -315,8 +315,16 @@ static void receive_buf(struct net_devic
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++		{
++			static bool warned;
++			if (!warned) {
++				warned = true;
++				netdev_warn(dev,
++					    "host using disabled UFO feature; please fix it\n");
++			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			break;
++		}
+ 		case VIRTIO_NET_HDR_GSO_TCPV6:
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+@@ -581,8 +589,6 @@ static int xmit_skb(struct virtnet_info
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+-			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -986,7 +992,7 @@ static int virtnet_probe(struct virtio_d
+ 			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ 
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+-			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
++			dev->hw_features |= NETIF_F_TSO
+ 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ 		}
+ 		/* Individual feature bits: what can host handle? */
+@@ -996,11 +1002,9 @@ static int virtnet_probe(struct virtio_d
+ 			dev->hw_features |= NETIF_F_TSO6;
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ 			dev->hw_features |= NETIF_F_TSO_ECN;
+-		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+-			dev->hw_features |= NETIF_F_UFO;
+ 
+ 		if (gso)
+-			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
++			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ 		/* (!csum && gso) case will be fixed by register_netdev() */
+ 	}
+ 
+@@ -1029,8 +1033,7 @@ static int virtnet_probe(struct virtio_d
+ 	/* If we can receive ANY GSO packets, we must allocate large ones. */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ 		vi->big_packets = true;
+ 
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1147,9 +1150,9 @@ static struct virtio_device_id id_table[
+ static unsigned int features[] = {
+ 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+ 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+-	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
++	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+-	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
++	VIRTIO_NET_F_GUEST_ECN,
+ 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+ 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+ };
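
[Editor's note: the following sketch is editorial illustration, not part of the patch above. It re-declares the legacy virtio-net per-packet header in userspace (field names are an assumption based on include/linux/virtio_net.h) to make the commit message concrete: the header carries GSO metadata but has no field in which an IPv6 fragment ID could travel, so UFO/IPv6 cannot be represented faithfully across a tap.]

/* Minimal sketch: the legacy virtio-net header, re-declared locally. */
#include <stdint.h>
#include <stdio.h>

struct vnet_hdr_sketch {            /* mirrors struct virtio_net_hdr */
	uint8_t  flags;             /* VIRTIO_NET_HDR_F_* */
	uint8_t  gso_type;          /* VIRTIO_NET_HDR_GSO_* (UDP == UFO) */
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
	/* No member exists for an IPv6 fragment ID, which is why a
	 * UFO/IPv6 packet crossing this interface loses its ID. */
};

int main(void)
{
	printf("virtio-net header is %zu bytes and has no fragment-ID field\n",
	       sizeof(struct vnet_hdr_sketch));
	return 0;
}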

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,121 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sun, 26 Oct 2014 22:59:29 +0000
+Subject: drivers/net,ipv6: Select IPv6 fragment idents for virtio UFO packets
+Forwarded: http://patchwork.ozlabs.org/patch/403359/
+
+UFO is now disabled on all drivers that work with virtio net headers,
+but userland may try to send UFO/IPv6 packets anyway.  Instead of
+sending with ID=0, we should select identifiers on their behalf (as we
+used to).
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+Fixes: 916e4cf46d02 ("ipv6: reuse ip6_frag_id from ip6_ufo_append_data")
+---
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -15,6 +15,7 @@
+ #include <linux/cdev.h>
+ #include <linux/fs.h>
+ 
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -579,6 +580,8 @@ static int macvtap_skb_from_vnet_hdr(str
+ 			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+ 				     current->comm);
+ 			gso_type = SKB_GSO_UDP;
++			if (skb->protocol == htons(ETH_P_IPV6))
++				ipv6_proxy_select_ident(skb);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -64,6 +64,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -695,6 +696,8 @@ static ssize_t tun_get_user(struct tun_s
+ 		break;
+ 	}
+ 
++	skb_reset_network_header(skb);
++
+ 	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ 		pr_debug("GSO!\n");
+ 		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -714,6 +717,8 @@ static ssize_t tun_get_user(struct tun_s
+ 					    current->comm);
+ 			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++			if (skb->protocol == htons(ETH_P_IPV6))
++				ipv6_proxy_select_ident(skb);
+ 			break;
+ 		}
+ 		default:
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -481,6 +481,7 @@ static inline int ipv6_addr_diff(const s
+ }
+ 
+ extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
++void ipv6_proxy_select_ident(struct sk_buff *skb);
+ 
+ /*
+  *	Prototypes exported by ipv6
+--- /dev/null
++++ b/net/ipv6/output_core.c
+@@ -0,0 +1,38 @@
++#include <linux/export.h>
++#include <linux/skbuff.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++	static u32 ip6_proxy_idents_hashrnd __read_mostly;
++	static bool hashrnd_initialized = false;
++	struct in6_addr buf[2];
++	struct in6_addr *addrs;
++	u32 hash, id;
++
++	addrs = skb_header_pointer(skb,
++				   skb_network_offset(skb) +
++				   offsetof(struct ipv6hdr, saddr),
++				   sizeof(buf), buf);
++	if (!addrs)
++		return;
++
++	if (unlikely(!hashrnd_initialized)) {
++		hashrnd_initialized = true;
++		get_random_bytes(&ip6_proxy_idents_hashrnd,
++				 sizeof(ip6_proxy_idents_hashrnd));
++	}
++	hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++	hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++	id = ip_idents_reserve(hash, 1);
++	skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+--- a/net/ipv6/Makefile
++++ b/net/ipv6/Makefile
+@@ -37,6 +37,6 @@ obj-$(CONFIG_NETFILTER)	+= netfilter/
+ obj-$(CONFIG_IPV6_SIT) += sit.o
+ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+ 
+-obj-y += addrconf_core.o exthdrs_core.o
++obj-y += addrconf_core.o exthdrs_core.o output_core.o
+ 
+ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
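
[Editor's note: editorial sketch, not part of the patch. It shows the idea behind ipv6_proxy_select_ident() above in standalone C: hash the IPv6 source and destination addresses with a private random seed and hand out a counter-based ID per hash bucket. The mixing function and bucket table are simplified stand-ins for the kernel's jhash and ip_idents_reserve(), so treat the exact arithmetic as an assumption for illustration only.]

/* Sketch: choose a fragment ID for a client that supplied none. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BUCKETS 256

static uint32_t seed;               /* per-boot secret in the kernel */
static uint32_t idents[BUCKETS];    /* last ID handed out per bucket */

static uint32_t mix(const uint8_t addr[16], uint32_t h)
{
	for (int i = 0; i < 16; i++)    /* fold in the 128-bit address */
		h = h * 31 + addr[i];
	return h;
}

static uint32_t proxy_select_ident(const uint8_t saddr[16],
				   const uint8_t daddr[16])
{
	uint32_t h = mix(daddr, seed);

	h = mix(saddr, h);
	return ++idents[h % BUCKETS];   /* monotonic within a flow's bucket */
}

int main(void)
{
	uint8_t src[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 1 };
	uint8_t dst[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 2 };

	srand((unsigned)time(NULL));
	seed = (uint32_t)rand();        /* get_random_bytes() in the kernel */

	for (int i = 0; i < 3; i++)
		printf("fragment id %u\n", proxy_select_ident(src, dst));
	return 0;
}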

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,34 @@
+From: Hannes Frederic Sowa <hannes at stressinduktion.org>
+Date: Fri, 21 Feb 2014 02:55:35 +0100
+Subject: ipv6: reuse ip6_frag_id from ip6_ufo_append_data
+Origin: https://git.kernel.org/linus/916e4cf46d0204806c062c8c6c4d1f633852c5b6
+
+Currently we generate a new fragmentation id on UFO segmentation. It
+is pretty hairy to identify the correct net namespace and dst there.
+Especially tunnels use IFF_XMIT_DST_RELEASE and thus have no skb_dst
+available at all.
+
+This causes unreliable or very predictable ipv6 fragmentation id
+generation while segmentation.
+
+Luckily we already have pregenerated the ip6_frag_id in
+ip6_ufo_append_data and can use it here.
+
+Signed-off-by: Hannes Frederic Sowa <hannes at stressinduktion.org>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backported to 3.2: adjust filename, indentation]
+---
+ net/ipv6/udp_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1362,7 +1362,7 @@ static struct sk_buff *udp6_ufo_fragment
+ 	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ 	fptr->nexthdr = nexthdr;
+ 	fptr->reserved = 0;
+-	ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
++	fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+ 
+ 	/* Fragment the skb. ipv6 header and the remaining fields of the
+ 	 * fragment header are updated in ipv6_gso_segment()
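
[Editor's note: editorial sketch, not part of the patch. The hunk above stores the pregenerated skb ip6_frag_id into the fragment header instead of calling ipv6_select_ident() again at segmentation time. The struct below is a local re-declaration of the IPv6 fragment extension header (per RFC 2460 / struct frag_hdr), shown purely to make explicit which on-wire field is being filled.]

/* Sketch: the IPv6 fragment extension header and its identification field. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl() */

struct frag_hdr_sketch {
	uint8_t  nexthdr;
	uint8_t  reserved;
	uint16_t frag_off;        /* fragment offset (13 bits) + M flag */
	uint32_t identification;  /* network byte order on the wire */
};

int main(void)
{
	struct frag_hdr_sketch fh = { .nexthdr = 17 /* UDP */ };
	uint32_t pregenerated_id = 0x12345678;   /* stands in for ip6_frag_id */

	fh.identification = htonl(pregenerated_id);
	printf("fragment header is %zu bytes, id on wire: 0x%08x\n",
	       sizeof(fh), (unsigned)fh.identification);
	return 0;
}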

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,87 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:32 +0200
+Subject: net: sctp: fix panic on duplicate ASCONF chunks
+Origin: https://git.kernel.org/linus/b69040d8e39f20d5215a03502a8e8b4c6ab78395
+
+When receiving e.g. a semi-well-formed connection scan in the
+form of ...
+
+  -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+  <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+  -------------------- COOKIE-ECHO -------------------->
+  <-------------------- COOKIE-ACK ---------------------
+  ---------------- ASCONF_a; ASCONF_b ----------------->
+
+... where ASCONF_a equals the ASCONF_b chunk (at least both serials
+need to be equal), we panic an SCTP server!
+
+The problem is that good-formed ASCONF chunks that we reply with
+ASCONF_ACK chunks are cached per serial. Thus, when we receive a
+same ASCONF chunk twice (e.g. through a lost ASCONF_ACK), we do
+not need to process them again on the server side (that was the
+idea, also proposed in the RFC). Instead, we know it was cached
+and we just resend the cached chunk instead. So far, so good.
+
+Where things get nasty is in SCTP's side effect interpreter, that
+is, sctp_cmd_interpreter():
+
+While incoming ASCONF_a (chunk = event_arg) is being marked
+!end_of_packet and !singleton, and we have an association context,
+we do not flush the outqueue the first time after processing the
+ASCONF_ACK singleton chunk via SCTP_CMD_REPLY. Instead, we keep it
+queued up, although we set local_cork to 1. Commit 2e3216cd54b1
+changed the precedence, so that as long as we get bundled incoming
+chunks, we try possible bundling on the outgoing queue as well. Before
+this commit, we would just flush the output queue.
+
+Now, while ASCONF_a's ASCONF_ACK sits in the corked outq, we
+continue to process the same ASCONF_b chunk from the packet. As
+we have cached the previous ASCONF_ACK, we find it, grab it and
+do another SCTP_CMD_REPLY command on it. So, effectively, we rip
+the chunk->list pointers and requeue the same ASCONF_ACK chunk
+another time. Since we process ASCONF_b, it's correctly marked
+with end_of_packet and we enforce an uncork, and thus flush, thus
+crashing the kernel.
+
+Fix it by testing if the ASCONF_ACK is currently pending and if
+that is the case, do not requeue it. When flushing the output
+queue we may relink the chunk for preparing an outgoing packet,
+but eventually unlink it when it's copied into the skb right
+before transmission.
+
+Joint work with Vlad Yasevich.
+
+Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ include/net/sctp/sctp.h | 5 +++++
+ net/sctp/associola.c    | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -524,6 +524,11 @@ static inline void sctp_assoc_pending_pm
+ 	asoc->pmtu_pending = 0;
+ }
+ 
++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
++{
++	return !list_empty(&chunk->list);
++}
++
+ /* Walk through a list of TLV parameters.  Don't trust the
+  * individual parameter lengths and instead depend on
+  * the chunk length to indicate when to stop.  Make sure
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1638,6 +1638,8 @@ struct sctp_chunk *sctp_assoc_lookup_asc
+ 	 * ack chunk whose serial number matches that of the request.
+ 	 */
+ 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
++		if (sctp_chunk_pending(ack))
++			continue;
+ 		if (ack->subh.addip_hdr->serial == serial) {
+ 			sctp_chunk_hold(ack);
+ 			return ack;
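
[Editor's note: editorial sketch, not part of the patch. The fix hinges on the list_empty() idiom: a chunk whose list node was set up or removed with list_del_init() points back to itself, so a non-empty node means "this ASCONF-ACK is still sitting on an output queue" and must not be handed out again. The kernel list primitives are re-implemented minimally below for a standalone demonstration; the queue/unlink points are assumptions mirroring the commit message (queued by SCTP_CMD_REPLY, unlinked when copied into the outgoing skb).]

/* Sketch: why !list_empty(&chunk->list) means "already pending". */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

static bool chunk_pending(const struct list_head *chunk_list)
{
	return chunk_list->next != chunk_list;   /* i.e. !list_empty() */
}

int main(void)
{
	struct list_head outq, ack;

	INIT_LIST_HEAD(&outq);
	INIT_LIST_HEAD(&ack);

	printf("before queueing: pending=%d\n", chunk_pending(&ack)); /* 0 */
	list_add_tail(&ack, &outq);          /* SCTP_CMD_REPLY queues the ACK */
	printf("while queued:    pending=%d\n", chunk_pending(&ack)); /* 1 -> skip */
	list_del_init(&ack);                 /* unlinked when copied to the skb */
	printf("after transmit:  pending=%d\n", chunk_pending(&ack)); /* 0 */
	return 0;
}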

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,145 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:33 +0200
+Subject: net: sctp: fix remote memory pressure from excessive queueing
+Origin: https://git.kernel.org/linus/26b87c7881006311828bb0ab271a551a62dcceb4
+
+This scenario is not limited to ASCONF, just taken as one
+example triggering the issue. When receiving ASCONF probes
+in the form of ...
+
+  -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+  <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+  -------------------- COOKIE-ECHO -------------------->
+  <-------------------- COOKIE-ACK ---------------------
+  ---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
+  [...]
+  ---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>
+
+... where ASCONF_a, ASCONF_b, ..., ASCONF_z are good-formed
+ASCONFs and have increasing serial numbers, we process such
+ASCONF chunk(s) marked with !end_of_packet and !singleton,
+since we have not yet reached the SCTP packet end. SCTP does
+only do verification on a chunk by chunk basis, as an SCTP
+packet is nothing more than just a container of a stream of
+chunks which it eats up one by one.
+
+We could run into the case that we receive a packet with a
+malformed tail, above marked as trailing JUNK. All previous
+chunks here are well-formed, so the stack will eat up all
+previous chunks up to this point. In case JUNK does not fit
+into a chunk header and there are no more other chunks in
+the input queue, or in case JUNK contains a garbage chunk
+header, but the encoded chunk length would exceed the skb
+tail, or we came here from an entirely different scenario
+and the chunk has pdiscard=1 mark (without having had a flush
+point), it can happen that we excessively queue up
+the association's output queue (a correct final chunk may
+then turn it into a response flood when flushing the
+queue ;)): I ran a simple script with incremental ASCONF
+serial numbers and could see the server side consuming
+an excessive amount of RAM [before/after: up to 2GB and more].
+
+The issue at heart is that the chunk train basically ends
+with !end_of_packet and !singleton markers and since commit
+2e3216cd54b1 ("sctp: Follow security requirement of responding
+with 1 packet") therefore preventing an output queue flush
+point in sctp_do_sm() -> sctp_cmd_interpreter() on the input
+chunk (chunk = event_arg) even though local_cork is set,
+but its precedence has changed since then. In the normal
+case, the last chunk with end_of_packet=1 would trigger the
+queue flush to accommodate possible outgoing bundling.
+
+In the input queue, sctp_inq_pop() seems to do the right thing
+in terms of discarding invalid chunks. So, above JUNK will
+not enter the state machine and instead be released and exit
+the sctp_assoc_bh_rcv() chunk processing loop. It's simply
+the flush point being missing at loop exit. Adding a try-flush
+approach on the output queue might not work as the underlying
+infrastructure might be long gone at this point due to the
+side-effect interpreter run.
+
+One possibility, albeit a bit of a kludge, would be to defer
+invalid chunk freeing into the state machine in order to
+possibly trigger packet discards and thus indirectly a queue
+flush on error. It would surely be better to discard chunks
+as in the current, perhaps better controlled environment, but
+going back and forth, it's simply architecturally not possible.
+I tried various trailing JUNK attack cases and it seems to
+look good now.
+
+Joint work with Vlad Yasevich.
+
+Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ net/sctp/inqueue.c      | 33 +++++++--------------------------
+ net/sctp/sm_statefuns.c |  3 +++
+ 2 files changed, 10 insertions(+), 26 deletions(-)
+
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -152,18 +152,9 @@ struct sctp_chunk *sctp_inq_pop(struct s
+ 		} else {
+ 			/* Nothing to do. Next chunk in the packet, please. */
+ 			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+-
+ 			/* Force chunk->skb->data to chunk->chunk_end.  */
+-			skb_pull(chunk->skb,
+-				 chunk->chunk_end - chunk->skb->data);
+-
+-			/* Verify that we have at least chunk headers
+-			 * worth of buffer left.
+-			 */
+-			if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+-				sctp_chunk_free(chunk);
+-				chunk = queue->in_progress = NULL;
+-			}
++			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
++			/* We are guaranteed to pull a SCTP header. */
+ 		}
+ 	}
+ 
+@@ -199,24 +190,14 @@ struct sctp_chunk *sctp_inq_pop(struct s
+ 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
+ 
+-	if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
++	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++	    skb_tail_pointer(chunk->skb)) {
+ 		/* This is not a singleton */
+ 		chunk->singleton = 0;
+ 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+-		/* RFC 2960, Section 6.10  Bundling
+-		 *
+-		 * Partial chunks MUST NOT be placed in an SCTP packet.
+-		 * If the receiver detects a partial chunk, it MUST drop
+-		 * the chunk.
+-		 *
+-		 * Since the end of the chunk is past the end of our buffer
+-		 * (which contains the whole packet, we can freely discard
+-		 * the whole packet.
+-		 */
+-		sctp_chunk_free(chunk);
+-		chunk = queue->in_progress = NULL;
+-
+-		return NULL;
++		/* Discard inside state machine. */
++		chunk->pdiscard = 1;
++		chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ 	} else {
+ 		/* We are at the end of the packet, so mark the chunk
+ 		 * in case we need to send a SACK.
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -163,6 +163,9 @@ sctp_chunk_length_valid(struct sctp_chun
+ {
+ 	__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+ 
++	/* Previously already marked? */
++	if (unlikely(chunk->pdiscard))
++		return 0;
+ 	if (unlikely(chunk_length < required_length))
+ 		return 0;
+ 
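
[Editor's note: editorial sketch, not part of the patch. It illustrates the clamp-and-flag pattern the fix adopts: instead of silently freeing a truncated trailing chunk in sctp_inq_pop() (which loses the flush point), the chunk is clamped to the buffer tail and marked pdiscard so a later length check rejects it inside the state machine. The record format, field sizes and input bytes below are invented for illustration; only the walking pattern corresponds to the patch.]

/* Sketch: walk length-prefixed records, clamping a lying trailing length. */
#include <stdint.h>
#include <stdio.h>

struct rec_hdr { uint8_t type; uint8_t flags; uint16_t length; }; /* 4 bytes */

struct rec {
	const uint8_t *data;
	const uint8_t *end;   /* data + (possibly clamped) length */
	int pdiscard;         /* malformed tail: reject later, not here */
};

static int length_valid(const struct rec *r, size_t required)
{
	if (r->pdiscard)      /* previously marked? (cf. sctp_chunk_length_valid) */
		return 0;
	return (size_t)(r->end - r->data) >= required;
}

int main(void)
{
	/* one good 8-byte record followed by 4 bytes of JUNK claiming 64 */
	uint8_t buf[12] = { 1, 0, 0, 8,  'd', 'a', 't', 'a',  9, 0, 0, 64 };
	const uint8_t *tail = buf + sizeof(buf);
	const uint8_t *p = buf;

	while ((size_t)(tail - p) >= sizeof(struct rec_hdr)) {
		struct rec r = { .data = p };
		size_t remaining = (size_t)(tail - p);
		size_t len = (size_t)((p[2] << 8) | p[3]);  /* big-endian length */

		if (len < sizeof(struct rec_hdr))
			break;                   /* too short to be a record */
		if (len > remaining) {           /* claimed length exceeds buffer */
			r.pdiscard = 1;          /* defer the rejection */
			len = remaining;         /* clamp to the tail, as the fix does */
		}
		r.end = p + len;
		printf("type %u len %zu -> %s\n", p[0], len,
		       length_valid(&r, sizeof(struct rec_hdr))
		       ? "process" : "discard whole packet");
		p = r.end;
	}
	return 0;
}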

Added: dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,333 @@
+From: Daniel Borkmann <dborkman at redhat.com>
+Date: Thu, 9 Oct 2014 22:55:31 +0200
+Subject: net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
+Origin: https://git.kernel.org/linus/9de7922bc709eee2f609cd01d98aaedc4cf5ea74
+
+Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for
+ASCONF chunk") added basic verification of ASCONF chunks, however,
+it is still possible to remotely crash a server by sending a
+specially crafted ASCONF chunk, even back to pre-2.6.12 kernels:
+
+skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768
+ head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950
+ end:0x440 dev:<NULL>
+ ------------[ cut here ]------------
+kernel BUG at net/core/skbuff.c:129!
+[...]
+Call Trace:
+ <IRQ>
+ [<ffffffff8144fb1c>] skb_put+0x5c/0x70
+ [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp]
+ [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp]
+ [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20
+ [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp]
+ [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
+ [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0
+ [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp]
+ [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp]
+ [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
+ [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
+ [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
+ [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
+ [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
+ [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
+ [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
+ [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0
+ [<ffffffff81497078>] ip_local_deliver+0x98/0xa0
+ [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440
+ [<ffffffff81496ac5>] ip_rcv+0x275/0x350
+ [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750
+ [<ffffffff81460588>] netif_receive_skb+0x58/0x60
+
+This can be triggered e.g., through a simple scripted nmap
+connection scan injecting the chunk after the handshake, for
+example, ...
+
+  -------------- INIT[ASCONF; ASCONF_ACK] ------------->
+  <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
+  -------------------- COOKIE-ECHO -------------------->
+  <-------------------- COOKIE-ACK ---------------------
+  ------------------ ASCONF; UNKNOWN ------------------>
+
+... where ASCONF chunk of length 280 contains 2 parameters ...
+
+  1) Add IP address parameter (param length: 16)
+  2) Add/del IP address parameter (param length: 255)
+
+... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the
+Address Parameter in the ASCONF chunk is even missing, too.
+This is just an example and similarly-crafted ASCONF chunks
+could be used just as well.
+
+The ASCONF chunk passes through sctp_verify_asconf() as all
+parameters passed sanity checks, and after walking, we ended
+up successfully at the chunk end boundary, and thus may invoke
+sctp_process_asconf(). Parameter walking is done with
+WORD_ROUND() to take padding into account.
+
+In sctp_process_asconf()'s TLV processing, we may fail in
+sctp_process_asconf_param() e.g., due to removal of the IP
+address that is also the source address of the packet containing
+the ASCONF chunk, and thus we need to add all TLVs after the
+failure to our ASCONF response to remote via helper function
+sctp_add_asconf_response(), which basically invokes a
+sctp_addto_chunk() adding the error parameters to the given
+skb.
+
+When walking to the next parameter this time, we proceed
+with ...
+
+  length = ntohs(asconf_param->param_hdr.length);
+  asconf_param = (void *)asconf_param + length;
+
+... instead of the WORD_ROUND()'ed length, thus resulting here
+in an off-by-one that leads to reading the follow-up garbage
+parameter length of 12336, and thus throwing an skb_over_panic
+for the reply when trying to sctp_addto_chunk() next time,
+which implicitly calls the skb_put() with that length.
+
+Fix it by using sctp_walk_params() [ which is also used in
+INIT parameter processing ] macro in the verification *and*
+in ASCONF processing: it will make sure we don't spill over,
+that we walk parameters WORD_ROUND()'ed. Moreover, we're being
+more defensive and guard against unknown parameter types and
+missized addresses.
+
+Joint work with Vlad Yasevich.
+
+Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.")
+Signed-off-by: Daniel Borkmann <dborkman at redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevich at gmail.com>
+Acked-by: Neil Horman <nhorman at tuxdriver.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - sctp_sf_violation_paramlen() doesn't take a struct net * parameter]
+---
+ include/net/sctp/sm.h    |  6 +--
+ net/sctp/sm_make_chunk.c | 99 +++++++++++++++++++++++++++---------------------
+ net/sctp/sm_statefuns.c  | 18 +--------
+ 3 files changed, 60 insertions(+), 63 deletions(-)
+
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -251,9 +251,9 @@ struct sctp_chunk *sctp_make_asconf_upda
+ 					      int, __be16);
+ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ 					     union sctp_addr *addr);
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+-		       struct sctp_paramhdr *param_hdr, void *chunk_end,
+-		       struct sctp_paramhdr **errp);
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++			struct sctp_chunk *chunk, bool addr_param_needed,
++			struct sctp_paramhdr **errp);
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 				       struct sctp_chunk *asconf);
+ int sctp_process_asconf_ack(struct sctp_association *asoc,
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -3068,50 +3068,63 @@ static __be16 sctp_process_asconf_param(
+ 	return SCTP_ERROR_NO_ERROR;
+ }
+ 
+-/* Verify the ASCONF packet before we process it.  */
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+-		       struct sctp_paramhdr *param_hdr, void *chunk_end,
+-		       struct sctp_paramhdr **errp) {
+-	sctp_addip_param_t *asconf_param;
++/* Verify the ASCONF packet before we process it. */
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++			struct sctp_chunk *chunk, bool addr_param_needed,
++			struct sctp_paramhdr **errp)
++{
++	sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
+ 	union sctp_params param;
+-	int length, plen;
+-
+-	param.v = (sctp_paramhdr_t *) param_hdr;
+-	while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+-		length = ntohs(param.p->length);
+-		*errp = param.p;
++	bool addr_param_seen = false;
+ 
+-		if (param.v > chunk_end - length ||
+-		    length < sizeof(sctp_paramhdr_t))
+-			return 0;
++	sctp_walk_params(param, addip, addip_hdr.params) {
++		size_t length = ntohs(param.p->length);
+ 
++		*errp = param.p;
+ 		switch (param.p->type) {
++		case SCTP_PARAM_ERR_CAUSE:
++			break;
++		case SCTP_PARAM_IPV4_ADDRESS:
++			if (length != sizeof(sctp_ipv4addr_param_t))
++				return false;
++			addr_param_seen = true;
++			break;
++		case SCTP_PARAM_IPV6_ADDRESS:
++			if (length != sizeof(sctp_ipv6addr_param_t))
++				return false;
++			addr_param_seen = true;
++			break;
+ 		case SCTP_PARAM_ADD_IP:
+ 		case SCTP_PARAM_DEL_IP:
+ 		case SCTP_PARAM_SET_PRIMARY:
+-			asconf_param = (sctp_addip_param_t *)param.v;
+-			plen = ntohs(asconf_param->param_hdr.length);
+-			if (plen < sizeof(sctp_addip_param_t) +
+-			    sizeof(sctp_paramhdr_t))
+-				return 0;
++			/* In ASCONF chunks, these need to be first. */
++			if (addr_param_needed && !addr_param_seen)
++				return false;
++			length = ntohs(param.addip->param_hdr.length);
++			if (length < sizeof(sctp_addip_param_t) +
++				     sizeof(sctp_paramhdr_t))
++				return false;
+ 			break;
+ 		case SCTP_PARAM_SUCCESS_REPORT:
+ 		case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ 			if (length != sizeof(sctp_addip_param_t))
+-				return 0;
+-
++				return false;
+ 			break;
+ 		default:
+-			break;
++			/* This is unkown to us, reject! */
++			return false;
+ 		}
+-
+-		param.v += WORD_ROUND(length);
+ 	}
+ 
+-	if (param.v != chunk_end)
+-		return 0;
++	/* Remaining sanity checks. */
++	if (addr_param_needed && !addr_param_seen)
++		return false;
++	if (!addr_param_needed && addr_param_seen)
++		return false;
++	if (param.v != chunk->chunk_end)
++		return false;
+ 
+-	return 1;
++	return true;
+ }
+ 
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
+@@ -3120,16 +3133,17 @@ int sctp_verify_asconf(const struct sctp
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 				       struct sctp_chunk *asconf)
+ {
++	sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
++	bool all_param_pass = true;
++	union sctp_params param;
+ 	sctp_addiphdr_t		*hdr;
+ 	union sctp_addr_param	*addr_param;
+ 	sctp_addip_param_t	*asconf_param;
+ 	struct sctp_chunk	*asconf_ack;
+-
+ 	__be16	err_code;
+ 	int	length = 0;
+ 	int	chunk_len;
+ 	__u32	serial;
+-	int	all_param_pass = 1;
+ 
+ 	chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ 	hdr = (sctp_addiphdr_t *)asconf->skb->data;
+@@ -3157,9 +3171,14 @@ struct sctp_chunk *sctp_process_asconf(s
+ 		goto done;
+ 
+ 	/* Process the TLVs contained within the ASCONF chunk. */
+-	while (chunk_len > 0) {
++	sctp_walk_params(param, addip, addip_hdr.params) {
++		/* Skip preceeding address parameters. */
++		if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)
++			continue;
++
+ 		err_code = sctp_process_asconf_param(asoc, asconf,
+-						     asconf_param);
++						     param.addip);
+ 		/* ADDIP 4.1 A7)
+ 		 * If an error response is received for a TLV parameter,
+ 		 * all TLVs with no response before the failed TLV are
+@@ -3167,28 +3186,20 @@ struct sctp_chunk *sctp_process_asconf(s
+ 		 * the failed response are considered unsuccessful unless
+ 		 * a specific success indication is present for the parameter.
+ 		 */
+-		if (SCTP_ERROR_NO_ERROR != err_code)
+-			all_param_pass = 0;
+-
++		if (err_code != SCTP_ERROR_NO_ERROR)
++			all_param_pass = false;
+ 		if (!all_param_pass)
+-			sctp_add_asconf_response(asconf_ack,
+-						 asconf_param->crr_id, err_code,
+-						 asconf_param);
++			sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
++						 err_code, param.addip);
+ 
+ 		/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ 		 * an IP address sends an 'Out of Resource' in its response, it
+ 		 * MUST also fail any subsequent add or delete requests bundled
+ 		 * in the ASCONF.
+ 		 */
+-		if (SCTP_ERROR_RSRC_LOW == err_code)
++		if (err_code == SCTP_ERROR_RSRC_LOW)
+ 			goto done;
+-
+-		/* Move to the next ASCONF param. */
+-		length = ntohs(asconf_param->param_hdr.length);
+-		asconf_param = (void *)asconf_param + length;
+-		chunk_len -= length;
+ 	}
+-
+ done:
+ 	asoc->peer.addip_serial++;
+ 
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3516,9 +3516,7 @@ sctp_disposition_t sctp_sf_do_asconf(con
+ 	struct sctp_chunk	*asconf_ack = NULL;
+ 	struct sctp_paramhdr	*err_param = NULL;
+ 	sctp_addiphdr_t		*hdr;
+-	union sctp_addr_param	*addr_param;
+ 	__u32			serial;
+-	int			length;
+ 
+ 	if (!sctp_vtag_verify(chunk, asoc)) {
+ 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+@@ -3543,17 +3541,8 @@ sctp_disposition_t sctp_sf_do_asconf(con
+ 	hdr = (sctp_addiphdr_t *)chunk->skb->data;
+ 	serial = ntohl(hdr->serial);
+ 
+-	addr_param = (union sctp_addr_param *)hdr->params;
+-	length = ntohs(addr_param->p.length);
+-	if (length < sizeof(sctp_paramhdr_t))
+-		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+-			   (void *)addr_param, commands);
+-
+ 	/* Verify the ASCONF chunk before processing it. */
+-	if (!sctp_verify_asconf(asoc,
+-			    (sctp_paramhdr_t *)((void *)addr_param + length),
+-			    (void *)chunk->chunk_end,
+-			    &err_param))
++	if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ 		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+ 						  (void *)err_param, commands);
+ 
+@@ -3670,10 +3659,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack
+ 	rcvd_serial = ntohl(addip_hdr->serial);
+ 
+ 	/* Verify the ASCONF-ACK chunk before processing it. */
+-	if (!sctp_verify_asconf(asoc,
+-	    (sctp_paramhdr_t *)addip_hdr->params,
+-	    (void *)asconf_ack->chunk_end,
+-	    &err_param))
++	if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ 		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+ 			   (void *)err_param, commands);
+ 
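
[Editor's note: editorial sketch, not part of the patch. It reproduces the off-by-one arithmetic described above in plain C: SCTP parameters are padded to 4-byte boundaries, so a walker must advance by WORD_ROUND(length); advancing by the raw length after a 255-byte parameter lands one byte short of the next boundary, and the "length" read there is unrelated bytes (12336 in the report above). The WORD_ROUND definition matches the kernel macro; the parameter sizes are taken from the commit message.]

/* Sketch: raw-length walk vs. WORD_ROUND()'ed walk over two parameters. */
#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3u)

int main(void)
{
	/* the two ASCONF parameters from the report: 16 bytes, then 255 bytes */
	unsigned lengths[] = { 16, 255 };
	unsigned off_raw = 0, off_rounded = 0;

	for (int i = 0; i < 2; i++) {
		off_raw     += lengths[i];               /* buggy walk */
		off_rounded += WORD_ROUND(lengths[i]);   /* correct walk */
	}
	/* prints 271 vs 272: the raw walk stops one byte short of the next
	 * parameter, so its 16-bit length field is read from garbage. */
	printf("raw walk ends at %u, padded walk ends at %u\n",
	       off_raw, off_rounded);
	return 0;
}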

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,137 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Tue, 16 Sep 2014 03:24:05 +0300
+Subject: KVM: x86: Check non-canonical addresses upon WRMSR
+Origin: https://git.kernel.org/linus/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
+
+Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
+written to certain MSRs. The behavior is "almost" identical for AMD and Intel
+(ignoring MSRs that are not implemented in either architecture since they would
+anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
+non-canonical address is written on Intel but not on AMD (which ignores the top
+32-bits).
+
+Accordingly, this patch injects a #GP on the MSRs which behave identically on
+Intel and AMD.  To eliminate the differences between the architectures, the
+value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned
+into a canonical value before writing instead of injecting a #GP.
+
+Some references from Intel and AMD manuals:
+
+According to Intel SDM description of WRMSR instruction #GP is expected on
+WRMSR "If the source register contains a non-canonical address and ECX
+specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
+IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
+
+According to the AMD instruction manual:
+LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
+LSTAR and CSTAR registers.  If an RIP written by WRMSR is not in canonical
+form, a general-protection exception (#GP) occurs."
+IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
+base field must be in canonical form or a #GP fault will occur."
+IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
+be in canonical form."
+
+This patch fixes CVE-2014-3610.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - The various set_msr() functions all separate msr_index and data parameters]
+---
+ arch/x86/include/asm/kvm_host.h | 14 ++++++++++++++
+ arch/x86/kvm/svm.c              |  2 +-
+ arch/x86/kvm/vmx.c              |  2 +-
+ arch/x86/kvm/x86.c              | 27 ++++++++++++++++++++++++++-
+ 4 files changed, 42 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -821,6 +821,20 @@ static inline void kvm_inject_gp(struct
+ 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+ 
++static inline u64 get_canonical(u64 la)
++{
++	return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++	return get_canonical(la) != la;
++#else
++	return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3109,7 +3109,7 @@ static int wrmsr_interception(struct vcp
+ 
+ 
+ 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
++	if (kvm_set_msr(&svm->vcpu, ecx, data)) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(&svm->vcpu, 0);
+ 	} else {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4548,7 +4548,7 @@ static int handle_wrmsr(struct kvm_vcpu
+ 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+ 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ 
+-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
++	if (kvm_set_msr(vcpu, ecx, data) != 0) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(vcpu, 0);
+ 		return 1;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -893,7 +893,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ 
+-
+ /*
+  * Writes msr value into into the appropriate "register".
+  * Returns 0 on success, non-0 otherwise.
+@@ -901,8 +900,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+  */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+ {
++	switch (msr_index) {
++	case MSR_FS_BASE:
++	case MSR_GS_BASE:
++	case MSR_KERNEL_GS_BASE:
++	case MSR_CSTAR:
++	case MSR_LSTAR:
++		if (is_noncanonical_address(data))
++			return 1;
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++	case MSR_IA32_SYSENTER_ESP:
++		/*
++		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++		 * non-canonical address is written on Intel but not on
++		 * AMD (which ignores the top 32-bits, because it does
++		 * not implement 64-bit SYSENTER).
++		 *
++		 * 64-bit code should hence be able to write a non-canonical
++		 * value on AMD.  Making the address canonical ensures that
++		 * vmentry does not fail on Intel after writing a non-canonical
++		 * value, and that something deterministic happens if the guest
++		 * invokes 64-bit SYSENTER.
++		 */
++		data = get_canonical(data);
++	}
+ 	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+ 
+ /*
+  * Adapt set_msr() to msr_io()'s calling convention
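
[Editor's note: editorial sketch, not part of the patch. On x86-64 a canonical address has bits 63:48 equal to bit 47; get_canonical() in the hunk above reproduces that by sign-extension, and WRMSR to the listed MSRs must #GP when the check fails. The helper below implements the same test with explicit masking (to stay clear of signed-shift corner cases in standalone C); the example values are chosen for illustration.]

/* Sketch: the non-canonical test applied before accepting a WRMSR value. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t get_canonical(uint64_t la)
{
	/* sign-extend from bit 47, i.e. ((int64_t)la << 16) >> 16 */
	return (la & (1ULL << 47)) ? (la | 0xffff000000000000ULL)
				   : (la & 0x0000ffffffffffffULL);
}

static int is_noncanonical(uint64_t la)
{
	return get_canonical(la) != la;
}

int main(void)
{
	uint64_t ok  = 0xffff800000000000ULL;  /* canonical (upper half) */
	uint64_t bad = 0x0000800000000000ULL;  /* bit 47 set, bits 63:48 clear */

	printf("%#018" PRIx64 " -> %s\n", ok,
	       is_noncanonical(ok)  ? "inject #GP" : "accept");
	printf("%#018" PRIx64 " -> %s\n", bad,
	       is_noncanonical(bad) ? "inject #GP" : "accept");
	return 0;
}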

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,232 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:38 +0300
+Subject: KVM: x86: Emulator fixes for eip canonical checks on near branches
+Origin: https://git.kernel.org/linus/234f3ce485d54017f15cf5e0699cff4100121601
+
+Before changing rip (during jmp, call, ret, etc.) the target should be asserted
+to be a canonical one, as real CPUs do.  During sysret, both target rsp and rip
+should be canonical. If any of these values is noncanonical, a #GP exception
+should occur.  The exception to this rule are syscall and sysenter instructions
+in which the assigned rip is checked during the assignment to the relevant
+MSRs.
+
+This patch fixes the emulator to behave as real CPUs do for near branches.
+Far branches are handled by the next patch.
+
+This fixes CVE-2014-3647.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Use ctxt->regs[] instead of reg_read(), reg_write(), reg_rmw()]
+---
+ arch/x86/kvm/emulate.c | 78 ++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 54 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -529,7 +529,8 @@ static int emulate_nm(struct x86_emulate
+ 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+ 
+-static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++			       int cs_l)
+ {
+ 	switch (ctxt->op_bytes) {
+ 	case 2:
+@@ -539,16 +540,25 @@ static inline void assign_eip_near(struc
+ 		ctxt->_eip = (u32)dst;
+ 		break;
+ 	case 8:
++		if ((cs_l && is_noncanonical_address(dst)) ||
++		    (!cs_l && (dst & ~(u32)-1)))
++			return emulate_gp(ctxt, 0);
+ 		ctxt->_eip = dst;
+ 		break;
+ 	default:
+ 		WARN(1, "unsupported eip assignment size\n");
+ 	}
++	return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
+ }
+ 
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ {
+-	assign_eip_near(ctxt, ctxt->_eip + rel);
++	return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+ 
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+@@ -1787,13 +1797,15 @@ static int em_grp45(struct x86_emulate_c
+ 	case 2: /* call near abs */ {
+ 		long int old_eip;
+ 		old_eip = ctxt->_eip;
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
++		if (rc != X86EMUL_CONTINUE)
++			break;
+ 		ctxt->src.val = old_eip;
+ 		rc = em_push(ctxt);
+ 		break;
+ 	}
+ 	case 4: /* jmp abs */
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
+ 		break;
+ 	case 5: /* jmp far */
+ 		rc = em_jmp_far(ctxt);
+@@ -1825,10 +1837,14 @@ static int em_grp9(struct x86_emulate_ct
+ 
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	return em_pop(ctxt);
++	int rc;
++	unsigned long eip;
++
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++
++	return assign_eip_near(ctxt, eip);
+ }
+ 
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+@@ -2060,7 +2076,7 @@ static int em_sysexit(struct x86_emulate
+ {
+ 	struct x86_emulate_ops *ops = ctxt->ops;
+ 	struct desc_struct cs, ss;
+-	u64 msr_data;
++	u64 msr_data, rcx, rdx;
+ 	int usermode;
+ 	u16 cs_sel = 0, ss_sel = 0;
+ 
+@@ -2076,6 +2092,9 @@ static int em_sysexit(struct x86_emulate
+ 	else
+ 		usermode = X86EMUL_MODE_PROT32;
+ 
++	rcx = ctxt->regs[VCPU_REGS_RCX];
++	rdx = ctxt->regs[VCPU_REGS_RDX];
++
+ 	cs.dpl = 3;
+ 	ss.dpl = 3;
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2093,6 +2112,9 @@ static int em_sysexit(struct x86_emulate
+ 		ss_sel = cs_sel + 8;
+ 		cs.d = 0;
+ 		cs.l = 1;
++		if (is_noncanonical_address(rcx) ||
++		    is_noncanonical_address(rdx))
++			return emulate_gp(ctxt, 0);
+ 		break;
+ 	}
+ 	cs_sel |= SELECTOR_RPL_MASK;
+@@ -2101,8 +2123,8 @@ static int em_sysexit(struct x86_emulate
+ 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+-	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
+-	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
++	ctxt->_eip = rdx;
++	ctxt->regs[VCPU_REGS_RSP] = rcx;
+ 
+ 	return X86EMUL_CONTINUE;
+ }
+@@ -2555,10 +2577,13 @@ static int em_das(struct x86_emulate_ctx
+ 
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc;
+ 	long rel = ctxt->src.val;
+ 
+ 	ctxt->src.val = (unsigned long)ctxt->_eip;
+-	jmp_rel(ctxt, rel);
++	rc = jmp_rel(ctxt, rel);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
+ 	return em_push(ctxt);
+ }
+ 
+@@ -2590,11 +2615,12 @@ static int em_call_far(struct x86_emulat
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
++	unsigned long eip;
+ 
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_near(ctxt, eip);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+@@ -2840,20 +2866,24 @@ static int em_lmsw(struct x86_emulate_ct
+ 
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+ 	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+ 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_cli(struct x86_emulate_ctxt *ctxt)
+@@ -3946,7 +3976,7 @@ special_insn:
+ 		break;
+ 	case 0x70 ... 0x7f: /* jcc (short) */
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x8d: /* lea r16/r32, m */
+ 		ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -3994,7 +4024,7 @@ special_insn:
+ 		goto do_io_out;
+ 	case 0xe9: /* jmp rel */
+ 	case 0xeb: /* jmp rel short */
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ 		break;
+ 	case 0xec: /* in al,dx */
+@@ -4160,7 +4190,7 @@ twobyte_insn:
+ 		break;
+ 	case 0x80 ... 0x8f: /* jnz rel, etc*/
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x90 ... 0x9f:     /* setcc r/m8 */
+ 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
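
[Editor's note: editorial sketch, not part of the patch. assign_eip_far() above truncates the branch target according to the operand size and, for a 64-bit code segment, refuses a non-canonical target with #GP instead of loading it into RIP and failing the next vm-entry. The function below mirrors that policy in standalone C; the 32-bit-segment case is written as "upper 32 bits must be clear", which is the intent of the original condition.]

/* Sketch: accept-or-#GP decision for a new instruction pointer. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static int canonical(uint64_t a)
{
	return ((a & (1ULL << 47)) ? (a | 0xffff000000000000ULL)
				   : (a & 0x0000ffffffffffffULL)) == a;
}

/* returns 0 on success, -1 for the emulated #GP */
static int assign_eip(uint64_t *rip, uint64_t dst, int op_bytes, int cs_l)
{
	switch (op_bytes) {
	case 2: *rip = (uint16_t)dst; return 0;
	case 4: *rip = (uint32_t)dst; return 0;
	case 8:
		if ((cs_l && !canonical(dst)) || (!cs_l && (dst >> 32)))
			return -1;               /* emulate_gp(ctxt, 0) */
		*rip = dst;
		return 0;
	}
	return -1;
}

int main(void)
{
	uint64_t rip = 0;
	uint64_t bad = 0x0000900000000000ULL;    /* non-canonical */
	int rc;

	rc = assign_eip(&rip, 0x12345678, 2, 0);
	printf("16-bit operand: rc=%d rip=%#" PRIx64 "\n", rc, rip); /* 0x5678 */
	rc = assign_eip(&rip, bad, 8, 1);
	printf("64-bit non-canonical target: rc=%d (-1 means #GP)\n", rc);
	return 0;
}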

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,60 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:37 +0300
+Subject: KVM: x86: Fix wrong masking on relative jump/call
+Origin: https://git.kernel.org/linus/05c83ec9b73c8124555b706f6af777b10adf0862
+
+Relative jumps and calls do the masking according to the operand size, and not
+according to the address size as the KVM emulator does today.
+
+This patch fixes KVM behavior.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/x86/kvm/emulate.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -456,11 +456,6 @@ register_address_increment(struct x86_em
+ 		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ }
+ 
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+-	register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ 	u32 limit = get_desc_limit(desc);
+@@ -534,6 +529,28 @@ static int emulate_nm(struct x86_emulate
+ 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+ 
++static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++	switch (ctxt->op_bytes) {
++	case 2:
++		ctxt->_eip = (u16)dst;
++		break;
++	case 4:
++		ctxt->_eip = (u32)dst;
++		break;
++	case 8:
++		ctxt->_eip = dst;
++		break;
++	default:
++		WARN(1, "unsupported eip assignment size\n");
++	}
++}
++
++static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++	assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ 	u16 selector;
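
[Editor's note: editorial sketch, not part of the patch. Relative jumps and calls must mask the new IP to the operand size (16/32/64 bits), not the address size; the standalone helper below shows the difference on a wrap-around case. The example values are invented; the 64-bit case would additionally go through the canonical check added by the follow-up patches.]

/* Sketch: operand-size masking of a relative jump target. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t jmp_rel(uint64_t eip, int64_t rel, int op_bytes)
{
	uint64_t dst = eip + (uint64_t)rel;

	switch (op_bytes) {
	case 2: return (uint16_t)dst;   /* wrap within 64 KiB */
	case 4: return (uint32_t)dst;
	default: return dst;
	}
}

int main(void)
{
	/* a jcc +0x20 at ip 0xfff0 with 16-bit operand size wraps to 0x0010;
	 * masking by a 32-bit address size would wrongly give 0x10010 */
	printf("op_bytes=2: %#" PRIx64 "\n", jmp_rel(0xfff0, 0x20, 2));
	printf("op_bytes=4: %#" PRIx64 "\n", jmp_rel(0xfff0, 0x20, 4));
	return 0;
}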

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,244 @@
+From: Nadav Amit <namit at cs.technion.ac.il>
+Date: Thu, 18 Sep 2014 22:39:39 +0300
+Subject: KVM: x86: Handle errors when RIP is set during far jumps
+Origin: https://git.kernel.org/linus/d1442d85cc30ea75f7d399474ca738e0bc96f715
+
+Far jmp/call/ret may fault while loading a new RIP.  Currently KVM does not
+handle this case, which may result in a failed vm-entry once the assignment is
+done.  The tricky part of doing so is that loading the new CS affects the
+VMCS/VMCB state, so if we fail during loading the new RIP, we are left in
+unconsistent state.  Therefore, this patch saves on 64-bit the old CS
+descriptor and restores it if loading RIP failed.
+
+This fixes CVE-2014-3647.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Nadav Amit <namit at cs.technion.ac.il>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - __load_segment_descriptor() does not take an in_task_switch parameter]
+---
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1234,7 +1234,8 @@ static int write_segment_descriptor(stru
+ 
+ /* Does not support long mode */
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+-				     u16 selector, int seg, u8 cpl)
++				     u16 selector, int seg, u8 cpl,
++				     struct desc_struct *desc)
+ {
+ 	struct desc_struct seg_desc;
+ 	u8 dpl, rpl;
+@@ -1342,6 +1343,8 @@ static int __load_segment_descriptor(str
+ 	}
+ load:
+ 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
++	if (desc)
++		*desc = seg_desc;
+ 	return X86EMUL_CONTINUE;
+ exception:
+ 	emulate_exception(ctxt, err_vec, err_code, true);
+@@ -1352,7 +1355,7 @@ static int load_segment_descriptor(struc
+ 				   u16 selector, int seg)
+ {
+ 	u8 cpl = ctxt->ops->cpl(ctxt);
+-	return __load_segment_descriptor(ctxt, selector, seg, cpl);
++	return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
+ }
+ 
+ static void write_register_operand(struct operand *op)
+@@ -1694,17 +1697,31 @@ static int em_iret(struct x86_emulate_ct
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned short sel;
++	unsigned short sel, old_sel;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++	u8 cpl = ctxt->ops->cpl(ctxt);
++
++	/* Assignment of RIP may only fail in 64-bit mode */
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++				 VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+-	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
++				       &new_desc);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+-	return X86EMUL_CONTINUE;
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		/* assigning eip failed; restore the old cs */
++		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++		return rc;
++	}
++	return rc;
+ }
+ 
+ static int em_grp1a(struct x86_emulate_ctxt *ctxt)
+@@ -1856,21 +1873,34 @@ static int em_ret(struct x86_emulate_ctx
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned long cs;
++	unsigned long eip, cs;
++	u16 old_cs;
+ 	int cpl = ctxt->ops->cpl(ctxt);
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++				 VCPU_SREG_CS);
+ 
+-	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+-	if (ctxt->op_bytes == 4)
+-		ctxt->_eip = (u32)ctxt->_eip;
+ 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	/* Outer-privilege level return is not implemented */
+ 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ 		return X86EMUL_UNHANDLEABLE;
+-	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
++				       &new_desc);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_far(ctxt, eip, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	}
+ 	return rc;
+ }
+ 
+@@ -2248,19 +2278,24 @@ static int load_state_from_tss16(struct
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2373,25 +2408,32 @@ static int load_state_from_tss32(struct
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++					cpl, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
++	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
++					NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2605,24 +2647,39 @@ static int em_call_far(struct x86_emulat
+ 	u16 sel, old_cs;
+ 	ulong old_eip;
+ 	int rc;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++	int cpl = ctxt->ops->cpl(ctxt);
+ 
+-	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ 	old_eip = ctxt->_eip;
++	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+-	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
++				       &new_desc);
++	if (rc != X86EMUL_CONTINUE)
+ 		return X86EMUL_CONTINUE;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
+ 
+ 	ctxt->src.val = old_cs;
+ 	rc = em_push(ctxt);
+ 	if (rc != X86EMUL_CONTINUE)
+-		return rc;
++		goto fail;
+ 
+ 	ctxt->src.val = old_eip;
+-	return em_push(ctxt);
++	rc = em_push(ctxt);
++	/* If we failed, we tainted the memory, but the very least we should
++	   restore cs */
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
++	return rc;
++fail:
++	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	return rc;
++
+ }
+ 
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)

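For illustration only (all names below are invented, not from the patch):
the shape of the fix is a snapshot-and-rollback pattern around the two-step
CS/RIP update, so a fault while setting the new RIP never leaves the new CS
in place.

	#include <stdio.h>

	struct seg { unsigned short sel; };

	/* Simplified stand-in for the RIP assignment: reject targets outside
	 * the low canonical half (the real check also allows the high half). */
	static int load_new_rip(unsigned long long rip)
	{
		return rip > 0x00007fffffffffffULL ? -1 : 0;
	}

	static int far_jump(struct seg *cs, unsigned short new_sel,
			    unsigned long long new_rip)
	{
		struct seg old_cs = *cs;	/* snapshot, like ops->get_segment() */

		cs->sel = new_sel;		/* commit the new CS first */
		if (load_new_rip(new_rip)) {
			*cs = old_cs;		/* roll back CS on failure */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct seg cs = { .sel = 0x08 };

		if (far_jump(&cs, 0x10, 0x0000800000000000ULL))
			printf("rip load failed, cs restored to %#x\n", cs.sel);
		return 0;
	}
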
Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,33 @@
+From: Andy Honig <ahonig at google.com>
+Date: Wed, 27 Aug 2014 14:42:54 -0700
+Subject: KVM: x86: Improve thread safety in pit
+Origin: https://git.kernel.org/linus/2febc839133280d5a5e8e1179c94ea674489dae2
+
+There's a race condition in the PIT emulation code in KVM.  In
+__kvm_migrate_pit_timer the pit_timer object is accessed without
+synchronization.  If the race condition occurs at the wrong time this
+can crash the host kernel.
+
+This fixes CVE-2014-3611.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Andrew Honig <ahonig at google.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ arch/x86/kvm/i8254.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -264,8 +264,10 @@ void __kvm_migrate_pit_timer(struct kvm_
+ 		return;
+ 
+ 	timer = &pit->pit_state.pit_timer.timer;
++	mutex_lock(&pit->pit_state.lock);
+ 	if (hrtimer_cancel(timer))
+ 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++	mutex_unlock(&pit->pit_state.lock);
+ }
+ 
+ static void destroy_pit_timer(struct kvm_pit *pit)

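As a rough user-space analogue of the locking discipline (names invented,
not from the patch): every path that cancels and re-arms the shared timer
takes the same mutex as the paths that read it, so neither can observe the
other mid-update.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t pit_lock = PTHREAD_MUTEX_INITIALIZER;
	static int timer_cpu;		/* stands in for the shared timer state */

	static void migrate_pit_timer(int new_cpu)
	{
		pthread_mutex_lock(&pit_lock);
		timer_cpu = new_cpu;	/* cancel + restart happens atomically
					 * with respect to other lock holders */
		pthread_mutex_unlock(&pit_lock);
	}

	static int read_timer_cpu(void)
	{
		int cpu;

		pthread_mutex_lock(&pit_lock);
		cpu = timer_cpu;
		pthread_mutex_unlock(&pit_lock);
		return cpu;
	}

	int main(void)
	{
		migrate_pit_timer(3);
		printf("timer now on cpu %d\n", read_timer_cpu());
		return 0;
	}
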
Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,55 @@
+From: Takuya Yoshikawa <yoshikawa.takuya at oss.ntt.co.jp>
+Date: Tue, 22 Nov 2011 15:18:35 +0900
+Subject: KVM: x86 emulator: Use opcode::execute for CALL
+Origin: https://git.kernel.org/linus/d4ddafcdf2201326ec9717172767cfad0ede1472
+
+CALL: E8
+
+Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya at oss.ntt.co.jp>
+Signed-off-by: Marcelo Tosatti <mtosatti at redhat.com>
+[bwh: Backported to 3.2: adjust context]
+---
+ arch/x86/kvm/emulate.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2536,6 +2536,15 @@ static int em_das(struct x86_emulate_ctx
+ 	return X86EMUL_CONTINUE;
+ }
+ 
++static int em_call(struct x86_emulate_ctxt *ctxt)
++{
++	long rel = ctxt->src.val;
++
++	ctxt->src.val = (unsigned long)ctxt->_eip;
++	jmp_rel(ctxt, rel);
++	return em_push(ctxt);
++}
++
+ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	u16 sel, old_cs;
+@@ -3271,7 +3280,7 @@ static struct opcode opcode_table[256] =
+ 	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
+ 	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
+ 	/* 0xE8 - 0xEF */
+-	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
++	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
+ 	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+ 	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
+ 	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
+@@ -3966,13 +3975,6 @@ special_insn:
+ 	case 0xe6: /* outb */
+ 	case 0xe7: /* out */
+ 		goto do_io_out;
+-	case 0xe8: /* call (near) */ {
+-		long int rel = ctxt->src.val;
+-		ctxt->src.val = (unsigned long) ctxt->_eip;
+-		jmp_rel(ctxt, rel);
+-		rc = em_push(ctxt);
+-		break;
+-	}
+ 	case 0xe9: /* jmp rel */
+ 	case 0xeb: /* jmp rel short */
+ 		jmp_rel(ctxt, ctxt->src.val);

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,162 @@
+From: Paolo Bonzini <pbonzini at redhat.com>
+Date: Thu, 15 May 2014 17:56:57 +0200
+Subject: KVM: x86: use new CS.RPL as CPL during task switch
+Origin: https://git.kernel.org/linus/2356aaeb2f58f491679dc0c38bc3f6dbe54e7ded
+
+During task switch, all of CS.DPL, CS.RPL, SS.DPL must match (in addition
+to all the other requirements) and will be the new CPL.  So far this
+worked by carefully setting the CS selector and flag before doing the
+task switch; setting CS.selector will already change the CPL.
+
+However, this will not work once we get the CPL from SS.DPL, because
+then you will have to set the full segment descriptor cache to change
+the CPL.  ctxt->ops->cpl(ctxt) will then return the old CPL during the
+task switch, and the check that SS.DPL == CPL will fail.
+
+Temporarily assume that the CPL comes from CS.RPL during task switch
+to a protected-mode task.  This is the same approach used in QEMU's
+emulation code, which (until version 2.0) manually tracks the CPL.
+
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - load_state_from_tss32() does not support VM86 mode]
+---
+ arch/x86/kvm/emulate.c | 60 +++++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 27 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1233,11 +1233,11 @@ static int write_segment_descriptor(stru
+ }
+ 
+ /* Does not support long mode */
+-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+-				   u16 selector, int seg)
++static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++				     u16 selector, int seg, u8 cpl)
+ {
+ 	struct desc_struct seg_desc;
+-	u8 dpl, rpl, cpl;
++	u8 dpl, rpl;
+ 	unsigned err_vec = GP_VECTOR;
+ 	u32 err_code = 0;
+ 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+@@ -1286,7 +1286,6 @@ static int load_segment_descriptor(struc
+ 
+ 	rpl = selector & 3;
+ 	dpl = seg_desc.dpl;
+-	cpl = ctxt->ops->cpl(ctxt);
+ 
+ 	switch (seg) {
+ 	case VCPU_SREG_SS:
+@@ -1349,6 +1348,13 @@ exception:
+ 	return X86EMUL_PROPAGATE_FAULT;
+ }
+ 
++static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++				   u16 selector, int seg)
++{
++	u8 cpl = ctxt->ops->cpl(ctxt);
++	return __load_segment_descriptor(ctxt, selector, seg, cpl);
++}
++
+ static void write_register_operand(struct operand *op)
+ {
+ 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+@@ -2213,6 +2219,7 @@ static int load_state_from_tss16(struct
+ 				 struct tss_segment_16 *tss)
+ {
+ 	int ret;
++	u8 cpl;
+ 
+ 	ctxt->_eip = tss->ip;
+ 	ctxt->eflags = tss->flag | 2;
+@@ -2235,23 +2242,25 @@ static int load_state_from_tss16(struct
+ 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+ 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+ 
++	cpl = tss->cs & 3;
++
+ 	/*
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
++	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2330,6 +2339,7 @@ static int load_state_from_tss32(struct
+ 				 struct tss_segment_32 *tss)
+ {
+ 	int ret;
++	u8 cpl;
+ 
+ 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
+ 		return emulate_gp(ctxt, 0);
+@@ -2346,7 +2356,8 @@ static int load_state_from_tss32(struct
+ 
+ 	/*
+ 	 * SDM says that segment selectors are loaded before segment
+-	 * descriptors
++	 * descriptors.  This is important because CPL checks will
++	 * use CS.RPL.
+ 	 */
+ 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+ 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+@@ -2356,29 +2367,31 @@ static int load_state_from_tss32(struct
+ 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
+ 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
+ 
++	cpl = tss->cs & 3;
++
+ 	/*
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
++	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
++	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
++	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,67 @@
+From: Petr Matousek <pmatouse at redhat.com>
+Date: Tue, 23 Sep 2014 20:22:30 +0200
+Subject: kvm: vmx: handle invvpid vm exit gracefully
+Origin: https://git.kernel.org/linus/a642fc305053cc1c6e47e4f4df327895747ab485
+
+On systems with invvpid instruction support (corresponding bit in
+IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
+causes vm exit, which is currently not handled and results in
+propagation of unknown exit to userspace.
+
+Fix this by installing an invvpid vm exit handler.
+
+This is CVE-2014-3646.
+
+Cc: stable at vger.kernel.org
+Signed-off-by: Petr Matousek <pmatouse at redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust filename
+ - Drop inapplicable change to exit reason string array]
+---
+ arch/x86/include/asm/vmx.h      | 2 ++
+ arch/x86/kvm/vmx.c              | 9 ++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -280,6 +280,7 @@ enum vmcs_field {
+ #define EXIT_REASON_EPT_VIOLATION       48
+ #define EXIT_REASON_EPT_MISCONFIG       49
+ #define EXIT_REASON_INVEPT              50
++#define EXIT_REASON_INVVPID             53
+ #define EXIT_REASON_WBINVD		54
+ #define EXIT_REASON_XSETBV		55
+ 
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5624,6 +5624,12 @@ static int handle_invept(struct kvm_vcpu
+ 	return 1;
+ }
+ 
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++	kvm_queue_exception(vcpu, UD_VECTOR);
++	return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+@@ -5666,6 +5672,7 @@ static int (*kvm_vmx_exit_handlers[])(st
+ 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_invalid_op,
+ 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
+ 	[EXIT_REASON_INVEPT]                  = handle_invept,
++	[EXIT_REASON_INVVPID]                 = handle_invvpid,
+ };
+ 
+ static const int kvm_vmx_max_exit_handlers =
+@@ -5850,7 +5857,7 @@ static bool nested_vmx_exit_handled(stru
+ 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+-	case EXIT_REASON_INVEPT:
++	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ 		/*
+ 		 * VMX instructions trap unconditionally. This allows L1 to
+ 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/nEPT-Nested-INVEPT.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,193 @@
+From: Nadav Har'El <nyh at il.ibm.com>
+Date: Mon, 5 Aug 2013 11:07:17 +0300
+Subject: nEPT: Nested INVEPT
+Origin: https://git.kernel.org/linus/bfd0a56b90005f8c8a004baf407ad90045c2b11e
+
+If we let L1 use EPT, we should probably also support the INVEPT instruction.
+
+In our current nested EPT implementation, when L1 changes its EPT table
+for L2 (i.e. EPT12), L0 modifies the shadow EPT table (EPT02), and in
+the course of this modification already calls INVEPT. But if the last
+level of a shadow page is unsynced, not all of L1's changes to EPT12 are
+intercepted, which means roots need to be synced when L1 calls INVEPT.
+Global INVEPT should not be different, since roots are synced by
+kvm_mmu_load() each time EPTP02 changes.
+
+Reviewed-by: Xiao Guangrong <xiaoguangrong at linux.vnet.ibm.com>
+Signed-off-by: Nadav Har'El <nyh at il.ibm.com>
+Signed-off-by: Jun Nakajima <jun.nakajima at intel.com>
+Signed-off-by: Xinhao Xu <xinhao.xu at intel.com>
+Signed-off-by: Yang Zhang <yang.z.zhang at Intel.com>
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context, filename
+ - Add definition of nested_ept_get_cr3(), added upstream by commit
+   155a97a3d7c7 ("nEPT: MMU context for nested EPT")]
+---
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -279,6 +279,7 @@ enum vmcs_field {
+ #define EXIT_REASON_APIC_ACCESS         44
+ #define EXIT_REASON_EPT_VIOLATION       48
+ #define EXIT_REASON_EPT_MISCONFIG       49
++#define EXIT_REASON_INVEPT              50
+ #define EXIT_REASON_WBINVD		54
+ #define EXIT_REASON_XSETBV		55
+ 
+@@ -397,6 +398,7 @@ enum vmcs_field {
+ #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
+ #define VMX_EPT_EXTENT_CONTEXT			1
+ #define VMX_EPT_EXTENT_GLOBAL			2
++#define VMX_EPT_EXTENT_SHIFT			24
+ 
+ #define VMX_EPT_EXECUTE_ONLY_BIT		(1ull)
+ #define VMX_EPT_PAGE_WALK_4_BIT			(1ull << 6)
+@@ -404,6 +406,7 @@ enum vmcs_field {
+ #define VMX_EPTP_WB_BIT				(1ull << 14)
+ #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
+ #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
++#define VMX_EPT_INVEPT_BIT			(1ull << 20)
+ #define VMX_EPT_EXTENT_INDIVIDUAL_BIT		(1ull << 24)
+ #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
+ #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2869,6 +2869,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu
+ 	mmu_sync_roots(vcpu);
+ 	spin_unlock(&vcpu->kvm->mmu_lock);
+ }
++EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
+ 
+ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+ 				  u32 access, struct x86_exception *exception)
+@@ -3131,6 +3132,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *
+ 	++vcpu->stat.tlb_flush;
+ 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
++EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
+ 
+ static void paging_new_cr3(struct kvm_vcpu *vcpu)
+ {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -602,6 +602,7 @@ static void nested_release_page_clean(st
+ 	kvm_release_page_clean(page);
+ }
+ 
++static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
+ static u64 construct_eptp(unsigned long root_hpa);
+ static void kvm_cpu_vmxon(u64 addr);
+ static void kvm_cpu_vmxoff(void);
+@@ -1899,6 +1900,7 @@ static u32 nested_vmx_secondary_ctls_low
+ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
+ static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+ static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
++static u32 nested_vmx_ept_caps;
+ static __init void nested_vmx_setup_ctls_msrs(void)
+ {
+ 	/*
+@@ -5554,6 +5556,74 @@ static int handle_vmptrst(struct kvm_vcp
+ 	return 1;
+ }
+ 
++/* Emulate the INVEPT instruction */
++static int handle_invept(struct kvm_vcpu *vcpu)
++{
++	u32 vmx_instruction_info, types;
++	unsigned long type;
++	gva_t gva;
++	struct x86_exception e;
++	struct {
++		u64 eptp, gpa;
++	} operand;
++	u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
++
++	if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
++	    !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
++	}
++
++	if (!nested_vmx_check_permission(vcpu))
++		return 1;
++
++	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
++	}
++
++	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
++	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
++
++	types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
++
++	if (!(types & (1UL << type))) {
++		nested_vmx_failValid(vcpu,
++				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		return 1;
++	}
++
++	/* According to the Intel VMX instruction reference, the memory
++	 * operand is read even if it isn't needed (e.g., for type==global)
++	 */
++	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
++			vmx_instruction_info, &gva))
++		return 1;
++	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
++				sizeof(operand), &e)) {
++		kvm_inject_page_fault(vcpu, &e);
++		return 1;
++	}
++
++	switch (type) {
++	case VMX_EPT_EXTENT_CONTEXT:
++		if ((operand.eptp & eptp_mask) !=
++				(nested_ept_get_cr3(vcpu) & eptp_mask))
++			break;
++	case VMX_EPT_EXTENT_GLOBAL:
++		kvm_mmu_sync_roots(vcpu);
++		kvm_mmu_flush_tlb(vcpu);
++		nested_vmx_succeed(vcpu);
++		break;
++	default:
++		BUG_ON(1);
++		break;
++	}
++
++	skip_emulated_instruction(vcpu);
++	return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+@@ -5595,6 +5665,7 @@ static int (*kvm_vmx_exit_handlers[])(st
+ 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
+ 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_invalid_op,
+ 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
++	[EXIT_REASON_INVEPT]                  = handle_invept,
+ };
+ 
+ static const int kvm_vmx_max_exit_handlers =
+@@ -5779,6 +5850,7 @@ static bool nested_vmx_exit_handled(stru
+ 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
++	case EXIT_REASON_INVEPT:
+ 		/*
+ 		 * VMX instructions trap unconditionally. This allows L1 to
+ 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -6440,6 +6512,12 @@ static void vmx_set_supported_cpuid(u32
+ 		entry->ecx |= bit(X86_FEATURE_VMX);
+ }
+ 
++static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
++{
++	/* return the page table to be shadowed - in our case, EPT12 */
++	return get_vmcs12(vcpu)->ept_pointer;
++}
++
+ /*
+  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it

Added: dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,105 @@
+From: Andy Lutomirski <luto at amacapital.net>
+Date: Wed, 8 Oct 2014 09:02:13 -0700
+Subject: x86,kvm,vmx: Preserve CR4 across VM entry
+Origin: https://git.kernel.org/linus/d974baa398f34393db76be45f7d4d04fbdbb4a0a
+
+CR4 isn't constant; at least the TSD and PCE bits can vary.
+
+TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
+like it's correct.
+
+This adds a branch and a read from cr4 to each vm entry.  Because it is
+extremely likely that consecutive entries into the same vcpu will have
+the same host cr4 value, this fixes up the vmcs instead of restoring cr4
+after the fact.  A subsequent patch will add a kernel-wide cr4 shadow,
+reducing the overhead in the common case to just two memory reads and a
+branch.
+
+Signed-off-by: Andy Lutomirski <luto at amacapital.net>
+Acked-by: Paolo Bonzini <pbonzini at redhat.com>
+Cc: stable at vger.kernel.org
+Cc: Petr Matousek <pmatouse at redhat.com>
+Cc: Gleb Natapov <gleb at kernel.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Add struct vcpu_vmx *vmx parameter to vmx_set_constant_host_state(), done
+   upstream in commit a547c6db4d2f ("KVM: VMX: Enable acknowledge interupt
+   on vmexit")]
+---
+ arch/x86/kvm/vmx.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -390,6 +390,7 @@ struct vcpu_vmx {
+ 		u16           fs_sel, gs_sel, ldt_sel;
+ 		int           gs_ldt_reload_needed;
+ 		int           fs_reload_needed;
++		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
+ 	} host_state;
+ 	struct {
+ 		int vm86_active;
+@@ -3631,16 +3632,21 @@ static void vmx_disable_intercept_for_ms
+  * Note that host-state that does change is set elsewhere. E.g., host-state
+  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+  */
+-static void vmx_set_constant_host_state(void)
++static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ {
+ 	u32 low32, high32;
+ 	unsigned long tmpl;
+ 	struct desc_ptr dt;
++	unsigned long cr4;
+ 
+ 	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
+-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+ 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+ 
++	/* Save the most likely value for this task's CR4 in the VMCS. */
++	cr4 = read_cr4();
++	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
++	vmx->host_state.vmcs_host_cr4 = cr4;
++
+ 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+ 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+@@ -3762,7 +3768,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ 
+ 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
+ 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
+-	vmx_set_constant_host_state();
++	vmx_set_constant_host_state(vmx);
+ #ifdef CONFIG_X86_64
+ 	rdmsrl(MSR_FS_BASE, a);
+ 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
+@@ -6176,6 +6182,7 @@ static void atomic_switch_perf_msrs(stru
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	unsigned long cr4;
+ 
+ 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
+ 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -6206,6 +6213,12 @@ static void __noclone vmx_vcpu_run(struc
+ 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ 
++	cr4 = read_cr4();
++	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++		vmcs_writel(HOST_CR4, cr4);
++		vmx->host_state.vmcs_host_cr4 = cr4;
++	}
++
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+ 	 * corresponding interruptibility bits in the guest state. Otherwise
+ 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+@@ -6670,7 +6683,7 @@ static void prepare_vmcs02(struct kvm_vc
+ 	 * Other fields are different per CPU, and will be set later when
+ 	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ 	 */
+-	vmx_set_constant_host_state();
++	vmx_set_constant_host_state(vmx);
+ 
+ 	/*
+ 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before

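A minimal sketch of the caching idea, for illustration only (the structure
and function names are invented): the "expensive" field is rewritten only
when the current value differs from the last value written, since
consecutive entries on the same task almost always see the same host CR4.

	#include <stdio.h>

	struct vcpu_cache {
		unsigned long vmcs_host_cr4;	/* last value written */
	};

	static unsigned long vmcs_writes;

	static void write_host_cr4(unsigned long cr4)
	{
		(void)cr4;
		vmcs_writes++;	/* stands in for vmcs_writel(HOST_CR4, ...) */
	}

	static void vcpu_entry(struct vcpu_cache *c, unsigned long host_cr4)
	{
		if (host_cr4 != c->vmcs_host_cr4) {
			write_host_cr4(host_cr4);
			c->vmcs_host_cr4 = host_cr4;
		}
	}

	int main(void)
	{
		struct vcpu_cache c = { .vmcs_host_cr4 = 0 };

		vcpu_entry(&c, 0x2000);	/* first entry: one write */
		vcpu_entry(&c, 0x2000);	/* unchanged: no write */
		vcpu_entry(&c, 0x2080);	/* host toggled a bit: one write */
		printf("vmcs writes: %lu\n", vmcs_writes);	/* prints 2 */
		return 0;
	}
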
Added: dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/wheezy-security/linux/debian/patches/debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch	Fri Oct 31 16:37:44 2014	(r22011)
@@ -0,0 +1,35 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 27 Oct 2014 00:17:13 +0000
+Subject: drivers/net: Avoid ABI change for UFO/IPv6 fix
+Forwarded: not-needed
+
+Hide new header inclusion from genksyms.
+
+The functions exported from these drivers (macvtap_get_socket() and
+tun_get_socket()) don't seem to be used by anything in wheezy but
+it looks like they could be used by a backport of vhost.
+
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -15,7 +15,9 @@
+ #include <linux/cdev.h>
+ #include <linux/fs.h>
+ 
++#ifndef __GENKSYMS__
+ #include <net/ipv6.h>
++#endif
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -64,7 +64,9 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#ifndef __GENKSYMS__
+ #include <net/ipv6.h>
++#endif
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>

Modified: dists/wheezy-security/linux/debian/patches/series
==============================================================================
--- dists/wheezy-security/linux/debian/patches/series	Fri Oct 31 15:37:32 2014	(r22010)
+++ dists/wheezy-security/linux/debian/patches/series	Fri Oct 31 16:37:44 2014	(r22011)
@@ -1137,3 +1137,20 @@
 bugfix/all/libceph-add-process_one_ticket-helper.patch
 bugfix/all/libceph-do-not-hard-code-max-auth-ticket-len.patch
 debian/sp5100_tco-reject-sb8x0-chips.patch
+bugfix/all/ipv6-reuse-ip6_frag_id-from-ip6_ufo_append_data.patch
+bugfix/all/drivers-net-disable-ufo-through-virtio.patch
+bugfix/all/drivers-net-ipv6-select-ipv6-fragment-idents-for-vir.patch
+debian/drivers-net-avoid-abi-change-for-ufo-ipv6-fix.patch
+bugfix/x86/KVM-x86-Check-non-canonical-addresses-upon-WRMSR.patch
+bugfix/x86/KVM-x86-Improve-thread-safety-in-pit.patch
+bugfix/x86/nEPT-Nested-INVEPT.patch
+bugfix/x86/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
+bugfix/x86/KVM-x86-emulator-Use-opcode-execute-for-CALL.patch
+bugfix/x86/KVM-x86-Fix-wrong-masking-on-relative-jump-call.patch
+bugfix/x86/KVM-x86-Emulator-fixes-for-eip-canonical-checks-on-n.patch
+bugfix/x86/KVM-x86-use-new-CS.RPL-as-CPL-during-task-switch.patch
+bugfix/x86/KVM-x86-Handle-errors-when-RIP-is-set-during-far-jum.patch
+bugfix/all/net-sctp-fix-skb_over_panic-when-receiving-malformed.patch
+bugfix/all/net-sctp-fix-panic-on-duplicate-ASCONF-chunks.patch
+bugfix/all/net-sctp-fix-remote-memory-pressure-from-excessive-q.patch
+bugfix/x86/x86-kvm-vmx-Preserve-CR4-across-VM-entry.patch


