[kernel] r22845 - dists/jessie-security/linux/debian/patches/bugfix/x86

Ben Hutchings benh at moszumanska.debian.org
Wed Jul 22 21:23:23 UTC 2015


Author: benh
Date: Wed Jul 22 21:23:22 2015
New Revision: 22845

Log:
Changes uploaded as 3.16.7-ckt11-1+deb8u1

Added:
   dists/jessie-security/linux/debian/patches/bugfix/x86/revert-perf-x86-Further-optimize-copy_from_user_nmi.patch

Added: dists/jessie-security/linux/debian/patches/bugfix/x86/revert-perf-x86-Further-optimize-copy_from_user_nmi.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/jessie-security/linux/debian/patches/bugfix/x86/revert-perf-x86-Further-optimize-copy_from_user_nmi.patch	Wed Jul 22 21:23:22 2015	(r22845)
@@ -0,0 +1,144 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 16 Jul 2015 20:15:45 +0100
+Subject: Revert "perf/x86: Further optimize copy_from_user_nmi()"
+
+This reverts commit e00b12e64be9a34ef071de7b6052ca9ea29dd460, as
+mitigation against CVE-2015-3290.
+
+Conflicts:
+	arch/x86/lib/usercopy.c
+	arch/x86/mm/fault.c
+---
+ arch/x86/lib/usercopy.c | 43 ++++++++++++++++++++++++++++---------------
+ arch/x86/mm/fault.c     | 39 +++++++++++++++++++--------------------
+ 2 files changed, 47 insertions(+), 35 deletions(-)
+
+--- a/arch/x86/lib/usercopy.c
++++ b/arch/x86/lib/usercopy.c
+@@ -11,26 +11,39 @@
+ #include <linux/sched.h>
+ 
+ /*
+- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+- * nested NMI paths are careful to preserve CR2.
++ * best effort, GUP based copy_from_user() that is NMI-safe
+  */
+ unsigned long
+ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+ {
+-	unsigned long ret;
++	unsigned long offset, addr = (unsigned long)from;
++	unsigned long size, len = 0;
++	struct page *page;
++	void *map;
++	int ret;
+ 
+ 	if (__range_not_ok(from, n, TASK_SIZE))
+-		return 0;
++		return len;
+ 
+-	/*
+-	 * Even though this function is typically called from NMI/IRQ context
+-	 * disable pagefaults so that its behaviour is consistent even when
+-	 * called form other contexts.
+-	 */
+-	pagefault_disable();
+-	ret = __copy_from_user_inatomic(to, from, n);
+-	pagefault_enable();
++	do {
++		ret = __get_user_pages_fast(addr, 1, 0, &page);
++		if (!ret)
++			break;
+ 
+-	return ret;
++		offset = addr & (PAGE_SIZE - 1);
++		size = min(PAGE_SIZE - offset, n - len);
++
++		map = kmap_atomic(page);
++		memcpy(to, map+offset, size);
++		kunmap_atomic(map);
++		put_page(page);
++
++		len  += size;
++		to   += size;
++		addr += size;
++
++	} while (len < n);
++
++	return n - len;
+ }
+ EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -55,7 +55,7 @@ kmmio_fault(struct pt_regs *regs, unsign
+ 	return 0;
+ }
+ 
+-static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
++static nokprobe_inline int notify_page_fault(struct pt_regs *regs)
+ {
+ 	int ret = 0;
+ 
+@@ -1080,7 +1080,7 @@ __do_page_fault(struct pt_regs *regs, un
+ 			return;
+ 
+ 		/* kprobes don't want to hook the spurious faults: */
+-		if (kprobes_fault(regs))
++		if (notify_page_fault(regs))
+ 			return;
+ 		/*
+ 		 * Don't take the mm semaphore here. If we fixup a prefetch
+@@ -1092,26 +1092,8 @@ __do_page_fault(struct pt_regs *regs, un
+ 	}
+ 
+ 	/* kprobes don't want to hook the spurious faults: */
+-	if (unlikely(kprobes_fault(regs)))
++	if (unlikely(notify_page_fault(regs)))
+ 		return;
+-
+-	if (unlikely(error_code & PF_RSVD))
+-		pgtable_bad(regs, error_code, address);
+-
+-	if (unlikely(smap_violation(error_code, regs))) {
+-		bad_area_nosemaphore(regs, error_code, address);
+-		return;
+-	}
+-
+-	/*
+-	 * If we're in an interrupt, have no user context or are running
+-	 * in an atomic region then we must not take the fault:
+-	 */
+-	if (unlikely(in_atomic() || !mm)) {
+-		bad_area_nosemaphore(regs, error_code, address);
+-		return;
+-	}
+-
+ 	/*
+ 	 * It's safe to allow irq's after cr2 has been saved and the
+ 	 * vmalloc fault has been handled.
+@@ -1128,8 +1110,25 @@ __do_page_fault(struct pt_regs *regs, un
+ 			local_irq_enable();
+ 	}
+ 
++	if (unlikely(error_code & PF_RSVD))
++		pgtable_bad(regs, error_code, address);
++
++	if (unlikely(smap_violation(error_code, regs))) {
++		bad_area_nosemaphore(regs, error_code, address);
++		return;
++	}
++
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ 
++	/*
++	 * If we're in an interrupt, have no user context or are running
++	 * in an atomic region then we must not take the fault:
++	 */
++	if (unlikely(in_atomic() || !mm)) {
++		bad_area_nosemaphore(regs, error_code, address);
++		return;
++	}
++
+ 	if (error_code & PF_WRITE)
+ 		flags |= FAULT_FLAG_WRITE;
+ 
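For context on using the function this patch restores: copy_from_user_nmi() returns the number of bytes it failed to copy, not an error code, so a caller in NMI context checks for a zero return before trusting the buffer. Below is a minimal caller sketch, assuming a 3.16-era x86 kernel tree; the walk_user_frame() helper is illustrative only and is not part of this patch, though the pattern mirrors how perf's user callchain code consumes the return value.

/*
 * Hedged sketch: reading one user stack frame from NMI context.
 * struct stack_frame_user mirrors the x86 frame-pointer layout;
 * walk_user_frame() is an illustrative helper, not a kernel API.
 */
#include <linux/uaccess.h>	/* pulls in copy_from_user_nmi() via asm/uaccess.h */
#include <linux/errno.h>

struct stack_frame_user {
	const void __user *next_fp;	/* saved frame pointer */
	unsigned long ret_addr;		/* saved return address */
};

static int walk_user_frame(const void __user *fp,
			   struct stack_frame_user *frame)
{
	unsigned long bytes_left;

	/* Best-effort copy; returns the bytes NOT copied and never sleeps. */
	bytes_left = copy_from_user_nmi(frame, fp, sizeof(*frame));
	if (bytes_left)
		return -EFAULT;	/* partial or failed copy: stop unwinding */

	return 0;
}

The restored implementation above pins each user page with __get_user_pages_fast() and copies through a kmap_atomic() mapping, so it never takes a page fault from NMI context; a fault on an unmapped page simply shows up as a non-zero (bytes-remaining) return value.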


