[kernel] r4960 - patch-tracking

Moritz Muehlenhoff jmm-guest at costa.debian.org
Fri Dec 2 23:59:18 UTC 2005


Author: jmm-guest
Date: Fri Dec  2 23:59:17 2005
New Revision: 4960

Added:
   patch-tracking/CVE-2005-1265.patch
Log:
patch for CVE-2005-1265


Added: patch-tracking/CVE-2005-1265.patch
==============================================================================
--- (empty file)
+++ patch-tracking/CVE-2005-1265.patch	Fri Dec  2 23:59:17 2005
@@ -0,0 +1,98 @@
+diff -urN x/include/linux/err.h y/include/linux/err.h
+--- x/include/linux/err.h	2004-08-24 17:19:18.000000000 +1000
++++ y/include/linux/err.h	2005-05-20 18:38:34.000000000 +1000
+@@ -11,6 +11,8 @@
+  * This should be a per-architecture thing, to allow different
+  * error and pointer decisions.
+  */
++#define IS_ERR_VALUE(x) ((x) > (unsigned long)-1000L)
++
+ static inline void *ERR_PTR(long error)
+ {
+ 	return (void *) error;
+@@ -23,7 +25,7 @@
+ 
+ static inline long IS_ERR(const void *ptr)
+ {
+-	return (unsigned long)ptr > (unsigned long)-1000L;
++	return IS_ERR_VALUE((unsigned long)ptr);
+ }
+ 
+ #endif /* _LINUX_ERR_H */
+diff -urN x/mm/mmap.c y/mm/mmap.c
+--- x/mm/mmap.c	2005-05-19 20:54:12.000000000 +1000
++++ y/mm/mmap.c	2005-05-20 18:39:23.000000000 +1000
+@@ -1076,37 +1076,40 @@
+ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ 		unsigned long pgoff, unsigned long flags)
+ {
+-	if (flags & MAP_FIXED) {
+-		unsigned long ret;
++	unsigned long ret;
+ 
+-		if (addr > TASK_SIZE - len)
+-			return -ENOMEM;
+-		if (addr & ~PAGE_MASK)
+-			return -EINVAL;
+-		if (file && is_file_hugepages(file))  {
+-			/*
+-			 * Check if the given range is hugepage aligned, and
+-			 * can be made suitable for hugepages.
+-			 */
+-			ret = prepare_hugepage_range(addr, len);
+-		} else {
+-			/*
+-			 * Ensure that a normal request is not falling in a
+-			 * reserved hugepage range.  For some archs like IA-64,
+-			 * there is a separate region for hugepages.
+-			 */
+-			ret = is_hugepage_only_range(addr, len);
+-		}
+-		if (ret)
+-			return -EINVAL;
+-		return addr;
+-	}
++	if (!(flags & MAP_FIXED)) {
++		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+ 
+-	if (file && file->f_op && file->f_op->get_unmapped_area)
+-		return file->f_op->get_unmapped_area(file, addr, len,
+-						pgoff, flags);
++		get_area = arch_get_unmapped_area;
++		if (file && file->f_op && file->f_op->get_unmapped_area)
++			get_area = file->f_op->get_unmapped_area;
++		addr = get_area(file, addr, len, pgoff, flags);
++		if (IS_ERR_VALUE(addr))
++			return addr;
++	}
+ 
+-	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
++	if (addr > TASK_SIZE - len)
++		return -ENOMEM;
++	if (addr & ~PAGE_MASK)
++		return -EINVAL;
++	if (file && is_file_hugepages(file))  {
++		/*
++		 * Check if the given range is hugepage aligned, and
++		 * can be made suitable for hugepages.
++		 */
++		ret = prepare_hugepage_range(addr, len);
++	} else {
++		/*
++		 * Ensure that a normal request is not falling in a
++		 * reserved hugepage range.  For some archs like IA-64,
++		 * there is a separate region for hugepages.
++		 */
++		ret = is_hugepage_only_range(addr, len);
++	}
++	if (ret)
++		return -EINVAL;
++	return addr;
+ }
+ 
+ EXPORT_SYMBOL(get_unmapped_area);
+
+
+
+
+
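For context, the err.h hunk above introduces the IS_ERR_VALUE() idiom: a single unsigned long return value carries either a usable address or a negative errno, with values in the last ~1000 positions of the unsigned range treated as errors. Below is a minimal userspace sketch of that idiom, not part of the patch; demo_get_area() is a hypothetical stand-in for get_unmapped_area().

	/*
	 * Minimal userspace sketch (not kernel code) of the IS_ERR_VALUE
	 * idiom: an unsigned long doubles as either a valid address or a
	 * negative errno, and IS_ERR_VALUE() tells the two apart.
	 */
	#include <errno.h>
	#include <stdio.h>

	#define IS_ERR_VALUE(x) ((x) > (unsigned long)-1000L)

	/* Hypothetical allocator: returns an "address" or a negative errno. */
	static unsigned long demo_get_area(unsigned long len)
	{
		if (len == 0)
			return (unsigned long)-EINVAL;	/* error encoded in-band */
		return 0x40000000UL;			/* pretend this is a mapping */
	}

	int main(void)
	{
		unsigned long addr = demo_get_area(0);

		if (IS_ERR_VALUE(addr))
			printf("error: %ld\n", (long)addr);	/* prints -22 (EINVAL) */
		else
			printf("mapped at 0x%lx\n", addr);
		return 0;
	}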


