[kernel] r16173 - in dists/sid/linux-2.6/debian: . patches/features/all/xen patches/series
Ian Campbell
ijc-guest at alioth.debian.org
Sun Aug 22 16:44:13 UTC 2010
Author: ijc-guest
Date: Sun Aug 22 16:44:06 2010
New Revision: 16173
Log:
remove stack guard patches from xen flavour for the time being
Added:
dists/sid/linux-2.6/debian/patches/features/all/xen/revert-stack-guard.patch
dists/sid/linux-2.6/debian/patches/series/21-extra
Modified:
dists/sid/linux-2.6/debian/changelog
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Sat Aug 21 07:59:17 2010 (r16172)
+++ dists/sid/linux-2.6/debian/changelog Sun Aug 22 16:44:06 2010 (r16173)
@@ -23,6 +23,7 @@
[ Ian Campbell ]
* xen: backport pvhvm drivers from upstream.
+ * xen: temporarily remove stack guard page, it breaks the xen toolstack.
-- Ben Hutchings <ben at decadent.org.uk> Thu, 12 Aug 2010 23:20:55 +0100
Added: dists/sid/linux-2.6/debian/patches/features/all/xen/revert-stack-guard.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/xen/revert-stack-guard.patch Sun Aug 22 16:44:06 2010 (r16173)
@@ -0,0 +1,114 @@
+Revert the following patches (which were part of 2.6.32.19 and .20).
+The stack guard functionality breaks mlocking the stack, which impacts
+Xen toolstack operation; an illustrative sketch follows the diff below.
+
+e4599a4a45259b9cfb0942d36f6f35f3dca1d893 mm: fix up some user-visible effects of the stack guard page
+058daedc8311ab42702dfe29d3ff16dff7e7eaf8 mm: fix page table unmap for stack guard page properly
+ab832422673d1774c4ce3941f2ac87743d73bded mm: fix missing page table unmap for stack guard page failure case
+7e281afe24330aeea86113ac241eabdac8ba2311 mm: keep a guard page below a grow-down stack segment
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 899145d..366b101 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -206,7 +206,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ int flags = vma->vm_flags;
+ unsigned long ino = 0;
+ unsigned long long pgoff = 0;
+- unsigned long start;
+ dev_t dev = 0;
+ int len;
+
+@@ -217,13 +216,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
+- start = vma->vm_start;
+- if (vma->vm_flags & VM_GROWSDOWN)
+- start += PAGE_SIZE;
+-
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+- start,
++ vma->vm_start,
+ vma->vm_end,
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+diff --git a/mm/memory.c b/mm/memory.c
+index ae85b20..ca859b7 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2668,26 +2668,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
+- * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- address -= PAGE_SIZE;
+- if (find_vma(vma->vm_mm, address) != vma)
+- return -ENOMEM;
+-
+- expand_stack(vma, address);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2700,23 +2680,19 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ spinlock_t *ptl;
+ pte_t entry;
+
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 524d2a4..2e05c97 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -170,14 +170,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+- /* We don't try to access the guard page of a stack vma */
+- if (vma->vm_flags & VM_GROWSDOWN) {
+- if (start == vma->vm_start) {
+- start += PAGE_SIZE;
+- nr_pages--;
+- }
+- }
+-
+ while (nr_pages > 0) {
+ int i;
+
Added: dists/sid/linux-2.6/debian/patches/series/21-extra
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/series/21-extra Sun Aug 22 16:44:06 2010 (r16173)
@@ -0,0 +1 @@
++ features/all/xen/revert-stack-guard.patch featureset=xen
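For context on the rationale in the patch header above: the Xen toolstack
mlock()s hypercall argument buffers, some of which live on the caller's
stack, so that every page of the buffer is present while the hypervisor
accesses it. With the guard-page handling reverted here,
__mlock_vma_pages_range() skipped the lowest page of a VM_GROWSDOWN vma,
so a buffer that landed in that page could be left non-resident and the
hypercall would fail. The C sketch below only illustrates that userspace
pattern under those assumptions; it is not libxc code, and the buffer
name and size are made up.

/*
 * Minimal sketch (not libxc code): mlock()ing a buffer that lives on
 * the stack before handing its address to the hypervisor.  The buffer
 * name and size are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    char hypercall_buf[4096];   /* argument buffer on the stack */

    memset(hypercall_buf, 0, sizeof(hypercall_buf));

    /*
     * The toolstack relies on mlock() faulting in and pinning every
     * page of the range.  With the stack guard page in place,
     * __mlock_vma_pages_range() skipped the first page of a
     * VM_GROWSDOWN vma, so a buffer ending up in that page could stay
     * non-present when the hypervisor dereferenced it.
     */
    if (mlock(hypercall_buf, sizeof(hypercall_buf)) != 0) {
        perror("mlock");
        return 1;
    }

    /* ... issue the hypercall referencing hypercall_buf here ... */

    munlock(hypercall_buf, sizeof(hypercall_buf));
    return 0;
}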