[Pkg-xen-changes] r1123 - in branches/wheezy/xen/debian: . patches

Bastian Blank waldi at alioth.debian.org
Mon Dec 3 18:17:45 UTC 2012


Author: waldi
Date: Mon Dec  3 18:17:44 2012
New Revision: 1123

Log:
* debian/changelog: Update.
* debian/patches:
  Add fixes for CVE-2012-5510, CVE-2012-5511, CVE-2012-5512, CVE-2012-5513 and
  CVE-2012-5515.

Added:
   branches/wheezy/xen/debian/patches/CVE-2012-5510
   branches/wheezy/xen/debian/patches/CVE-2012-5511
   branches/wheezy/xen/debian/patches/CVE-2012-5512
   branches/wheezy/xen/debian/patches/CVE-2012-5513
   branches/wheezy/xen/debian/patches/CVE-2012-5515
Modified:
   branches/wheezy/xen/debian/changelog
   branches/wheezy/xen/debian/patches/series

Modified: branches/wheezy/xen/debian/changelog
==============================================================================
--- branches/wheezy/xen/debian/changelog	Tue Nov 20 19:47:00 2012	(r1122)
+++ branches/wheezy/xen/debian/changelog	Mon Dec  3 18:17:44 2012	(r1123)
@@ -1,3 +1,18 @@
+xen (4.1.3-5) UNRELEASED; urgency=high
+
+  * Fix state corruption due to incomplete grant table switch.
+    CVE-2012-5510
+  * Check range of arguments to several HVM operations.
+    CVE-2012-5511
+  * Check array index before using it in HVM memory operation.
+    CVE-2012-5512
+  * Check memory range in memory exchange operation.
+    CVE-2012-5513
+  * Don't allow too large memory size and avoid busy looping.
+    CVE-2012-5515
+
+ -- Bastian Blank <waldi at debian.org>  Mon, 03 Dec 2012 18:56:57 +0100
+
 xen (4.1.3-4) unstable; urgency=high
 
   * Use linux 3.2.0-4 stuff.

Added: branches/wheezy/xen/debian/patches/CVE-2012-5510
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2012-5510	Mon Dec  3 18:17:44 2012	(r1123)
@@ -0,0 +1,102 @@
+Description: gnttab: fix releasing of memory upon switches between versions
+ gnttab_unpopulate_status_frames() incompletely freed the pages
+ previously used as status frame in that they did not get removed from
+ the domain's xenpage_list, thus causing subsequent list corruption
+ when those pages did get allocated again for the same or another purpose.
+ .
+ Similarly, grant_table_create() and gnttab_grow_table() both improperly
+ clean up in the event of an error - pages already shared with the guest
+ can't be freed by just passing them to free_xenheap_page(). Fix this by
+ sharing the pages only after all allocations succeeded.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2012-5510
+---
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1126,12 +1126,13 @@ fault:
+ }
+ 
+ static int
+-gnttab_populate_status_frames(struct domain *d, struct grant_table *gt)
++gnttab_populate_status_frames(struct domain *d, struct grant_table *gt,
++                              unsigned int req_nr_frames)
+ {
+     unsigned i;
+     unsigned req_status_frames;
+ 
+-    req_status_frames = grant_to_status_frames(gt->nr_grant_frames);
++    req_status_frames = grant_to_status_frames(req_nr_frames);
+     for ( i = nr_status_frames(gt); i < req_status_frames; i++ )
+     {
+         if ( (gt->status[i] = alloc_xenheap_page()) == NULL )
+@@ -1162,7 +1163,12 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
+ 
+     for ( i = 0; i < nr_status_frames(gt); i++ )
+     {
+-        page_set_owner(virt_to_page(gt->status[i]), dom_xen);
++        struct page_info *pg = virt_to_page(gt->status[i]);
++
++        BUG_ON(page_get_owner(pg) != d);
++        if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
++            put_page(pg);
++        BUG_ON(pg->count_info & ~PGC_xen_heap);
+         free_xenheap_page(gt->status[i]);
+         gt->status[i] = NULL;
+     }
+@@ -1200,19 +1206,18 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
+         clear_page(gt->shared_raw[i]);
+     }
+ 
+-    /* Share the new shared frames with the recipient domain */
+-    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+-        gnttab_create_shared_page(d, gt, i);
+-
+-    gt->nr_grant_frames = req_nr_frames;
+-
+     /* Status pages - version 2 */
+     if (gt->gt_version > 1)
+     {
+-        if ( gnttab_populate_status_frames(d, gt) )
++        if ( gnttab_populate_status_frames(d, gt, req_nr_frames) )
+             goto shared_alloc_failed;
+     }
+ 
++    /* Share the new shared frames with the recipient domain */
++    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
++        gnttab_create_shared_page(d, gt, i);
++    gt->nr_grant_frames = req_nr_frames;
++
+     return 1;
+ 
+ shared_alloc_failed:
+@@ -2134,7 +2139,7 @@ gnttab_set_version(XEN_GUEST_HANDLE(gnttab_set_version_t) uop)
+ 
+     if ( op.version == 2 && gt->gt_version < 2 )
+     {
+-        res = gnttab_populate_status_frames(d, gt);
++        res = gnttab_populate_status_frames(d, gt, nr_grant_frames(gt));
+         if ( res < 0)
+             goto out_unlock;
+     }
+@@ -2449,9 +2454,6 @@ grant_table_create(
+         clear_page(t->shared_raw[i]);
+     }
+     
+-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+-        gnttab_create_shared_page(d, t, i);
+-
+     /* Status pages for grant table - for version 2 */
+     t->status = xmalloc_array(grant_status_t *,
+                               grant_to_status_frames(max_nr_grant_frames));
+@@ -2459,6 +2461,10 @@ grant_table_create(
+         goto no_mem_4;
+     memset(t->status, 0,
+            grant_to_status_frames(max_nr_grant_frames) * sizeof(t->status[0]));
++
++    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
++        gnttab_create_shared_page(d, t, i);
++
+     t->nr_status_frames = 0;
+ 
+     /* Okay, install the structure. */

Added: branches/wheezy/xen/debian/patches/CVE-2012-5511
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2012-5511	Mon Dec  3 18:17:44 2012	(r1123)
@@ -0,0 +1,149 @@
+Description: hvm: Limit the size of large HVM op batches
+ Doing large p2m updates for HVMOP_track_dirty_vram without preemption
+ ties up the physical processor. Integrating preemption into the p2m
+ updates is hard so simply limit to 1GB which is sufficient for a 15000
+ * 15000 * 32bpp framebuffer.
+ x86/paging: Don't allocate user-controlled amounts of stack memory.
+From: Tim Deegan <tim at xen.org>
+From: Ian Campbell <ian.campbell at citrix.com>
+From: Jan Beulich <JBeulich at suse.com>
+Origin: upstream
+Id: CVE-2012-5511
+---
+--- a/xen/arch/x86/hvm/hvm.c	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/hvm/hvm.c	Mon Nov 19 16:00:33 2012 +0000
+@@ -3471,6 +3471,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         if ( !is_hvm_domain(d) )
+             goto param_fail2;
+ 
++        if ( a.nr > GB(1) >> PAGE_SHIFT )
++            goto param_fail2;
++
+         rc = xsm_hvm_param(d, op);
+         if ( rc )
+             goto param_fail2;
+@@ -3498,7 +3501,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         struct xen_hvm_modified_memory a;
+         struct domain *d;
+         struct p2m_domain *p2m;
+-        unsigned long pfn;
+ 
+         if ( copy_from_guest(&a, arg, 1) )
+             return -EFAULT;
+@@ -3526,8 +3528,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+             goto param_fail3;
+ 
+         p2m = p2m_get_hostp2m(d);
+-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++        while ( a.nr > 0 )
+         {
++            unsigned long pfn = a.first_pfn;
+             p2m_type_t t;
+             mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
+             if ( p2m_is_paging(t) )
+@@ -3548,6 +3551,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+                 /* don't take a long time and don't die either */
+                 sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+             }
++
++            a.first_pfn++;
++            a.nr--;
++
++            /* Check for continuation if it's not the last iteration */
++            if ( a.nr > 0 && hypercall_preempt_check() )
++            {
++                if ( copy_to_guest(arg, &a, 1) )
++                    rc = -EFAULT;
++                else
++                    rc = -EAGAIN;
++                break;
++            }
+         }
+ 
+     param_fail3:
+@@ -3595,7 +3611,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         struct xen_hvm_set_mem_type a;
+         struct domain *d;
+         struct p2m_domain *p2m;
+-        unsigned long pfn;
+         
+         /* Interface types to internal p2m types */
+         p2m_type_t memtype[] = {
+@@ -3625,8 +3640,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+             goto param_fail4;
+ 
+         p2m = p2m_get_hostp2m(d);
+-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++        while ( a.nr > 0 )
+         {
++            unsigned long pfn = a.first_pfn;
+             p2m_type_t t;
+             p2m_type_t nt;
+             mfn_t mfn;
+@@ -3662,6 +3678,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+                     goto param_fail4;
+                 }
+             }
++
++            a.first_pfn++;
++            a.nr--;
++
++            /* Check for continuation if it's not the last iteration */
++            if ( a.nr > 0 && hypercall_preempt_check() )
++            {
++                if ( copy_to_guest(arg, &a, 1) )
++                    rc = -EFAULT;
++                else
++                    rc = -EAGAIN;
++                goto param_fail4;
++            }
+         }
+ 
+         rc = 0;
+--- a/xen/arch/x86/mm/paging.c	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/mm/paging.c	Mon Nov 19 16:00:33 2012 +0000
+@@ -529,13 +529,18 @@ int paging_log_dirty_range(struct domain
+ 
+     if ( !d->arch.paging.log_dirty.fault_count &&
+          !d->arch.paging.log_dirty.dirty_count ) {
+-        int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
+-        unsigned long zeroes[size];
+-        memset(zeroes, 0x00, size * BYTES_PER_LONG);
++        static uint8_t zeroes[PAGE_SIZE];
++        int off, size;
++
++        size = ((nr + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof (long);
+         rv = 0;
+-        if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
+-                                  size * BYTES_PER_LONG) != 0 )
+-            rv = -EFAULT;
++        for ( off = 0; !rv && off < size; )
++        {
++            int todo = min(size - off, (int) PAGE_SIZE);
++            if ( copy_to_guest_offset(dirty_bitmap, off, zeroes, todo) )
++                rv = -EFAULT;
++            off += todo;
++        }
+         goto out;
+     }
+     d->arch.paging.log_dirty.fault_count = 0;
+--- a/xen/include/asm-x86/config.h	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/include/asm-x86/config.h	Mon Nov 19 16:00:33 2012 +0000
+@@ -108,6 +108,9 @@ extern unsigned int trampoline_xen_phys_
+ extern unsigned char trampoline_cpu_started;
+ extern char wakeup_start[];
+ extern unsigned int video_mode, video_flags;
++
++#define GB(_gb) (_gb ## UL << 30)
++
+ #endif
+ 
+ #define asmlinkage
+@@ -123,7 +126,6 @@ extern unsigned int video_mode, video_fl
+ #define PML4_ADDR(_slot)                             \
+     ((((_slot ## UL) >> 8) * 0xffff000000000000UL) | \
+      (_slot ## UL << PML4_ENTRY_BITS))
+-#define GB(_gb) (_gb ## UL << 30)
+ #else
+ #define PML4_ENTRY_BYTES (1 << PML4_ENTRY_BITS)
+ #define PML4_ADDR(_slot)                             \

Added: branches/wheezy/xen/debian/patches/CVE-2012-5512
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2012-5512	Mon Dec  3 18:17:44 2012	(r1123)
@@ -0,0 +1,30 @@
+Description: x86/HVM: range check xen_hvm_set_mem_access.hvmmem_access before use
+ Otherwise an out of bounds array access can happen if changing the
+ default access is being requested, which - if it doesn't crash Xen -
+ would subsequently allow reading arbitrary memory through
+ HVMOP_get_mem_access (again, unless that operation crashes Xen).
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2012-5512
+---
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3699,7 +3699,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+             return rc;
+ 
+         rc = -EINVAL;
+-        if ( !is_hvm_domain(d) )
++        if ( !is_hvm_domain(d) || a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+             goto param_fail5;
+ 
+         p2m = p2m_get_hostp2m(d);
+@@ -3719,9 +3719,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
+             goto param_fail5;
+             
+-        if ( a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+-            goto param_fail5;
+-
+         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+         {
+             p2m_type_t t;

Added: branches/wheezy/xen/debian/patches/CVE-2012-5513
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2012-5513	Mon Dec  3 18:17:44 2012	(r1123)
@@ -0,0 +1,41 @@
+Description: xen: add missing guest address range checks to XENMEM_exchange handlers
+ Ever since its existence (3.0.3 iirc) the handler for this has been
+ using non address range checking guest memory accessors (i.e.
+ the ones prefixed with two underscores) without first range
+ checking the accessed space (via guest_handle_okay()), allowing
+ a guest to access and overwrite hypervisor memory.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2012-5513
+---
+--- a/xen/common/compat/memory.c
++++ b/xen/common/compat/memory.c
+@@ -114,6 +114,12 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE(void) compat)
+                   (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
+                 return -EINVAL;
+ 
++            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
++                                     cmp.xchg.in.nr_extents) ||
++                 !compat_handle_okay(cmp.xchg.out.extent_start,
++                                     cmp.xchg.out.nr_extents) )
++                return -EFAULT;
++
+             start_extent = cmp.xchg.nr_exchanged;
+             end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
+                          (((1U << ABS(order_delta)) + 1) *
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -289,6 +289,13 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+         goto fail_early;
+     }
+ 
++    if ( !guest_handle_okay(exch.in.extent_start, exch.in.nr_extents) ||
++         !guest_handle_okay(exch.out.extent_start, exch.out.nr_extents) )
++    {
++        rc = -EFAULT;
++        goto fail_early;
++    }
++
+     /* Only privileged guests can allocate multi-page contiguous extents. */
+     if ( !multipage_allocation_permitted(current->domain,
+                                          exch.in.extent_order) ||

Added: branches/wheezy/xen/debian/patches/CVE-2012-5515
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2012-5515	Mon Dec  3 18:17:44 2012	(r1123)
@@ -0,0 +1,39 @@
+Description: memop: limit guest specified extent order
+ Allowing unbounded order values here causes almost unbounded loops
+ and/or partially incomplete requests, particularly in PoD code.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2012-5515
+---
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -117,7 +117,8 @@ static void populate_physmap(struct memop_args *a)
+ 
+         if ( a->memflags & MEMF_populate_on_demand )
+         {
+-            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
++            if ( a->extent_order > MAX_ORDER ||
++                 guest_physmap_mark_populate_on_demand(d, gpfn,
+                                                        a->extent_order) < 0 )
+                 goto out;
+         }
+@@ -216,7 +217,8 @@ static void decrease_reservation(struct memop_args *a)
+     xen_pfn_t gmfn;
+ 
+     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
+-                                     a->nr_extents-1) )
++                                     a->nr_extents-1) ||
++         a->extent_order > MAX_ORDER )
+         return;
+ 
+     for ( i = a->nr_done; i < a->nr_extents; i++ )
+@@ -278,6 +280,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+     if ( (exch.nr_exchanged > exch.in.nr_extents) ||
+          /* Input and output domain identifiers match? */
+          (exch.in.domid != exch.out.domid) ||
++         /* Extent orders are sensible? */
++         (exch.in.extent_order > MAX_ORDER) ||
++         (exch.out.extent_order > MAX_ORDER) ||
+          /* Sizes of input and output lists do not overflow a long? */
+          ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
+          ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||

Modified: branches/wheezy/xen/debian/patches/series
==============================================================================
--- branches/wheezy/xen/debian/patches/series	Tue Nov 20 19:47:00 2012	(r1122)
+++ branches/wheezy/xen/debian/patches/series	Mon Dec  3 18:17:44 2012	(r1123)
@@ -16,6 +16,11 @@
 CVE-2012-4538
 CVE-2012-4539
 CVE-2012-4544
+CVE-2012-5510
+CVE-2012-5511
+CVE-2012-5512
+CVE-2012-5513
+CVE-2012-5515
 
 xen-x86-interrupt-pointer-missmatch.diff
 



More information about the Pkg-xen-changes mailing list