[Pkg-xen-changes] r1150 - in branches/wheezy/xen/debian: . patches

Bastian Blank waldi at alioth.debian.org
Thu May 2 12:23:34 UTC 2013


Author: waldi
Date: Thu May  2 12:23:34 2013
New Revision: 1150

Log:
* debian/changelog: Update.
* debian/patches: Add fixes for CVE-2013-1918 and CVE-2013-1952.

Added:
   branches/wheezy/xen/debian/patches/CVE-2013-1918-1
   branches/wheezy/xen/debian/patches/CVE-2013-1918-2
   branches/wheezy/xen/debian/patches/CVE-2013-1918-3
   branches/wheezy/xen/debian/patches/CVE-2013-1918-4
   branches/wheezy/xen/debian/patches/CVE-2013-1918-5
   branches/wheezy/xen/debian/patches/CVE-2013-1918-6
   branches/wheezy/xen/debian/patches/CVE-2013-1918-7
   branches/wheezy/xen/debian/patches/CVE-2013-1952
Modified:
   branches/wheezy/xen/debian/changelog
   branches/wheezy/xen/debian/patches/series

Modified: branches/wheezy/xen/debian/changelog
==============================================================================
--- branches/wheezy/xen/debian/changelog	Wed Apr 24 12:58:35 2013	(r1149)
+++ branches/wheezy/xen/debian/changelog	Thu May  2 12:23:34 2013	(r1150)
@@ -1,3 +1,12 @@
+xen (4.1.4-4) UNRELEASED; urgency=low
+
+  * Make several long running operations preemptible.
+    CVE-2013-1918
+  * Fix source validation for VT-d interrupt remapping.
+    CVE-2013-1952
+
+ -- Bastian Blank <waldi at debian.org>  Thu, 02 May 2013 14:05:30 +0200
+
 xen (4.1.4-3) unstable; urgency=high
 
   * Fix return from SYSENTER.

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-1
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-1	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,247 @@
+Description: x86: make vcpu_destroy_pagetables() preemptible
+ ... as it may take significant amounts of time.
+ .
+ The function is moved to mm.c, the better home for it anyway, and is
+ temporarily given a "preemptible" parameter, avoiding the need to make
+ a new helper function there non-static; a subsequent patch makes its
+ other caller capable of dealing with preemption as well, at which
+ point the parameter can be dropped again.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -70,8 +70,6 @@ void (*dead_idle) (void) __read_mostly =
+ static void paravirt_ctxt_switch_from(struct vcpu *v);
+ static void paravirt_ctxt_switch_to(struct vcpu *v);
+ 
+-static void vcpu_destroy_pagetables(struct vcpu *v);
+-
+ static void continue_idle_domain(struct vcpu *v)
+ {
+     reset_stack_and_jump(idle_loop);
+@@ -909,7 +907,7 @@ void arch_vcpu_reset(struct vcpu *v)
+     if ( !is_hvm_vcpu(v) )
+     {
+         destroy_gdt(v);
+-        vcpu_destroy_pagetables(v);
++        vcpu_destroy_pagetables(v, 0);
+     }
+     else
+     {
+@@ -1917,63 +1915,6 @@ static int relinquish_memory(
+     return ret;
+ }
+ 
+-static void vcpu_destroy_pagetables(struct vcpu *v)
+-{
+-    struct domain *d = v->domain;
+-    unsigned long pfn;
+-
+-#ifdef __x86_64__
+-    if ( is_pv_32on64_vcpu(v) )
+-    {
+-        pfn = l4e_get_pfn(*(l4_pgentry_t *)
+-                          __va(pagetable_get_paddr(v->arch.guest_table)));
+-
+-        if ( pfn != 0 )
+-        {
+-            if ( paging_mode_refcounts(d) )
+-                put_page(mfn_to_page(pfn));
+-            else
+-                put_page_and_type(mfn_to_page(pfn));
+-        }
+-
+-        l4e_write(
+-            (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
+-            l4e_empty());
+-
+-        v->arch.cr3 = 0;
+-        return;
+-    }
+-#endif
+-
+-    pfn = pagetable_get_pfn(v->arch.guest_table);
+-    if ( pfn != 0 )
+-    {
+-        if ( paging_mode_refcounts(d) )
+-            put_page(mfn_to_page(pfn));
+-        else
+-            put_page_and_type(mfn_to_page(pfn));
+-        v->arch.guest_table = pagetable_null();
+-    }
+-
+-#ifdef __x86_64__
+-    /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
+-    pfn = pagetable_get_pfn(v->arch.guest_table_user);
+-    if ( pfn != 0 )
+-    {
+-        if ( !is_pv_32bit_vcpu(v) )
+-        {
+-            if ( paging_mode_refcounts(d) )
+-                put_page(mfn_to_page(pfn));
+-            else
+-                put_page_and_type(mfn_to_page(pfn));
+-        }
+-        v->arch.guest_table_user = pagetable_null();
+-    }
+-#endif
+-
+-    v->arch.cr3 = 0;
+-}
+-
+ int domain_relinquish_resources(struct domain *d)
+ {
+     int ret;
+@@ -1992,7 +1933,9 @@ int domain_relinquish_resources(struct d
+         for_each_vcpu ( d, v )
+         {
+             /* Drop the in-use references to page-table bases. */
+-            vcpu_destroy_pagetables(v);
++            ret = vcpu_destroy_pagetables(v, 1);
++            if ( ret )
++                return ret;
+ 
+             /*
+              * Relinquish GDT mappings. No need for explicit unmapping of the
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -2725,6 +2725,82 @@ static void put_superpage(unsigned long 
+ 
+ #endif
+ 
++static int put_old_guest_table(struct vcpu *v)
++{
++    int rc;
++
++    if ( !v->arch.old_guest_table )
++        return 0;
++
++    switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
++    {
++    case -EINTR:
++    case -EAGAIN:
++        return -EAGAIN;
++    }
++
++    v->arch.old_guest_table = NULL;
++
++    return rc;
++}
++
++int vcpu_destroy_pagetables(struct vcpu *v, bool_t preemptible)
++{
++    unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
++    struct page_info *page;
++    int rc = put_old_guest_table(v);
++
++    if ( rc )
++        return rc;
++
++#ifdef __x86_64__
++    if ( is_pv_32on64_vcpu(v) )
++        mfn = l4e_get_pfn(*(l4_pgentry_t *)mfn_to_virt(mfn));
++#endif
++
++    if ( mfn )
++    {
++        page = mfn_to_page(mfn);
++        if ( paging_mode_refcounts(v->domain) )
++            put_page(page);
++        else
++            rc = put_page_and_type_preemptible(page, preemptible);
++    }
++
++#ifdef __x86_64__
++    if ( is_pv_32on64_vcpu(v) )
++    {
++        if ( !rc )
++            l4e_write(
++                (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
++                l4e_empty());
++    }
++    else
++#endif
++    if ( !rc )
++    {
++        v->arch.guest_table = pagetable_null();
++
++#ifdef __x86_64__
++        /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
++        mfn = pagetable_get_pfn(v->arch.guest_table_user);
++        if ( mfn )
++        {
++            page = mfn_to_page(mfn);
++            if ( paging_mode_refcounts(v->domain) )
++                put_page(page);
++            else
++                rc = put_page_and_type_preemptible(page, preemptible);
++        }
++        if ( !rc )
++            v->arch.guest_table_user = pagetable_null();
++#endif
++    }
++
++    v->arch.cr3 = 0;
++
++    return rc;
++}
+ 
+ int new_guest_cr3(unsigned long mfn)
+ {
+@@ -2911,12 +2987,21 @@ long do_mmuext_op(
+     unsigned int foreigndom)
+ {
+     struct mmuext_op op;
+-    int rc = 0, i = 0, okay;
+     unsigned long type;
+-    unsigned int done = 0;
++    unsigned int i = 0, done = 0;
+     struct vcpu *curr = current;
+     struct domain *d = curr->domain;
+     struct domain *pg_owner;
++    int okay, rc = put_old_guest_table(curr);
++
++    if ( unlikely(rc) )
++    {
++        if ( likely(rc == -EAGAIN) )
++            rc = hypercall_create_continuation(
++                     __HYPERVISOR_mmuext_op, "hihi", uops, count, pdone,
++                     foreigndom);
++        return rc;
++    }
+ 
+     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
+     {
+--- a/xen/arch/x86/x86_64/compat/mm.c
++++ b/xen/arch/x86/x86_64/compat/mm.c
+@@ -319,7 +319,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
+                                     : mcs->call.args[1];
+                 unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
+ 
+-                BUG_ON(left == arg1);
++                BUG_ON(left == arg1 && left != i);
+                 BUG_ON(left > count);
+                 guest_handle_add_offset(nat_ops, i - left);
+                 guest_handle_subtract_offset(cmp_uops, left);
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -405,6 +405,7 @@ struct arch_vcpu
+     pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
+ #endif
+     pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
++    struct page_info *old_guest_table;  /* partially destructed pagetable */
+     /* guest_table holds a ref to the page, and also a type-count unless
+      * shadow refcounts are in use */
+     pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
+--- a/xen/include/asm-x86/mm.h
++++ b/xen/include/asm-x86/mm.h
+@@ -555,6 +555,7 @@ void audit_domains(void);
+ int new_guest_cr3(unsigned long pfn);
+ void make_cr3(struct vcpu *v, unsigned long mfn);
+ void update_cr3(struct vcpu *v);
++int vcpu_destroy_pagetables(struct vcpu *, bool_t preemptible);
+ void propagate_page_fault(unsigned long addr, u16 error_code);
+ void *do_page_walk(struct vcpu *v, unsigned long addr);
+ 
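
The core pattern the whole series builds on is visible in this first
patch: a page table whose teardown got interrupted is parked in
v->arch.old_guest_table, and every affected hypercall first drains that
field, turning remaining work into -EAGAIN so a continuation can be
created. Below is a minimal, self-contained model of that control flow
(the struct layout and the put function are stubs; only
put_old_guest_table() and its -EAGAIN contract mirror the patch):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct page_info { int type_refs; };                      /* stub */
    struct vcpu { struct { struct page_info *old_guest_table; } arch; };

    /* Stub: releases one step of the type reference per call, reporting
     * -EAGAIN while work remains and 0 once fully released. */
    static int put_page_and_type_preemptible(struct page_info *pg, int p)
    {
        (void)p;
        return --pg->type_refs > 0 ? -EAGAIN : 0;
    }

    /* Mirrors the patch: finish a previously interrupted teardown. */
    static int put_old_guest_table(struct vcpu *v)
    {
        int rc;

        if ( !v->arch.old_guest_table )
            return 0;

        switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
        {
        case -EINTR:
        case -EAGAIN:
            return -EAGAIN;   /* caller creates a hypercall continuation */
        }

        v->arch.old_guest_table = NULL;
        return rc;
    }

    int main(void)
    {
        struct page_info pt = { .type_refs = 3 };
        struct vcpu v = { .arch.old_guest_table = &pt };
        int rc;

        /* Each -EAGAIN models one continuation re-entry by the guest. */
        while ( (rc = put_old_guest_table(&v)) == -EAGAIN )
            puts("preempted; hypercall continues");
        printf("teardown complete, rc=%d\n", rc);
        return 0;
    }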

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-2
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-2	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,151 @@
+Description: x86: make new_guest_cr3() preemptible
+ ... as it may take significant amounts of time.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -2806,44 +2806,69 @@ int new_guest_cr3(unsigned long mfn)
+ {
+     struct vcpu *curr = current;
+     struct domain *d = curr->domain;
+-    int okay;
++    int rc;
+     unsigned long old_base_mfn;
+ 
+ #ifdef __x86_64__
+     if ( is_pv_32on64_domain(d) )
+     {
+-        okay = paging_mode_refcounts(d)
+-            ? 0 /* Old code was broken, but what should it be? */
+-            : mod_l4_entry(
++        rc = paging_mode_refcounts(d)
++             ? -EINVAL /* Old code was broken, but what should it be? */
++             : mod_l4_entry(
+                     __va(pagetable_get_paddr(curr->arch.guest_table)),
+                     l4e_from_pfn(
+                         mfn,
+                         (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
+-                    pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0;
+-        if ( unlikely(!okay) )
++                    pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr);
++        switch ( rc )
+         {
++        case 0:
++            break;
++        case -EINTR:
++        case -EAGAIN:
++            return -EAGAIN;
++        default:
+             MEM_LOG("Error while installing new compat baseptr %lx", mfn);
+-            return 0;
++            return rc;
+         }
+ 
+         invalidate_shadow_ldt(curr, 0);
+         write_ptbase(curr);
+ 
+-        return 1;
++        return 0;
+     }
+ #endif
+-    okay = paging_mode_refcounts(d)
+-        ? get_page_from_pagenr(mfn, d)
+-        : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0);
+-    if ( unlikely(!okay) )
++    rc = put_old_guest_table(curr);
++    if ( unlikely(rc) )
++        return rc;
++
++    old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
++    /*
++     * This is particularly important when getting restarted after the
++     * previous attempt got preempted in the put-old-MFN phase.
++     */
++    if ( old_base_mfn == mfn )
+     {
+-        MEM_LOG("Error while installing new baseptr %lx", mfn);
++        write_ptbase(curr);
+         return 0;
+     }
+ 
+-    invalidate_shadow_ldt(curr, 0);
++    rc = paging_mode_refcounts(d)
++         ? (get_page_from_pagenr(mfn, d) ? 0 : -EINVAL)
++         : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 1);
++    switch ( rc )
++    {
++    case 0:
++        break;
++    case -EINTR:
++    case -EAGAIN:
++        return -EAGAIN;
++    default:
++        MEM_LOG("Error while installing new baseptr %lx", mfn);
++        return rc;
++    }
+ 
+-    old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
++    invalidate_shadow_ldt(curr, 0);
+ 
+     curr->arch.guest_table = pagetable_from_pfn(mfn);
+     update_cr3(curr);
+@@ -2852,13 +2877,25 @@ int new_guest_cr3(unsigned long mfn)
+ 
+     if ( likely(old_base_mfn != 0) )
+     {
++        struct page_info *page = mfn_to_page(old_base_mfn);
++
+         if ( paging_mode_refcounts(d) )
+-            put_page(mfn_to_page(old_base_mfn));
++            put_page(page);
+         else
+-            put_page_and_type(mfn_to_page(old_base_mfn));
++            switch ( rc = put_page_and_type_preemptible(page, 1) )
++            {
++            case -EINTR:
++                rc = -EAGAIN;
++            case -EAGAIN:
++                curr->arch.old_guest_table = page;
++                break;
++            default:
++                BUG_ON(rc);
++                break;
++            }
+     }
+ 
+-    return 1;
++    return rc;
+ }
+ 
+ static struct domain *get_pg_owner(domid_t domid)
+@@ -3154,7 +3191,8 @@ long do_mmuext_op(
+         }
+ 
+         case MMUEXT_NEW_BASEPTR:
+-            okay = new_guest_cr3(gmfn_to_mfn(d, op.arg1.mfn));
++            rc = new_guest_cr3(gmfn_to_mfn(d, op.arg1.mfn));
++            okay = !rc;
+             break;
+         
+ #ifdef __x86_64__
+--- a/xen/arch/x86/traps.c
++++ b/xen/arch/x86/traps.c
+@@ -2317,8 +2317,15 @@ static int emulate_privileged_op(struct 
+                 rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
+ #endif
+             domain_unlock(v->domain);
+-            if ( rc == 0 ) /* not okay */
++            switch ( rc )
++            {
++            case 0:
++                break;
++            case -EAGAIN: /* retry after preemption */
++                goto skip;
++            default:      /* not okay */
+                 goto fail;
++            }
+             break;
+ 
+         case 4: /* Write CR4 */
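
Two details in this patch carry the restart semantics. After a
continuation, the new MFN may already be installed (it was the put of
the old base that got preempted), so new_guest_cr3() now short-circuits
when old_base_mfn == mfn instead of taking a second reference. And when
the put of the old base is preempted, -EINTR is normalized to -EAGAIN
and the page is parked for the next entry to finish. A reduced sketch
with stubbed types (only the control flow follows the patch; re-entry
drains old_guest_table first, as in the sketch after patch 1):

    #include <errno.h>
    #include <stddef.h>

    struct page_info { int type_refs; };
    struct vcpu {
        struct {
            struct page_info *guest_table;       /* stub: current base */
            struct page_info *old_guest_table;
        } arch;
    };

    static int put_page_and_type_preemptible(struct page_info *pg, int p)
    {
        (void)p;
        return --pg->type_refs > 0 ? -EAGAIN : 0;
    }

    /* Sketch of the tail of new_guest_cr3(): install the new base, then
     * drop the old one preemptibly, parking it if interrupted. */
    static int switch_base(struct vcpu *curr, struct page_info *newpg)
    {
        struct page_info *old = curr->arch.guest_table;
        int rc = 0;

        if ( old == newpg )       /* restarted after the put-old phase */
            return 0;

        curr->arch.guest_table = newpg;

        if ( old )
            switch ( rc = put_page_and_type_preemptible(old, 1) )
            {
            case -EINTR:
                rc = -EAGAIN;                 /* normalize; fall through */
            case -EAGAIN:
                curr->arch.old_guest_table = old;  /* finish on re-entry */
                break;
            default:
                break;                    /* 0: old base fully dropped */
            }

        return rc;                /* -EAGAIN => hypercall continuation */
    }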

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-3
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-3	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,70 @@
+Description: x86: make MMUEXT_NEW_USER_BASEPTR preemptible
+ ... as it may take significant amounts of time.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -3200,29 +3200,55 @@ long do_mmuext_op(
+             unsigned long old_mfn, mfn;
+ 
+             mfn = gmfn_to_mfn(d, op.arg1.mfn);
++            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
++            /*
++             * This is particularly important when getting restarted after the
++             * previous attempt got preempted in the put-old-MFN phase.
++             */
++            if ( old_mfn == mfn )
++                break;
++
+             if ( mfn != 0 )
+             {
+                 if ( paging_mode_refcounts(d) )
+                     okay = get_page_from_pagenr(mfn, d);
+                 else
+-                    okay = !get_page_and_type_from_pagenr(
+-                        mfn, PGT_root_page_table, d, 0, 0);
++                {
++                    rc = get_page_and_type_from_pagenr(
++                        mfn, PGT_root_page_table, d, 0, 1);
++                    okay = !rc;
++                }
+                 if ( unlikely(!okay) )
+                 {
+-                    MEM_LOG("Error while installing new mfn %lx", mfn);
++                    if ( rc == -EINTR )
++                        rc = -EAGAIN;
++                    else if ( rc != -EAGAIN )
++                        MEM_LOG("Error while installing new mfn %lx", mfn);
+                     break;
+                 }
+             }
+ 
+-            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
+             curr->arch.guest_table_user = pagetable_from_pfn(mfn);
+ 
+             if ( old_mfn != 0 )
+             {
++                struct page_info *page = mfn_to_page(old_mfn);
++
+                 if ( paging_mode_refcounts(d) )
+-                    put_page(mfn_to_page(old_mfn));
++                    put_page(page);
+                 else
+-                    put_page_and_type(mfn_to_page(old_mfn));
++                    switch ( rc = put_page_and_type_preemptible(page, 1) )
++                    {
++                    case -EINTR:
++                        rc = -EAGAIN;
++                    case -EAGAIN:
++                        curr->arch.old_guest_table = page;
++                        okay = 0;
++                        break;
++                    default:
++                        BUG_ON(rc);
++                        break;
++                    }
+             }
+ 
+             break;

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-4
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-4	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,197 @@
+Description: x86: make vcpu_reset() preemptible
+ ... as dropping the old page tables may take significant amounts of
+ time.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -902,17 +902,16 @@ int arch_set_info_guest(
+ #undef c
+ }
+ 
+-void arch_vcpu_reset(struct vcpu *v)
++int arch_vcpu_reset(struct vcpu *v)
+ {
+     if ( !is_hvm_vcpu(v) )
+     {
+         destroy_gdt(v);
+-        vcpu_destroy_pagetables(v, 0);
+-    }
+-    else
+-    {
+-        vcpu_end_shutdown_deferral(v);
++        return vcpu_destroy_pagetables(v);
+     }
++
++    vcpu_end_shutdown_deferral(v);
++    return 0;
+ }
+ 
+ /* 
+@@ -1933,7 +1932,7 @@ int domain_relinquish_resources(struct d
+         for_each_vcpu ( d, v )
+         {
+             /* Drop the in-use references to page-table bases. */
+-            ret = vcpu_destroy_pagetables(v, 1);
++            ret = vcpu_destroy_pagetables(v);
+             if ( ret )
+                 return ret;
+ 
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3083,8 +3083,11 @@ static void hvm_s3_suspend(struct domain
+ 
+     for_each_vcpu ( d, v )
+     {
++        int rc;
++
+         vlapic_reset(vcpu_vlapic(v));
+-        vcpu_reset(v);
++        rc = vcpu_reset(v);
++        ASSERT(!rc);
+     }
+ 
+     vpic_reset(d);
+--- a/xen/arch/x86/hvm/vlapic.c
++++ b/xen/arch/x86/hvm/vlapic.c
+@@ -252,10 +252,13 @@ static void vlapic_init_sipi_action(unsi
+     {
+     case APIC_DM_INIT: {
+         bool_t fpu_initialised;
++        int rc;
++
+         domain_lock(target->domain);
+         /* Reset necessary VCPU state. This does not include FPU state. */
+         fpu_initialised = target->fpu_initialised;
+-        vcpu_reset(target);
++        rc = vcpu_reset(target);
++        ASSERT(!rc);
+         target->fpu_initialised = fpu_initialised;
+         vlapic_reset(vcpu_vlapic(target));
+         domain_unlock(target->domain);
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -2744,7 +2744,7 @@ static int put_old_guest_table(struct vc
+     return rc;
+ }
+ 
+-int vcpu_destroy_pagetables(struct vcpu *v, bool_t preemptible)
++int vcpu_destroy_pagetables(struct vcpu *v)
+ {
+     unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+     struct page_info *page;
+@@ -2764,7 +2764,7 @@ int vcpu_destroy_pagetables(struct vcpu 
+         if ( paging_mode_refcounts(v->domain) )
+             put_page(page);
+         else
+-            rc = put_page_and_type_preemptible(page, preemptible);
++            rc = put_page_and_type_preemptible(page, 1);
+     }
+ 
+ #ifdef __x86_64__
+@@ -2790,7 +2790,7 @@ int vcpu_destroy_pagetables(struct vcpu 
+             if ( paging_mode_refcounts(v->domain) )
+                 put_page(page);
+             else
+-                rc = put_page_and_type_preemptible(page, preemptible);
++                rc = put_page_and_type_preemptible(page, 1);
+         }
+         if ( !rc )
+             v->arch.guest_table_user = pagetable_null();
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -770,14 +770,18 @@ int boot_vcpu(struct domain *d, int vcpu
+     return arch_set_info_guest(v, ctxt);
+ }
+ 
+-void vcpu_reset(struct vcpu *v)
++int vcpu_reset(struct vcpu *v)
+ {
+     struct domain *d = v->domain;
++    int rc;
+ 
+     vcpu_pause(v);
+     domain_lock(d);
+ 
+-    arch_vcpu_reset(v);
++    set_bit(_VPF_in_reset, &v->pause_flags);
++    rc = arch_vcpu_reset(v);
++    if ( rc )
++        goto out_unlock;
+ 
+     set_bit(_VPF_down, &v->pause_flags);
+ 
+@@ -793,9 +797,13 @@ void vcpu_reset(struct vcpu *v)
+ #endif
+     cpus_clear(v->cpu_affinity_tmp);
+     clear_bit(_VPF_blocked, &v->pause_flags);
++    clear_bit(_VPF_in_reset, &v->pause_flags);
+ 
++ out_unlock:
+     domain_unlock(v->domain);
+     vcpu_unpause(v);
++
++    return rc;
+ }
+ 
+ 
+--- a/xen/common/domctl.c
++++ b/xen/common/domctl.c
+@@ -286,8 +286,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+ 
+         if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
+         {
+-            vcpu_reset(v);
+-            ret = 0;
++            ret = vcpu_reset(v);
++            if ( ret == -EAGAIN )
++                ret = hypercall_create_continuation(
++                          __HYPERVISOR_domctl, "h", u_domctl);
+             goto svc_out;
+         }
+ 
+--- a/xen/include/asm-x86/mm.h
++++ b/xen/include/asm-x86/mm.h
+@@ -555,7 +555,7 @@ void audit_domains(void);
+ int new_guest_cr3(unsigned long pfn);
+ void make_cr3(struct vcpu *v, unsigned long mfn);
+ void update_cr3(struct vcpu *v);
+-int vcpu_destroy_pagetables(struct vcpu *, bool_t preemptible);
++int vcpu_destroy_pagetables(struct vcpu *);
+ void propagate_page_fault(unsigned long addr, u16 error_code);
+ void *do_page_walk(struct vcpu *v, unsigned long addr);
+ 
+--- a/xen/include/xen/domain.h
++++ b/xen/include/xen/domain.h
+@@ -15,7 +15,7 @@ struct vcpu *alloc_vcpu(
+ int boot_vcpu(
+     struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
+ struct vcpu *alloc_dom0_vcpu0(void);
+-void vcpu_reset(struct vcpu *v);
++int vcpu_reset(struct vcpu *);
+ 
+ struct xen_domctl_getdomaininfo;
+ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
+@@ -57,7 +57,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
+ 
+ void arch_dump_domain_info(struct domain *d);
+ 
+-void arch_vcpu_reset(struct vcpu *v);
++int arch_vcpu_reset(struct vcpu *);
+ 
+ bool_t domctl_lock_acquire(void);
+ void domctl_lock_release(void);
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -597,6 +597,9 @@ extern struct domain *domain_list;
+  /* VCPU is blocked on memory-event ring. */
+ #define _VPF_mem_event       4
+ #define VPF_mem_event        (1UL<<_VPF_mem_event)
++ /* VCPU is being reset. */
++#define _VPF_in_reset        7
++#define VPF_in_reset         (1UL<<_VPF_in_reset)
+ 
+ static inline int vcpu_runnable(struct vcpu *v)
+ {
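
vcpu_reset() can now fail half-way through, so the patch introduces a
new pause flag, _VPF_in_reset: set before the old page tables start to
be torn down and cleared only once the vCPU's state is consistent
again, keeping the vCPU non-runnable across any continuations in
between. A minimal sketch of the flag discipline (plain bit operations
stand in for Xen's set_bit/clear_bit; _VPF_in_reset = 7 is from the
patch, the other bit position is illustrative):

    #include <errno.h>

    #define VPF_down      (1UL << 1)   /* VCPU is offline */
    #define VPF_in_reset  (1UL << 7)   /* matches _VPF_in_reset = 7 */

    struct vcpu { unsigned long pause_flags; };

    /* Stub standing in for the preemptible arch-specific teardown. */
    static int arch_vcpu_reset(struct vcpu *v)
    {
        (void)v;
        return 0;                      /* may be -EAGAIN in reality */
    }

    static int vcpu_reset(struct vcpu *v)
    {
        int rc;

        v->pause_flags |= VPF_in_reset;    /* not runnable from here on */
        rc = arch_vcpu_reset(v);
        if ( rc )
            return rc;                     /* -EAGAIN: flag stays set */

        v->pause_flags |= VPF_down;
        v->pause_flags &= ~VPF_in_reset;   /* state consistent again */
        return 0;
    }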

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-5
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-5	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,199 @@
+Description: x86: make arch_set_info_guest() preemptible
+ ... as the root page table validation (and the dropping of any old
+ one) can require meaningful amounts of time.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -676,6 +676,7 @@ int arch_set_info_guest(
+ {
+     struct domain *d = v->domain;
+     unsigned long cr3_pfn = INVALID_MFN;
++    struct page_info *cr3_page;
+     unsigned long flags, cr4;
+     int i, rc = 0, compat;
+ 
+@@ -815,72 +816,103 @@ int arch_set_info_guest(
+     if ( rc != 0 )
+         return rc;
+ 
++    set_bit(_VPF_in_reset, &v->pause_flags);
++
+     if ( !compat )
+-    {
+         cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
++#ifdef __x86_64__
++    else
++        cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
++#endif
++    cr3_page = mfn_to_page(cr3_pfn);
+ 
+-        if ( !mfn_valid(cr3_pfn) ||
+-             (paging_mode_refcounts(d)
+-              ? !get_page(mfn_to_page(cr3_pfn), d)
+-              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+-                                   PGT_base_page_table)) )
+-        {
+-            destroy_gdt(v);
+-            return -EINVAL;
+-        }
++    if ( !mfn_valid(cr3_pfn) || !get_page(cr3_page, d) )
++    {
++        cr3_page = NULL;
++        rc = -EINVAL;
++    }
++    else if ( paging_mode_refcounts(d) )
++        /* nothing */;
++    else if ( cr3_page == v->arch.old_guest_table )
++    {
++        v->arch.old_guest_table = NULL;
++        put_page(cr3_page);
++    }
++    else
++    {
++        /*
++         * Since v->arch.guest_table{,_user} are both NULL, this effectively
++         * is just a call to put_old_guest_table().
++         */
++        if ( !compat )
++            rc = vcpu_destroy_pagetables(v);
++        if ( !rc )
++            rc = get_page_type_preemptible(cr3_page,
++                                           !compat ? PGT_root_page_table
++                                                   : PGT_l3_page_table);
++        if ( rc == -EINTR )
++            rc = -EAGAIN;
++    }
+ 
++    if ( rc )
++        /* handled below */;
++    else if ( !compat )
++    {
+         v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
+ 
+ #ifdef __x86_64__
+         if ( c.nat->ctrlreg[1] )
+         {
+             cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
++            cr3_page = mfn_to_page(cr3_pfn);
+ 
+-            if ( !mfn_valid(cr3_pfn) ||
+-                 (paging_mode_refcounts(d)
+-                  ? !get_page(mfn_to_page(cr3_pfn), d)
+-                  : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+-                                       PGT_base_page_table)) )
++            if ( !mfn_valid(cr3_pfn) || !get_page(cr3_page, d) )
+             {
+-                cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
+-                v->arch.guest_table = pagetable_null();
+-                if ( paging_mode_refcounts(d) )
+-                    put_page(mfn_to_page(cr3_pfn));
+-                else
+-                    put_page_and_type(mfn_to_page(cr3_pfn));
+-                destroy_gdt(v);
+-                return -EINVAL;
++                cr3_page = NULL;
++                rc = -EINVAL;
++            }
++            else if ( !paging_mode_refcounts(d) )
++            {
++                rc = get_page_type_preemptible(cr3_page, PGT_root_page_table);
++                switch ( rc )
++                {
++                case -EINTR:
++                    rc = -EAGAIN;
++                case -EAGAIN:
++                    v->arch.old_guest_table =
++                        pagetable_get_page(v->arch.guest_table);
++                    v->arch.guest_table = pagetable_null();
++                    break;
++                }
+             }
+ 
+-            v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
++            if ( !rc )
++                v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
+         }
+         else if ( !(flags & VGCF_in_kernel) )
+         {
+-            destroy_gdt(v);
+-            return -EINVAL;
++            cr3_page = NULL;
++            rc = -EINVAL;
+         }
+     }
+     else
+     {
+         l4_pgentry_t *l4tab;
+ 
+-        cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
+-
+-        if ( !mfn_valid(cr3_pfn) ||
+-             (paging_mode_refcounts(d)
+-              ? !get_page(mfn_to_page(cr3_pfn), d)
+-              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+-                                   PGT_l3_page_table)) )
+-        {
+-            destroy_gdt(v);
+-            return -EINVAL;
+-        }
+-
+         l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+         *l4tab = l4e_from_pfn(
+             cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
+ #endif
+     }
++    if ( rc )
++    {
++        if ( cr3_page )
++            put_page(cr3_page);
++        destroy_gdt(v);
++        return rc;
++    }
++
++    clear_bit(_VPF_in_reset, &v->pause_flags);
+ 
+     if ( v->vcpu_id == 0 )
+         update_domain_wallclock_time(d);
+--- a/xen/common/compat/domain.c
++++ b/xen/common/compat/domain.c
+@@ -52,6 +52,10 @@ int compat_vcpu_op(int cmd, int vcpuid, 
+             rc = boot_vcpu(d, vcpuid, cmp_ctxt);
+         domain_unlock(d);
+ 
++        if ( rc == -EAGAIN )
++            rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
++                                               cmd, vcpuid, arg);
++
+         xfree(cmp_ctxt);
+         break;
+     }
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -842,6 +842,11 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
+         domain_unlock(d);
+ 
+         xfree(ctxt);
++
++        if ( rc == -EAGAIN )
++            rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
++                                               cmd, vcpuid, arg);
++
+         break;
+ 
+     case VCPUOP_up:
+--- a/xen/common/domctl.c
++++ b/xen/common/domctl.c
+@@ -318,6 +318,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+             domain_pause(d);
+             ret = arch_set_info_guest(v, c);
+             domain_unpause(d);
++
++            if ( ret == -EAGAIN )
++                ret = hypercall_create_continuation(
++                          __HYPERVISOR_domctl, "h", u_domctl);
+         }
+ 
+     svc_out:
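
With arch_set_info_guest() able to return -EAGAIN, all three paths that
reach it (do_domctl, do_vcpu_op, compat_vcpu_op) now convert that into
a hypercall continuation, so from the guest's perspective the hypercall
simply restarts until validation completes. Modeling the continuation
as re-execution gives this self-contained sketch (the stub workload
stands in for root page table validation; nothing here is Xen's API):

    #include <assert.h>
    #include <errno.h>

    /* Stub: validation work that needs several passes to finish. */
    static int arch_set_info_guest(int *passes_left)
    {
        return --*passes_left > 0 ? -EAGAIN : 0;
    }

    /* One hypercall invocation; in Xen, -EAGAIN would become
     * hypercall_create_continuation(__HYPERVISOR_vcpu_op, ...). */
    static int do_vcpu_op_initialise(int *passes_left)
    {
        return arch_set_info_guest(passes_left);
    }

    int main(void)
    {
        int passes = 4, rc;

        /* The guest re-enters the hypercall until it stops yielding. */
        while ( (rc = do_vcpu_op_initialise(&passes)) == -EAGAIN )
            continue;
        assert(rc == 0);
        return 0;
    }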

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-6
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-6	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,124 @@
+Description: x86: make page table unpinning preemptible
+ ... as it may take significant amounts of time.
+ .
+ Since a preempted unpin operation cannot simply be re-invoked on the
+ next attempt, the continuation logic must be slightly tweaked to make
+ sure do_mmuext_op() gets run one more time even when the preempted
+ unpin was the last operation in a batch.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -3040,6 +3040,14 @@ long do_mmuext_op(
+         return rc;
+     }
+ 
++    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
++         likely(guest_handle_is_null(uops)) )
++    {
++        /* See the curr->arch.old_guest_table related
++         * hypercall_create_continuation() below. */
++        return (int)foreigndom;
++    }
++
+     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
+     {
+         count &= ~MMU_UPDATE_PREEMPTED;
+@@ -3063,7 +3071,7 @@ long do_mmuext_op(
+ 
+     for ( i = 0; i < count; i++ )
+     {
+-        if ( hypercall_preempt_check() )
++        if ( curr->arch.old_guest_table || hypercall_preempt_check() )
+         {
+             rc = -EAGAIN;
+             break;
+@@ -3181,7 +3189,17 @@ long do_mmuext_op(
+                 break;
+             }
+ 
+-            put_page_and_type(page);
++            switch ( rc = put_page_and_type_preemptible(page, 1) )
++            {
++            case -EINTR:
++            case -EAGAIN:
++                curr->arch.old_guest_table = page;
++                rc = 0;
++                break;
++            default:
++                BUG_ON(rc);
++                break;
++            }
+             put_page(page);
+ 
+             /* A page is dirtied when its pin status is cleared. */
+@@ -3487,9 +3505,27 @@ long do_mmuext_op(
+     }
+ 
+     if ( rc == -EAGAIN )
++    {
++        ASSERT(i < count);
+         rc = hypercall_create_continuation(
+             __HYPERVISOR_mmuext_op, "hihi",
+             uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
++    }
++    else if ( curr->arch.old_guest_table )
++    {
++        XEN_GUEST_HANDLE(void) null;
++
++        ASSERT(rc || i == count);
++        set_xen_guest_handle(null, NULL);
++        /*
++         * In order to have a way to communicate the final return value to
++         * our continuation, we pass this in place of "foreigndom", building
++         * on the fact that this argument isn't needed anymore.
++         */
++        rc = hypercall_create_continuation(
++                __HYPERVISOR_mmuext_op, "hihi", null,
++                MMU_UPDATE_PREEMPTED, null, rc);
++    }
+ 
+     put_pg_owner(pg_owner);
+ 
+--- a/xen/arch/x86/x86_64/compat/mm.c
++++ b/xen/arch/x86/x86_64/compat/mm.c
+@@ -222,6 +222,13 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
+     int rc = 0;
+     XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;
+ 
++    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
++         likely(guest_handle_is_null(cmp_uops)) )
++    {
++        set_xen_guest_handle(nat_ops, NULL);
++        return do_mmuext_op(nat_ops, count, pdone, foreigndom);
++    }
++
+     preempt_mask = count & MMU_UPDATE_PREEMPTED;
+     count ^= preempt_mask;
+ 
+@@ -324,12 +331,18 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
+                 guest_handle_add_offset(nat_ops, i - left);
+                 guest_handle_subtract_offset(cmp_uops, left);
+                 left = 1;
+-                BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
+-                BUG_ON(left != arg1);
+-                if (!test_bit(_MCSF_in_multicall, &mcs->flags))
+-                    regs->_ecx += count - i;
++                if ( arg1 != MMU_UPDATE_PREEMPTED )
++                {
++                    BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
++                                                        cmp_uops));
++                    if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
++                        regs->_ecx += count - i;
++                    else
++                        mcs->compat_call.args[1] += count - i;
++                }
+                 else
+-                    mcs->compat_call.args[1] += count - i;
++                    BUG_ON(hypercall_xlat_continuation(&left, 0));
++                BUG_ON(left != arg1);
+             }
+             else
+                 BUG_ON(err > 0);
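
The subtle piece here is the second continuation form: when the batch
itself completed but the unpin of its last page is still parked in
old_guest_table, do_mmuext_op() queues one extra self-invocation with a
null ops handle, count == MMU_UPDATE_PREEMPTED (a combination no real
batch can produce), and the final return value smuggled through the
now-unused foreigndom argument. A self-contained model of that
encode/decode round trip, with recursion standing in for continuations
(handles, the flag value, and the drain are all stand-ins):

    #include <assert.h>
    #include <errno.h>
    #include <stddef.h>

    #define MMU_UPDATE_PREEMPTED (1U << 31)    /* flag bit, no count */

    static struct { int parked_work; } curr;   /* old_guest_table model */

    static int put_old_guest_table(void)       /* drain parked teardown */
    {
        if ( !curr.parked_work )
            return 0;
        return --curr.parked_work ? -EAGAIN : 0;
    }

    static int do_mmuext_op(void *uops, unsigned int count, int foreigndom)
    {
        int rc = put_old_guest_table();

        if ( rc == -EAGAIN )   /* continuation: re-run, same arguments */
            return do_mmuext_op(uops, count, foreigndom);

        /* Decode: the magic continuation carries the final rc here. */
        if ( count == MMU_UPDATE_PREEMPTED && uops == NULL )
            return foreigndom;

        /* ...normal batch processing; suppose the last op was an unpin
         * that got preempted and parked its page: */
        curr.parked_work = 3;

        /* Encode: null handle + magic count + final rc in foreigndom. */
        return do_mmuext_op(NULL, MMU_UPDATE_PREEMPTED, 0);
    }

    int main(void)
    {
        int dummy_op;
        assert(do_mmuext_op(&dummy_op, 1, 0) == 0);
        return 0;
    }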

Added: branches/wheezy/xen/debian/patches/CVE-2013-1918-7
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1918-7	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,252 @@
+Description: x86: make page table handling error paths preemptible
+ ... as they may take significant amounts of time.
+ .
+ This requires cloning the tweaked continuation logic from
+ do_mmuext_op() to do_mmu_update().
+ .
+ Note that in mod_l[34]_entry() a negative "preemptible" value gets
+ passed to put_page_from_l[34]e() now, telling the callee to store the
+ respective page in current->arch.old_guest_table (for a hypercall
+ continuation to pick up), rather than carrying out the put right away.
+ This is going to be made a little more explicit by a subsequent cleanup
+ patch.
+From: Jan Beulich <jbeulich at suse.com>
+Origin: upstream
+Id: CVE-2013-1918 XSA-45
+---
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -1183,7 +1183,16 @@ static int put_page_from_l3e(l3_pgentry_
+ #endif
+ 
+     if ( unlikely(partial > 0) )
++    {
++        ASSERT(preemptible >= 0);
+         return __put_page_type(l3e_get_page(l3e), preemptible);
++    }
++
++    if ( preemptible < 0 )
++    {
++        current->arch.old_guest_table = l3e_get_page(l3e);
++        return 0;
++    }
+ 
+     return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
+ }
+@@ -1196,7 +1205,17 @@ static int put_page_from_l4e(l4_pgentry_
+          (l4e_get_pfn(l4e) != pfn) )
+     {
+         if ( unlikely(partial > 0) )
++        {
++            ASSERT(preemptible >= 0);
+             return __put_page_type(l4e_get_page(l4e), preemptible);
++        }
++
++        if ( preemptible < 0 )
++        {
++            current->arch.old_guest_table = l4e_get_page(l4e);
++            return 0;
++        }
++
+         return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible);
+     }
+     return 1;
+@@ -1486,12 +1505,17 @@ static int alloc_l3_table(struct page_in
+     if ( rc < 0 && rc != -EAGAIN && rc != -EINTR )
+     {
+         MEM_LOG("Failure in alloc_l3_table: entry %d", i);
++        if ( i )
++        {
++            page->nr_validated_ptes = i;
++            page->partial_pte = 0;
++            current->arch.old_guest_table = page;
++        }
+         while ( i-- > 0 )
+         {
+             if ( !is_guest_l3_slot(i) )
+                 continue;
+             unadjust_guest_l3e(pl3e[i], d);
+-            put_page_from_l3e(pl3e[i], pfn, 0, 0);
+         }
+     }
+ 
+@@ -1521,22 +1545,24 @@ static int alloc_l4_table(struct page_in
+             page->nr_validated_ptes = i;
+             page->partial_pte = partial ?: 1;
+         }
+-        else if ( rc == -EINTR )
++        else if ( rc < 0 )
+         {
++            if ( rc != -EINTR )
++                MEM_LOG("Failure in alloc_l4_table: entry %d", i);
+             if ( i )
+             {
+                 page->nr_validated_ptes = i;
+                 page->partial_pte = 0;
+-                rc = -EAGAIN;
++                if ( rc == -EINTR )
++                    rc = -EAGAIN;
++                else
++                {
++                    if ( current->arch.old_guest_table )
++                        page->nr_validated_ptes++;
++                    current->arch.old_guest_table = page;
++                }
+             }
+         }
+-        else if ( rc < 0 )
+-        {
+-            MEM_LOG("Failure in alloc_l4_table: entry %d", i);
+-            while ( i-- > 0 )
+-                if ( is_guest_l4_slot(d, i) )
+-                    put_page_from_l4e(pl4e[i], pfn, 0, 0);
+-        }
+         if ( rc < 0 )
+             return rc;
+ 
+@@ -1966,7 +1992,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
+         pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
+     }
+ 
+-    put_page_from_l3e(ol3e, pfn, 0, 0);
++    put_page_from_l3e(ol3e, pfn, 0, -preemptible);
+     return rc;
+ }
+ 
+@@ -2029,7 +2055,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
+         return -EFAULT;
+     }
+ 
+-    put_page_from_l4e(ol4e, pfn, 0, 0);
++    put_page_from_l4e(ol4e, pfn, 0, -preemptible);
+     return rc;
+ }
+ 
+@@ -2187,7 +2213,15 @@ static int alloc_page_type(struct page_i
+                 PRtype_info ": caf=%08lx taf=%" PRtype_info,
+                 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
+                 type, page->count_info, page->u.inuse.type_info);
+-        page->u.inuse.type_info = 0;
++        if ( page != current->arch.old_guest_table )
++            page->u.inuse.type_info = 0;
++        else
++        {
++            ASSERT((page->u.inuse.type_info &
++                    (PGT_count_mask | PGT_validated)) == 1);
++            get_page_light(page);
++            page->u.inuse.type_info |= PGT_partial;
++        }
+     }
+     else
+     {
+@@ -3131,21 +3165,17 @@ long do_mmuext_op(
+             page = mfn_to_page(mfn);
+ 
+             if ( (rc = xsm_memory_pin_page(d, page)) != 0 )
+-            {
+-                put_page_and_type(page);
+                 okay = 0;
+-                break;
+-            }
+-
+-            if ( unlikely(test_and_set_bit(_PGT_pinned,
+-                                           &page->u.inuse.type_info)) )
++            else if ( unlikely(test_and_set_bit(_PGT_pinned,
++                                                &page->u.inuse.type_info)) )
+             {
+                 MEM_LOG("Mfn %lx already pinned", mfn);
+-                put_page_and_type(page);
+                 okay = 0;
+-                break;
+             }
+ 
++            if ( unlikely(!okay) )
++                goto pin_drop;
++
+             /* A page is dirtied when its pin status is set. */
+             paging_mark_dirty(pg_owner, mfn);
+            
+@@ -3159,7 +3189,13 @@ long do_mmuext_op(
+                                                &page->u.inuse.type_info));
+                 spin_unlock(&pg_owner->page_alloc_lock);
+                 if ( drop_ref )
+-                    put_page_and_type(page);
++                {
++        pin_drop:
++                    if ( type == PGT_l1_page_table )
++                        put_page_and_type(page);
++                    else
++                        curr->arch.old_guest_table = page;
++                }
+             }
+ 
+             break;
+@@ -3552,11 +3588,28 @@ long do_mmu_update(
+     void *va;
+     unsigned long gpfn, gmfn, mfn;
+     struct page_info *page;
+-    int rc = 0, okay = 1, i = 0;
+-    unsigned int cmd, done = 0, pt_dom;
+-    struct vcpu *v = current;
++    unsigned int cmd, i = 0, done = 0, pt_dom;
++    struct vcpu *curr = current, *v = curr;
+     struct domain *d = v->domain, *pt_owner = d, *pg_owner;
+     struct domain_mmap_cache mapcache;
++    int rc = put_old_guest_table(curr), okay = 1;
++
++    if ( unlikely(rc) )
++    {
++        if ( likely(rc == -EAGAIN) )
++            rc = hypercall_create_continuation(
++                     __HYPERVISOR_mmu_update, "hihi", ureqs, count, pdone,
++                     foreigndom);
++        return rc;
++    }
++
++    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
++         likely(guest_handle_is_null(ureqs)) )
++    {
++        /* See the curr->arch.old_guest_table related
++         * hypercall_create_continuation() below. */
++        return (int)foreigndom;
++    }
+ 
+     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
+     {
+@@ -3605,7 +3658,7 @@ long do_mmu_update(
+ 
+     for ( i = 0; i < count; i++ )
+     {
+-        if ( hypercall_preempt_check() )
++        if ( curr->arch.old_guest_table || hypercall_preempt_check() )
+         {
+             rc = -EAGAIN;
+             break;
+@@ -3870,9 +3923,27 @@ long do_mmu_update(
+     }
+ 
+     if ( rc == -EAGAIN )
++    {
++        ASSERT(i < count);
+         rc = hypercall_create_continuation(
+             __HYPERVISOR_mmu_update, "hihi",
+             ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
++    }
++    else if ( curr->arch.old_guest_table )
++    {
++        XEN_GUEST_HANDLE(void) null;
++
++        ASSERT(rc || i == count);
++        set_xen_guest_handle(null, NULL);
++        /*
++         * In order to have a way to communicate the final return value to
++         * our continuation, we pass this in place of "foreigndom", building
++         * on the fact that this argument isn't needed anymore.
++         */
++        rc = hypercall_create_continuation(
++                __HYPERVISOR_mmu_update, "hihi", null,
++                MMU_UPDATE_PREEMPTED, null, rc);
++    }
+ 
+     put_pg_owner(pg_owner);
+ 

Added: branches/wheezy/xen/debian/patches/CVE-2013-1952
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/wheezy/xen/debian/patches/CVE-2013-1952	Thu May  2 12:23:34 2013	(r1150)
@@ -0,0 +1,46 @@
+Description: VT-d: don't permit SVT_NO_VERIFY entries for known device types
+ Only in cases where we don't know what to do should we leave the IRTE
+ blank (suppressing all validation), but we should always log a warning
+ in those cases (as this is insecure).
+From: Jan Beulich <jbeulich at suse.com>
+Id: CVE-2013-1952 XSA-49
+---
+--- a/xen/drivers/passthrough/vtd/intremap.c
++++ b/xen/drivers/passthrough/vtd/intremap.c
+@@ -477,16 +477,15 @@ static void set_msi_source_id(struct pci
+     type = pdev_type(bus, devfn);
+     switch ( type )
+     {
++    case DEV_TYPE_PCIe_ENDPOINT:
+     case DEV_TYPE_PCIe_BRIDGE:
+     case DEV_TYPE_PCIe2PCI_BRIDGE:
+-    case DEV_TYPE_LEGACY_PCI_BRIDGE:
+-        break;
+-
+-    case DEV_TYPE_PCIe_ENDPOINT:
+         set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, PCI_BDF2(bus, devfn));
+         break;
+ 
+     case DEV_TYPE_PCI:
++    case DEV_TYPE_LEGACY_PCI_BRIDGE:
++    /* case DEV_TYPE_PCI2PCIe_BRIDGE: */
+         ret = find_upstream_bridge(&bus, &devfn, &secbus);
+         if ( ret == 0 ) /* integrated PCI device */
+         {
+@@ -498,10 +497,15 @@ static void set_msi_source_id(struct pci
+             if ( pdev_type(bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE )
+                 set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
+                             (bus << 8) | pdev->bus);
+-            else if ( pdev_type(bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
++            else
+                 set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+                             PCI_BDF2(bus, devfn));
+         }
++        else
++            dprintk(XENLOG_WARNING VTDPREFIX,
++                    "d%d: no upstream bridge for %02x:%02x.%u\n",
++                    pdev->domain->domain_id,
++                    bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+         break;
+ 
+     default:
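
The fix boils down to choosing a source validation type (SVT) for every
device type Xen knows about, and only warning-and-skipping validation
for genuinely unknown ones. A reduced decision table in C; the SVT
encodings are the VT-d spec values, and the bridged-PCI details
(bus-range SID construction, the integrated-device case) are elided:

    /* VT-d interrupt remapping Source Validation Types (spec values). */
    enum svt { SVT_NO_VERIFY = 0, SVT_VERIFY_SID_SQ = 1, SVT_VERIFY_BUS = 2 };

    enum dev_type {
        DEV_TYPE_PCIe_ENDPOINT, DEV_TYPE_PCIe_BRIDGE,
        DEV_TYPE_PCIe2PCI_BRIDGE, DEV_TYPE_LEGACY_PCI_BRIDGE,
        DEV_TYPE_PCI, DEV_TYPE_UNKNOWN,
    };

    /* Which validation the patched set_msi_source_id() asks for.
     * `bridge` is the upstream bridge's type, or DEV_TYPE_UNKNOWN when
     * find_upstream_bridge() failed. */
    static enum svt source_validation(enum dev_type t, enum dev_type bridge)
    {
        switch ( t )
        {
        case DEV_TYPE_PCIe_ENDPOINT:
        case DEV_TYPE_PCIe_BRIDGE:
        case DEV_TYPE_PCIe2PCI_BRIDGE:
            return SVT_VERIFY_SID_SQ;      /* match the requester's SID */

        case DEV_TYPE_PCI:
        case DEV_TYPE_LEGACY_PCI_BRIDGE:
            /* Requests arrive with the bridge's source-id, so verify
             * against the bridge rather than the device itself. */
            if ( bridge == DEV_TYPE_PCIe2PCI_BRIDGE )
                return SVT_VERIFY_BUS;     /* bus-range check */
            if ( bridge != DEV_TYPE_UNKNOWN )
                return SVT_VERIFY_SID_SQ;
            /* no usable bridge: fall through, warn, stay unverified */

        default:
            return SVT_NO_VERIFY;          /* logged as insecure */
        }
    }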

Modified: branches/wheezy/xen/debian/patches/series
==============================================================================
--- branches/wheezy/xen/debian/patches/series	Wed Apr 24 12:58:35 2013	(r1149)
+++ branches/wheezy/xen/debian/patches/series	Thu May  2 12:23:34 2013	(r1150)
@@ -12,8 +12,16 @@
 CVE-2013-0153-3
 CVE-2013-0153-4
 CVE-2013-1917
+CVE-2013-1918-1
+CVE-2013-1918-2
+CVE-2013-1918-3
+CVE-2013-1918-4
+CVE-2013-1918-5
+CVE-2013-1918-6
+CVE-2013-1918-7
 CVE-2013-1919
 CVE-2013-1920
+CVE-2013-1952
 CVE-2013-1964
 
 upstream-23001:9eb9948904cd


