[xen/f17] 6 security fixes CVE-2012-5510 CVE-2012-5511 CVE-2012-5512 CVE-2012-5513 CVE-2012-5514 CVE-2012-5515

myoung myoung at fedoraproject.org
Tue Dec 4 21:07:40 UTC 2012


commit eda6b648bfa5749475ee154ba88791bfbb3cd5a5
Author: Michael Young <m.a.young at durham.ac.uk>
Date:   Tue Dec 4 21:05:24 2012 +0000

    6 security fixes CVE-2012-5510 CVE-2012-5511 CVE-2012-5512 CVE-2012-5513
        CVE-2012-5514 CVE-2012-5515

 xen.spec        |   26 ++++++++-
 xsa26-4.1.patch |  107 +++++++++++++++++++++++++++++++++++
 xsa27-4.1.patch |  168 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 xsa28-4.1.patch |   36 ++++++++++++
 xsa29-4.1.patch |   49 ++++++++++++++++
 xsa30-4.1.patch |   57 +++++++++++++++++++
 xsa31-4.1.patch |   50 ++++++++++++++++
 7 files changed, 492 insertions(+), 1 deletions(-)
---
diff --git a/xen.spec b/xen.spec
index 2b3b9ca..cd2e1a9 100644
--- a/xen.spec
+++ b/xen.spec
@@ -20,7 +20,7 @@
 Summary: Xen is a virtual machine monitor
 Name:    xen
 Version: 4.1.3
-Release: 6%{?dist}
+Release: 7%{?dist}
 Group:   Development/Libraries
 License: GPLv2+ and LGPLv2+ and BSD
 URL:     http://xen.org/
@@ -84,6 +84,12 @@ Patch68: xsa21.patch
 Patch69: xsa22-4.1.patch
 Patch70: xsa23-4.0-4.1.patch
 Patch71: xsa24.patch
+Patch72: xsa26-4.1.patch
+Patch73: xsa27-4.1.patch
+Patch74: xsa28-4.1.patch
+Patch75: xsa29-4.1.patch
+Patch76: xsa30-4.1.patch
+Patch77: xsa31-4.1.patch
 
 Patch100: xen-configure-xend.patch
 
@@ -253,6 +259,12 @@ manage Xen virtual machines.
 %patch69 -p1
 %patch70 -p1
 %patch71 -p1
+%patch72 -p1
+%patch73 -p1
+%patch74 -p1
+%patch75 -p1
+%patch76 -p1
+%patch77 -p1
 
 %patch100 -p1
 
@@ -711,6 +723,18 @@ rm -rf %{buildroot}
 %endif
 
 %changelog
+* Tue Dec 04 2012 Michael Young <m.a.young at durham.ac.uk> - 4.1.3-7
+- 6 security fixes
+  A guest can cause xen to crash [XSA-26, CVE-2012-5510] (#883082)
+  An HVM guest can cause xen to run slowly or crash [XSA-27, CVE-2012-5511]
+    (#883084)
+  An HVM guest can cause xen to crash or leak information [XSA-28,
+    CVE-2012-5512] (#883085)
+  A PV guest can cause xen to crash and might be able to escalate
+    privileges [XSA-29, CVE-2012-5513] (#883088)
+  An HVM guest can cause xen to hang [XSA-30, CVE-2012-5514] (#883091)
+  A guest can cause xen to hang [XSA-31, CVE-2012-5515] (#883092)
+
 * Tue Nov 13 2012 Michael Young <m.a.young at durham.ac.uk> - 4.1.3-6
 - 5 security fixes
   A guest can block a cpu by setting a bad VCPU deadline [XSA 20,
diff --git a/xsa26-4.1.patch b/xsa26-4.1.patch
new file mode 100644
index 0000000..e8b8e7d
--- /dev/null
+++ b/xsa26-4.1.patch
@@ -0,0 +1,107 @@
+gnttab: fix releasing of memory upon switches between versions
+
+gnttab_unpopulate_status_frames() incompletely freed the pages
+previously used as status frames: they did not get removed from
+the domain's xenpage_list, causing subsequent list corruption
+when those pages were allocated again for the same or another purpose.
+
+Similarly, grant_table_create() and gnttab_grow_table() both improperly
+clean up in the event of an error - pages already shared with the guest
+can't be freed by just passing them to free_xenheap_page(). Fix this by
+sharing the pages only after all allocations succeeded.
+
+This is CVE-2012-5510 / XSA-26.
+
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Acked-by: Ian Campbell <ian.campbell at citrix.com>
+
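+As an aside for reviewers, the idiom this fix adopts - complete every
+fallible allocation before any page is shared with the guest - can be
+sketched in plain C. This is an illustration only, not Xen code; the
+function and variable names below are hypothetical stand-ins:
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    #define MAX_FRAMES 8
+
+    static void *frames[MAX_FRAMES];
+    static unsigned int nr_published;
+
+    /* Stand-in for gnttab_create_shared_page(): once a frame is
+     * published the guest can see it, so it can no longer simply be
+     * handed back to free(). */
+    static void publish_frame(unsigned int i)
+    {
+        nr_published = i + 1;
+    }
+
+    /* Grow to req frames: allocate everything first, publish only
+     * after every allocation has succeeded. */
+    static int grow_table(unsigned int req)
+    {
+        unsigned int i;
+
+        for ( i = nr_published; i < req; i++ )
+            if ( (frames[i] = calloc(1, 4096)) == NULL )
+                goto undo;   /* nothing published yet: safe to free */
+
+        for ( i = nr_published; i < req; i++ )
+            publish_frame(i);
+        return 0;
+
+     undo:
+        while ( i-- > nr_published )
+        {
+            free(frames[i]);
+            frames[i] = NULL;
+        }
+        return -1;
+    }
+
+    int main(void)
+    {
+        return grow_table(4) ? EXIT_FAILURE : EXIT_SUCCESS;
+    }
+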
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 6c0aa6f..a180aef 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1126,12 +1126,13 @@ fault:
+ }
+ 
+ static int
+-gnttab_populate_status_frames(struct domain *d, struct grant_table *gt)
++gnttab_populate_status_frames(struct domain *d, struct grant_table *gt,
++                              unsigned int req_nr_frames)
+ {
+     unsigned i;
+     unsigned req_status_frames;
+ 
+-    req_status_frames = grant_to_status_frames(gt->nr_grant_frames);
++    req_status_frames = grant_to_status_frames(req_nr_frames);
+     for ( i = nr_status_frames(gt); i < req_status_frames; i++ )
+     {
+         if ( (gt->status[i] = alloc_xenheap_page()) == NULL )
+@@ -1162,7 +1163,12 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
+ 
+     for ( i = 0; i < nr_status_frames(gt); i++ )
+     {
+-        page_set_owner(virt_to_page(gt->status[i]), dom_xen);
++        struct page_info *pg = virt_to_page(gt->status[i]);
++
++        BUG_ON(page_get_owner(pg) != d);
++        if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
++            put_page(pg);
++        BUG_ON(pg->count_info & ~PGC_xen_heap);
+         free_xenheap_page(gt->status[i]);
+         gt->status[i] = NULL;
+     }
+@@ -1200,19 +1206,18 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
+         clear_page(gt->shared_raw[i]);
+     }
+ 
+-    /* Share the new shared frames with the recipient domain */
+-    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+-        gnttab_create_shared_page(d, gt, i);
+-
+-    gt->nr_grant_frames = req_nr_frames;
+-
+     /* Status pages - version 2 */
+     if (gt->gt_version > 1)
+     {
+-        if ( gnttab_populate_status_frames(d, gt) )
++        if ( gnttab_populate_status_frames(d, gt, req_nr_frames) )
+             goto shared_alloc_failed;
+     }
+ 
++    /* Share the new shared frames with the recipient domain */
++    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
++        gnttab_create_shared_page(d, gt, i);
++    gt->nr_grant_frames = req_nr_frames;
++
+     return 1;
+ 
+ shared_alloc_failed:
+@@ -2134,7 +2139,7 @@ gnttab_set_version(XEN_GUEST_HANDLE(gnttab_set_version_t uop))
+ 
+     if ( op.version == 2 && gt->gt_version < 2 )
+     {
+-        res = gnttab_populate_status_frames(d, gt);
++        res = gnttab_populate_status_frames(d, gt, nr_grant_frames(gt));
+         if ( res < 0)
+             goto out_unlock;
+     }
+@@ -2449,9 +2454,6 @@ grant_table_create(
+         clear_page(t->shared_raw[i]);
+     }
+     
+-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+-        gnttab_create_shared_page(d, t, i);
+-
+     /* Status pages for grant table - for version 2 */
+     t->status = xmalloc_array(grant_status_t *,
+                               grant_to_status_frames(max_nr_grant_frames));
+@@ -2459,6 +2461,10 @@ grant_table_create(
+         goto no_mem_4;
+     memset(t->status, 0,
+            grant_to_status_frames(max_nr_grant_frames) * sizeof(t->status[0]));
++
++    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
++        gnttab_create_shared_page(d, t, i);
++
+     t->nr_status_frames = 0;
+ 
+     /* Okay, install the structure. */
diff --git a/xsa27-4.1.patch b/xsa27-4.1.patch
new file mode 100644
index 0000000..f0764cb
--- /dev/null
+++ b/xsa27-4.1.patch
@@ -0,0 +1,168 @@
+hvm: Limit the size of large HVM op batches
+
+Doing large p2m updates for HVMOP_track_dirty_vram without preemption
+ties up the physical processor. Integrating preemption into the p2m
+updates is hard, so simply limit the operation to 1GB, which is
+sufficient for a 15000 * 15000 * 32bpp framebuffer.
+
+For HVMOP_modified_memory and HVMOP_set_mem_type, add the necessary
+machinery to handle preemption.
+
+This is CVE-2012-5511 / XSA-27.
+
+Signed-off-by: Tim Deegan <tim at xen.org>
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Acked-by: Ian Jackson <ian.jackson at eu.citrix.com>
+
+x86/paging: Don't allocate user-controlled amounts of stack memory.
+
+This is XSA-27 / CVE-2012-5511.
+
+Signed-off-by: Tim Deegan <tim at xen.org>
+Acked-by: Jan Beulich <jbeulich at suse.com>
+v2: Provide definition of GB to fix x86-32 compile.
+
+Signed-off-by: Jan Beulich <JBeulich at suse.com>
+Acked-by: Ian Jackson <ian.jackson at eu.citrix.com>
+
+
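+For reference, the continuation pattern the patch introduces can be
+sketched as a small standalone C program. This is illustrative only;
+preempt_wanted() and struct op are made-up stand-ins for
+hypercall_preempt_check() and the guest-copied argument block:
+
+    #include <errno.h>
+    #include <stdio.h>
+
+    struct op { unsigned long first; unsigned long nr; };
+
+    static unsigned long work_done;
+
+    /* Stand-in for hypercall_preempt_check(): pretend the scheduler
+     * wants the CPU back every four items. */
+    static int preempt_wanted(void)
+    {
+        return (++work_done % 4) == 0;
+    }
+
+    static int process_batch(struct op *a)
+    {
+        while ( a->nr > 0 )
+        {
+            /* ... one page's worth of work on a->first ... */
+            a->first++;
+            a->nr--;
+
+            /* Check for continuation if it's not the last iteration. */
+            if ( a->nr > 0 && preempt_wanted() )
+                return -EAGAIN;  /* caller re-issues with updated args */
+        }
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct op a = { .first = 0, .nr = 10 };
+        int rc;
+
+        while ( (rc = process_batch(&a)) == -EAGAIN )
+            printf("continuation at pfn %lu, %lu left\n", a.first, a.nr);
+        return rc;
+    }
+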
+diff -r 5639047d6c9f xen/arch/x86/hvm/hvm.c
+--- a/xen/arch/x86/hvm/hvm.c	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/hvm/hvm.c	Mon Nov 19 16:00:33 2012 +0000
+@@ -3471,6 +3471,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         if ( !is_hvm_domain(d) )
+             goto param_fail2;
+ 
++        if ( a.nr > GB(1) >> PAGE_SHIFT )
++            goto param_fail2;
++
+         rc = xsm_hvm_param(d, op);
+         if ( rc )
+             goto param_fail2;
+@@ -3498,7 +3501,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         struct xen_hvm_modified_memory a;
+         struct domain *d;
+         struct p2m_domain *p2m;
+-        unsigned long pfn;
+ 
+         if ( copy_from_guest(&a, arg, 1) )
+             return -EFAULT;
+@@ -3526,8 +3528,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+             goto param_fail3;
+ 
+         p2m = p2m_get_hostp2m(d);
+-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++        while ( a.nr > 0 )
+         {
++            unsigned long pfn = a.first_pfn;
+             p2m_type_t t;
+             mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
+             if ( p2m_is_paging(t) )
+@@ -3548,6 +3551,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+                 /* don't take a long time and don't die either */
+                 sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+             }
++
++            a.first_pfn++;
++            a.nr--;
++
++            /* Check for continuation if it's not the last iteration */
++            if ( a.nr > 0 && hypercall_preempt_check() )
++            {
++                if ( copy_to_guest(arg, &a, 1) )
++                    rc = -EFAULT;
++                else
++                    rc = -EAGAIN;
++                break;
++            }
+         }
+ 
+     param_fail3:
+@@ -3595,7 +3611,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+         struct xen_hvm_set_mem_type a;
+         struct domain *d;
+         struct p2m_domain *p2m;
+-        unsigned long pfn;
+         
+         /* Interface types to internal p2m types */
+         p2m_type_t memtype[] = {
+@@ -3625,8 +3640,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+             goto param_fail4;
+ 
+         p2m = p2m_get_hostp2m(d);
+-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++        while ( a.nr > 0 )
+         {
++            unsigned long pfn = a.first_pfn;
+             p2m_type_t t;
+             p2m_type_t nt;
+             mfn_t mfn;
+@@ -3662,6 +3678,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+                     goto param_fail4;
+                 }
+             }
++
++            a.first_pfn++;
++            a.nr--;
++
++            /* Check for continuation if it's not the last iteration */
++            if ( a.nr > 0 && hypercall_preempt_check() )
++            {
++                if ( copy_to_guest(arg, &a, 1) )
++                    rc = -EFAULT;
++                else
++                    rc = -EAGAIN;
++                goto param_fail4;
++            }
+         }
+ 
+         rc = 0;
+diff -r 5639047d6c9f xen/arch/x86/mm/paging.c
+--- a/xen/arch/x86/mm/paging.c	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/mm/paging.c	Mon Nov 19 16:00:33 2012 +0000
+@@ -529,13 +529,18 @@ int paging_log_dirty_range(struct domain
+ 
+     if ( !d->arch.paging.log_dirty.fault_count &&
+          !d->arch.paging.log_dirty.dirty_count ) {
+-        int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
+-        unsigned long zeroes[size];
+-        memset(zeroes, 0x00, size * BYTES_PER_LONG);
++        static uint8_t zeroes[PAGE_SIZE];
++        int off, size;
++
++        size = ((nr + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof (long);
+         rv = 0;
+-        if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
+-                                  size * BYTES_PER_LONG) != 0 )
+-            rv = -EFAULT;
++        for ( off = 0; !rv && off < size; off += sizeof zeroes )
++        {
++            int todo = min(size - off, (int) PAGE_SIZE);
++            if ( copy_to_guest_offset(dirty_bitmap, off, zeroes, todo) )
++                rv = -EFAULT;
++            off += todo;
++        }
+         goto out;
+     }
+     d->arch.paging.log_dirty.fault_count = 0;
+diff -r 5639047d6c9f xen/include/asm-x86/config.h
+--- a/xen/include/asm-x86/config.h	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/include/asm-x86/config.h	Mon Nov 19 16:00:33 2012 +0000
+@@ -108,6 +108,9 @@ extern unsigned int trampoline_xen_phys_
+ extern unsigned char trampoline_cpu_started;
+ extern char wakeup_start[];
+ extern unsigned int video_mode, video_flags;
++
++#define GB(_gb) (_gb ## UL << 30)
++
+ #endif
+ 
+ #define asmlinkage
+@@ -123,7 +126,6 @@ extern unsigned int video_mode, video_fl
+ #define PML4_ADDR(_slot)                             \
+     ((((_slot ## UL) >> 8) * 0xffff000000000000UL) | \
+      (_slot ## UL << PML4_ENTRY_BITS))
+-#define GB(_gb) (_gb ## UL << 30)
+ #else
+ #define PML4_ENTRY_BYTES (1 << PML4_ENTRY_BITS)
+ #define PML4_ADDR(_slot)                             \
diff --git a/xsa28-4.1.patch b/xsa28-4.1.patch
new file mode 100644
index 0000000..fe4638e
--- /dev/null
+++ b/xsa28-4.1.patch
@@ -0,0 +1,36 @@
+x86/HVM: range check xen_hvm_set_mem_access.hvmmem_access before use
+
+Otherwise an out-of-bounds array access can happen when a change to the
+default access is requested, which - if it doesn't crash Xen -
+would subsequently allow reading arbitrary memory through
+HVMOP_get_mem_access (again, unless that operation crashes Xen).
+
+This is XSA-28 / CVE-2012-5512.
+
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Acked-by: Tim Deegan <tim at xen.org>
+Acked-by: Ian Campbell <ian.campbell at citrix.com>
+
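+The shape of the fix - validate a caller-supplied array index before
+anything consumes it - in a minimal, purely illustrative C sketch
+(set_mem_access() and the table below are hypothetical):
+
+    #include <stdio.h>
+
+    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+    static const char *memaccess[] =
+        { "n", "r", "w", "rw", "x", "rx", "wx", "rwx" };
+
+    static int set_mem_access(unsigned int idx)
+    {
+        /* Range check first; on the vulnerable path, idx was consumed
+         * before the existing check was ever reached. */
+        if ( idx >= ARRAY_SIZE(memaccess) )
+            return -1;
+        printf("access -> %s\n", memaccess[idx]);
+        return 0;
+    }
+
+    int main(void)
+    {
+        set_mem_access(3);                       /* ok */
+        return set_mem_access(42) == -1 ? 0 : 1; /* must be rejected */
+    }
+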
+diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
+index 66cf805..08b6418 100644
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3699,7 +3699,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+             return rc;
+ 
+         rc = -EINVAL;
+-        if ( !is_hvm_domain(d) )
++        if ( !is_hvm_domain(d) || a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+             goto param_fail5;
+ 
+         p2m = p2m_get_hostp2m(d);
+@@ -3719,9 +3719,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
+             goto param_fail5;
+             
+-        if ( a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+-            goto param_fail5;
+-
+         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+         {
+             p2m_type_t t;
diff --git a/xsa29-4.1.patch b/xsa29-4.1.patch
new file mode 100644
index 0000000..f8f6e38
--- /dev/null
+++ b/xsa29-4.1.patch
@@ -0,0 +1,49 @@
+xen: add missing guest address range checks to XENMEM_exchange handlers
+
+Ever since its introduction (3.0.3, iirc) the handler for this has been
+using non-range-checking guest memory accessors (i.e.
+the ones prefixed with two underscores) without first range-checking
+the accessed space (via guest_handle_okay()), allowing
+a guest to access and overwrite hypervisor memory.
+
+This is XSA-29 / CVE-2012-5513.
+
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Acked-by: Ian Campbell <ian.campbell at citrix.com>
+Acked-by: Ian Jackson <ian.jackson at eu.citrix.com>
+
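+The underlying rule - range-check a guest-supplied (pointer, count)
+pair before any unchecked accessor touches it - in a self-contained C
+sketch (handle_okay() and guest_window are invented analogues, not the
+Xen interfaces):
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    static uint64_t guest_window[512];  /* all the caller may touch */
+
+    /* Analogue of guest_handle_okay(): the double-underscore accessors
+     * skip this check, so it has to happen before they are used. */
+    static int handle_okay(size_t off, size_t nr)
+    {
+        size_t max = sizeof(guest_window) / sizeof(guest_window[0]);
+        return nr <= max && off <= max - nr;
+    }
+
+    static int exchange(size_t off, size_t nr)
+    {
+        uint64_t extent = 0x1234;
+
+        if ( !handle_okay(off, nr) )
+            return -1;                     /* -EFAULT in the real code */
+        for ( size_t i = 0; i < nr; i++ )  /* unchecked copy now safe */
+            memcpy(&guest_window[off + i], &extent, sizeof(extent));
+        return 0;
+    }
+
+    int main(void)
+    {
+        printf("%d %d\n", exchange(0, 4), exchange(1UL << 40, 1));
+        return 0;
+    }
+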
+diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
+index 2402984..1d877fc 100644
+--- a/xen/common/compat/memory.c
++++ b/xen/common/compat/memory.c
+@@ -114,6 +114,12 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE(void) compat)
+                   (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
+                 return -EINVAL;
+ 
++            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
++                                     cmp.xchg.in.nr_extents) ||
++                 !compat_handle_okay(cmp.xchg.out.extent_start,
++                                     cmp.xchg.out.nr_extents) )
++                return -EFAULT;
++
+             start_extent = cmp.xchg.nr_exchanged;
+             end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
+                          (((1U << ABS(order_delta)) + 1) *
+diff --git a/xen/common/memory.c b/xen/common/memory.c
+index 4e7c234..59379d3 100644
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -289,6 +289,13 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+         goto fail_early;
+     }
+ 
++    if ( !guest_handle_okay(exch.in.extent_start, exch.in.nr_extents) ||
++         !guest_handle_okay(exch.out.extent_start, exch.out.nr_extents) )
++    {
++        rc = -EFAULT;
++        goto fail_early;
++    }
++
+     /* Only privileged guests can allocate multi-page contiguous extents. */
+     if ( !multipage_allocation_permitted(current->domain,
+                                          exch.in.extent_order) ||
diff --git a/xsa30-4.1.patch b/xsa30-4.1.patch
new file mode 100644
index 0000000..1b6cd47
--- /dev/null
+++ b/xsa30-4.1.patch
@@ -0,0 +1,57 @@
+xen: fix error handling of guest_physmap_mark_populate_on_demand()
+
+The only user of the "out" label bypasses a necessary unlock, thus
+enabling the caller to lock up Xen.
+
+Also, the function was never meant to be called by a guest for itself,
+so rather than inspecting the code paths in depth for other potential
+problems this might cause, and adjusting e.g. the non-guest printk()
+in the above error path, just disallow guest access to it.
+
+Finally, the printk() (given its potential for spamming the log, all
+the more so as it's not using XENLOG_GUEST) is being converted to
+P2M_DEBUG(), as debugging is what it apparently was added for in the
+first place.
+
+This is XSA-30 / CVE-2012-5514.
+
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Acked-by: Ian Campbell <ian.campbell at citrix.com>
+Acked-by: George Dunlap <george.dunlap at eu.citrix.com>
+Acked-by: Ian Jackson <ian.jackson at eu.citrix.com>
+
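+The control-flow point of the fix - an error exit taken while a lock is
+held must still pass through the unlock - in a minimal pthread-based C
+sketch (mark_pod() and the failure flag are hypothetical):
+
+    #include <pthread.h>
+    #include <stdio.h>
+
+    static pthread_mutex_t p2m_lock = PTHREAD_MUTEX_INITIALIZER;
+
+    static int mark_pod(int fail)
+    {
+        int rc = 0;
+
+        pthread_mutex_lock(&p2m_lock);
+
+        if ( fail )
+        {
+            rc = -1;    /* -EBUSY in the real code */
+            goto out;   /* before the fix, this jumped past the unlock */
+        }
+
+        /* ... normal work under the lock ... */
+
+     out:
+        pthread_mutex_unlock(&p2m_lock);  /* reached on both paths */
+        return rc;
+    }
+
+    int main(void)
+    {
+        printf("%d %d\n", mark_pod(0), mark_pod(1));
+        return mark_pod(1) == -1 ? 0 : 1;
+    }
+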
+diff -r 5639047d6c9f xen/arch/x86/mm/p2m.c
+--- a/xen/arch/x86/mm/p2m.c	Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/mm/p2m.c	Thu Nov 22 17:07:37 2012 +0000
+@@ -2412,6 +2412,9 @@ guest_physmap_mark_populate_on_demand(st
+     int pod_count = 0;
+     int rc = 0;
+ 
++    if ( !IS_PRIV_FOR(current->domain, d) )
++        return -EPERM;
++
+     if ( !paging_mode_translate(d) )
+         return -EINVAL;
+ 
+@@ -2430,8 +2433,7 @@ guest_physmap_mark_populate_on_demand(st
+         omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+         if ( p2m_is_ram(ot) )
+         {
+-            printk("%s: gfn_to_mfn returned type %d!\n",
+-                   __func__, ot);
++            P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
+             rc = -EBUSY;
+             goto out;
+         }
+@@ -2453,10 +2455,10 @@ guest_physmap_mark_populate_on_demand(st
+         BUG_ON(p2m->pod.entry_count < 0);
+     }
+ 
++out:
+     audit_p2m(p2m, 1);
+     p2m_unlock(p2m);
+ 
+-out:
+     return rc;
+ }
+ 
diff --git a/xsa31-4.1.patch b/xsa31-4.1.patch
new file mode 100644
index 0000000..1f3d929
--- /dev/null
+++ b/xsa31-4.1.patch
@@ -0,0 +1,50 @@
+memop: limit guest specified extent order
+
+Allowing unbounded order values here causes almost unbounded loops
+and/or partially completed requests, particularly in PoD code.
+
+The added range checks in populate_physmap(), decrease_reservation(),
+and the "in" one in memory_exchange() architecturally all could use
+PADDR_BITS - PAGE_SHIFT, and are being artificially constrained to
+MAX_ORDER.
+
+This is XSA-31 / CVE-2012-5515.
+
+Signed-off-by: Jan Beulich <jbeulich at suse.com>
+Acked-by: Tim Deegan <tim at xen.org>
+Acked-by: Ian Jackson <ian.jackson at eu.citrix.com>
+
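+A standalone C sketch of the check being added (count_pages() is a
+made-up helper and MAX_ORDER's value here is only illustrative): a
+guest-chosen extent order feeds both shift amounts and loop bounds, so
+it must be capped before any use:
+
+    #include <stdio.h>
+
+    #define MAX_ORDER 20   /* illustrative cap */
+
+    static long count_pages(unsigned long nr_extents,
+                            unsigned int extent_order)
+    {
+        if ( extent_order > MAX_ORDER )
+            return -1;     /* reject before any shift or loop uses it */
+        /* Same overflow test as the memory_exchange() hunk below: */
+        if ( (~0UL >> extent_order) < nr_extents )
+            return -1;
+        return (long)(nr_extents << extent_order);
+    }
+
+    int main(void)
+    {
+        printf("%ld\n", count_pages(4, 9));     /* 4 extents of 2^9 pages */
+        printf("%ld\n", count_pages(4, 4096));  /* rejected: order too big */
+        return 0;
+    }
+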
+diff --git a/xen/common/memory.c b/xen/common/memory.c
+index 4e7c234..9b9fb18 100644
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -117,7 +117,8 @@ static void populate_physmap(struct memop_args *a)
+ 
+         if ( a->memflags & MEMF_populate_on_demand )
+         {
+-            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
++            if ( a->extent_order > MAX_ORDER ||
++                 guest_physmap_mark_populate_on_demand(d, gpfn,
+                                                        a->extent_order) < 0 )
+                 goto out;
+         }
+@@ -216,7 +217,8 @@ static void decrease_reservation(struct memop_args *a)
+     xen_pfn_t gmfn;
+ 
+     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
+-                                     a->nr_extents-1) )
++                                     a->nr_extents-1) ||
++         a->extent_order > MAX_ORDER )
+         return;
+ 
+     for ( i = a->nr_done; i < a->nr_extents; i++ )
+@@ -278,6 +280,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+     if ( (exch.nr_exchanged > exch.in.nr_extents) ||
+          /* Input and output domain identifiers match? */
+          (exch.in.domid != exch.out.domid) ||
++         /* Extent orders are sensible? */
++         (exch.in.extent_order > MAX_ORDER) ||
++         (exch.out.extent_order > MAX_ORDER) ||
+          /* Sizes of input and output lists do not overflow a long? */
+          ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
+          ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||

