To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: run-time callers of map_pages_to_xen() must check for errors
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 10 Mar 2011 22:20:17 +0000
Delivery-date: Thu, 10 Mar 2011 14:28:50 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1299687336 0
# Node ID 5f28dcea13555f7ab948c9cb95de3e79e0fbfc4b
# Parent  1eeccafe904216589da600cd3e890021fbb3f951
x86: run-time callers of map_pages_to_xen() must check for errors

Again, (out-of-memory) errors must not cause hypervisor crashes, and
hence ought to be propagated.

This also adjusts the cache attribute changing loop in
get_page_from_l1e() so that it no longer goes through an unnecessary
iteration. While this could be considered mere cleanup, it is actually
a prerequisite for the error recovery path that now becomes necessary.

Also make a few functions static, easing the check for potential
callers needing adjustment.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---

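[Note after the "---", not part of the commit message.] For readers skimming
the diff, the common pattern in every touched run-time caller is that the
return value of map_pages_to_xen() is now captured, the first failure
short-circuits any further mapping calls, and that failure is handed back
instead of being dropped. A condensed, non-literal sketch of the reworked
update_xen_mappings() (x86-64 path only, with the empty x86-32 stub omitted;
see the first hunk in xen/arch/x86/mm.c for the committed code):

static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
{
    int err = 0;
    /* Does this MFN also back part of the Xen image alias mapping? */
    bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
         mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
    unsigned long xen_va =
        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);

    /* Each step runs only if the previous one succeeded; the first error
     * is propagated to the caller rather than being ignored. */
    if ( unlikely(alias) && cacheattr )
        err = map_pages_to_xen(xen_va, mfn, 1, 0);
    if ( !err )
        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                               PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
    if ( unlikely(alias) && !cacheattr && !err )
        err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);

    return err;
}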

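Likewise, the reworked loop in get_page_from_l1e() is easier to follow
outside the diff: the new cache attribute is first installed in count_info
with a cmpxchg() loop, and if update_xen_mappings() then fails, the previous
attribute (still held in the last value observed by that loop) is restored
with a second loop before the references are dropped and failure is
reported. A condensed sketch of that set-then-roll-back sequence, with the
put_page()/put_page_type() and MEM_LOG() bookkeeping elided:

        /* Install the new cache attribute in count_info. */
        do {
            x  = y;
            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
        } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );

        if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
        {
            /* Roll back: y holds the pre-exchange count_info, so its
             * attribute bits are the value to restore (kept unshifted). */
            cacheattr = y & PGC_cacheattr_mask;
            do {
                x  = y;
                nx = (x & ~PGC_cacheattr_mask) | cacheattr;
            } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );

            /* ... drop the references taken earlier and return failure ... */
            return 0;
        }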
diff -r 1eeccafe9042 -r 5f28dcea1355 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Mar 09 16:14:59 2011 +0000
+++ b/xen/arch/x86/mm.c Wed Mar 09 16:15:36 2011 +0000
@@ -767,8 +767,9 @@
     return (page_get_owner(page) == dom_io);
 }
 
-static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
 {
+    int err = 0;
 #ifdef __x86_64__
     bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
          mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
@@ -776,12 +777,14 @@
         XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
 
     if ( unlikely(alias) && cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, 0);
-    map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+        err = map_pages_to_xen(xen_va, mfn, 1, 0);
+    if ( !err )
+        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                      PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
-    if ( unlikely(alias) && !cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+    if ( unlikely(alias) && !cacheattr && !err )
+        err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
 #endif
+    return err;
 }
 
 int
@@ -793,6 +796,7 @@
     uint32_t l1f = l1e_get_flags(l1e);
     struct vcpu *curr = current;
     struct domain *real_pg_owner;
+    bool_t write;
 
     if ( !(l1f & _PAGE_PRESENT) )
         return 1;
@@ -849,9 +853,9 @@
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    if ( (l1f & _PAGE_RW) &&
-         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
-         !get_page_type(page, PGT_writable_page) )
+    write = (l1f & _PAGE_RW) &&
+            ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner));
+    if ( write && !get_page_type(page, PGT_writable_page) )
         goto could_not_pin;
 
     if ( pte_flags_to_cacheattr(l1f) !=
@@ -862,22 +866,36 @@
 
         if ( is_xen_heap_page(page) )
         {
-            if ( (l1f & _PAGE_RW) &&
-                 ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
+            if ( write )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
             return 0;
         }
 
-        while ( ((y & PGC_cacheattr_mask) >> PGC_cacheattr_base) != cacheattr )
-        {
+        do {
             x  = y;
             nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
-            y  = cmpxchg(&page->count_info, x, nx);
+        } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+        if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+        {
+            cacheattr = y & PGC_cacheattr_mask;
+            do {
+                x  = y;
+                nx = (x & ~PGC_cacheattr_mask) | cacheattr;
+            } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+            if ( write )
+                put_page_type(page);
+            put_page(page);
+
+            MEM_LOG("Error updating mappings for mfn %lx (pfn %lx,"
+                    " from L1 entry %" PRIpte ") for %d",
+                    mfn, get_gpfn_from_mfn(mfn),
+                    l1e_get_intpte(l1e), l1e_owner->domain_id);
+            return 0;
         }
-
-        update_xen_mappings(mfn, cacheattr);
     }
 
     return 1;
@@ -2005,6 +2023,21 @@
 
 #endif
 
+static int cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr =
+        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
+
+    if ( likely(cacheattr == 0) )
+        return 0;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_page(page));
+
+    return update_xen_mappings(page_to_mfn(page), 0);
+}
+
 void put_page(struct page_info *page)
 {
     unsigned long nx, x, y = page->count_info;
@@ -2018,8 +2051,10 @@
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
     {
-        cleanup_page_cacheattr(page);
-        free_domheap_page(page);
+        if ( cleanup_page_cacheattr(page) == 0 )
+            free_domheap_page(page);
+        else
+            MEM_LOG("Leaking pfn %lx", page_to_mfn(page));
     }
 }
 
@@ -2678,21 +2713,6 @@
 
 #endif
 
-void cleanup_page_cacheattr(struct page_info *page)
-{
-    uint32_t cacheattr =
-        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
-
-    if ( likely(cacheattr == 0) )
-        return;
-
-    page->count_info &= ~PGC_cacheattr_mask;
-
-    BUG_ON(is_xen_heap_page(page));
-
-    update_xen_mappings(page_to_mfn(page), 0);
-}
-
 
 int new_guest_cr3(unsigned long mfn)
 {
diff -r 1eeccafe9042 -r 5f28dcea1355 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Wed Mar 09 16:14:59 2011 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Wed Mar 09 16:15:36 2011 +0000
@@ -439,6 +439,7 @@
     l3_pgentry_t *l3_ro_mpt = NULL;
     l2_pgentry_t *l2_ro_mpt = NULL;
     struct page_info *l1_pg;
+    int err = 0;
 
     smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1));
 
@@ -489,24 +490,25 @@
         memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
 
         l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-        map_pages_to_xen(rwva,
-                    page_to_mfn(l1_pg),
-                    1UL << PAGETABLE_ORDER,
-                    PAGE_HYPERVISOR);
+        err = map_pages_to_xen(rwva, page_to_mfn(l1_pg),
+                               1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            break;
         memset((void *)rwva, 0x55, 1UL << L2_PAGETABLE_SHIFT);
         /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
         l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
     }
 #undef CNT
 #undef MFN
-    return 0;
+    return err;
 }
 
 /*
  * Allocate and map the machine-to-phys table.
  * The L3 for RO/RWRW MPT and the L2 for compatible MPT should be setup already
  */
-int setup_m2p_table(struct mem_hotadd_info *info)
+static int setup_m2p_table(struct mem_hotadd_info *info)
 {
     unsigned long i, va, smap, emap;
     unsigned int n, memflags;
@@ -560,11 +562,13 @@
         else
         {
             l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-            map_pages_to_xen(
+            ret = map_pages_to_xen(
                         RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
                         page_to_mfn(l1_pg),
                         1UL << PAGETABLE_ORDER,
                         PAGE_HYPERVISOR);
+            if ( ret )
+                goto error;
             memset((void *)(RDWR_MPT_VIRT_START + i * sizeof(unsigned long)),
                    0x55, 1UL << L2_PAGETABLE_SHIFT);
 
@@ -908,13 +912,13 @@
     flush_tlb_all();
 }
 
-/* Should we be paraniod failure in map_pages_to_xen? */
 static int setup_frametable_chunk(void *start, void *end,
                                   struct mem_hotadd_info *info)
 {
     unsigned long s = (unsigned long)start;
     unsigned long e = (unsigned long)end;
     unsigned long mfn;
+    int err;
 
     ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
     ASSERT(!(e & ((1 << L2_PAGETABLE_SHIFT) - 1)));
@@ -922,14 +926,17 @@
     for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT))
     {
         mfn = alloc_hotadd_mfn(info);
-        map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+        err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            return err;
     }
     memset(start, -1, s - (unsigned long)start);
 
     return 0;
 }
 
-int extend_frame_table(struct mem_hotadd_info *info)
+static int extend_frame_table(struct mem_hotadd_info *info)
 {
     unsigned long cidx, nidx, eidx, spfn, epfn;
 
@@ -950,12 +957,16 @@
 
     while ( cidx < eidx )
     {
+        int err;
+
         nidx = find_next_bit(pdx_group_valid, eidx, cidx);
         if ( nidx >= eidx )
             nidx = eidx;
-        setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
+        err = setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
                                      pdx_to_page(nidx * PDX_GROUP_COUNT),
                                      info);
+        if ( err )
+            return err;
 
         cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
     }
diff -r 1eeccafe9042 -r 5f28dcea1355 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Mar 09 16:14:59 2011 +0000
+++ b/xen/include/asm-x86/mm.h  Wed Mar 09 16:15:36 2011 +0000
@@ -325,8 +325,6 @@
                    int preemptible);
 int _shadow_mode_refcounts(struct domain *d);
 
-void cleanup_page_cacheattr(struct page_info *page);
-
 int is_iomem_page(unsigned long mfn);
 
 void clear_superpage_mark(struct page_info *page);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
