WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
[Xen-changelog] [xen-4.1-testing] x86: run-time callers of map_pages_to_xen() must check for errors

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-4.1-testing] x86: run-time callers of map_pages_to_xen() must check for errors
From: Xen patchbot-4.1-testing <patchbot@xxxxxxx>
Date: Fri, 18 Mar 2011 21:25:11 +0000
Delivery-date: Fri, 18 Mar 2011 14:26:55 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1300197340 0
# Node ID 762155e9debda6c70861b8bd388efc53a49ff50c
# Parent  cd4d0c5dfa274331c7871e19070dc4f7fbcc0092
x86: run-time callers of map_pages_to_xen() must check for errors

Again, (out-of-memory) errors must not cause hypervisor crashes, and
hence ought to be propagated.
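
(Illustration only: the condensed shape of the update_xen_mappings() hunk
further down, with the local variable setup omitted -- "alias" and "xen_va"
are computed earlier in the real function.  Each map_pages_to_xen() call
now has its return value checked, and the first failure is handed back to
the caller instead of being dropped.)

    static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
    {
        int err = 0;

        if ( unlikely(alias) && cacheattr )
            err = map_pages_to_xen(xen_va, mfn, 1, 0);
        if ( !err )
            err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                                   PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
        if ( unlikely(alias) && !cacheattr && !err )
            err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);

        return err;    /* 0 on success, nonzero mapping error otherwise */
    }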

This also adjusts the cache attribute changing loop in
get_page_from_l1e() to not go through an unnecessary iteration. While
this could be considered mere cleanup, it is actually a requirement
for the error recovery path that now becomes necessary.
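
(Illustration: condensed from the get_page_from_l1e() hunk further down,
with the put_page_type()/put_page()/MEM_LOG() cleanup on the failure path
omitted.  The old "retry while the stored attribute differs" loop becomes
one unconditional compare-and-exchange pass that remembers the prior
count_info in y, so a failed mapping update can restore the previous
attribute and fail the operation cleanly.)

    do {
        x  = y;
        nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
    } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );

    if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
    {
        /* Roll the attribute bits back to the value saved in y. */
        cacheattr = y & PGC_cacheattr_mask;
        do {
            x  = y;
            nx = (x & ~PGC_cacheattr_mask) | cacheattr;
        } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
        /* ...then drop the references taken above and return failure. */
    }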

Also make a few functions static, easing the check for potential
callers needing adjustment.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset:   22997:5f28dcea1355
xen-unstable date:        Wed Mar 09 16:15:36 2011 +0000

x86: don't BUG() post-boot in alloc_xen_pagetable()

Instead, propagate the condition to the callers, all of which also get
adjusted to check for that situation.
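
(Illustration: the condensed post-boot path of the x86-64
alloc_xen_pagetable() hunk further down.  Allocation failure remains fatal
only while dom0 is still being constructed; afterwards NULL is returned,
virt_to_xen_l3e()/virt_to_xen_l2e() pass it on, and map_pages_to_xen()
reports it to its callers as -ENOMEM.)

    if ( !early_boot )
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        BUG_ON(!dom0 && !pg);                 /* fatal only before dom0 exists */
        return pg ? page_to_virt(pg) : NULL;  /* otherwise report the failure */
    }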

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset:   22996:1eeccafe9042
xen-unstable date:        Wed Mar 09 16:14:59 2011 +0000
---


diff -r cd4d0c5dfa27 -r 762155e9debd xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Mar 14 17:21:13 2011 +0000
+++ b/xen/arch/x86/mm.c Tue Mar 15 13:55:40 2011 +0000
@@ -767,8 +767,9 @@
     return (page_get_owner(page) == dom_io);
 }
 
-static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
 {
+    int err = 0;
 #ifdef __x86_64__
     bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
          mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
@@ -776,12 +777,14 @@
         XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
 
     if ( unlikely(alias) && cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, 0);
-    map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+        err = map_pages_to_xen(xen_va, mfn, 1, 0);
+    if ( !err )
+        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                      PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
-    if ( unlikely(alias) && !cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+    if ( unlikely(alias) && !cacheattr && !err )
+        err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
 #endif
+    return err;
 }
 
 int
@@ -793,6 +796,7 @@
     uint32_t l1f = l1e_get_flags(l1e);
     struct vcpu *curr = current;
     struct domain *real_pg_owner;
+    bool_t write;
 
     if ( !(l1f & _PAGE_PRESENT) )
         return 1;
@@ -849,9 +853,9 @@
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    if ( (l1f & _PAGE_RW) &&
-         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
-         !get_page_type(page, PGT_writable_page) )
+    write = (l1f & _PAGE_RW) &&
+            ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner));
+    if ( write && !get_page_type(page, PGT_writable_page) )
         goto could_not_pin;
 
     if ( pte_flags_to_cacheattr(l1f) !=
@@ -862,22 +866,36 @@
 
         if ( is_xen_heap_page(page) )
         {
-            if ( (l1f & _PAGE_RW) &&
-                 ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
+            if ( write )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
             return 0;
         }
 
-        while ( ((y & PGC_cacheattr_mask) >> PGC_cacheattr_base) != cacheattr )
-        {
+        do {
             x  = y;
             nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
-            y  = cmpxchg(&page->count_info, x, nx);
+        } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+        if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+        {
+            cacheattr = y & PGC_cacheattr_mask;
+            do {
+                x  = y;
+                nx = (x & ~PGC_cacheattr_mask) | cacheattr;
+            } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+            if ( write )
+                put_page_type(page);
+            put_page(page);
+
+            MEM_LOG("Error updating mappings for mfn %lx (pfn %lx,"
+                    " from L1 entry %" PRIpte ") for %d",
+                    mfn, get_gpfn_from_mfn(mfn),
+                    l1e_get_intpte(l1e), l1e_owner->domain_id);
+            return 0;
         }
-
-        update_xen_mappings(mfn, cacheattr);
     }
 
     return 1;
@@ -2005,6 +2023,21 @@
 
 #endif
 
+static int cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr =
+        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
+
+    if ( likely(cacheattr == 0) )
+        return 0;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_page(page));
+
+    return update_xen_mappings(page_to_mfn(page), 0);
+}
+
 void put_page(struct page_info *page)
 {
     unsigned long nx, x, y = page->count_info;
@@ -2018,8 +2051,10 @@
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
     {
-        cleanup_page_cacheattr(page);
-        free_domheap_page(page);
+        if ( cleanup_page_cacheattr(page) == 0 )
+            free_domheap_page(page);
+        else
+            MEM_LOG("Leaking pfn %lx", page_to_mfn(page));
     }
 }
 
@@ -2678,21 +2713,6 @@
 
 #endif
 
-void cleanup_page_cacheattr(struct page_info *page)
-{
-    uint32_t cacheattr =
-        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
-
-    if ( likely(cacheattr == 0) )
-        return;
-
-    page->count_info &= ~PGC_cacheattr_mask;
-
-    BUG_ON(is_xen_heap_page(page));
-
-    update_xen_mappings(page_to_mfn(page), 0);
-}
-
 
 int new_guest_cr3(unsigned long mfn)
 {
@@ -5141,8 +5161,11 @@
     while ( nr_mfns != 0 )
     {
 #ifdef __x86_64__
-        l3_pgentry_t *pl3e = virt_to_xen_l3e(virt);
-        l3_pgentry_t ol3e = *pl3e;
+        l3_pgentry_t ol3e, *pl3e = virt_to_xen_l3e(virt);
+
+        if ( !pl3e )
+            return -ENOMEM;
+        ol3e = *pl3e;
 
         if ( cpu_has_page1gb &&
              !(((virt >> PAGE_SHIFT) | mfn) &
@@ -5262,6 +5285,8 @@
 #endif
 
         pl2e = virt_to_xen_l2e(virt);
+        if ( !pl2e )
+            return -ENOMEM;
 
         if ( ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
              (nr_mfns >= (1<<PAGETABLE_ORDER)) &&
diff -r cd4d0c5dfa27 -r 762155e9debd xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Mon Mar 14 17:21:13 2011 +0000
+++ b/xen/arch/x86/x86_32/mm.c  Tue Mar 15 13:55:40 2011 +0000
@@ -48,7 +48,8 @@
     if ( !early_boot )
     {
         void *v = alloc_xenheap_page();
-        BUG_ON(v == NULL);
+
+        BUG_ON(!dom0 && !v);
         return v;
     }
 
diff -r cd4d0c5dfa27 -r 762155e9debd xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Mon Mar 14 17:21:13 2011 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Tue Mar 15 13:55:40 2011 +0000
@@ -84,8 +84,9 @@
     if ( !early_boot )
     {
         struct page_info *pg = alloc_domheap_page(NULL, 0);
-        BUG_ON(pg == NULL);
-        return page_to_virt(pg);
+
+        BUG_ON(!dom0 && !pg);
+        return pg ? page_to_virt(pg) : NULL;
     }
 
     mfn = alloc_boot_pages(1, 1);
@@ -100,6 +101,9 @@
     if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
     {
         l3_pgentry_t *pl3e = alloc_xen_pagetable();
+
+        if ( !pl3e )
+            return NULL;
         clear_page(pl3e);
         l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
     }
@@ -112,9 +116,15 @@
     l3_pgentry_t *pl3e;
 
     pl3e = virt_to_xen_l3e(v);
+    if ( !pl3e )
+        return NULL;
+
     if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
     {
         l2_pgentry_t *pl2e = alloc_xen_pagetable();
+
+        if ( !pl2e )
+            return NULL;
         clear_page(pl2e);
         l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
     }
@@ -429,6 +439,7 @@
     l3_pgentry_t *l3_ro_mpt = NULL;
     l2_pgentry_t *l2_ro_mpt = NULL;
     struct page_info *l1_pg;
+    int err = 0;
 
     smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1));
 
@@ -479,24 +490,25 @@
         memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
 
         l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-        map_pages_to_xen(rwva,
-                    page_to_mfn(l1_pg),
-                    1UL << PAGETABLE_ORDER,
-                    PAGE_HYPERVISOR);
+        err = map_pages_to_xen(rwva, page_to_mfn(l1_pg),
+                               1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            break;
         memset((void *)rwva, 0x55, 1UL << L2_PAGETABLE_SHIFT);
         /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
         l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_from_page(l1_pg, 
_PAGE_PSE|_PAGE_PRESENT));
     }
 #undef CNT
 #undef MFN
-    return 0;
+    return err;
 }
 
 /*
  * Allocate and map the machine-to-phys table.
  * The L3 for RO/RWRW MPT and the L2 for compatible MPT should be setup already
  */
-int setup_m2p_table(struct mem_hotadd_info *info)
+static int setup_m2p_table(struct mem_hotadd_info *info)
 {
     unsigned long i, va, smap, emap;
     unsigned int n, memflags;
@@ -550,11 +562,13 @@
         else
         {
             l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-            map_pages_to_xen(
+            ret = map_pages_to_xen(
                         RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
                         page_to_mfn(l1_pg),
                         1UL << PAGETABLE_ORDER,
                         PAGE_HYPERVISOR);
+            if ( ret )
+                goto error;
             memset((void *)(RDWR_MPT_VIRT_START + i * sizeof(unsigned long)),
                    0x55, 1UL << L2_PAGETABLE_SHIFT);
 
@@ -898,13 +912,13 @@
     flush_tlb_all();
 }
 
-/* Should we be paraniod failure in map_pages_to_xen? */
 static int setup_frametable_chunk(void *start, void *end,
                                   struct mem_hotadd_info *info)
 {
     unsigned long s = (unsigned long)start;
     unsigned long e = (unsigned long)end;
     unsigned long mfn;
+    int err;
 
     ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
     ASSERT(!(e & ((1 << L2_PAGETABLE_SHIFT) - 1)));
@@ -912,14 +926,17 @@
     for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT))
     {
         mfn = alloc_hotadd_mfn(info);
-        map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+        err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            return err;
     }
     memset(start, -1, s - (unsigned long)start);
 
     return 0;
 }
 
-int extend_frame_table(struct mem_hotadd_info *info)
+static int extend_frame_table(struct mem_hotadd_info *info)
 {
     unsigned long cidx, nidx, eidx, spfn, epfn;
 
@@ -940,12 +957,16 @@
 
     while ( cidx < eidx )
     {
+        int err;
+
         nidx = find_next_bit(pdx_group_valid, eidx, cidx);
         if ( nidx >= eidx )
             nidx = eidx;
-        setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
+        err = setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
                                      pdx_to_page(nidx * PDX_GROUP_COUNT),
                                      info);
+        if ( err )
+            return err;
 
         cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
     }
diff -r cd4d0c5dfa27 -r 762155e9debd xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Mon Mar 14 17:21:13 2011 +0000
+++ b/xen/include/asm-x86/mm.h  Tue Mar 15 13:55:40 2011 +0000
@@ -325,8 +325,6 @@
                    int preemptible);
 int _shadow_mode_refcounts(struct domain *d);
 
-void cleanup_page_cacheattr(struct page_info *page);
-
 int is_iomem_page(unsigned long mfn);
 
 void clear_superpage_mark(struct page_info *page);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
