[Xen-devel] [RFC 2/3] xen/common: memory: Add support for direct mapped domain in XENMEM_exchange

A direct mapped domain needs to get back the exact same underlying
physical pages when a region is re-populated.

Therefore, when memory is exchanged for a direct mapped domain, we
neither want to free the memory of the previous region nor allocate
new memory.

Note that, because of this, the hypercall XENMEM_exchange can only
work on memory regions that were populated with real RAM when the
domain was created.
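
For illustration only (not part of this patch), here is a minimal
guest-side sketch of how XENMEM_exchange is typically driven, e.g. to
split one 2MB extent into 512 4KB extents. The structures come from
Xen's public ABI (xen/include/public/memory.h); set_xen_guest_handle()
and HYPERVISOR_memory_op() are the usual guest kernel wrappers, and
split_2mb_extent() is a hypothetical helper name. With this patch
applied, on a direct mapped domain the output extents are backed by
the same MFNs as the input extents.

#include <xen/interface/xen.h>      /* DOMID_SELF (Linux guest header paths) */
#include <xen/interface/memory.h>   /* struct xen_memory_exchange */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

/* Illustrative sketch only; split_2mb_extent() is a made-up helper. */
static int split_2mb_extent(xen_pfn_t in_gpfn, xen_pfn_t out_gpfns[512])
{
    struct xen_memory_exchange exch = {
        .in = {
            .nr_extents   = 1,
            .extent_order = 9,      /* one extent of 2^9 pages = 2MB */
            .domid        = DOMID_SELF,
        },
        .out = {
            .nr_extents   = 512,
            .extent_order = 0,      /* 512 extents of one 4KB page */
            .domid        = DOMID_SELF,
        },
    };

    set_xen_guest_handle(exch.in.extent_start, &in_gpfn);
    set_xen_guest_handle(exch.out.extent_start, out_gpfns);

    /*
     * On a direct mapped domain, the hypercall now neither frees the
     * input RAM nor allocates fresh pages: the same MFNs come back,
     * so gpfn == mfn before and after the exchange.
     */
    return HYPERVISOR_memory_op(XENMEM_exchange, &exch);
}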

Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>

---
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
 xen/common/memory.c | 133 +++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 90 insertions(+), 43 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index ac707e9..94c9a78 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -517,10 +517,19 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 
                 page = mfn_to_page(mfn);
 
-                if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
+                if ( is_domain_direct_mapped(d) )
                 {
-                    put_gfn(d, gmfn + k);
+                    if ( !get_page(page, d) )
+                        rc = -EINVAL;
+                    else
+                        put_page(page);
+                }
+                else if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                     rc = -EINVAL;
+
+                if ( unlikely(rc) )
+                {
+                    put_gfn(d, gmfn + k);
                     goto fail;
                 }
 
@@ -530,17 +539,20 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         }
 
         /* Allocate a chunk's worth of anonymous output pages. */
-        for ( j = 0; j < (1UL << out_chunk_order); j++ )
+        if ( !is_domain_direct_mapped(d) )
         {
-            page = alloc_domheap_pages(d, exch.out.extent_order,
-                                       MEMF_no_owner | memflags);
-            if ( unlikely(page == NULL) )
+            for ( j = 0; j < (1UL << out_chunk_order); j++ )
             {
-                rc = -ENOMEM;
-                goto fail;
-            }
+                page = alloc_domheap_pages(d, exch.out.extent_order,
+                                           MEMF_no_owner | memflags);
+                if ( unlikely(page == NULL) )
+                {
+                    rc = -ENOMEM;
+                    goto fail;
+                }
 
-            page_list_add(page, &out_chunk_list);
+                page_list_add(page, &out_chunk_list);
+            }
         }
 
         /*
@@ -552,47 +564,26 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         {
             unsigned long gfn;
 
-            if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            if ( !is_domain_direct_mapped(d) &&
+                 !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 BUG();
             mfn = page_to_mfn(page);
             gfn = mfn_to_gmfn(d, mfn);
             /* Pages were unshared above */
             BUG_ON(SHARED_M2P(gfn));
             guest_physmap_remove_page(d, gfn, mfn, 0);
-            put_page(page);
+
+            /*
+             * For a direct mapped domain, we want to be able to get
+             * the same page back later, so don't deallocate it.
+             */
+            if ( !is_domain_direct_mapped(d) )
+                put_page(page);
         }
 
         /* Assign each output page to the domain. */
-        for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
+        for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            if ( assign_pages(d, page, exch.out.extent_order,
-                              MEMF_no_refcount) )
-            {
-                unsigned long dec_count;
-                bool_t drop_dom_ref;
-
-                /*
-                 * Pages in in_chunk_list is stolen without
-                 * decreasing the tot_pages. If the domain is dying when
-                 * assign pages, we need decrease the count. For those pages
-                 * that has been assigned, it should be covered by
-                 * domain_relinquish_resources().
-                 */
-                dec_count = (((1UL << exch.in.extent_order) *
-                              (1UL << in_chunk_order)) -
-                             (j * (1UL << exch.out.extent_order)));
-
-                spin_lock(&d->page_alloc_lock);
-                drop_dom_ref = (dec_count &&
-                                !domain_adjust_tot_pages(d, -dec_count));
-                spin_unlock(&d->page_alloc_lock);
-
-                if ( drop_dom_ref )
-                    put_domain(d);
-
-                free_domheap_pages(page, exch.out.extent_order);
-                goto dying;
-            }
 
             if ( __copy_from_guest_offset(&gpfn, exch.out.extent_start,
                                           (i << out_chunk_order) + j, 1) )
@@ -601,7 +592,61 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
                 continue;
             }
 
-            mfn = page_to_mfn(page);
+            if ( is_domain_direct_mapped(d) )
+            {
+                if ( unlikely(d->is_dying) )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "Cannot assign page to domain %d -- dying.\n",
+                             d->domain_id);
+                    goto dying;
+                }
+
+                if ( !check_range_domain_direct_mapped(d, gpfn,
+                                                       exch.out.extent_order) )
+                    goto dying;
+
+                mfn = gpfn;
+            }
+            else
+            {
+                page = page_list_remove_head(&out_chunk_list);
+
+                /* The out chunk list should always contain enough pages */
+                BUG_ON(!page);
+
+                if ( assign_pages(d, page, exch.out.extent_order,
+                                  MEMF_no_refcount) )
+                {
+                    unsigned long dec_count;
+                    bool_t drop_dom_ref;
+
+                    /*
+                     * Pages in in_chunk_list were stolen without
+                     * decreasing tot_pages. If the domain is dying when we
+                     * assign pages, we need to decrease the count. For those
+                     * pages that have already been assigned, this is covered
+                     * by domain_relinquish_resources().
+                     */
+                    dec_count = (((1UL << exch.in.extent_order) *
+                                  (1UL << in_chunk_order)) -
+                                 (j * (1UL << exch.out.extent_order)));
+
+                    spin_lock(&d->page_alloc_lock);
+                    drop_dom_ref = (dec_count &&
+                                    !domain_adjust_tot_pages(d, -dec_count));
+                    spin_unlock(&d->page_alloc_lock);
+
+                    if ( drop_dom_ref )
+                        put_domain(d);
+
+                    free_domheap_pages(page, exch.out.extent_order);
+                    goto dying;
+                }
+
+                mfn = page_to_mfn(page);
+            }
+
             guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
 
             if ( !paging_mode_translate(d) )
@@ -630,7 +675,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
  fail:
     /* Reassign any input pages we managed to steal. */
     while ( (page = page_list_remove_head(&in_chunk_list)) )
-        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
+        if ( !is_domain_direct_mapped(d) &&
+             assign_pages(d, page, 0, MEMF_no_refcount) )
         {
             BUG_ON(!d->is_dying);
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
@@ -640,6 +686,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
  dying:
     rcu_unlock_domain(d);
     /* Free any output pages we managed to allocate. */
+    BUG_ON(is_domain_direct_mapped(d) && !page_list_empty(&out_chunk_list));
     while ( (page = page_list_remove_head(&out_chunk_list)) )
         free_domheap_pages(page, exch.out.extent_order);
 
-- 
2.1.4

