
[PATCH 05/12] xen: introduce reserve_heap_pages



Introduce a function named reserve_heap_pages (similar to
alloc_heap_pages) that allocates a requested memory range at a given
physical address. It calls __alloc_heap_pages for the actual
implementation.
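
For reference, the call relationships after this patch are (a sketch,
not part of the diff itself):

    alloc_heap_pages()   -> get_free_buddy() -> __alloc_heap_pages()
    reserve_heap_pages() -> page_list_del()  -> __alloc_heap_pages()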

Change __alloc_heap_pages so that the page pointer passed in is no
longer modified: when splitting a larger buddy, unneeded memory is now
given back top to bottom rather than bottom to top, so the allocation
always keeps the original start page.
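
To illustrate the new split direction, a minimal standalone sketch
follows (split_top_down and free_chunk are hypothetical illustrations,
not actual Xen functions; free_chunk stands in for page_list_add_scrub
and its associated bookkeeping):

    static void split_top_down(struct page_info *pg,
                               unsigned int buddy_order,
                               unsigned int order)
    {
        /* Split a free 2^buddy_order block down to 2^order pages. */
        while ( buddy_order != order )
        {
            buddy_order--;
            /* Give back the upper half; keep the lower half at pg. */
            free_chunk(pg + (1U << buddy_order), buddy_order);
        }
        /* pg still points at the start of the original block. */
    }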

Also introduce a function named reserve_domheap_pages, similar to
alloc_domheap_pages, that checks memflags before calling
reserve_heap_pages. On success it also calls assign_pages to assign
the reserved pages to the domain.
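
As a usage sketch (the address, order and error handling are made-up
example values, not taken from this series):

    /* Reserve 2MB (order 9 in 4K pages) at a fixed address for d. */
    struct page_info *pg;

    pg = reserve_domheap_pages(d, 0x40000000ULL, 9, 0);
    if ( !pg )
        panic("Unable to reserve memory range for %pd\n", d);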

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>
CC: andrew.cooper3@xxxxxxxxxx
CC: jbeulich@xxxxxxxx
CC: George Dunlap <george.dunlap@xxxxxxxxxx>
CC: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/common/page_alloc.c | 74 +++++++++++++++++++++++++++++++++++++++---
 xen/include/xen/mm.h    |  2 ++
 2 files changed, 71 insertions(+), 5 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 79ae64d4b8..3a9c1a291b 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -911,7 +911,7 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
     }
 }
 
-static void __alloc_heap_pages(struct page_info **pgo,
+static void __alloc_heap_pages(struct page_info *pg,
                                unsigned int order,
                                unsigned int memflags,
                                struct domain *d)
@@ -922,7 +922,7 @@ static void __alloc_heap_pages(struct page_info **pgo,
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
     unsigned int dirty_cnt = 0;
-    struct page_info *pg = *pgo;
+    struct page_info *pg_start = pg;
 
     node = phys_to_nid(page_to_maddr(pg));
     zone = page_to_zone(pg);
@@ -934,10 +934,10 @@ static void __alloc_heap_pages(struct page_info **pgo,
     while ( buddy_order != order )
     {
         buddy_order--;
+        pg = pg_start + (1U << buddy_order);
         page_list_add_scrub(pg, node, zone, buddy_order,
                             (1U << buddy_order) > first_dirty ?
                             first_dirty : INVALID_DIRTY_IDX);
-        pg += 1U << buddy_order;
 
         if ( first_dirty != INVALID_DIRTY_IDX )
         {
@@ -948,7 +948,7 @@ static void __alloc_heap_pages(struct page_info **pgo,
                 first_dirty = 0; /* We've moved past original first_dirty */
         }
     }
-    *pgo = pg;
+    pg = pg_start;
 
     ASSERT(avail[node][zone] >= request);
     avail[node][zone] -= request;
@@ -1073,7 +1073,42 @@ static struct page_info *alloc_heap_pages(
         return NULL;
     }
 
-    __alloc_heap_pages(&pg, order, memflags, d);
+    __alloc_heap_pages(pg, order, memflags, d);
+    return pg;
+}
+
+static struct page_info *reserve_heap_pages(struct domain *d,
+                                            paddr_t start,
+                                            unsigned int order,
+                                            unsigned int memflags)
+{
+    nodeid_t node;
+    unsigned int zone;
+    struct page_info *pg;
+
+    if ( unlikely(order > MAX_ORDER) )
+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    /*
+     * Claimed memory is considered unavailable unless the request
+     * is made by a domain with sufficient unclaimed pages.
+     */
+    if ( (outstanding_claims + (1UL << order) > total_avail_pages) &&
+          ((memflags & MEMF_no_refcount) ||
+           !d || d->outstanding_pages < (1UL << order)) )
+    {
+        spin_unlock(&heap_lock);
+        return NULL;
+    }
+
+    pg = maddr_to_page(start);
+    node = phys_to_nid(start);
+    zone = page_to_zone(pg);
+    page_list_del(pg, &heap(node, zone, order));
+
+    __alloc_heap_pages(pg, order, memflags, d);
     return pg;
 }
 
@@ -2385,6 +2420,35 @@ struct page_info *alloc_domheap_pages(
     return pg;
 }
 
+struct page_info *reserve_domheap_pages(
+    struct domain *d, paddr_t start, unsigned int order, unsigned int memflags)
+{
+    struct page_info *pg = NULL;
+
+    ASSERT(!in_irq());
+
+    if ( memflags & MEMF_no_owner )
+        memflags |= MEMF_no_refcount;
+    else if ( (memflags & MEMF_no_refcount) && d )
+    {
+        ASSERT(!(memflags & MEMF_no_refcount));
+        return NULL;
+    }
+
+    pg = reserve_heap_pages(d, start, order, memflags);
+    if ( unlikely(!pg) )
+        return NULL;
+
+    if ( d && !(memflags & MEMF_no_owner) &&
+         assign_pages(d, pg, order, memflags) )
+    {
+        free_heap_pages(pg, order, memflags & MEMF_no_scrub);
+        return NULL;
+    }
+
+    return pg;
+}
+
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9b62087be1..35407e1b68 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -199,6 +199,8 @@ void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);
 void init_domheap_pages(paddr_t ps, paddr_t pe);
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags);
+struct page_info *reserve_domheap_pages(
+    struct domain *d, paddr_t start, unsigned int order, unsigned int memflags);
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
-- 
2.17.1
