
[Xen-devel] [PATCH v2 1/9] mm: Separate free page chunk merging into its own routine



Free page chunk merging is currently open-coded in free_heap_pages(). Factor
it out into a new merge_chunks() routine, with the mergeability checks moved
into a can_merge() helper. This is needed for subsequent changes to memory
scrubbing.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---

Changes in v2:
* Return boolean (as opposed to integer) values in can_merge()
* Return new buddy head from merge_chunks()
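
As a reviewer aid, below is a minimal user-space sketch (not part of the
patch; the harness and all names in it are illustrative only) of the buddy
arithmetic that can_merge() and merge_chunks() rely on. For a chunk of
2^order pages starting at frame mfn, the buddy chunk starts at
mfn ^ (1UL << order), and the (page_to_mfn(pg) & mask) test in merge_chunks()
decides whether that buddy precedes the chunk or follows it:

#include <stdio.h>

#define MAX_ORDER 20 /* stand-in for Xen's MAX_ORDER */

/* Buddy of the 2^order chunk starting at mfn. */
static unsigned long buddy_of(unsigned long mfn, unsigned int order)
{
    return mfn ^ (1UL << order);
}

int main(void)
{
    unsigned long mfn = 0x103;  /* arbitrary example frame number */
    unsigned int order = 0;

    /* Pretend every buddy is free and mergeable, and watch the head move. */
    while ( order < 4 && order < MAX_ORDER )
    {
        unsigned long mask = 1UL << order;

        printf("order %u: head %#lx, buddy %#lx (%s)\n",
               order, mfn, buddy_of(mfn, order),
               (mfn & mask) ? "predecessor" : "successor");

        if ( mfn & mask )
            mfn -= mask;  /* merging with the predecessor moves the head */

        order++;
    }

    printf("final head %#lx at order %u\n", mfn, order);
    return 0;
}

Merging with a predecessor relocates the chunk's head page, which is why
merge_chunks() now returns the new head (v2 change above).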


 xen/common/page_alloc.c |   88 +++++++++++++++++++++++++++++-----------------
 1 file changed, 55 insertions(+), 33 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 68dba19..15fd7f4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -924,11 +924,64 @@ static int reserve_offlined_page(struct page_info *head)
     return count;
 }
 
+static bool_t can_merge(struct page_info *buddy, unsigned int node,
+                        unsigned int order)
+{
+    if ( !mfn_valid(_mfn(page_to_mfn(buddy))) ||
+         !page_state_is(buddy, free) ||
+         (PFN_ORDER(buddy) != order) ||
+         (phys_to_nid(page_to_maddr(buddy)) != node) )
+        return false;
+
+    return true;
+}
+
+/* Returns new buddy head. */
+static struct page_info *merge_chunks(struct page_info *pg, unsigned int node,
+                                      unsigned int zone, unsigned int order)
+{
+    ASSERT(spin_is_locked(&heap_lock));
+
+    /* Merge chunks as far as possible. */
+    while ( order < MAX_ORDER )
+    {
+        unsigned long mask = 1UL << order;
+        struct page_info *buddy;
+
+        if ( (page_to_mfn(pg) & mask) )
+        {
+            /* Merge with predecessor block? */
+            buddy = pg - mask;
+            if ( !can_merge(buddy, node, order) )
+                break;
+
+            pg = buddy;
+            page_list_del(pg, &heap(node, zone, order));
+        }
+        else
+        {
+            /* Merge with successor block? */
+            buddy = pg + mask;
+            if ( !can_merge(buddy, node, order) )
+                break;
+
+            page_list_del(buddy, &heap(node, zone, order));
+        }
+
+        order++;
+    }
+
+    PFN_ORDER(pg) = order;
+    page_list_add_tail(pg, &heap(node, zone, order));
+
+    return pg;
+}
+
 /* Free 2^@order set of pages. */
 static void free_heap_pages(
     struct page_info *pg, unsigned int order)
 {
-    unsigned long mask, mfn = page_to_mfn(pg);
+    unsigned long mfn = page_to_mfn(pg);
     unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
     unsigned int zone = page_to_zone(pg);
 
@@ -975,38 +1028,7 @@ static void free_heap_pages(
         midsize_alloc_zone_pages = max(
             midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
 
-    /* Merge chunks as far as possible. */
-    while ( order < MAX_ORDER )
-    {
-        mask = 1UL << order;
-
-        if ( (page_to_mfn(pg) & mask) )
-        {
-            /* Merge with predecessor block? */
-            if ( !mfn_valid(_mfn(page_to_mfn(pg-mask))) ||
-                 !page_state_is(pg-mask, free) ||
-                 (PFN_ORDER(pg-mask) != order) ||
-                 (phys_to_nid(page_to_maddr(pg-mask)) != node) )
-                break;
-            pg -= mask;
-            page_list_del(pg, &heap(node, zone, order));
-        }
-        else
-        {
-            /* Merge with successor block? */
-            if ( !mfn_valid(_mfn(page_to_mfn(pg+mask))) ||
-                 !page_state_is(pg+mask, free) ||
-                 (PFN_ORDER(pg+mask) != order) ||
-                 (phys_to_nid(page_to_maddr(pg+mask)) != node) )
-                break;
-            page_list_del(pg + mask, &heap(node, zone, order));
-        }
-
-        order++;
-    }
-
-    PFN_ORDER(pg) = order;
-    page_list_add_tail(pg, &heap(node, zone, order));
+    pg = merge_chunks(pg, node, zone, order);
 
     if ( tainted )
         reserve_offlined_page(pg);
-- 
1.7.1

