
[Xen-devel] [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory



The allocator obtains the requested number of pages through independent
calls to alloc_heap_pages and then maps the resulting, possibly
non-contiguous, physical addresses into a contiguous virtual address
space using vmap.

In order to keep track of these regions a red-black tree, keyed by
virtual address, is used, so that the backing pages can be looked up
again when the region is freed.
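
As a usage sketch (hypothetical caller and made-up size, not part of
this patch), the interface is meant to be used in matched pairs:

    void *buf = alloc_xenheap_noncontiguous(4, 0); /* 4 pages, no memflags */

    if ( buf != NULL )
    {
        /* buf is virtually contiguous; the backing MFNs may not be. */
        memset(buf, 0, 4 * PAGE_SIZE);
        free_xenheap_noncontiguous(buf, 4);
    }

An area obtained this way has to be released with
free_xenheap_noncontiguous() rather than free_xenheap_pages(), since
the individual MFNs backing it can only be recovered from the tree.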

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/common/page_alloc.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/mm.h    |   2 +
 2 files changed, 133 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 8500ed7..4ad5184 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -38,6 +38,7 @@
 #include <xen/event.h>
 #include <xen/tmem.h>
 #include <xen/tmem_xen.h>
+#include <xen/rbtree.h>
 #include <public/sysctl.h>
 #include <public/sched.h>
 #include <asm/page.h>
@@ -107,6 +108,14 @@ struct scrub_region {
 static struct scrub_region __initdata region[MAX_NUMNODES];
 static unsigned long __initdata chunk_size;
 
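+/* Tree of the VA regions handed out by alloc_xenheap_noncontiguous(). */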
+static struct rb_root non_contiguous = RB_ROOT;
+struct va_page {
+    struct rb_node node;
+    void *va;
+    unsigned long mfn;
+};
+
 static void __init boot_bug(int line)
 {
     panic("Boot BUG at %s:%d", __FILE__, line);
@@ -1601,6 +1609,135 @@ void free_xenheap_pages(void *v, unsigned int order)
 
 #endif
 
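+/* Look up the tracking node for an exact virtual address, if present. */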
+static struct va_page *va_xenheap_search(struct rb_root *root, void *va)
+{
+    struct rb_node *node = root->rb_node;
+
+    while ( node )
+    {
+        struct va_page *data = container_of(node, struct va_page, node);
+
+        if ( data->va == va )
+            return data;
+        if ( va < data->va )
+            node = node->rb_left;
+        else
+            node = node->rb_right;
+    }
+
+    return NULL;
+}
+
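+/* Insert @data into @root keyed by VA; fails if the VA is already there. */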
+static int va_xenheap_insert(struct rb_root *root, struct va_page *data)
+{
+    struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+    /* Figure out where to put new node */
+    while ( *new )
+    {
+        struct va_page *this = container_of(*new, struct va_page, node);
+
+        parent = *new;
+        if ( data->va < this->va )
+            new = &((*new)->rb_left);
+        else if ( data->va > this->va )
+            new = &((*new)->rb_right);
+        else
+            return -EEXIST;
+    }
+
+    /* Add new node and rebalance tree. */
+    rb_link_node(&data->node, parent, new);
+    rb_insert_color(&data->node, root);
+
+    return 0;
+}
+
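+/*
+ * Allocate @pages single xenheap pages and map them into a contiguous
+ * virtual address range.  The backing MFNs need not be contiguous.
+ */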
+void *alloc_xenheap_noncontiguous(unsigned int pages, unsigned int memflags)
+{
+    unsigned long *mfn;
+    unsigned int i;
+    struct va_page *va_rb;
+    struct page_info *pg;
+    void *va = NULL;
+
+    mfn = xzalloc_array(unsigned long, pages);
+    if ( mfn == NULL )
+        return NULL;
+
+    for ( i = 0; i < pages; i++ )
+    {
+        pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, 0, memflags, NULL);
+        if ( pg == NULL )
+            goto error;
+        mfn[i] = page_to_mfn(pg);
+    }
+
+    va = vmap(mfn, pages);
+    if ( va == NULL )
+        goto error;
+
+    for ( i = 0; i < pages; i++ )
+    {
+        va_rb = xmalloc(struct va_page);
+        if ( va_rb == NULL )
+            goto error;
+        va_rb->va = va + i * PAGE_SIZE;
+        va_rb->mfn = mfn[i];
+        BUG_ON(va_xenheap_insert(&non_contiguous, va_rb));
+    }
+
+    xfree(mfn);
+    return va;
+
+ error:
+    if ( va != NULL )
+    {
+        for ( i = 0; i < pages; i++ )
+        {
+            va_rb = va_xenheap_search(&non_contiguous, va + i * PAGE_SIZE);
+            if ( va_rb != NULL )
+            {
+                rb_erase(&va_rb->node, &non_contiguous);
+                xfree(va_rb);
+            }
+        }
+        vunmap(va);
+    }
+    /* Slots not populated above still hold the 0 set by xzalloc_array(). */
+    for ( i = 0; i < pages; i++ )
+        if ( mfn[i] != 0 )
+            free_heap_pages(mfn_to_page(mfn[i]), 0);
+    xfree(mfn);
+    return NULL;
+}
+
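+/* Unmap and free an area obtained from alloc_xenheap_noncontiguous(). */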
+void free_xenheap_noncontiguous(void *va, unsigned int pages)
+{
+    struct va_page *va_rb;
+    unsigned int i;
+
+    if ( va == NULL || pages == 0 )
+        return;
+
+    vunmap(va);
+
+    for ( i = 0; i < pages; i++ )
+    {
+        va_rb = va_xenheap_search(&non_contiguous, va + i * PAGE_SIZE);
+        BUG_ON(va_rb == NULL);
+        free_heap_pages(mfn_to_page(va_rb->mfn), 0);
+        rb_erase(&va_rb->node, &non_contiguous);
+        xfree(va_rb);
+    }
+}
 
 
 /*************************
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a066363..b1eae58 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,8 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
+void *alloc_xenheap_noncontiguous(unsigned int pages, unsigned int memflags);
+void free_xenheap_noncontiguous(void *va, unsigned int pages);
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,
-- 
1.9.5 (Apple Git-50.3)

