[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 3/7] libxc: vnodes allocation on NUMA nodes.



If a vNUMA topology is defined and it's a hardware NUMA machine,
allocate vnodes on physical numa nodes based on vnode_to_pnode map.

TODO:
* Unify allocation for domu with vnuma and without vnuma topology
initialized;

Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
 tools/libxc/xc_dom.h     |   10 ++++++
 tools/libxc/xc_dom_x86.c |   81 ++++++++++++++++++++++++++++++++++++++++------
 tools/libxc/xg_private.h |    1 +
 3 files changed, 83 insertions(+), 9 deletions(-)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 90679da..c8fa243 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -111,6 +111,15 @@ struct xc_dom_image {
     struct xc_dom_phys *phys_pages;
     int realmodearea_log;
 
+    /*
+     * vNUMA topology and memory allocation structure.
+     * Defines the way to allocate memory on per NUMA
+     * physical nodes that is defined by vnode_to_pnode.
+     */
+    uint32_t nr_vnodes;
+    uint64_t *vnuma_memszs;
+    unsigned int *vnode_to_pnode;
+
     /* malloc memory pool */
     struct xc_dom_mem *memblocks;
 
@@ -360,6 +369,7 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
 int arch_setup_meminit(struct xc_dom_image *dom);
 int arch_setup_bootearly(struct xc_dom_image *dom);
 int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_numa_alloc(struct xc_dom_image *dom);
 
 /*
  * Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index e034d62..1839360 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -802,27 +802,47 @@ int arch_setup_meminit(struct xc_dom_image *dom)
     else
     {
         /* try to claim pages for early warning of insufficient memory avail */
+        rc = 0;
         if ( dom->claim_enabled ) {
             rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                        dom->total_pages);
             if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: Failed to claim mem for dom\n",
+                             __FUNCTION__);
                 return rc;
+            }
         }
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
         
         /* allocate guest memory */
-        for ( i = rc = allocsz = 0;
-              (i < dom->total_pages) && !rc;
-              i += allocsz )
+        if ( dom->nr_vnodes > 0 )
         {
-            allocsz = dom->total_pages - i;
-            if ( allocsz > 1024*1024 )
-                allocsz = 1024*1024;
-            rc = xc_domain_populate_physmap_exact(
-                dom->xch, dom->guest_domid, allocsz,
-                0, 0, &dom->p2m_host[i]);
+            rc = arch_boot_numa_alloc(dom);
+            if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: Failed to allocate memory on NUMA nodes\n",
+                             __FUNCTION__);
+                return rc;
+            }
+        }
+        else
+        {
+            for ( i = rc = allocsz = 0;
+                  (i < dom->total_pages) && !rc;
+                  i += allocsz )
+            {
+                allocsz = dom->total_pages - i;
+                if ( allocsz > 1024*1024 )
+                    allocsz = 1024*1024;
+                rc = xc_domain_populate_physmap_exact(
+                    dom->xch, dom->guest_domid, allocsz,
+                    0, 0, &dom->p2m_host[i]);
+            }
         }
 
         /* Ensure no unclaimed pages are left unused.
@@ -830,7 +850,54 @@ int arch_setup_meminit(struct xc_dom_image *dom)
         (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                     0 /* cancels the claim */);
     }
+    return rc;
+}
+
+/*
+ * Allocate all of the guest's memory vnode by vnode, placing each vnode's
+ * pages on the physical NUMA node given by vnode_to_pnode[] (or with no
+ * placement constraint when the map entry is VNUMA_NO_NODE).
+ * Returns 0 on success, or the failing hypercall's return code.
+ */
+int arch_boot_numa_alloc(struct xc_dom_image *dom)
+{
+    int rc;
+    unsigned int n;
+    unsigned long long guest_pages;
+    unsigned long long allocsz, node_pfn_base, i;
+    unsigned int memflags;
+
+    rc = 0;
+    node_pfn_base = 0;
 
+    for ( n = 0; n < dom->nr_vnodes; n++ )
+    {
+        memflags = 0;
+        if ( dom->vnode_to_pnode[n] != VNUMA_NO_NODE )
+        {
+            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+            memflags |= XENMEMF_exact_node_request;
+        }
+        /* vnuma_memszs[] is in MB; convert to a number of guest pages. */
+        guest_pages = (dom->vnuma_memszs[n] << 20) >> PAGE_SHIFT_X86;
+        for ( i = 0; (i < guest_pages) && !rc; i += allocsz )
+        {
+            allocsz = guest_pages - i;
+            if ( allocsz > 1024*1024 )
+                allocsz = 1024*1024;
+            rc = xc_domain_populate_physmap_exact(
+                dom->xch, dom->guest_domid, allocsz,
+                0, memflags, &dom->p2m_host[node_pfn_base + i]);
+        }
+        if ( rc )
+        {
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                         "%s: Failed allocation of %llu pages for vnode %u on pnode %u out of %lu\n",
+                         __FUNCTION__, guest_pages, n,
+                         dom->vnode_to_pnode[n], dom->total_pages);
+            return rc;
+        }
+        node_pfn_base += i;
+    }
+
     return rc;
 }
 
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index 5ff2124..9554b71 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -127,6 +127,7 @@ typedef uint64_t l4_pgentry_64_t;
 #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
 #define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
 
+#define VNUMA_NO_NODE ~((unsigned int)0)
 
 /* XXX SMH: following skanky macros rely on variable p2m_size being set */
 /* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.