[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 08/21] libxl: functions to build vmemranges for PV guest
Introduce an arch-independent routine to generate one vmemrange per vnode. Also introduce arch-dependent routines for different architectures because part of the process is arch-specific -- ARM does not yet have NUMA support and E820 is x86 only. For those x86 guests who care about machine E820 map (i.e. with e820_host=1), vnode is further split into several vmemranges to accommodate memory holes. A few stubs for libxl_arm.c are created. Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx> Cc: Ian Campbell <ian.campbell@xxxxxxxxxx> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx> Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx> Cc: Elena Ufimtseva <ufimtseva@xxxxxxxxx> --- Changes in v4: 1. Adapt to new interface. 2. Address Ian Jackson's comments. Changes in v3: 1. Rewrite commit log. --- tools/libxl/libxl_arch.h | 6 ++++ tools/libxl/libxl_arm.c | 9 +++++ tools/libxl/libxl_internal.h | 5 +++ tools/libxl/libxl_vnuma.c | 34 +++++++++++++++++++ tools/libxl/libxl_x86.c | 76 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 130 insertions(+) diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h index d3bc136..e249048 100644 --- a/tools/libxl/libxl_arch.h +++ b/tools/libxl/libxl_arch.h @@ -27,4 +27,10 @@ int libxl__arch_domain_init_hw_description(libxl__gc *gc, int libxl__arch_domain_finalise_hw_description(libxl__gc *gc, libxl_domain_build_info *info, struct xc_dom_image *dom); + +/* build vNUMA vmemrange with arch specific information */ +int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc, + uint32_t domid, + libxl_domain_build_info *b_info, + libxl__domain_build_state *state); #endif diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c index 65a762b..d3968a7 100644 --- a/tools/libxl/libxl_arm.c +++ b/tools/libxl/libxl_arm.c @@ -707,6 +707,15 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc, return 0; } +int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc, + uint32_t domid, + libxl_domain_build_info *info, + libxl__domain_build_state *state) 
+{ + /* Don't touch anything. */ + return 0; +} + /* * Local variables: * mode: C diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h index 39356ba..73d533a 100644 --- a/tools/libxl/libxl_internal.h +++ b/tools/libxl/libxl_internal.h @@ -3399,6 +3399,11 @@ int libxl__vnuma_config_check(libxl__gc *gc, const libxl_domain_build_info *b_info, const libxl__domain_build_state *state); +int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc, + uint32_t domid, + libxl_domain_build_info *b_info, + libxl__domain_build_state *state); + _hidden int libxl__ms_vm_genid_set(libxl__gc *gc, uint32_t domid, const libxl_ms_vm_genid *id); diff --git a/tools/libxl/libxl_vnuma.c b/tools/libxl/libxl_vnuma.c index 4edfaa4..0189a4b 100644 --- a/tools/libxl/libxl_vnuma.c +++ b/tools/libxl/libxl_vnuma.c @@ -14,6 +14,7 @@ */ #include "libxl_osdeps.h" /* must come before any other headers */ #include "libxl_internal.h" +#include "libxl_arch.h" #include <stdlib.h> /* Sort vmemranges in ascending order with "start" */ @@ -128,6 +129,39 @@ out: return rc; } +/* Build vmemranges for PV guest */ +int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc, + uint32_t domid, + libxl_domain_build_info *b_info, + libxl__domain_build_state *state) +{ + int i; + uint64_t next; + xen_vmemrange_t *v = NULL; + + assert(state->vmemranges == NULL); + + /* Generate one vmemrange for each virtual node. 
*/ + next = 0; + for (i = 0; i < b_info->num_vnuma_nodes; i++) { + libxl_vnode_info *p = &b_info->vnuma_nodes[i]; + + GCREALLOC_ARRAY(v, i+1); + + v[i].start = next; + v[i].end = next + (p->memkb << 10); + v[i].flags = 0; + v[i].nid = i; + + next = v[i].end; + } + + state->vmemranges = v; + state->num_vmemranges = i; + + return libxl__arch_vnuma_build_vmemrange(gc, domid, b_info, state); +} + /* * Local variables: * mode: C diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c index d012b4d..4841fef 100644 --- a/tools/libxl/libxl_x86.c +++ b/tools/libxl/libxl_x86.c @@ -339,6 +339,82 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc, return 0; } +int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc, + uint32_t domid, + libxl_domain_build_info *b_info, + libxl__domain_build_state *state) +{ + int nid, nr_vmemrange, rc; + uint32_t nr_e820, e820_count; + struct e820entry map[E820MAX]; + xen_vmemrange_t *vmemranges; + + /* Only touch vmemranges if it's PV guest and e820_host is true */ + if (!(b_info->type == LIBXL_DOMAIN_TYPE_PV && + libxl_defbool_val(b_info->u.pv.e820_host))) { + rc = 0; + goto out; + } + + rc = e820_host_sanitize(gc, b_info, map, &nr_e820); + if (rc) goto out; + + /* Ditch old vmemranges and start with host E820 map. Note, memory + * was gc allocated. + */ + state->vmemranges = NULL; + state->num_vmemranges = 0; + + e820_count = 0; + nr_vmemrange = 0; + vmemranges = NULL; + for (nid = 0; nid < b_info->num_vnuma_nodes; nid++) { + libxl_vnode_info *p = &b_info->vnuma_nodes[nid]; + uint64_t remaining_bytes = (p->memkb << 10), bytes; + + while (remaining_bytes > 0) { + if (e820_count >= nr_e820) { + rc = ERROR_NOMEM; + goto out; + } + + /* Skip non RAM region */ + if (map[e820_count].type != E820_RAM) { + e820_count++; + continue; + } + + GCREALLOC_ARRAY(vmemranges, nr_vmemrange+1); + + bytes = map[e820_count].size >= remaining_bytes ? 
+ remaining_bytes : map[e820_count].size; + + vmemranges[nr_vmemrange].start = map[e820_count].addr; + vmemranges[nr_vmemrange].end = map[e820_count].addr + bytes; + + if (map[e820_count].size >= remaining_bytes) { + map[e820_count].addr += bytes; + map[e820_count].size -= bytes; + } else { + e820_count++; + } + + remaining_bytes -= bytes; + + vmemranges[nr_vmemrange].flags = 0; + vmemranges[nr_vmemrange].nid = nid; + nr_vmemrange++; + } + } + + state->vmemranges = vmemranges; + state->num_vmemranges = nr_vmemrange; + + rc = 0; +out: + return rc; +} + /* * Local variables: * mode: C -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |