Re: [Xen-devel] [PATCH 02 of 10 v3] libxl, libxc: introduce libxl_get_numainfo()
On Wed, 2012-07-04 at 17:18 +0100, Dario Faggioli wrote:
> # HG changeset patch
> # User Dario Faggioli <raistlin@xxxxxxxx>
> # Date 1341416323 -7200
> # Node ID 0ca91a203fc95d3d18bb436ecdc7106b0b2ff22f
> # Parent 8e367818e194c212cd1470aad663f3243ff53bdb
> libxl,libxc: introduce libxl_get_numainfo()
>
> Make some NUMA node information available to the toolstack. Achieve
> this by means of xc_numainfo(), which exposes memory size and amount
> of free memory of each node, as well as the relative distances of
> each node to all the others.
>
> For properly exposing distances we need the IDL to support arrays.
>
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
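
For anyone wanting to poke at this from the toolstack side, here is a minimal
caller-side sketch (illustrative only: the dump_numainfo() wrapper and the
printf formatting are made up, error handling is elided, but the libxl calls
are the ones introduced by this patch):

    #include <stdio.h>
    #include <inttypes.h>
    #include <libxl.h>

    /* hypothetical helper: print memory and distance info for every node */
    static void dump_numainfo(libxl_ctx *ctx)
    {
        int nr = 0, i, j;
        libxl_numainfo *info = libxl_get_numainfo(ctx, &nr);

        if (!info)
            return; /* libxl_get_numainfo() logs its own errors */

        for (i = 0; i < nr; i++) {
            if (info[i].size == LIBXL_NUMAINFO_INVALID_ENTRY)
                continue; /* no information available for this node */
            printf("node %d: size %"PRIu64", free %"PRIu64"\n",
                   i, info[i].size, info[i].free);
            for (j = 0; j < info[i].num_dists; j++)
                printf("  distance to node %d: %"PRIu32"\n",
                       j, info[i].dists[j]);
        }

        /* the caller owns the returned array and frees it like this */
        libxl_numainfo_list_free(info, nr);
    }
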
>
> ---
> Changes from v2:
>  * converted libxl__zalloc(NULL, ...) to libxl__calloc(NOGC, ...).
> * Fixed the comment about memory ownership of libxl_get_numainfo().
> * Added a comment for libxl_numainfo in libxl_types.idl.
>
> Changes from v1:
> * malloc converted to libxl__zalloc(NOGC, ...).
> * The patch also accommodates some bits of what was in "libxc,
> libxl: introduce xc_nodemap_t and libxl_nodemap" which was
> removed as well, as full support for node maps at libxc
> level is not needed (yet!).
>
> diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -35,6 +35,20 @@ int xc_get_max_cpus(xc_interface *xch)
> return max_cpus;
> }
>
> +int xc_get_max_nodes(xc_interface *xch)
> +{
> + static int max_nodes = 0;
> + xc_physinfo_t physinfo;
> +
> + if ( max_nodes )
> + return max_nodes;
> +
> + if ( !xc_physinfo(xch, &physinfo) )
> + max_nodes = physinfo.max_node_id + 1;
> +
> + return max_nodes;
> +}
> +
> int xc_get_cpumap_size(xc_interface *xch)
> {
> return (xc_get_max_cpus(xch) + 7) / 8;
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -329,6 +329,12 @@ int xc_get_cpumap_size(xc_interface *xch
> /* allocate a cpumap */
> xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
>
> + /*
> + * NODEMAP handling
> + */
> +/* return maximum number of NUMA nodes the hypervisor supports */
> +int xc_get_max_nodes(xc_interface *xch);
> +
> /*
> * DOMAIN DEBUGGING FUNCTIONS
> */
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -3298,6 +3298,75 @@ fail:
> return ret;
> }
>
> +libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
> +{
> + GC_INIT(ctx);
> + xc_numainfo_t ninfo;
> + DECLARE_HYPERCALL_BUFFER(xc_node_to_memsize_t, memsize);
> + DECLARE_HYPERCALL_BUFFER(xc_node_to_memfree_t, memfree);
> + DECLARE_HYPERCALL_BUFFER(uint32_t, node_dists);
> + libxl_numainfo *ret = NULL;
> + int i, j, max_nodes;
> +
> + max_nodes = libxl_get_max_nodes(ctx);
> + if (max_nodes == 0)
> + {
> + LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of NODES");
> + ret = NULL;
> + goto out;
> + }
> +
> + memsize = xc_hypercall_buffer_alloc
> + (ctx->xch, memsize, sizeof(*memsize) * max_nodes);
> + memfree = xc_hypercall_buffer_alloc
> + (ctx->xch, memfree, sizeof(*memfree) * max_nodes);
> + node_dists = xc_hypercall_buffer_alloc
> + (ctx->xch, node_dists, sizeof(*node_dists) * max_nodes * max_nodes);
> + if ((memsize == NULL) || (memfree == NULL) || (node_dists == NULL)) {
> + LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
> + "Unable to allocate hypercall arguments");
> + goto fail;
> + }
> +
> + set_xen_guest_handle(ninfo.node_to_memsize, memsize);
> + set_xen_guest_handle(ninfo.node_to_memfree, memfree);
> + set_xen_guest_handle(ninfo.node_to_node_distance, node_dists);
> + ninfo.max_node_index = max_nodes - 1;
> + if (xc_numainfo(ctx->xch, &ninfo) != 0) {
> + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting numainfo");
> + goto fail;
> + }
> +
> + if (ninfo.max_node_index < max_nodes - 1)
> + max_nodes = ninfo.max_node_index + 1;
> +
> + *nr = max_nodes;
> +
> + ret = libxl__zalloc(NOGC, sizeof(libxl_numainfo) * max_nodes);
> + for (i = 0; i < max_nodes; i++)
> + ret[i].dists = libxl__calloc(NOGC, max_nodes, sizeof(*node_dists));
> +
> + for (i = 0; i < max_nodes; i++) {
> +#define V(mem, i) (mem[i] == INVALID_NUMAINFO_ID) ? \
> + LIBXL_NUMAINFO_INVALID_ENTRY : mem[i]
> + ret[i].size = V(memsize, i);
> + ret[i].free = V(memfree, i);
> + ret[i].num_dists = max_nodes;
> + for (j = 0; j < ret[i].num_dists; j++)
> + ret[i].dists[j] = V(node_dists, i * max_nodes + j);
> +#undef V
> + }
> +
> + fail:
> + xc_hypercall_buffer_free(ctx->xch, memsize);
> + xc_hypercall_buffer_free(ctx->xch, memfree);
> + xc_hypercall_buffer_free(ctx->xch, node_dists);
> +
> + out:
> + GC_FREE;
> + return ret;
> +}
> +
> const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx)
> {
> union {
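
One small note on the data layout above, mostly for future readers of this
function: node_dists is a single flat buffer of max_nodes * max_nodes entries
in row-major order, so the copy loop ends up with the distance from node i to
node j in ret[i].dists[j]. In other words (purely illustrative, names as in
the function above):

    /* distance from node i to node j, before and after the copy loop */
    uint32_t d_flat   = node_dists[i * max_nodes + j];
    uint32_t d_copied = ret[i].dists[j];   /* same value */
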
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -532,6 +532,9 @@ int libxl_domain_preserve(libxl_ctx *ctx
> /* get max. number of cpus supported by hypervisor */
> int libxl_get_max_cpus(libxl_ctx *ctx);
>
> +/* get max. number of NUMA nodes supported by hypervisor */
> +int libxl_get_max_nodes(libxl_ctx *ctx);
> +
> int libxl_domain_rename(libxl_ctx *ctx, uint32_t domid,
> const char *old_name, const char *new_name);
>
> @@ -604,6 +607,10 @@ void libxl_vminfo_list_free(libxl_vminfo
> libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out);
> void libxl_cputopology_list_free(libxl_cputopology *, int nb_cpu);
>
> +#define LIBXL_NUMAINFO_INVALID_ENTRY (~(uint32_t)0)
> +libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
> +void libxl_numainfo_list_free(libxl_numainfo *, int nr);
> +
> libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
> int *nb_vcpu, int *nr_vcpus_out);
> void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
> diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
> --- a/tools/libxl/libxl_types.idl
> +++ b/tools/libxl/libxl_types.idl
> @@ -433,6 +433,15 @@ libxl_physinfo = Struct("physinfo", [
> ("cap_hvm_directio", bool),
> ], dir=DIR_OUT)
>
> +# NUMA node characteristics: size and free are how much memory it has, and how
> +# much of it is free, respectively. dists is an array of distances from this
> +# node to each other node.
> +libxl_numainfo = Struct("numainfo", [
> + ("size", uint64),
> + ("free", uint64),
> + ("dists", Array(uint32, "num_dists")),
> + ], dir=DIR_OUT)
> +
> libxl_cputopology = Struct("cputopology", [
> ("core", uint32),
> ("socket", uint32),
> diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
> --- a/tools/libxl/libxl_utils.c
> +++ b/tools/libxl/libxl_utils.c
> @@ -572,6 +572,11 @@ int libxl_get_max_cpus(libxl_ctx *ctx)
> return xc_get_max_cpus(ctx->xch);
> }
>
> +int libxl_get_max_nodes(libxl_ctx *ctx)
> +{
> + return xc_get_max_nodes(ctx->xch);
> +}
> +
> int libxl__enum_from_string(const libxl_enum_string_table *t,
> const char *s, int *e)
> {
> @@ -594,6 +599,14 @@ void libxl_cputopology_list_free(libxl_c
> free(list);
> }
>
> +void libxl_numainfo_list_free(libxl_numainfo *list, int nr)
> +{
> + int i;
> + for (i = 0; i < nr; i++)
> + libxl_numainfo_dispose(&list[i]);
> + free(list);
> +}
> +
> void libxl_vcpuinfo_list_free(libxl_vcpuinfo *list, int nr)
> {
> int i;
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -484,6 +484,7 @@ typedef struct xen_sysctl_topologyinfo x
> DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
>
> /* XEN_SYSCTL_numainfo */
> +#define INVALID_NUMAINFO_ID (~0U)
> struct xen_sysctl_numainfo {
> /*
> * IN: maximum addressable entry in the caller-provided arrays.