
Re: [Xen-devel] [PATCH v3 6/7] libxl/libxc: Move libxl_get_numainfo()'s hypercall buffer management to libxc



On 09/02/15 20:04, Boris Ostrovsky wrote:
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  tools/libxc/include/xenctrl.h     |    4 ++-
>  tools/libxc/xc_misc.c             |   29 ++++++++++++++++++++------
>  tools/libxl/libxl.c               |   36 ++++++++-------------------------
>  tools/python/xen/lowlevel/xc/xc.c |   39 +++++++++++++++---------------------
>  4 files changed, 50 insertions(+), 58 deletions(-)
>
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index d94571d..4466cd7 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -1228,6 +1228,7 @@ int xc_send_debug_keys(xc_interface *xch, char *keys);
>  typedef xen_sysctl_physinfo_t xc_physinfo_t;
>  typedef xen_sysctl_cputopo_t xc_cputopo_t;
>  typedef xen_sysctl_numainfo_t xc_numainfo_t;
> +typedef xen_sysctl_meminfo_t xc_meminfo_t;
>  
>  typedef uint32_t xc_cpu_to_node_t;
>  typedef uint32_t xc_cpu_to_socket_t;
> @@ -1238,7 +1239,8 @@ typedef uint32_t xc_node_to_node_dist_t;
>  
>  int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
>  int xc_cputopoinfo(xc_interface *xch, int *max_cpus, xc_cputopo_t *cputopo);
> -int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
> +int xc_numainfo(xc_interface *xch, int *max_nodes,
> +                xc_meminfo_t *meminfo, uint8_t *distance);
>  
>  int xc_sched_id(xc_interface *xch,
>                  int *sched_id);
> diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
> index 4c654f3..2dd5e13 100644
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -204,22 +204,37 @@ out:
>      return ret;
>  }
>  
> -int xc_numainfo(xc_interface *xch,
> -                xc_numainfo_t *put_info)
> +int xc_numainfo(xc_interface *xch, int *max_nodes,
> +                xc_meminfo_t *meminfo, uint8_t *distance)
>  {
>      int ret;
>      DECLARE_SYSCTL;
> +    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +    DECLARE_HYPERCALL_BOUNCE(distance,
> +                             *max_nodes * *max_nodes * sizeof(*distance),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
>  
> -    sysctl.cmd = XEN_SYSCTL_numainfo;
> +    if ((ret = xc_hypercall_bounce_pre(xch, meminfo)))
> +        goto out;
> +    if ((ret = xc_hypercall_bounce_pre(xch, distance)))
> +        goto out;

You can safely combine these two calls with an ||.
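
Something like this (just a sketch of the combined form; behaviour is
unchanged, and ret still holds the first failure):

    if ((ret = xc_hypercall_bounce_pre(xch, meminfo)) ||
        (ret = xc_hypercall_bounce_pre(xch, distance)))
        goto out;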

>  
> -    memcpy(&sysctl.u.numainfo, put_info, sizeof(*put_info));
> +    sysctl.u.numainfo.max_node_index = *max_nodes - 1;

As you have already changed these hypercalls, can we fix the
length handling so it doesn't require +1 or -1?  Xen should receive the
length of the arrays, and return the number of elements written, with no
further adjustment necessary.
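
For example (a rough sketch; the "num_nodes" field name is hypothetical,
standing in for whatever replaces max_node_index):

    /* In: number of elements the meminfo/distance arrays can hold. */
    sysctl.u.numainfo.num_nodes = *max_nodes;

    if ((ret = do_sysctl(xch, &sysctl)) != 0)
        goto out;

    /* Out: number of elements Xen actually wrote, with no +1/-1 adjustment. */
    *max_nodes = sysctl.u.numainfo.num_nodes;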

~Andrew

> +    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
> +    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);
>  
> +    sysctl.cmd = XEN_SYSCTL_numainfo;
>      if ((ret = do_sysctl(xch, &sysctl)) != 0)
> -        return ret;
> +        goto out;
>  
> -    memcpy(put_info, &sysctl.u.numainfo, sizeof(*put_info));
> +    *max_nodes = sysctl.u.numainfo.max_node_index + 1;
>  
> -    return 0;
> +out:
> +    xc_hypercall_bounce_post(xch, meminfo);
> +    xc_hypercall_bounce_post(xch, distance);
> +
> +    return ret;
>  }
>  
>  
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> index f8d64c2..9c1c949 100644
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -5110,41 +5110,25 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
>  libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
>  {
>      GC_INIT(ctx);
> -    xc_numainfo_t ninfo;
> -    DECLARE_HYPERCALL_BUFFER(xen_sysctl_meminfo_t, meminfo);
> -    DECLARE_HYPERCALL_BUFFER(uint8_t, distance);
> +    xc_meminfo_t *meminfo;
> +    uint8_t *distance;
>      libxl_numainfo *ret = NULL;
>      int i, j, max_nodes;
>  
>      max_nodes = libxl_get_max_nodes(ctx);
> -    if (max_nodes < 0)
> -    {
> +    if (max_nodes < 0) {
>          LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of NODES");
> -        ret = NULL;
>          goto out;
>      }
>  
> -    meminfo = xc_hypercall_buffer_alloc(ctx->xch, meminfo, sizeof(*meminfo) * max_nodes);
> -    distance = xc_hypercall_buffer_alloc(ctx->xch, distance, sizeof(*distance) * max_nodes * max_nodes);
> -    if ((meminfo == NULL) || (distance == NULL)) {
> -        LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
> -                            "Unable to allocate hypercall arguments");
> -        goto fail;
> -    }
> +    meminfo = libxl__zalloc(gc, sizeof(*meminfo) * max_nodes);
> +    distance = libxl__zalloc(gc, sizeof(*distance) * max_nodes * max_nodes);
>  
> -    set_xen_guest_handle(ninfo.meminfo, meminfo);
> -    set_xen_guest_handle(ninfo.distance, distance);
> -    ninfo.max_node_index = max_nodes - 1;
> -    if (xc_numainfo(ctx->xch, &ninfo) != 0) {
> +    if (xc_numainfo(ctx->xch, &max_nodes, meminfo, distance) != 0) {
>          LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting numainfo");
> -        goto fail;
> +        goto out;
>      }
>  
> -    if (ninfo.max_node_index < max_nodes - 1)
> -        max_nodes = ninfo.max_node_index + 1;
> -
> -    *nr = max_nodes;
> -
>      ret = libxl__zalloc(NOGC, sizeof(libxl_numainfo) * max_nodes);
>      for (i = 0; i < max_nodes; i++)
>          ret[i].dists = libxl__calloc(NOGC, max_nodes, sizeof(*distance));
> @@ -5156,13 +5140,11 @@ libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
>          ret[i].free = V(memfree, i);
>          ret[i].num_dists = max_nodes;
>          for (j = 0; j < ret[i].num_dists; j++)
> -            ret[i].dists[j] = distance[i*max_nodes + j];
> +            ret[i].dists[j] = distance[i * max_nodes + j];
>  #undef V
>      }
>  
> - fail:
> -    xc_hypercall_buffer_free(ctx->xch, meminfo);
> -    xc_hypercall_buffer_free(ctx->xch, distance);
> +    *nr = max_nodes;
>  
>   out:
>      GC_FREE;
> diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
> index 6e49dc5..3fd4805 100644
> --- a/tools/python/xen/lowlevel/xc/xc.c
> +++ b/tools/python/xen/lowlevel/xc/xc.c
> @@ -1294,39 +1294,32 @@ out:
>  static PyObject *pyxc_numainfo(XcObject *self)
>  {
>  #define MAX_NODE_INDEX 31
> -    xc_numainfo_t ninfo = { 0 };
> -    int i, j, max_node_index, invalid_node;
> +    int i, j, max_nodes, invalid_node;
>      uint64_t free_heap;
>      PyObject *ret_obj = NULL, *node_to_node_dist_list_obj;
>      PyObject *node_to_memsize_obj, *node_to_memfree_obj;
>      PyObject *node_to_dma32_mem_obj, *node_to_node_dist_obj;
> -    DECLARE_HYPERCALL_BUFFER(xen_sysctl_meminfo_t, meminfo);
> -    DECLARE_HYPERCALL_BUFFER(uint8_t, distance);
> +    xc_meminfo_t *meminfo;
> +    uint8_t *distance;
>  
> -    meminfo = xc_hypercall_buffer_alloc(self->xc_handle, meminfo, sizeof(*meminfo) * (MAX_NODE_INDEX+1));
> -    if ( meminfo == NULL )
> +    max_nodes = MAX_NODE_INDEX + 1;
> +    meminfo = calloc(max_nodes, sizeof(*meminfo));
> +    distance = calloc(max_nodes * max_nodes, sizeof(*distance));
> +    if ( meminfo == NULL || distance == NULL)
>          goto out;
> -    distance = xc_hypercall_buffer_alloc(self->xc_handle, distance, sizeof(*distance)*(MAX_NODE_INDEX+1)*(MAX_NODE_INDEX+1));
> -    if ( distance == NULL )
> -        goto out;
> -
> -    set_xen_guest_handle(ninfo.meminfo, meminfo);
> -    set_xen_guest_handle(ninfo.distance, distance);
> -    ninfo.max_node_index = MAX_NODE_INDEX;
>  
> -    if ( xc_numainfo(self->xc_handle, &ninfo) != 0 )
> +    if ( xc_numainfo(self->xc_handle, &max_nodes, meminfo, distance) != 0 )
>          goto out;
>  
> -    max_node_index = ninfo.max_node_index;
> -    if ( max_node_index > MAX_NODE_INDEX )
> -        max_node_index = MAX_NODE_INDEX;
> +    if ( max_nodes > MAX_NODE_INDEX + 1 )
> +        max_nodes = MAX_NODE_INDEX + 1;
>  
>      /* Construct node-to-* lists. */
>      node_to_memsize_obj = PyList_New(0);
>      node_to_memfree_obj = PyList_New(0);
>      node_to_dma32_mem_obj = PyList_New(0);
>      node_to_node_dist_list_obj = PyList_New(0);
> -    for ( i = 0; i <= max_node_index; i++ )
> +    for ( i = 0; i < max_nodes; i++ )
>      {
>          PyObject *pyint;
>  
> @@ -1349,9 +1342,9 @@ static PyObject *pyxc_numainfo(XcObject *self)
>          /* Node to Node Distance */
>          node_to_node_dist_obj = PyList_New(0);
>          invalid_node = (meminfo[i].memsize == INVALID_MEM_SZ);
> -        for ( j = 0; j <= max_node_index; j++ )
> +        for ( j = 0; j < max_nodes; j++ )
>          {
> -            uint32_t dist = distance[i * (max_node_index + 1) + j];
> +            uint32_t dist = distance[i * max_nodes + j];
>              if ( invalid_node )
>              {
>                  PyList_Append(node_to_node_dist_obj, Py_None);
> @@ -1367,7 +1360,7 @@ static PyObject *pyxc_numainfo(XcObject *self)
>          Py_DECREF(node_to_node_dist_obj);
>      }
>  
> -    ret_obj = Py_BuildValue("{s:i}", "max_node_index", max_node_index);
> +    ret_obj = Py_BuildValue("{s:i}", "max_node_index", max_nodes - 1);
>  
>      PyDict_SetItemString(ret_obj, "node_memsize", node_to_memsize_obj);
>      Py_DECREF(node_to_memsize_obj);
> @@ -1383,8 +1376,8 @@ static PyObject *pyxc_numainfo(XcObject *self)
>      Py_DECREF(node_to_node_dist_list_obj);
>  
>  out:
> -    xc_hypercall_buffer_free(self->xc_handle, meminfo);
> -    xc_hypercall_buffer_free(self->xc_handle, distance);
> +    free(meminfo);
> +    free(distance);
>      return ret_obj ? ret_obj : pyxc_error_to_exception(self->xc_handle);
>  #undef MAX_NODE_INDEX
>  }


