Re: [Xen-devel] [PATCH RFC 3/7] libxc/vnuma: per-domain vnuma structures.
On Tue, 2013-08-27 at 03:54 -0400, Elena Ufimtseva wrote:
> Makes use of domctl vnuma subop and initializes per-domain
> vnuma topology.
>
> Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
> ---
> tools/libxc/xc_dom.h | 9 +++++++
> tools/libxc/xc_domain.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> tools/libxc/xenctrl.h | 17 +++++++++++++
> 3 files changed, 89 insertions(+)
>
> diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
> index 86e23ee..4375f25 100644
> --- a/tools/libxc/xc_dom.h
> +++ b/tools/libxc/xc_dom.h
> @@ -114,6 +114,15 @@ struct xc_dom_image {
> struct xc_dom_phys *phys_pages;
> int realmodearea_log;
>
> + /* vNUMA topology and memory allocation structure
> + * Defines the way to allocate XEN
> + * memory from phys NUMA nodes by providing mask
> + * vnuma_to_pnuma */
> + int nr_vnodes;
> + struct vnuma_memblk *vnumablocks;
> + uint64_t *vmemsizes;
> + int *vnode_to_pnode;
> +
> /* malloc memory pool */
> struct xc_dom_mem *memblocks;
>
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index 3257e2a..98445e3 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -24,6 +24,7 @@
> #include "xg_save_restore.h"
> #include <xen/memory.h>
> #include <xen/hvm/hvm_op.h>
> +#include "xg_private.h"
>
> int xc_domain_create(xc_interface *xch,
> uint32_t ssidref,
> @@ -1629,6 +1630,68 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
> return do_domctl(xch, &domctl);
> }
>
> +/* Informs XEN that domain is vNUMA aware */
"Xen" ;-)
> +int xc_domain_setvnodes(xc_interface *xch,
> + uint32_t domid,
> + uint16_t nr_vnodes,
> + uint16_t nr_vcpus,
> + vnuma_memblk_t *vmemblks,
> + int *vdistance,
> + int *vcpu_to_vnode,
> + int *vnode_to_pnode)
Can some of these be const?
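E.g. something like this ought to work, since the current implementation only
ever reads from these arrays (untested sketch):

    int xc_domain_setvnodes(xc_interface *xch,
                            uint32_t domid,
                            uint16_t nr_vnodes,
                            uint16_t nr_vcpus,
                            const vnuma_memblk_t *vmemblks,
                            const int *vdistance,
                            const int *vcpu_to_vnode,
                            const int *vnode_to_pnode);

(Although if you switch to the bounce buffer macros suggested further down,
the const may need casting away where the buffers are declared.)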
> +{
> + int rc;
> + DECLARE_DOMCTL;
> + DECLARE_HYPERCALL_BUFFER(int, distbuf);
> + DECLARE_HYPERCALL_BUFFER(vnuma_memblk_t, membuf);
> + DECLARE_HYPERCALL_BUFFER(int, vcpumapbuf);
> + DECLARE_HYPERCALL_BUFFER(int, vntopbuf);
> +
> + rc = -EINVAL;
After the comment below about ENOMEM I think the value set here is
unused.
> + memset(&domctl, 0, sizeof(domctl));
DECLARE_DOMCTL will initialise domctl iff valgrind is enabled, which is
all that is required I think.
> + if ( vdistance == NULL || vcpu_to_vnode == NULL || vmemblks == NULL )
> + /* vnode_to_pnode can be null on non-NUMA machines */
> + {
> + PERROR("Parameters are wrong XEN_DOMCTL_setvnumainfo\n");
> + return -EINVAL;
> + }
> + distbuf = xc_hypercall_buffer_alloc
> + (xch, distbuf, sizeof(*vdistance) * nr_vnodes * nr_vnodes);
> + membuf = xc_hypercall_buffer_alloc
> + (xch, membuf, sizeof(*membuf) * nr_vnodes);
> + vcpumapbuf = xc_hypercall_buffer_alloc
> + (xch, vcpumapbuf, sizeof(*vcpu_to_vnode) * nr_vcpus);
> + vntopbuf = xc_hypercall_buffer_alloc
> + (xch, vntopbuf, sizeof(*vnode_to_pnode) * nr_vnodes);
> +
> + if (distbuf == NULL || membuf == NULL || vcpumapbuf == NULL || vntopbuf == NULL )
> + {
> + PERROR("Could not allocate memory for xc hypercall
> XEN_DOMCTL_setvnumainfo\n");
rc = -ENOMEM?
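i.e. presumably something like:

        rc = -ENOMEM;
        goto fail;

so the caller can tell an allocation failure apart from other errors.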
> + goto fail;
> + }
> + memcpy(distbuf, vdistance, sizeof(*vdistance) * nr_vnodes * nr_vnodes);
> + memcpy(vntopbuf, vnode_to_pnode, sizeof(*vnode_to_pnode) * nr_vnodes);
> + memcpy(vcpumapbuf, vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus);
> + memcpy(membuf, vmemblks, sizeof(*vmemblks) * nr_vnodes);
You can use DECLARE_HYPERCALL_BOUNCE and xc__hypercall_bounce_pre/post, which
take care of the allocation and copying internally.
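Roughly like this (a completely untested sketch, reusing the names from your
patch and the domctl.u.vnuma fields added earlier in this series):

    int xc_domain_setvnodes(xc_interface *xch,
                            uint32_t domid,
                            uint16_t nr_vnodes,
                            uint16_t nr_vcpus,
                            vnuma_memblk_t *vmemblks,
                            int *vdistance,
                            int *vcpu_to_vnode,
                            int *vnode_to_pnode)
    {
        int rc = -1;
        DECLARE_DOMCTL;
        DECLARE_HYPERCALL_BOUNCE(vdistance,
                                 sizeof(*vdistance) * nr_vnodes * nr_vnodes,
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);
        DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);
        DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode,
                                 sizeof(*vcpu_to_vnode) * nr_vcpus,
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);
        DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode,
                                 sizeof(*vnode_to_pnode) * nr_vnodes,
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);

        /* vnode_to_pnode can be NULL on non-NUMA machines */
        if ( vdistance == NULL || vcpu_to_vnode == NULL || vmemblks == NULL )
        {
            PERROR("Invalid parameters for XEN_DOMCTL_setvnumainfo");
            return -EINVAL;
        }

        /* bounce_pre allocates the hypercall-safe buffer and, because of
         * BOUNCE_IN, copies the caller's data into it. */
        if ( xc_hypercall_bounce_pre(xch, vdistance)     ||
             xc_hypercall_bounce_pre(xch, vmemblks)      ||
             xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
             xc_hypercall_bounce_pre(xch, vnode_to_pnode) )
        {
            PERROR("Could not bounce buffers for XEN_DOMCTL_setvnumainfo");
            rc = -ENOMEM;
            goto out;
        }

        set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
        set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
        set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
        set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);

        domctl.cmd = XEN_DOMCTL_setvnumainfo;
        domctl.domain = (domid_t)domid;
        domctl.u.vnuma.nr_vnodes = nr_vnodes;

        rc = do_domctl(xch, &domctl);

    out:
        /* bounce_post frees the buffers; nothing is copied back for
         * BOUNCE_IN buffers. */
        xc_hypercall_bounce_post(xch, vdistance);
        xc_hypercall_bounce_post(xch, vmemblks);
        xc_hypercall_bounce_post(xch, vcpu_to_vnode);
        xc_hypercall_bounce_post(xch, vnode_to_pnode);

        return rc;
    }

That also gets rid of the explicit memset, since DECLARE_DOMCTL already
covers the valgrind case as mentioned above.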
> +
> + set_xen_guest_handle(domctl.u.vnuma.vdistance, distbuf);
> + set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, membuf);
> + set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpumapbuf);
> + set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vntopbuf);
> +
> + domctl.cmd = XEN_DOMCTL_setvnumainfo;
> + domctl.domain = (domid_t)domid;
> + domctl.u.vnuma.nr_vnodes = nr_vnodes;
> + rc = do_domctl(xch, &domctl);
> +fail:
> + xc_hypercall_buffer_free(xch, distbuf);
> + xc_hypercall_buffer_free(xch, membuf);
> + xc_hypercall_buffer_free(xch, vcpumapbuf);
> + xc_hypercall_buffer_free(xch, vntopbuf);
> +
> + return rc;
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> index f2cebaf..fb66cfa 100644
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -1083,6 +1083,23 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
> uint32_t domid,
> unsigned long map_limitkb);
>
> +/*unsigned long xc_get_memory_hole_size(unsigned long start, unsigned long end);
What is this?
> +
> +int xc_domain_align_vnodes(xc_interface *xch,
> + uint32_t domid,
> + uint64_t *vmemareas,
> + vnuma_memblk_t *vnuma_memblks,
> + uint16_t nr_vnodes);
> +*/
> +int xc_domain_setvnodes(xc_interface *xch,
> + uint32_t domid,
> + uint16_t nr_vnodes,
> + uint16_t nr_vcpus,
> + vnuma_memblk_t *vmemareas,
> + int *vdistance,
> + int *vcpu_to_vnode,
> + int *vnode_to_pnode);
> +
> #if defined(__i386__) || defined(__x86_64__)
> /*
> * PC BIOS standard E820 types and structure.