
[Xen-devel] [PATCH for 4.6 v2 2/3] libxc: introduce xc_domain_getvnuma



A simple wrapper for XENMEM_get_vnumainfo. The nr_vnodes, nr_vmemranges
and nr_vcpus parameters are IN/OUT: they carry the sizes of the
caller-provided arrays in, and are updated on return with the values
reported by the hypervisor.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h | 18 +++++++++++++++
 tools/libxc/xc_domain.c       | 54 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)
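
Example usage (for reviewers' convenience, not part of the patch): a
hedged sketch of the intended calling convention. query_vnuma() and the
initial size guesses are hypothetical, and the retry path assumes the
XENMEM_get_vnumainfo behaviour of failing with ENOBUFS while reporting
the required sizes back through the nr_* fields when the supplied
arrays are too small.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Hypothetical helper, not part of this patch. */
    static int query_vnuma(xc_interface *xch, uint32_t domid)
    {
        /* Start from guessed sizes; the hypercall corrects them. */
        uint32_t nr_vnodes = 1, nr_vmemranges = 1, nr_vcpus = 1;
        xen_vmemrange_t *vmemrange = NULL;
        unsigned int *vdistance = NULL, *vcpu_to_vnode = NULL;
        int rc, retried = 0;

     again:
        vmemrange = calloc(nr_vmemranges, sizeof(*vmemrange));
        vdistance = calloc((size_t)nr_vnodes * nr_vnodes,
                           sizeof(*vdistance));
        vcpu_to_vnode = calloc(nr_vcpus, sizeof(*vcpu_to_vnode));
        if ( !vmemrange || !vdistance || !vcpu_to_vnode )
        {
            errno = ENOMEM;
            rc = -1;
            goto out;
        }

        rc = xc_domain_getvnuma(xch, domid, &nr_vnodes, &nr_vmemranges,
                                &nr_vcpus, vmemrange, vdistance,
                                vcpu_to_vnode);
        if ( rc == -1 && errno == ENOBUFS && !retried )
        {
            /* Arrays were too small; nr_* now hold the required
             * sizes (assumed from the hypercall interface). */
            free(vmemrange);
            free(vdistance);
            free(vcpu_to_vnode);
            retried = 1;
            goto again;
        }
        if ( rc )
            goto out;

        printf("domain %u: %u vnodes, %u vmemranges, %u vcpus\n",
               domid, nr_vnodes, nr_vmemranges, nr_vcpus);

     out:
        free(vmemrange);
        free(vdistance);
        free(vcpu_to_vnode);
        return rc;
    }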

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 742724e..b477d8a 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1287,6 +1287,24 @@ int xc_domain_setvnuma(xc_interface *xch,
                         unsigned int *vdistance,
                         unsigned int *vcpu_to_vnode,
                         unsigned int *vnode_to_pnode);
+/*
+ * Retrieve vnuma configuration
+ * domid: IN, target domid
+ * nr_vnodes: IN/OUT, number of vnodes, not NULL
+ * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
+ * nr_vcpus: IN/OUT, number of vcpus, not NULL
+ * vmemrange: OUT, an array which has length of nr_vmemranges
+ * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
+ * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
+ */
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode);
 
 #if defined(__i386__) || defined(__x86_64__)
 /*
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 780797f..09ef748 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2493,6 +2493,60 @@ int xc_domain_setvnuma(xc_interface *xch,
     return rc;
 }
 
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * *nr_vmemranges,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             *nr_vnodes * *nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * *nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    struct xen_vnuma_topology_info vnuma_topo;
+
+    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
+         xc_hypercall_bounce_pre(xch, vdistance)      ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) )
+    {
+        rc = -1;
+        errno = ENOMEM;
+        goto vnumaget_fail;
+    }
+
+    set_xen_guest_handle(vnuma_topo.vmemrange.h, vmemrange);
+    set_xen_guest_handle(vnuma_topo.vdistance.h, vdistance);
+    set_xen_guest_handle(vnuma_topo.vcpu_to_vnode.h, vcpu_to_vnode);
+
+    vnuma_topo.nr_vnodes = *nr_vnodes;
+    vnuma_topo.nr_vcpus = *nr_vcpus;
+    vnuma_topo.nr_vmemranges = *nr_vmemranges;
+    vnuma_topo.domid = domid;
+    vnuma_topo.pad = 0;
+
+    rc = do_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
+                      sizeof(vnuma_topo));
+
+    *nr_vnodes = vnuma_topo.nr_vnodes;
+    *nr_vcpus = vnuma_topo.nr_vcpus;
+    *nr_vmemranges = vnuma_topo.nr_vmemranges;
+
+ vnumaget_fail:
+    xc_hypercall_bounce_post(xch, vmemrange);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
2.1.4