
[Xen-devel] [PATCH 12/15] libxc: numa-sched: enable getting/specifying per-vcpu node-affinity



This is done by providing the proper get/set interfaces and wiring them
to the new domctls introduced in the previous commit.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
 tools/libxc/xc_domain.c |   82 +++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h   |   19 +++++++++++
 2 files changed, 101 insertions(+)
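
Not part of the patch, just an illustrative sketch of how a caller could use the
new setter, assuming the existing libxc helpers xc_interface_open() and
xc_nodemap_alloc(); the domain id, vcpu and node numbers below are made up:

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        xc_nodemap_t nodemap;
        int rc = -1;

        if ( !xch )
            return 1;

        /* Zeroed bitmap, one bit per NUMA node. */
        nodemap = xc_nodemap_alloc(xch);
        if ( nodemap )
        {
            /* Affine vcpu 0 of domain 1 to node 0 (ids made up for the example). */
            nodemap[0] |= 1 << 0;
            rc = xc_vcpu_setnodeaffinity(xch, 1 /* domid */, 0 /* vcpu */, nodemap);
            if ( rc )
                perror("xc_vcpu_setnodeaffinity");
            free(nodemap);
        }

        xc_interface_close(xch);
        return rc ? 1 : 0;
    }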

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index b36c2ad..b1be3ee 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -272,6 +272,88 @@ out:
     return ret;
 }
 
+int xc_vcpu_setnodeaffinity(xc_interface *xch,
+                            uint32_t domid,
+                            int vcpu,
+                            xc_nodemap_t nodemap)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    int ret = -1;
+    int nodesize;
+
+    nodesize = xc_get_nodemap_size(xch);
+    if ( !nodesize )
+    {
+        PERROR("Could not get number of nodes");
+        goto out;
+    }
+
+    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
+    if ( local == NULL )
+    {
+        PERROR("Could not allocate memory for setvcpunodeaffinity domctl 
hypercall");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_setvcpunodeaffinity;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+
+    memcpy(local, nodemap, nodesize);
+
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
+
+    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_buffer_free(xch, local);
+
+out:
+    return ret;
+}
+
+int xc_vcpu_getnodeaffinity(xc_interface *xch,
+                            uint32_t domid,
+                            int vcpu,
+                            xc_nodemap_t nodemap)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    int ret = -1;
+    int nodesize;
+
+    nodesize = xc_get_nodemap_size(xch);
+    if ( !nodesize )
+    {
+        PERROR("Could not get number of nodes");
+        goto out;
+    }
+
+    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
+    if ( local == NULL )
+    {
+        PERROR("Could not allocate memory for getvcpunodeaffinity domctl 
hypercall");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_getvcpunodeaffinity;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
+    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+    memcpy(nodemap, local, nodesize);
+
+    xc_hypercall_buffer_free(xch, local);
+out:
+    return ret;
+}
+
 int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
                               unsigned int *guest_width)
 {
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 58d51f3..8ba260e 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -551,6 +551,25 @@ int xc_domain_node_getaffinity(xc_interface *xch,
                                uint32_t domind,
                                xc_nodemap_t nodemap);
 
+/**
+ * These functions set and retrieve the NUMA node-affinity
+ * of a specific vcpu.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id one is interested in.
+ * @parm vcpu the vcpu one wants to set/get the affinity of.
+ * @parm nodemap the map of the NUMA nodes the vcpu has affinity with.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_vcpu_setnodeaffinity(xc_interface *xch,
+                            uint32_t domid,
+                            int vcpu,
+                            xc_nodemap_t nodemap);
+int xc_vcpu_getnodeaffinity(xc_interface *xch,
+                            uint32_t domid,
+                            int vcpu,
+                            xc_nodemap_t nodemap);
+
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
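
Similarly (and again only as a sketch outside the patch, assuming an already-open
xc_interface handle plus the existing xc_nodemap_alloc() and xc_get_max_nodes()
helpers), reading the affinity back through the new getter could look like:

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Print the NUMA nodes vcpu 'vcpu' of domain 'domid' has affinity with. */
    static int print_vcpu_nodeaffinity(xc_interface *xch, uint32_t domid, int vcpu)
    {
        xc_nodemap_t nodemap = xc_nodemap_alloc(xch);
        int i, nr_nodes = xc_get_max_nodes(xch);
        int rc = -1;

        if ( !nodemap || nr_nodes <= 0 )
            goto out;

        rc = xc_vcpu_getnodeaffinity(xch, domid, vcpu, nodemap);
        if ( rc )
            goto out;

        for ( i = 0; i < nr_nodes; i++ )
            if ( nodemap[i / 8] & (1 << (i % 8)) )
                printf("vcpu %d has affinity with node %d\n", vcpu, i);

    out:
        free(nodemap);
        return rc;
    }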

