[Xen-devel] [PATCH v2 11/16] libxc: get and set soft and hard affinity
by using the new flag introduced in the parameters of
DOMCTL_{get,set}_vcpuaffinity.
This happens via two new xc calls: xc_vcpu_setaffinity_hard()
and xc_vcpu_setaffinity_soft() (and in the corresponding
getters, of course).
The existing xc_vcpu_{set,get}affinity() calls are also retained,
with the following behavior:
* xc_vcpu_setaffinity() sets both the hard and soft affinity;
* xc_vcpu_getaffinity() gets the hard affinity.
This is mainly for backward compatibility, i.e., to avoid
breaking existing callers/users.
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
tools/libxc/xc_domain.c | 153 ++++++++++++++++++++++++++++++++++++++++++-----
tools/libxc/xenctrl.h | 53 ++++++++++++++++
2 files changed, 190 insertions(+), 16 deletions(-)
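As a usage illustration (not part of the diff): a minimal sketch of how a
caller might drive the new soft setter and inspect the effective affinity
it returns. The domain and vcpu ids are made up, and it relies only on
helpers that already exist in libxc (xc_interface_open(),
xc_cpumap_alloc(), xc_get_cpumap_size()).

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t domid = 1;       /* hypothetical, pre-existing domain */
        int i, sz, vcpu = 0;
        xc_cpumap_t soft, effective;

        if ( !xch )
            return 1;

        sz = xc_get_cpumap_size(xch);
        soft = xc_cpumap_alloc(xch);
        effective = xc_cpumap_alloc(xch);
        if ( soft && effective )
        {
            soft[0] = 0x0f;       /* prefer pcpus 0-3 */
            if ( xc_vcpu_setaffinity_soft(xch, domid, vcpu, soft, effective) )
                fprintf(stderr, "setting soft affinity failed\n");
            else
                for ( i = 0; i < sz * 8; i++ )
                    if ( effective[i / 8] & (1 << (i % 8)) )
                        printf("vcpu %d will prefer pcpu %d\n", vcpu, i);
        }
        free(soft);
        free(effective);
        xc_interface_close(xch);
        return 0;
    }

The hard affinity can be read back the same way, via
xc_vcpu_getaffinity_hard() (or plain xc_vcpu_getaffinity(), which is
wired to it).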
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index f9ae4bf..30bfe7b 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -189,13 +189,16 @@ int xc_domain_node_getaffinity(xc_interface *xch,
return ret;
}
-int xc_vcpu_setaffinity(xc_interface *xch,
- uint32_t domid,
- int vcpu,
- xc_cpumap_t cpumap)
+static int _vcpu_setaffinity(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ uint32_t flags,
+ xc_cpumap_t ecpumap)
{
DECLARE_DOMCTL;
- DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+ DECLARE_HYPERCALL_BUFFER(uint8_t, cpumap_local);
+ DECLARE_HYPERCALL_BUFFER(uint8_t, ecpumap_local);
int ret = -1;
int cpusize;
@@ -206,39 +209,119 @@ int xc_vcpu_setaffinity(xc_interface *xch,
goto out;
}
- local = xc_hypercall_buffer_alloc(xch, local, cpusize);
- if ( local == NULL )
+ cpumap_local = xc_hypercall_buffer_alloc(xch, cpumap_local, cpusize);
+ if ( cpumap_local == NULL )
+ {
+ PERROR("Could not allocate cpumap_local for DOMCTL_setvcpuaffinity");
+ goto out;
+ }
+ ecpumap_local = xc_hypercall_buffer_alloc(xch, ecpumap_local, cpusize);
+ if ( ecpumap_local == NULL )
{
- PERROR("Could not allocate memory for setvcpuaffinity domctl
hypercall");
+ xc_hypercall_buffer_free(xch, cpumap_local);
+ PERROR("Could not allocate ecpumap_local for DOMCTL_setvcpuaffinity");
goto out;
}
domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
- /* Soft affinity is there, but not used anywhere for now, so... */
- domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;
-
- memcpy(local, cpumap, cpusize);
-
- set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+ domctl.u.vcpuaffinity.flags = flags;
+ memcpy(cpumap_local, cpumap, cpusize);
+ set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, cpumap_local);
domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+ set_xen_guest_handle(domctl.u.vcpuaffinity.effective_affinity.bitmap,
+ ecpumap_local);
+ domctl.u.vcpuaffinity.effective_affinity.nr_bits = cpusize * 8;
+
ret = do_domctl(xch, &domctl);
- xc_hypercall_buffer_free(xch, local);
+ if ( !ret )
+     memcpy(ecpumap, ecpumap_local, cpusize);
+
+ xc_hypercall_buffer_free(xch, cpumap_local);
+ xc_hypercall_buffer_free(xch, ecpumap_local);
out:
return ret;
}
+int xc_vcpu_setaffinity_soft(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ xc_cpumap_t ecpumap)
+{
+ return _vcpu_setaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_SOFT,
+ ecpumap);
+}
-int xc_vcpu_getaffinity(xc_interface *xch,
+int xc_vcpu_setaffinity_hard(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ xc_cpumap_t ecpumap)
+{
+ return _vcpu_setaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_HARD,
+ ecpumap);
+}
+
+/* Provided for backward compatibility: sets both hard and soft affinity */
+int xc_vcpu_setaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
xc_cpumap_t cpumap)
{
+ xc_cpumap_t ecpumap;
+ int ret = -1;
+
+ ecpumap = xc_cpumap_alloc(xch);
+ if ( ecpumap == NULL )
+ {
+ PERROR("Could not allocate memory for DOMCTL_setvcpuaffinity");
+ return -1;
+ }
+
+ ret = _vcpu_setaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_SOFT,
+ ecpumap);
+
+ if ( ret )
+ {
+ PERROR("Could not set soft affinity via DOMCTL_setvcpuaffinity");
+ goto out;
+ }
+
+ ret = _vcpu_setaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_HARD,
+ ecpumap);
+ out:
+ free(ecpumap);
+ return ret;
+}
+
+static int _vcpu_getaffinity(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ uint32_t flags)
+{
DECLARE_DOMCTL;
DECLARE_HYPERCALL_BUFFER(uint8_t, local);
int ret = -1;
@@ -261,6 +344,7 @@ int xc_vcpu_getaffinity(xc_interface *xch,
domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
+ domctl.u.vcpuaffinity.flags = flags;
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
@@ -274,6 +358,43 @@ out:
return ret;
}
+int xc_vcpu_getaffinity_soft(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap)
+{
+ return _vcpu_getaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_SOFT);
+}
+
+int xc_vcpu_getaffinity_hard(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap)
+{
+ return _vcpu_getaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_HARD);
+}
+
+/* Provided for backward compatibility and wired to hard affinity */
+int xc_vcpu_getaffinity(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap)
+{
+ return _vcpu_getaffinity(xch,
+ domid,
+ vcpu,
+ cpumap,
+ XEN_VCPUAFFINITY_HARD);
+}
+
int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
unsigned int *guest_width)
{
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 4ac6b8a..ec80603 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -579,10 +579,63 @@ int xc_domain_node_getaffinity(xc_interface *xch,
uint32_t domind,
xc_nodemap_t nodemap);
+/**
+ * These functions specify the scheduling affinity for a vcpu. Soft
+ * affinity is the set of pcpus on which a vcpu prefers to run. Hard
+ * affinity is the set of pcpus on which a vcpu is allowed to run.
+ * When set independently (by the respective _soft and _hard calls),
+ * the effective affinity is also returned. The effective affinity is
+ * the intersection of the soft affinity, the hard affinity and the
+ * set of cpus of the cpupool the domain belongs to: it is basically
+ * what the Xen scheduler will actually use. Returning it to the
+ * caller allows them to check whether it matches with, or at least
+ * is good enough for, their purposes.
+ *
+ * An xc_vcpu_setaffinity() call is also provided, mainly for
+ * backward compatibility; it sets both the hard and the soft
+ * affinity of the vcpu.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap the new (hard, soft or both) affinity map to set
+ * @param ecpumap where the effective affinity of the vcpu is returned
+ */
+int xc_vcpu_setaffinity_soft(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ xc_cpumap_t ecpumap);
+int xc_vcpu_setaffinity_hard(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap,
+ xc_cpumap_t ecpumap);
int xc_vcpu_setaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
xc_cpumap_t cpumap);
+
+/**
+ * These functions retrieve the hard or soft scheduling affinity of
+ * a vcpu.
+ *
+ * An xc_vcpu_getaffinity() call is also provided, mainly for
+ * backward compatibility; it returns the hard affinity, exactly
+ * as xc_vcpu_getaffinity_hard() does.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap where the (hard or soft) affinity is returned
+ */
+int xc_vcpu_getaffinity_soft(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap);
+int xc_vcpu_getaffinity_hard(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap);
int xc_vcpu_getaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
xc_cpumap_t cpumap);
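To make the "effective affinity" described in the header comment above
concrete: it is the bitwise AND of the three masks involved. A minimal
sketch of that intersection (illustrative helper, not libxc code):

    /* A pcpu ends up in the effective affinity iff it is in the hard
     * affinity, the soft affinity and the cpupool's cpus at once. */
    static void effective_affinity(const uint8_t *hard, const uint8_t *soft,
                                   const uint8_t *cpupool, uint8_t *effective,
                                   int cpumap_size)
    {
        int i;

        for ( i = 0; i < cpumap_size; i++ )
            effective[i] = hard[i] & soft[i] & cpupool[i];
    }

For instance, with hard = 0xff, soft = 0x0f and a cpupool owning pcpus
2-5 (0x3c), the effective affinity is 0x0c, i.e., pcpus 2 and 3.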