[Xen-devel] [PATCH 11/15] xen: numa-sched: enable getting/specifying per-vcpu node-affinity



via two new DOMCTLs: XEN_DOMCTL_getvcpunodeaffinity and
XEN_DOMCTL_setvcpunodeaffinity.

They're very similar to XEN_DOMCTL_{get,set}vcpuaffinity, the only
difference being that they take a nodemap instead of a cpumap.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
 tools/libxc/xc_domain.c     |    8 ++++---
 xen/common/domctl.c         |   47 ++++++++++++++++++++++++++++++++++++++-----
 xen/include/public/domctl.h |    8 +++++--
 xen/xsm/flask/hooks.c       |    2 ++
 4 files changed, 54 insertions(+), 11 deletions(-)
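
For reference, here is a sketch (not part of this patch) of what a
libxc wrapper for the new set operation could look like, modelled on
the existing xc_vcpu_setaffinity(). The function name
xc_vcpu_setnodeaffinity and its exact signature are hypothetical:

int xc_vcpu_setnodeaffinity(xc_interface *xch,
                            uint32_t domid,
                            int vcpu,
                            xc_nodemap_t nodemap)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
    int ret = -1;
    int nodesize;

    /* Size, in bytes, of the host node bitmap. */
    nodesize = xc_get_nodemap_size(xch);
    if ( nodesize <= 0 )
        goto out;

    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
    if ( local == NULL )
        goto out;

    domctl.cmd = XEN_DOMCTL_setvcpunodeaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    /* Same interface as the cpumap case, but the bits are node IDs. */
    memcpy(local, nodemap, nodesize);
    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);
 out:
    return ret;
}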

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 81316d3..b36c2ad 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -219,9 +219,9 @@ int xc_vcpu_setaffinity(xc_interface *xch,
 
     memcpy(local, cpumap, cpusize);
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+    domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
@@ -260,8 +260,8 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
+    domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9760d50..7770d30 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -584,7 +584,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     break;
 
     case XEN_DOMCTL_setvcpuaffinity:
-    case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_setvcpunodeaffinity:
     {
         struct vcpu *v;
 
@@ -600,8 +600,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         {
             cpumask_var_t new_affinity;
 
-            ret = xenctl_bitmap_to_cpumask(
-                &new_affinity, &op->u.vcpuaffinity.cpumap);
+            ret = xenctl_bitmap_to_cpumask(&new_affinity,
+                                           &op->u.vcpuaffinity.map);
             if ( !ret )
             {
                 ret = vcpu_set_affinity(v, new_affinity);
@@ -610,8 +610,45 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         }
         else
         {
-            ret = cpumask_to_xenctl_bitmap(
-                &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
+            nodemask_t new_affinity;
+
+            ret = xenctl_bitmap_to_nodemask(&new_affinity,
+                                            &op->u.vcpuaffinity.map);
+            if ( !ret )
+                ret = vcpu_set_node_affinity(v, &new_affinity);
+        }
+    }
+    break;
+
+    case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_getvcpunodeaffinity:
+    {
+        struct vcpu *v;
+
+        ret = -EINVAL;
+        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
+            break;
+
+        ret = -ESRCH;
+        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
+            break;
+
+        if ( op->cmd == XEN_DOMCTL_getvcpuaffinity )
+        {
+            ret = cpumask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+                                           v->cpu_affinity);
+        }
+        else
+        {
+            nodemask_t affinity;
+            int cpu;
+
+            nodes_clear(affinity);
+            for_each_cpu ( cpu, v->node_affinity )
+                node_set(cpu_to_node(cpu), affinity);
+
+            ret = nodemask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+                                            &affinity);
         }
     }
     break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4c5b2bb..07d43f2 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -290,12 +290,14 @@ typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
 
 
-/* Get/set which physical cpus a vcpu can execute on. */
+/* Get/set which physical cpus a vcpu can, or prefers to, execute on. */
 /* XEN_DOMCTL_setvcpuaffinity */
 /* XEN_DOMCTL_getvcpuaffinity */
+/* XEN_DOMCTL_setvcpunodeaffinity */
+/* XEN_DOMCTL_getvcpunodeaffinity */
 struct xen_domctl_vcpuaffinity {
     uint32_t  vcpu;              /* IN */
-    struct xenctl_bitmap cpumap; /* IN/OUT */
+    struct xenctl_bitmap map;    /* IN/OUT */
 };
 typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
@@ -920,6 +922,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_broken_page_p2m           67
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
+#define XEN_DOMCTL_setvcpunodeaffinity           70
+#define XEN_DOMCTL_getvcpunodeaffinity           71
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fa0589a..d7cfeaf 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -616,10 +616,12 @@ static int flask_domctl(struct domain *d, int cmd)
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__UNPAUSE);
 
     case XEN_DOMCTL_setvcpuaffinity:
+    case XEN_DOMCTL_setvcpunodeaffinity:
     case XEN_DOMCTL_setnodeaffinity:
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SETAFFINITY);
 
     case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_getvcpunodeaffinity:
     case XEN_DOMCTL_getnodeaffinity:
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__GETAFFINITY);
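
For completeness, a usage sketch of the (hypothetical) wrapper from
the note above the diff; the domain ID 5, vcpu 2 and the node set
{0,1} are arbitrary example values:

#include <xenctrl.h>

int set_example_node_affinity(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    xc_nodemap_t nodemap;
    int rc = -1;

    if ( xch == NULL )
        return -1;

    nodemap = xc_nodemap_alloc(xch);
    if ( nodemap == NULL )
        goto out;

    /* Set bits 0 and 1: restrict the vcpu's node-affinity to nodes 0-1. */
    nodemap[0] = 0x03;

    /* Hypothetical wrapper around XEN_DOMCTL_setvcpunodeaffinity. */
    rc = xc_vcpu_setnodeaffinity(xch, 5, 2, nodemap);

    free(nodemap);
 out:
    xc_interface_close(xch);
    return rc;
}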
 

