
Re: [Xen-devel] [PATCH v3 09/14] xen: sched: DOMCTL_*vcpuaffinity works with hard and soft affinity



On 11/18/2013 06:17 PM, Dario Faggioli wrote:
by adding a flag for the caller to specify which one he cares about.

Also add another cpumap there. This way, in the case of
DOMCTL_setvcpuaffinity, Xen can return to the caller the
"effective affinity" of the vcpu, i.e., the intersection of the
cpupool's cpus, the (new?) hard affinity and the (new?) soft
affinity.

The purpose of this is to allow the toolstack to figure out whether
or not the requested change produced sensible results when combined
with the other settings that are already in place.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

Looks good:

Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
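
(For illustration only, not part of the patch: a minimal standalone sketch
of the "effective affinity" computation described in the changelog, using
plain unsigned long bitmasks in place of Xen's cpumask_t. All names below
are made up for the example.)

/*
 * Standalone illustration of the "effective affinity" reported back by
 * DOMCTL_setvcpuaffinity: the intersection of the cpupool's online cpus,
 * the hard affinity and (if XEN_VCPUAFFINITY_SOFT was passed) the soft
 * affinity.  Plain unsigned long bitmasks stand in for cpumask_t.
 */
#include <stdio.h>

#define VCPUAFFINITY_HARD  (1U << 0)
#define VCPUAFFINITY_SOFT  (1U << 1)

static unsigned long effective_affinity(unsigned long pool_online,
                                        unsigned long hard,
                                        unsigned long soft,
                                        unsigned int flags)
{
    unsigned long eff = pool_online & hard;

    if ( flags & VCPUAFFINITY_SOFT )
        eff &= soft;

    return eff;
}

int main(void)
{
    /* cpupool has cpus 0-3, hard affinity is {1,2}, soft affinity is {2,3}. */
    unsigned long eff = effective_affinity(0xf, 0x6, 0xc,
                                           VCPUAFFINITY_HARD | VCPUAFFINITY_SOFT);

    printf("effective affinity: 0x%lx\n", eff);  /* prints 0x4, i.e. cpu 2 */
    return 0;
}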

---
Changes from v2:
  * in DOMCTL_[sg]etvcpuaffinity, flag is really a flag now,
    i.e., we accept requests for setting and getting: (1) only
    hard affinity; (2) only soft affinity; (3) both; as
    suggested during review.
---
  tools/libxc/xc_domain.c     |    4 ++-
  xen/arch/x86/traps.c        |    4 ++-
  xen/common/domctl.c         |   54 ++++++++++++++++++++++++++++++++++++++++---
  xen/common/schedule.c       |   35 +++++++++++++++++++---------
  xen/common/wait.c           |    6 ++---
  xen/include/public/domctl.h |   15 ++++++++++--
  xen/include/xen/sched.h     |    3 ++
  7 files changed, 97 insertions(+), 24 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 1ccafc5..f9ae4bf 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -215,7 +215,9 @@ int xc_vcpu_setaffinity(xc_interface *xch,

      domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
      domctl.domain = (domid_t)domid;
-    domctl.u.vcpuaffinity.vcpu    = vcpu;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    /* Soft affinity is there, but not used anywhere for now, so... */
+    domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;

      memcpy(local, cpumap, cpusize);

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4279cad..196ff68 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3093,7 +3093,7 @@ static void nmi_mce_softirq(void)
           * Make sure to wakeup the vcpu on the
           * specified processor.
           */
-        vcpu_set_affinity(st->vcpu, cpumask_of(st->processor));
+        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));

          /* Affinity is restored in the iret hypercall. */
      }
@@ -3122,7 +3122,7 @@ void async_exception_cleanup(struct vcpu *curr)
      if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
           !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
      {
-        vcpu_set_affinity(curr, curr->cpu_hard_affinity_tmp);
+        vcpu_set_hard_affinity(curr, curr->cpu_hard_affinity_tmp);
          cpumask_clear(curr->cpu_hard_affinity_tmp);
      }

diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 5e0ac5c..84be0d6 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -617,19 +617,65 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
          if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
          {
              cpumask_var_t new_affinity;
+            cpumask_t *online;

              ret = xenctl_bitmap_to_cpumask(
                  &new_affinity, &op->u.vcpuaffinity.cpumap);
-            if ( !ret )
+            if ( ret )
+                break;
+
+            ret = -EINVAL;
+            if ( op->u.vcpuaffinity.flags & XEN_VCPUAFFINITY_HARD )
+                ret = vcpu_set_hard_affinity(v, new_affinity);
+            if ( op->u.vcpuaffinity.flags & XEN_VCPUAFFINITY_SOFT )
+                ret = vcpu_set_soft_affinity(v, new_affinity);
+
+            if ( ret )
+                goto setvcpuaffinity_out;
+
+            /*
+             * Report back to the caller what the "effective affinity", that
+             * is the intersection of cpupool's pcpus, the (new?) hard
+             * affinity and the (new?) soft-affinity.
+             */
+            if ( !guest_handle_is_null(op->u.vcpuaffinity.eff_cpumap.bitmap) )
              {
-                ret = vcpu_set_affinity(v, new_affinity);
-                free_cpumask_var(new_affinity);
+                online = cpupool_online_cpumask(v->domain->cpupool);
+                cpumask_and(new_affinity, online, v->cpu_hard_affinity);
+                if ( op->u.vcpuaffinity.flags & XEN_VCPUAFFINITY_SOFT)
+                    cpumask_and(new_affinity, new_affinity,
+                                v->cpu_soft_affinity);
+
+                ret = cpumask_to_xenctl_bitmap(
+                    &op->u.vcpuaffinity.eff_cpumap, new_affinity);
              }
+
+ setvcpuaffinity_out:
+            free_cpumask_var(new_affinity);
          }
          else
          {
+            cpumask_var_t affinity;
+
+            /*
+             * If the caller asks for both _HARD and _SOFT, what we return
+             * is the intersection of hard and soft affinity for the vcpu.
+             */
+            if ( !alloc_cpumask_var(&affinity) ) {
+                ret = -EFAULT;
+                break;
+            }
+            cpumask_setall(affinity);
+
+            if ( op->u.vcpuaffinity.flags & XEN_VCPUAFFINITY_HARD )
+                cpumask_copy(affinity, v->cpu_hard_affinity);
+            if ( op->u.vcpuaffinity.flags & XEN_VCPUAFFINITY_SOFT )
+                cpumask_and(affinity, affinity, v->cpu_soft_affinity);
+
              ret = cpumask_to_xenctl_bitmap(
-                &op->u.vcpuaffinity.cpumap, v->cpu_hard_affinity);
+                &op->u.vcpuaffinity.cpumap, affinity);
+
+            free_cpumask_var(affinity);
          }
      }
      break;
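
(Again not part of the patch: a toy model of the XEN_DOMCTL_getvcpuaffinity
path in the hunk above, using plain unsigned long bitmasks in place of
cpumask_t; all names here are made up. With _HARD only it returns the hard
affinity, with _SOFT only the soft affinity, and with both flags the
intersection of the two.)

#include <stdio.h>

#define VCPUAFFINITY_HARD  (1U << 0)
#define VCPUAFFINITY_SOFT  (1U << 1)

static unsigned long get_affinity(unsigned long hard, unsigned long soft,
                                  unsigned int flags)
{
    unsigned long ret = ~0UL;   /* stands in for cpumask_setall() */

    if ( flags & VCPUAFFINITY_HARD )
        ret = hard;             /* stands in for cpumask_copy() */
    if ( flags & VCPUAFFINITY_SOFT )
        ret &= soft;            /* stands in for cpumask_and() */

    return ret;
}

int main(void)
{
    unsigned long hard = 0x6, soft = 0xc;   /* cpus {1,2} and {2,3} */

    printf("hard: 0x%lx\n", get_affinity(hard, soft, VCPUAFFINITY_HARD));
    printf("soft: 0x%lx\n", get_affinity(hard, soft, VCPUAFFINITY_SOFT));
    printf("both: 0x%lx\n", get_affinity(hard, soft,
                                         VCPUAFFINITY_HARD | VCPUAFFINITY_SOFT));
    return 0;   /* prints 0x6, 0xc and 0x4 respectively */
}
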
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c9ae521..6c53287 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -654,22 +654,14 @@ void sched_set_node_affinity(struct domain *d, nodemask_t *mask)
      SCHED_OP(DOM2OP(d), set_node_affinity, d, mask);
  }

-int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
+static int vcpu_set_affinity(
+    struct vcpu *v, const cpumask_t *affinity, cpumask_t **which)
  {
-    cpumask_t online_affinity;
-    cpumask_t *online;
      spinlock_t *lock;

-    if ( v->domain->is_pinned )
-        return -EINVAL;
-    online = VCPU2ONLINE(v);
-    cpumask_and(&online_affinity, affinity, online);
-    if ( cpumask_empty(&online_affinity) )
-        return -EINVAL;
-
      lock = vcpu_schedule_lock_irq(v);

-    cpumask_copy(v->cpu_hard_affinity, affinity);
+    cpumask_copy(*which, affinity);

      /* Always ask the scheduler to re-evaluate placement
       * when changing the affinity */
@@ -688,6 +680,27 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
      return 0;
  }

+int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
+{
+    cpumask_t online_affinity;
+    cpumask_t *online;
+
+    if ( v->domain->is_pinned )
+        return -EINVAL;
+
+    online = VCPU2ONLINE(v);
+    cpumask_and(&online_affinity, affinity, online);
+    if ( cpumask_empty(&online_affinity) )
+        return -EINVAL;
+
+    return vcpu_set_affinity(v, affinity, &v->cpu_hard_affinity);
+}
+
+int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity)
+{
+    return vcpu_set_affinity(v, affinity, &v->cpu_soft_affinity);
+}
+
  /* Block the currently-executing domain until a pertinent event occurs. */
  void vcpu_block(void)
  {
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 3f6ff41..1f6b597 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -135,7 +135,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
      /* Save current VCPU affinity; force wakeup on *this* CPU only. */
      wqv->wakeup_cpu = smp_processor_id();
      cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
-    if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
+    if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
      {
          gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
          domain_crash_synchronous();
@@ -166,7 +166,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
  static void __finish_wait(struct waitqueue_vcpu *wqv)
  {
      wqv->esp = NULL;
-    (void)vcpu_set_affinity(current, &wqv->saved_affinity);
+    (void)vcpu_set_hard_affinity(current, &wqv->saved_affinity);
  }

  void check_wakeup_from_wait(void)
@@ -184,7 +184,7 @@ void check_wakeup_from_wait(void)
          /* Re-set VCPU affinity and re-enter the scheduler. */
          struct vcpu *curr = current;
          cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
-        if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
+        if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
          {
              gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
              domain_crash_synchronous();
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 01a3652..4f71450 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -300,8 +300,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
  /* XEN_DOMCTL_setvcpuaffinity */
  /* XEN_DOMCTL_getvcpuaffinity */
  struct xen_domctl_vcpuaffinity {
-    uint32_t  vcpu;              /* IN */
-    struct xenctl_bitmap cpumap; /* IN/OUT */
+    /* IN variables. */
+    uint32_t  vcpu;
+ /* Set/get the hard affinity for vcpu */
+#define _XEN_VCPUAFFINITY_HARD  0
+#define XEN_VCPUAFFINITY_HARD   (1U<<_XEN_VCPUAFFINITY_HARD)
+ /* Set/get the soft affinity for vcpu */
+#define _XEN_VCPUAFFINITY_SOFT  1
+#define XEN_VCPUAFFINITY_SOFT   (1U<<_XEN_VCPUAFFINITY_SOFT)
+    uint32_t flags;
+    /* IN/OUT variables. */
+    struct xenctl_bitmap cpumap;
+    /* OUT variables. */
+    struct xenctl_bitmap eff_cpumap;
  };
  typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
  DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 3575312..0f728b3 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -755,7 +755,8 @@ void scheduler_free(struct scheduler *sched);
  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
  void vcpu_force_reschedule(struct vcpu *v);
  int cpu_disable_scheduler(unsigned int cpu);
-int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity);
+int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity);
  void restore_vcpu_affinity(struct domain *d);

  void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);


