
[Xen-devel] [PATCH v6 for Xen 4.7 1/4] xen: enable per-VCPU parameter settings for RTDS scheduler



Add XEN_DOMCTL_SCHEDOP_getvcpuinfo and XEN_DOMCTL_SCHEDOP_putvcpuinfo
hypercall sub-operations to independently get and set the scheduling
parameters of each vCPU of a domain.
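
An illustrative sketch of the intended usage (not part of this patch:
issue_domctl() stands in for whatever domctl wrapper the caller uses,
domid is the target domain, and a real toolstack caller must pass the
parameter array through hypercall-safe memory):

    /* Give vCPU 1 a 10 ms period and a 4 ms budget under RTDS. */
    xen_domctl_schedparam_vcpu_t param = {
        .vcpuid = 1,
        .s.rtds = { .period = 10000, .budget = 4000 },  /* microseconds */
    };
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_scheduler_op,
        .domain = domid,
    };

    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
    domctl.u.scheduler_op.u.v.nr_vcpus = 1;
    set_xen_guest_handle(domctl.u.scheduler_op.u.v.vcpus, &param);

    int rc = issue_domctl(&domctl);  /* placeholder, not a real API */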

Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>

---
Changes on PATCH v5:
1) When processing XEN_DOMCTL_SCHEDOP_get/putvcpuinfo, we do the
preemption check in a way similar to XEN_SYSCTL_pcitopoinfo.

Changes on PATCH v4:
1) Add uint32_t vcpu_index to struct xen_domctl_scheduler_op.
When processing XEN_DOMCTL_SCHEDOP_get/putvcpuinfo, we call
hypercall_preempt_check() in case the current hypercall lasts
too long. If we decide to preempt the current hypercall, we record
the index of the most recently finished vcpu in the vcpu_index field
of struct xen_domctl_scheduler_op, so that when we resume the
hypercall after preemption, we start processing from the position
specified by vcpu_index and do not repeat the work that was already
done before the preemption.
(This design is based on do_grant_table_op() in grant_table.c.)
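
A minimal sketch of that check-and-resume idiom (simplified from the
sched_rt.c hunks below; process_one_vcpu() is a stand-in for the real
work, and note that in the current version progress is reported back
through op->u.v.nr_vcpus rather than a separate vcpu_index field):

    uint32_t index = 0;
    int rc = 0;

    while ( index < op->u.v.nr_vcpus )
    {
        process_one_vcpu(index);    /* stand-in for the real work */
        /* After 64 entries, poll for preemption on every iteration. */
        if ( (++index > 0x3f) && hypercall_preempt_check() )
            break;
    }
    /* Record how far we got, so a resumed call skips finished vcpus. */
    if ( !rc && (op->u.v.nr_vcpus != index) )
        op->u.v.nr_vcpus = index;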

2) Coding style changes

Changes on PATCH v3:
1) Remove struct xen_domctl_schedparam_t.

2) Change struct xen_domctl_scheduler_op.

3) Check that period/budget is within a valid range

Changes on PATCH v2:
1) Change struct xen_domctl_scheduler_op, for transferring per-vcpu
parameters between libxc and the hypervisor.

2) The handler of XEN_DOMCTL_SCHEDOP_getinfo now just returns the default
budget and period values of the RTDS scheduler.

3) The handler of XEN_DOMCTL_SCHEDOP_getvcpuinfo can now return the
parameters of an arbitrary subset of the vCPUs of a specific domain,
as sketched below.
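
For instance (an illustrative sketch, with the same caveats as the
example in the commit message above), fetching the current RTDS
parameters of just vCPUs 0 and 2:

    xen_domctl_schedparam_vcpu_t params[2] = {
        { .vcpuid = 0 },
        { .vcpuid = 2 },
    };
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_scheduler_op,
        .domain = domid,
    };

    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
    domctl.u.scheduler_op.u.v.nr_vcpus = 2;
    set_xen_guest_handle(domctl.u.scheduler_op.u.v.vcpus, params);

    int rc = issue_domctl(&domctl);  /* placeholder wrapper */
    /* On success, params[i].s.rtds.{period,budget} are in microseconds. */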

CC: <dario.faggioli@xxxxxxxxxx>
CC: <george.dunlap@xxxxxxxxxxxxx>
CC: <dgolomb@xxxxxxxxxxxxxx>
CC: <mengxu@xxxxxxxxxxxxx>
CC: <jbeulich@xxxxxxxx>
CC: <lichong659@xxxxxxxxx>
---
 xen/common/sched_credit.c   |   7 +++++++
 xen/common/sched_credit2.c  |   7 +++++++
 xen/common/sched_rt.c       | 130 +++++++++++++++++++++++++++++++++++++++-----
 xen/common/schedule.c       |  15 ++++-
 xen/include/public/domctl.h |  59 ++++++++++++++++----
 5 files changed, 188 insertions(+), 30 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 0dce790..455c684 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1054,6 +1054,13 @@ csched_dom_cntl(
      * lock. Runq lock not needed anywhere in here. */
     spin_lock_irqsave(&prv->lock, flags);
 
+    if ( op->cmd == XEN_DOMCTL_SCHEDOP_putvcpuinfo ||
+         op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
+    {
+        spin_unlock_irqrestore(&prv->lock, flags);
+        return -EINVAL;
+    }
+
     if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
     {
         op->u.credit.weight = sdom->weight;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 3c49ffa..c3049a0 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1421,6 +1421,13 @@ csched2_dom_cntl(
      * runq lock to update csvcs. */
     spin_lock_irqsave(&prv->lock, flags);
 
+    if ( op->cmd == XEN_DOMCTL_SCHEDOP_putvcpuinfo ||
+         op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
+    {
+        spin_unlock_irqrestore(&prv->lock, flags);
+        return -EINVAL;
+    }
+
     if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
     {
         op->u.credit2.weight = sdom->weight;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 3f1d047..4fcbf40 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -86,6 +86,22 @@
 #define RTDS_DEFAULT_PERIOD     (MICROSECS(10000))
 #define RTDS_DEFAULT_BUDGET     (MICROSECS(4000))
 
+/*
+ * Max period: max delta of the time type; the period is added to the time
+ * a vcpu activates, so it must not overflow.
+ * Min period: 10 us, considering the scheduling overhead (when the period
+ * is too low, scheduling is invoked too frequently, causing high overhead).
+ */
+#define RTDS_MAX_PERIOD     (STIME_DELTA_MAX)
+#define RTDS_MIN_PERIOD     (MICROSECS(10))
+
+/*
+ * Min budget: 10 us, considering the scheduling overhead (when budget is
+ * consumed too fast, scheduling is invoked too frequently, causing
+ * high overhead).
+ */
+#define RTDS_MIN_BUDGET     (MICROSECS(10))
+
 #define UPDATE_LIMIT_SHIFT      10
 #define MAX_SCHEDULE            (MILLISECS(1))
 /*
@@ -1130,23 +1146,17 @@ rt_dom_cntl(
     unsigned long flags;
     int rc = 0;
 
+    xen_domctl_schedparam_vcpu_t local_sched;
+    s_time_t period, budget;
+    uint32_t index = 0;
+
     switch ( op->cmd )
     {
-    case XEN_DOMCTL_SCHEDOP_getinfo:
-        if ( d->max_vcpus > 0 )
-        {
-            spin_lock_irqsave(&prv->lock, flags);
-            svc = rt_vcpu(d->vcpu[0]);
-            op->u.rtds.period = svc->period / MICROSECS(1);
-            op->u.rtds.budget = svc->budget / MICROSECS(1);
-            spin_unlock_irqrestore(&prv->lock, flags);
-        }
-        else
-        {
-            /* If we don't have vcpus yet, let's just return the defaults. */
-            op->u.rtds.period = RTDS_DEFAULT_PERIOD;
-            op->u.rtds.budget = RTDS_DEFAULT_BUDGET;
-        }
+    case XEN_DOMCTL_SCHEDOP_getinfo: /* return the default parameters */
+        spin_lock_irqsave(&prv->lock, flags);
+        op->u.rtds.period = RTDS_DEFAULT_PERIOD / MICROSECS(1);
+        op->u.rtds.budget = RTDS_DEFAULT_BUDGET / MICROSECS(1);
+        spin_unlock_irqrestore(&prv->lock, flags);
         break;
     case XEN_DOMCTL_SCHEDOP_putinfo:
         if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
@@ -1163,6 +1173,96 @@ rt_dom_cntl(
         }
         spin_unlock_irqrestore(&prv->lock, flags);
         break;
+    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
+        if ( guest_handle_is_null(op->u.v.vcpus) )
+        {
+            rc = -EINVAL;
+            break;
+        }
+        while ( index < op->u.v.nr_vcpus )
+        {
+            if ( copy_from_guest_offset(&local_sched,
+                          op->u.v.vcpus, index, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if ( local_sched.vcpuid >= d->max_vcpus ||
+                          d->vcpu[local_sched.vcpuid] == NULL )
+            {
+                rc = -EINVAL;
+                break;
+            }
+
+            spin_lock_irqsave(&prv->lock, flags);
+            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+            local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
+            local_sched.s.rtds.period = svc->period / MICROSECS(1);
+            spin_unlock_irqrestore(&prv->lock, flags);
+
+            if ( __copy_to_guest_offset(op->u.v.vcpus, index,
+                    &local_sched, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if ( (++index > 0x3f) && hypercall_preempt_check() )
+                break;
+        }
+
+        if ( !rc && (op->u.v.nr_vcpus != index) )
+            op->u.v.nr_vcpus = index;
+        break;
+    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
+        if ( guest_handle_is_null(op->u.v.vcpus) )
+        {
+            rc = -EINVAL;
+            break;
+        }
+        while ( index < op->u.v.nr_vcpus )
+        {
+            if ( copy_from_guest_offset(&local_sched,
+                          op->u.v.vcpus, index, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if ( local_sched.vcpuid >= d->max_vcpus ||
+                          d->vcpu[local_sched.vcpuid] == NULL )
+            {
+                rc = -EINVAL;
+                break;
+            }
+
+            period = MICROSECS(local_sched.s.rtds.period);
+            budget = MICROSECS(local_sched.s.rtds.budget);
+            if ( period > RTDS_MAX_PERIOD || budget < RTDS_MIN_BUDGET ||
+                          budget > period || period < RTDS_MIN_PERIOD )
+            {
+                rc = -EINVAL;
+                break;
+            }
+
+            /*
+             * We accept period/budget less than 100 us, but will warn
+             * users about the large scheduling overhead due to it.
+             */
+            if ( period < MICROSECS(100) || budget < MICROSECS(100) )
+                printk("Warning: period or budget set to less than 100us.\n"
+                       "This may result in high scheduling overhead.\n");
+
+            spin_lock_irqsave(&prv->lock, flags);
+            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+            svc->period = period;
+            svc->budget = budget;
+            spin_unlock_irqrestore(&prv->lock, flags);
+
+            if ( (++index > 0x3f) && hypercall_preempt_check() )
+                break;
+        }
+        if ( !rc && (op->u.v.nr_vcpus != index) )
+            op->u.v.nr_vcpus = index;
+        break;
     }
 
     return rc;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c195129..f4a4032 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1148,10 +1148,19 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
     if ( ret )
         return ret;
 
-    if ( (op->sched_id != DOM2OP(d)->sched_id) ||
-         ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
-          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
+    if ( op->sched_id != DOM2OP(d)->sched_id )
         return -EINVAL;
+    else
+        switch ( op->cmd )
+        {
+        case XEN_DOMCTL_SCHEDOP_putinfo:
+        case XEN_DOMCTL_SCHEDOP_getinfo:
+        case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
+        case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
+            break;
+        default:
+            return -EINVAL;
+        }
 
     /* NB: the pluggable scheduler code needs to take care
      * of locking by itself. */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 7a56b3f..b9975f6 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -338,24 +338,59 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 #define XEN_SCHEDULER_ARINC653 7
 #define XEN_SCHEDULER_RTDS     8
 
-/* Set or get info? */
+typedef struct xen_domctl_sched_credit {
+    uint16_t weight;
+    uint16_t cap;
+} xen_domctl_sched_credit_t;
+
+typedef struct xen_domctl_sched_credit2 {
+    uint16_t weight;
+} xen_domctl_sched_credit2_t;
+
+typedef struct xen_domctl_sched_rtds {
+    uint32_t period;
+    uint32_t budget;
+} xen_domctl_sched_rtds_t;
+
+typedef struct xen_domctl_schedparam_vcpu {
+    union {
+        xen_domctl_sched_credit_t credit;
+        xen_domctl_sched_credit2_t credit2;
+        xen_domctl_sched_rtds_t rtds;
+    } s;
+    uint16_t vcpuid;
+    uint16_t padding[3];
+} xen_domctl_schedparam_vcpu_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
+
+/*
+ * Set or get info?
+ * For schedulers supporting per-vcpu settings (e.g., RTDS):
+ *  XEN_DOMCTL_SCHEDOP_putinfo sets params for all vcpus;
+ *  XEN_DOMCTL_SCHEDOP_getinfo gets the default params;
+ *  XEN_DOMCTL_SCHEDOP_put(get)vcpuinfo sets (gets) params of individual vcpus;
+ *
+ * For schedulers not supporting per-vcpu settings:
+ *  XEN_DOMCTL_SCHEDOP_putinfo sets params for all vcpus;
+ *  XEN_DOMCTL_SCHEDOP_getinfo gets domain-wide params;
+ *  XEN_DOMCTL_SCHEDOP_put(get)vcpuinfo returns an error.
+ */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
+#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
 struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
     union {
-        struct xen_domctl_sched_credit {
-            uint16_t weight;
-            uint16_t cap;
-        } credit;
-        struct xen_domctl_sched_credit2 {
-            uint16_t weight;
-        } credit2;
-        struct xen_domctl_sched_rtds {
-            uint32_t period;
-            uint32_t budget;
-        } rtds;
+        xen_domctl_sched_credit_t credit;
+        xen_domctl_sched_credit2_t credit2;
+        xen_domctl_sched_rtds_t rtds;
+        struct {
+            XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
+            uint32_t nr_vcpus;
+            uint32_t padding;
+        } v;
     } u;
 };
 typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
-- 
1.9.1

