
[Xen-devel] [PATCH v1 1/4] xen: enable XL to set per-VCPU parameters of a domain for the RTDS scheduler

Add two hypercalls (XEN_DOMCTL_SCHEDOP_getvcpuinfo/putvcpuinfo) to get/set a
domain's per-VCPU parameters. The hypercalls are handled in rt_dom_cntl().

Add an array pointer to struct xen_domctl_sched_rtds (a member of the union in
struct xen_domctl_scheduler_op), which is used to transfer the per-VCPU data
between the toolstack and the hypervisor.
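
For illustration, here is a minimal toolstack-side sketch of how the new get
path could be driven. This is only a sketch: xc_rtds_vcpu_get() is a
hypothetical wrapper (not part of this series), it assumes libxc's
xc_domctl() helper, and it omits the hypercall-buffer bouncing and error
handling a real caller would need:

    /* Hypothetical libxc-style wrapper around the new
     * XEN_DOMCTL_SCHEDOP_getvcpuinfo sub-command (sketch only). */
    #include <xenctrl.h>

    static int xc_rtds_vcpu_get(xc_interface *xch, uint32_t domid,
                                xen_domctl_sched_rtds_params_t *vcpus,
                                uint16_t nr_vcpus)
    {
        struct xen_domctl domctl = { 0 };

        domctl.cmd = XEN_DOMCTL_scheduler_op;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = (domid_t)domid;
        domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
        domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
        domctl.u.scheduler_op.u.rtds.nr_vcpus = nr_vcpus;

        /* In real code 'vcpus' must first be made hypercall-safe
         * (bounced/locked) before its address is handed to Xen. */
        set_xen_guest_handle(domctl.u.scheduler_op.u.rtds.vcpus, vcpus);

        return xc_domctl(xch, &domctl);
    }

The put path would be identical except that cmd is set to
XEN_DOMCTL_SCHEDOP_putvcpuinfo and the caller fills in the period, budget
and index fields of each array element (periods and budgets in
microseconds) before issuing the call.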

Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>
---
 xen/common/sched_rt.c       | 64 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/schedule.c       |  4 ++-
 xen/include/public/domctl.h | 22 +++++++++++++---
 3 files changed, 86 insertions(+), 4 deletions(-)

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 7c39a9e..9add5a4 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -1085,6 +1085,9 @@ rt_dom_cntl(
     struct list_head *iter;
     unsigned long flags;
     int rc = 0;
+    xen_domctl_sched_rtds_params_t *local_sched;
+    int vcpu_index=0;
+    int i;
 
     switch ( op->cmd )
     {
@@ -1110,6 +1113,67 @@ rt_dom_cntl(
         }
         spin_unlock_irqrestore(&prv->lock, flags);
         break;
+    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
+        op->u.rtds.nr_vcpus = 0;
+        spin_lock_irqsave(&prv->lock, flags);
+        list_for_each( iter, &sdom->vcpu )
+            vcpu_index++;
+        spin_unlock_irqrestore(&prv->lock, flags);
+        op->u.rtds.nr_vcpus = vcpu_index;
+        local_sched = xzalloc_array(xen_domctl_sched_rtds_params_t,
+                vcpu_index);
+        if( local_sched == NULL )
+        {
+            return -ENOMEM;
+        }
+        vcpu_index = 0;
+        spin_lock_irqsave(&prv->lock, flags);
+        list_for_each( iter, &sdom->vcpu )
+        {
+            struct rt_vcpu *svc = list_entry(iter, struct rt_vcpu, sdom_elem);
+
+            local_sched[vcpu_index].budget = svc->budget / MICROSECS(1);
+            local_sched[vcpu_index].period = svc->period / MICROSECS(1);
+            local_sched[vcpu_index].index = vcpu_index;
+            vcpu_index++;
+        }
+        spin_unlock_irqrestore(&prv->lock, flags);
+        copy_to_guest(op->u.rtds.vcpus, local_sched, vcpu_index);
+        xfree(local_sched);
+        rc = 0;
+        break;
+    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
+        local_sched = xzalloc_array(xen_domctl_sched_rtds_params_t,
+                op->u.rtds.nr_vcpus);
+        if( local_sched == NULL )
+        {
+            return -ENOMEM;
+        }
+        copy_from_guest(local_sched, op->u.rtds.vcpus, op->u.rtds.nr_vcpus);
+
+        for( i = 0; i < op->u.rtds.nr_vcpus; i++ )
+        {
+            vcpu_index = 0;
+            spin_lock_irqsave(&prv->lock, flags);
+            list_for_each( iter, &sdom->vcpu )
+            {
+                struct rt_vcpu *svc = list_entry(iter, struct rt_vcpu, sdom_elem);
+                if ( local_sched[i].index == vcpu_index )
+                {
+                    if ( local_sched[i].period <= 0 || local_sched[i].budget <= 0 )
+                         return -EINVAL;
+
+                    svc->period = MICROSECS(local_sched[i].period);
+                    svc->budget = MICROSECS(local_sched[i].budget);
+                    break;
+                }
+                vcpu_index++;
+            }
+            spin_unlock_irqrestore(&prv->lock, flags);
+        }
+        xfree(local_sched);
+        rc = 0;
+        break;
     }
 
     return rc;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index f5a2e55..f820946 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1093,7 +1093,9 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
 
     if ( (op->sched_id != DOM2OP(d)->sched_id) ||
          ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
-          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
+          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) &&
+          (op->cmd != XEN_DOMCTL_SCHEDOP_putvcpuinfo) &&
+          (op->cmd != XEN_DOMCTL_SCHEDOP_getvcpuinfo)) )
         return -EINVAL;
 
     /* NB: the pluggable scheduler code needs to take care
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 10b51ef..490a6b6 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -342,6 +342,16 @@ struct xen_domctl_max_vcpus {
 typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 
+struct xen_domctl_sched_rtds_params {
+    /* vcpus' info */
+    uint64_t period;
+    uint64_t budget;
+    uint16_t index;
+    uint16_t padding[3];
+};
+typedef struct xen_domctl_sched_rtds_params xen_domctl_sched_rtds_params_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sched_rtds_params_t);
+
 
 /* XEN_DOMCTL_scheduler_op */
 /* Scheduler types. */
@@ -351,9 +361,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 #define XEN_SCHEDULER_ARINC653 7
 #define XEN_SCHEDULER_RTDS     8
 
-/* Set or get info? */
+/* Set or get info */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 2
+#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 3
+
 struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
@@ -373,8 +386,11 @@ struct xen_domctl_scheduler_op {
             uint16_t weight;
         } credit2;
         struct xen_domctl_sched_rtds {
-            uint32_t period;
-            uint32_t budget;
+            uint64_t period;
+            uint64_t budget;
+            XEN_GUEST_HANDLE_64(xen_domctl_sched_rtds_params_t) vcpus;
+            uint16_t nr_vcpus;
+            uint16_t padding[3];
         } rtds;
     } u;
 };
-- 
1.9.1

