
Re: [Xen-devel] [PATCH v3 for Xen 4.6 1/4] xen: enable per-VCPU parameter settings for RTDS scheduler



On Tue, 2015-07-07 at 09:59 +0100, Jan Beulich wrote:
> >>> On 29.06.15 at 04:44, <lichong659@xxxxxxxxx> wrote:
> > --- a/xen/common/Makefile
> > +++ b/xen/common/Makefile
> > @@ -31,7 +31,6 @@ obj-y += rbtree.o
> >  obj-y += rcupdate.o
> >  obj-y += sched_credit.o
> >  obj-y += sched_credit2.o
> > -obj-y += sched_sedf.o
> >  obj-y += sched_arinc653.o
> >  obj-y += sched_rt.o
> >  obj-y += schedule.o
> 
> Stray change. Or perhaps the file doesn't build anymore, in which case
> you should instead have stated that the patch is dependent upon the
> series removing SEDF.
> 
> > @@ -1157,8 +1158,75 @@ rt_dom_cntl(
> >          list_for_each( iter, &sdom->vcpu )
> >          {
> >              struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, 
> > sdom_elem);
> > -            svc->period = MICROSECS(op->u.rtds.period); /* transfer to 
> > nanosec */
> > -            svc->budget = MICROSECS(op->u.rtds.budget);
> > +            svc->period = MICROSECS(op->u.d.rtds.period); /* transfer to 
> > nanosec */
> > +            svc->budget = MICROSECS(op->u.d.rtds.budget);
> > +        }
> > +        spin_unlock_irqrestore(&prv->lock, flags);
> > +        break;
> > +    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
> > +        spin_lock_irqsave(&prv->lock, flags);
> > +        for( index = 0; index < op->u.v.nr_vcpus; index++ )
> 
> Coding style (more of the same further down).
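
FWIW, Chong, what's meant here is Xen's style for keywords, i.e. a
blank after the keyword and inside the parentheses:

    for ( index = 0; index < op->u.v.nr_vcpus; index++ )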
> 
> > +        {
> > +            if ( copy_from_guest_offset(&local_sched,
> > +                    op->u.v.vcpus, index, 1) )
> 
> Indentation.
> 
> > +            {
> > +                rc = -EFAULT;
> > +                break;
> > +            }
> > +            if ( local_sched.vcpuid >= d->max_vcpus
> > +                    || d->vcpu[local_sched.vcpuid] == NULL )
> 
> || belongs at the end of the first line. Indentation.
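I.e., it should look like this:

    if ( local_sched.vcpuid >= d->max_vcpus ||
         d->vcpu[local_sched.vcpuid] == NULL )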
> 
> > +            {
> > +                rc = -EINVAL;
> > +                break;
> > +            }
> > +            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
> > +
> > +            local_sched.vcpuid = svc->vcpu->vcpu_id;
> 
> Why? If at all, this should be an ASSERT().
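Indeed: since svc comes from d->vcpu[local_sched.vcpuid], that
assignment is a no-op. If you want to document the invariant, do it
like this (sketch):

    ASSERT(svc->vcpu->vcpu_id == local_sched.vcpuid);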
> 
> > +            local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
> > +            local_sched.s.rtds.period = svc->period / MICROSECS(1);
> > +            if( index >= op->u.v.nr_vcpus ) /* not enough guest buffer*/
> 
> Impossible due to the containing loop's condition.
> 
> > +            {
> > +                rc = -ENOBUFS;
> > +                break;
> > +            }
> > +            if ( copy_to_guest_offset(op->u.v.vcpus, index,
> 
> __copy_to_guest_offset()
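(That is, if I'm reading the convention right: since
copy_from_guest_offset() above has already validated the handle for
this very element, the unchecked variant is enough here, i.e.:

    if ( __copy_to_guest_offset(op->u.v.vcpus, index, &local_sched, 1) )
    {
        rc = -EFAULT;
        break;
    }
)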
> 
> > +    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
> > +        spin_lock_irqsave(&prv->lock, flags);
> > +        for( index = 0; index < op->u.v.nr_vcpus; index++ )
> > +        {
> > +            if ( copy_from_guest_offset(&local_sched,
> > +                    op->u.v.vcpus, index, 1) )
> > +            {
> > +                rc = -EFAULT;
> > +                break;
> > +            }
> > +            if ( local_sched.vcpuid >= d->max_vcpus
> > +                    || d->vcpu[local_sched.vcpuid] == NULL )
> > +            {
> > +                rc = -EINVAL;
> > +                break;
> > +            }
> > +            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
> > +            svc->period = MICROSECS(local_sched.s.rtds.period);
> > +            svc->budget = MICROSECS(local_sched.s.rtds.budget);
> 
> Are all input values valid here?
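While there: IIRC, RTDS wants both values nonzero and budget <= period,
so something like this is probably needed (just a sketch, the exact
limits are up to the RTDS folks):

    if ( local_sched.s.rtds.period == 0 ||
         local_sched.s.rtds.budget == 0 ||
         local_sched.s.rtds.budget > local_sched.s.rtds.period )
    {
        rc = -EINVAL;
        break;
    }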
> 
> > --- a/xen/common/schedule.c
> > +++ b/xen/common/schedule.c
> > @@ -65,7 +65,6 @@ DEFINE_PER_CPU(struct schedule_data, schedule_data);
> >  DEFINE_PER_CPU(struct scheduler *, scheduler);
> >  
> >  static const struct scheduler *schedulers[] = {
> > -    &sched_sedf_def,
> >      &sched_credit_def,
> >      &sched_credit2_def,
> >      &sched_arinc653_def,
> 
> See above.
> 
> > @@ -1054,7 +1053,9 @@ long sched_adjust(struct domain *d, struct 
> > xen_domctl_scheduler_op *op)
> >  
> >      if ( (op->sched_id != DOM2OP(d)->sched_id) ||
> >           ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
> > -          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
> > +          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) &&
> > +          (op->cmd != XEN_DOMCTL_SCHEDOP_putvcpuinfo) &&
> > +          (op->cmd != XEN_DOMCTL_SCHEDOP_getvcpuinfo)) )
> >          return -EINVAL;
> 
> Convert to switch() please.
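Something like this, I guess (untested):

    switch ( op->cmd )
    {
    case XEN_DOMCTL_SCHEDOP_putinfo:
    case XEN_DOMCTL_SCHEDOP_getinfo:
    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
        break;
    default:
        return -EINVAL;
    }

    if ( op->sched_id != DOM2OP(d)->sched_id )
        return -EINVAL;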
> 
> > --- a/xen/include/public/domctl.h
> > +++ b/xen/include/public/domctl.h
> > @@ -330,31 +330,59 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
> >  #define XEN_SCHEDULER_ARINC653 7
> >  #define XEN_SCHEDULER_RTDS     8
> >  
> > +typedef struct xen_domctl_sched_sedf {
> > +            uint64_aligned_t period;
> > +            uint64_aligned_t slice;
> > +            uint64_aligned_t latency;
> > +            uint32_t extratime;
> > +            uint32_t weight;
> > +} xen_domctl_sched_sedf_t;
> 
> Indentation.
> 
> > +typedef union xen_domctl_schedparam {
> > +    xen_domctl_sched_sedf_t sedf;
> > +    xen_domctl_sched_credit_t credit;
> > +    xen_domctl_sched_credit2_t credit2;
> > +    xen_domctl_sched_rtds_t rtds;
> > +} xen_domctl_schedparam_t;
> 
> I don't see the need for this extra wrapper type. Nor do I see the
> need for the typedef here and above - they're generally only
> created if you want to also define a matching guest handle type.
> 
> > +typedef struct xen_domctl_schedparam_vcpu {
> > +    union {
> > +        xen_domctl_sched_credit_t credit;
> > +        xen_domctl_sched_credit2_t credit2;
> > +        xen_domctl_sched_rtds_t rtds;
> > +    } s;
> > +    uint16_t vcpuid;
> 
> Explicit padding please.
> 
> > +} xen_domctl_schedparam_vcpu_t;
> > +DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
> > +
> >  /* Set or get info? */
> >  #define XEN_DOMCTL_SCHEDOP_putinfo 0
> >  #define XEN_DOMCTL_SCHEDOP_getinfo 1
> > +#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
> > +#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
> >  struct xen_domctl_scheduler_op {
> >      uint32_t sched_id;  /* XEN_SCHEDULER_* */
> >      uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
> >      union {
> > -        struct xen_domctl_sched_sedf {
> > -            uint64_aligned_t period;
> > -            uint64_aligned_t slice;
> > -            uint64_aligned_t latency;
> > -            uint32_t extratime;
> > -            uint32_t weight;
> > -        } sedf;
> > -        struct xen_domctl_sched_credit {
> > -            uint16_t weight;
> > -            uint16_t cap;
> > -        } credit;
> > -        struct xen_domctl_sched_credit2 {
> > -            uint16_t weight;
> > -        } credit2;
> > -        struct xen_domctl_sched_rtds {
> > -            uint32_t period;
> > -            uint32_t budget;
> > -        } rtds;
> > +        xen_domctl_schedparam_t d;
> 
> With this type gone I'm not even sure we need to wrap this in
> another union; not doing so would eliminate some of the other
> changes in this patch.
> 
So, Jan, just to be sure: apart from the explicit padding, do you mean
something like this (also attached)?

diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index bc45ea5..8210ecb 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -330,31 +330,56 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 #define XEN_SCHEDULER_ARINC653 7
 #define XEN_SCHEDULER_RTDS     8
 
+struct xen_domctl_sched_sedf {
+    uint64_aligned_t period;
+    uint64_aligned_t slice;
+    uint64_aligned_t latency;
+    uint32_t extratime;
+    uint32_t weight;
+};
+
+struct xen_domctl_sched_credit {
+    uint16_t weight;
+    uint16_t cap;
+};
+
+struct xen_domctl_sched_credit2 {
+    uint16_t weight;
+};
+
+struct xen_domctl_sched_rtds {
+    uint32_t period;
+    uint32_t budget;
+};
+
+typedef struct xen_domctl_schedparam_vcpu {
+    union {
+        struct xen_domctl_sched_sedf sedf;
+        struct xen_domctl_sched_credit credit;
+        struct xen_domctl_sched_credit2 credit2;
+        struct xen_domctl_sched_rtds rtds;
+    } s;
+    uint16_t vcpuid;
+} xen_domctl_schedparam_vcpu_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
+
 /* Set or get info? */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
+#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
 struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
     union {
-        struct xen_domctl_sched_sedf {
-            uint64_aligned_t period;
-            uint64_aligned_t slice;
-            uint64_aligned_t latency;
-            uint32_t extratime;
-            uint32_t weight;
-        } sedf;
-        struct xen_domctl_sched_credit {
-            uint16_t weight;
-            uint16_t cap;
-        } credit;
-        struct xen_domctl_sched_credit2 {
-            uint16_t weight;
-        } credit2;
-        struct xen_domctl_sched_rtds {
-            uint32_t period;
-            uint32_t budget;
-        } rtds;
+        struct xen_domctl_sched_sedf sedf;
+        struct xen_domctl_sched_credit credit;
+        struct xen_domctl_sched_credit2 credit2;
+        struct xen_domctl_sched_rtds rtds;
+        struct {
+            XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
+            uint16_t nr_vcpus;
+        } v;
     } u;
 };
 typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
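
(I've left out the explicit padding on purpose, as said above. With
sedf's uint64_aligned_t fields keeping the union 8-byte aligned, I
guess it would be:

        uint16_t vcpuid;
        uint16_t padding[3];

right after the union, in xen_domctl_schedparam_vcpu.)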


If yes, I'd be fine with it too (George?)
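
FWIW, on the tools side I'd expect this to be driven via the usual
bouncing, something like the following (hypothetical sketch, none of
this exists yet):

    static int xc_sched_rtds_vcpu_get(xc_interface *xch, uint32_t domid,
                                      xen_domctl_schedparam_vcpu_t *vcpus,
                                      uint16_t nr_vcpus)
    {
        int rc;
        DECLARE_DOMCTL;
        DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * nr_vcpus,
                                 XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

        if ( xc_hypercall_bounce_pre(xch, vcpus) )
            return -1;

        domctl.cmd = XEN_DOMCTL_scheduler_op;
        domctl.domain = (domid_t)domid;
        domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
        domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
        domctl.u.scheduler_op.u.v.nr_vcpus = nr_vcpus;
        set_xen_guest_handle(domctl.u.scheduler_op.u.v.vcpus, vcpus);

        rc = do_domctl(xch, &domctl);

        xc_hypercall_bounce_post(xch, vcpus);
        return rc;
    }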

Thanks and Regards,
Dario
-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)

Attachment: rtds-domctl.patch
Description: Text Data
