
Re: [Xen-devel] [PATCH 5/7] xen: sched: get rid of the per domain vCPU list in Credit2



On 08/10/15 13:53, Dario Faggioli wrote:
> As, currently, there is no reason to bother having
> it and keeping it updated.
> 
> In fact, it is only used for dumping and for changing
> vCPUs' parameters, but both can be achieved easily with
> for_each_vcpu.
> 
> While there, improve the alignment of comments, and
> add a const qualifier to a pointer, making things
> more consistent with what happens everywhere else
> in the source file.
> 
> This also allows us to kill one of the remaining
> FIXMEs in the code, which is always good.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
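
For anyone skimming the archive: the substance of the change is that the
weight-update and dump paths now walk the generic per-domain vCPU list
rather than one the scheduler kept in sync privately. A minimal sketch of
the before/after shape, in which update_weight() is a made-up stand-in
for the real per-vcpu work, not a function in the tree:

    /* Before: scheduler-private list, maintained by insert/remove. */
    struct list_head *iter;
    list_for_each ( iter, &sdom->vcpu )
    {
        struct csched2_vcpu *svc =
            list_entry(iter, struct csched2_vcpu, sdom_elem);
        update_weight(svc);
    }

    /* After: the list that struct domain already maintains for us. */
    struct vcpu *vc;
    for_each_vcpu ( sdom->dom, vc )
    {
        struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
        update_weight(svc);
    }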

> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> ---
>  xen/common/sched_credit2.c |   34 +++++++++++-----------------------
>  xen/common/sched_rt.c      |    2 +-
>  2 files changed, 12 insertions(+), 24 deletions(-)
> 
> diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
> index 234f798..178a665 100644
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -234,9 +234,8 @@ struct csched2_private {
>   * Virtual CPU
>   */
>  struct csched2_vcpu {
> -    struct list_head rqd_elem;  /* On the runqueue data list */
> -    struct list_head sdom_elem; /* On the domain vcpu list */
> -    struct list_head runq_elem; /* On the runqueue         */
> +    struct list_head rqd_elem;         /* On the runqueue data list  */
> +    struct list_head runq_elem;        /* On the runqueue            */
>      struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
>  
>      /* Up-pointers */
> @@ -261,7 +260,6 @@ struct csched2_vcpu {
>   * Domain
>   */
>  struct csched2_dom {
> -    struct list_head vcpu;
>      struct list_head sdom_elem;
>      struct domain *dom;
>      uint16_t weight;
> @@ -772,7 +770,6 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
>          return NULL;
>  
>      INIT_LIST_HEAD(&svc->rqd_elem);
> -    INIT_LIST_HEAD(&svc->sdom_elem);
>      INIT_LIST_HEAD(&svc->runq_elem);
>  
>      svc->sdom = dd;
> @@ -876,9 +873,6 @@ csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
>  
>      BUG_ON(is_idle_vcpu(vc));
>  
> -    /* FIXME: Do we need the private lock here? */
> -    list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
> -
>      /* Add vcpu to runqueue of initial processor */
>      lock = vcpu_schedule_lock_irq(vc);
>  
> @@ -923,10 +917,6 @@ csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
>  
>          vcpu_schedule_unlock_irq(lock, vc);
>  
> -        /* Remove from sdom list.  Don't need a lock for this, as it's called
> -         * syncronously when nothing else can happen. */
> -        list_del_init(&svc->sdom_elem);
> -
>          svc->sdom->nr_vcpus--;
>      }
>  }
> @@ -1443,7 +1433,7 @@ csched2_dom_cntl(
>  
>          if ( op->u.credit2.weight != 0 )
>          {
> -            struct list_head *iter;
> +            struct vcpu *vc;
>              int old_weight;
>  
>              old_weight = sdom->weight;
> @@ -1451,9 +1441,9 @@ csched2_dom_cntl(
>              sdom->weight = op->u.credit2.weight;
>  
>              /* Update weights for vcpus, and max_weight for runqueues on which they reside */
> -            list_for_each ( iter, &sdom->vcpu )
> +            for_each_vcpu ( d, vc )
>              {
> -                struct csched2_vcpu *svc = list_entry(iter, struct csched2_vcpu, sdom_elem);
> +                struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
>  
>                  /* NB: Locking order is important here.  Because we grab this lock here, we
>                   * must never lock csched2_priv.lock if we're holding a runqueue lock.
> @@ -1487,7 +1477,6 @@ csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
>          return NULL;
>  
>      /* Initialize credit and weight */
> -    INIT_LIST_HEAD(&sdom->vcpu);
>      INIT_LIST_HEAD(&sdom->sdom_elem);
>      sdom->dom = dom;
>      sdom->weight = CSCHED2_DEFAULT_WEIGHT;
> @@ -1539,9 +1528,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
>  static void
>  csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
>  {
> -    struct csched2_dom *sdom = CSCHED2_DOM(dom);
> -
> -    BUG_ON(!list_empty(&sdom->vcpu));
> +    BUG_ON(CSCHED2_DOM(dom)->nr_vcpus != 0);
>  
>      csched2_free_domdata(ops, CSCHED2_DOM(dom));
>  }
> @@ -1881,7 +1868,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
>  static void
>  csched2_dump(const struct scheduler *ops)
>  {
> -    struct list_head *iter_sdom, *iter_svc;
> +    struct list_head *iter_sdom;
>      struct csched2_private *prv = CSCHED2_PRIV(ops);
>      unsigned long flags;
>      int i, loop;
> @@ -1926,6 +1913,8 @@ csched2_dump(const struct scheduler *ops)
>      list_for_each( iter_sdom, &prv->sdom )
>      {
>          struct csched2_dom *sdom;
> +        struct vcpu *vc;
> +
>          sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
>  
>          printk("\tDomain: %d w %d v %d\n\t",
> @@ -1933,12 +1922,11 @@ csched2_dump(const struct scheduler *ops)
>                 sdom->weight,
>                 sdom->nr_vcpus);
>  
> -        list_for_each( iter_svc, &sdom->vcpu )
> +        for_each_vcpu( sdom->dom, vc )
>          {
> -            struct csched2_vcpu *svc;
> +            struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
>              spinlock_t *lock;
>  
> -            svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
>              lock = vcpu_schedule_lock(svc->vcpu);
>  
>              printk("\t%3d: ", ++loop);
> diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
> index 797adc1..0ca4902 100644
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -1147,7 +1147,7 @@ rt_dom_cntl(
>              break;
>          }
>          spin_lock_irqsave(&prv->lock, flags);
> -        for_each_vcpu( sdom->dom, vc )
> +        for_each_vcpu( d, vc )
>          {
>              svc = rt_vcpu(vc);
>              svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
> 
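
One point worth calling out explicitly: the locking-order comment in
csched2_dom_cntl() still applies after the conversion. The private lock
comes first, and the per-vcpu (runqueue) lock nests inside it, never the
other way around. Roughly, as a sketch of the discipline rather than the
exact code in the tree:

    spin_lock_irqsave(&prv->lock, flags);        /* csched2_priv.lock */
    for_each_vcpu ( d, vc )
    {
        /* Runqueue lock, taken while already holding prv->lock. */
        spinlock_t *lock = vcpu_schedule_lock(vc);

        /* ... update svc->weight and the runqueue's max_weight ... */

        vcpu_schedule_unlock(lock, vc);
    }
    spin_unlock_irqrestore(&prv->lock, flags);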


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel