[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 6/6] xen: sched: get rid of the per domain vCPU list in Credit2



As, currently, there is no reason to bother having
it and keeping it updated.

In fact, it is only used for dumping and changing
vCPUs parameters, but that can be achieved easily with
for_each_vcpu.

While there, improve alignment of comments, and
add a const qualifier to a pointer, making things
more consistent with what happens everywhere else
in the source file.

This also allows us to kill one of the remaining
FIXMEs in the code, which is always good.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes from v1:
 * removed spurious hunk about sched_rt.c, as noted
   during review;
 * fixed the BUG_ON inside csched2_dom_destroy(), as
   noted during review;
 * used 'v' instead of 'vc' for 'vcpu' (in new code),
   as suggested during review.
---
Not applying the review tags, as the patch changed a little
bit.
---
 xen/common/sched_credit2.c |   34 +++++++++++-----------------------
 1 file changed, 11 insertions(+), 23 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 59da919..df85ecd 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -234,9 +234,8 @@ struct csched2_private {
  * Virtual CPU
  */
 struct csched2_vcpu {
-    struct list_head rqd_elem;  /* On the runqueue data list */
-    struct list_head sdom_elem; /* On the domain vcpu list */
-    struct list_head runq_elem; /* On the runqueue         */
+    struct list_head rqd_elem;         /* On the runqueue data list  */
+    struct list_head runq_elem;        /* On the runqueue            */
     struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
 
     /* Up-pointers */
@@ -261,7 +260,6 @@ struct csched2_vcpu {
  * Domain
  */
 struct csched2_dom {
-    struct list_head vcpu;
     struct list_head sdom_elem;
     struct domain *dom;
     uint16_t weight;
@@ -772,7 +770,6 @@ csched2_alloc_vdata(const struct scheduler *ops, struct 
vcpu *vc, void *dd)
         return NULL;
 
     INIT_LIST_HEAD(&svc->rqd_elem);
-    INIT_LIST_HEAD(&svc->sdom_elem);
     INIT_LIST_HEAD(&svc->runq_elem);
 
     svc->sdom = dd;
@@ -876,9 +873,6 @@ csched2_vcpu_insert(const struct scheduler *ops, struct 
vcpu *vc)
 
     BUG_ON(is_idle_vcpu(vc));
 
-    /* FIXME: Do we need the private lock here? */
-    list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
-
     /* Add vcpu to runqueue of initial processor */
     lock = vcpu_schedule_lock_irq(vc);
 
@@ -923,10 +917,6 @@ csched2_vcpu_remove(const struct scheduler *ops, struct 
vcpu *vc)
 
         vcpu_schedule_unlock_irq(lock, vc);
 
-        /* Remove from sdom list.  Don't need a lock for this, as it's called
-         * syncronously when nothing else can happen. */
-        list_del_init(&svc->sdom_elem);
-
         svc->sdom->nr_vcpus--;
     }
 }
@@ -1443,7 +1433,7 @@ csched2_dom_cntl(
 
         if ( op->u.credit2.weight != 0 )
         {
-            struct list_head *iter;
+            struct vcpu *v;
             int old_weight;
 
             old_weight = sdom->weight;
@@ -1451,9 +1441,9 @@ csched2_dom_cntl(
             sdom->weight = op->u.credit2.weight;
 
             /* Update weights for vcpus, and max_weight for runqueues on which 
they reside */
-            list_for_each ( iter, &sdom->vcpu )
+            for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = list_entry(iter, struct 
csched2_vcpu, sdom_elem);
+                struct csched2_vcpu *svc = CSCHED2_VCPU(v);
 
                 /* NB: Locking order is important here.  Because we grab this 
lock here, we
                  * must never lock csched2_priv.lock if we're holding a 
runqueue lock.
@@ -1487,7 +1477,6 @@ csched2_alloc_domdata(const struct scheduler *ops, struct 
domain *dom)
         return NULL;
 
     /* Initialize credit and weight */
-    INIT_LIST_HEAD(&sdom->vcpu);
     INIT_LIST_HEAD(&sdom->sdom_elem);
     sdom->dom = dom;
     sdom->weight = CSCHED2_DEFAULT_WEIGHT;
@@ -1539,9 +1528,7 @@ csched2_free_domdata(const struct scheduler *ops, void 
*data)
 static void
 csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched2_dom *sdom = CSCHED2_DOM(dom);
-
-    BUG_ON(!list_empty(&sdom->vcpu));
+    BUG_ON(CSCHED2_DOM(dom)->nr_vcpus > 0);
 
     csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
@@ -1881,7 +1868,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 static void
 csched2_dump(const struct scheduler *ops)
 {
-    struct list_head *iter_sdom, *iter_svc;
+    struct list_head *iter_sdom;
     struct csched2_private *prv = CSCHED2_PRIV(ops);
     unsigned long flags;
     int i, loop;
@@ -1926,6 +1913,8 @@ csched2_dump(const struct scheduler *ops)
     list_for_each( iter_sdom, &prv->sdom )
     {
         struct csched2_dom *sdom;
+        struct vcpu *v;
+
         sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
 
         printk("\tDomain: %d w %d v %d\n",
@@ -1933,12 +1922,11 @@ csched2_dump(const struct scheduler *ops)
                sdom->weight,
                sdom->nr_vcpus);
 
-        list_for_each( iter_svc, &sdom->vcpu )
+        for_each_vcpu( sdom->dom, v )
         {
-            struct csched2_vcpu *svc;
+            struct csched2_vcpu * const svc = CSCHED2_VCPU(v);
             spinlock_t *lock;
 
-            svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
             lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.