
[Xen-devel] [PATCH v4 4/7] xen: credit2: tidy up function names by removing leading '__'.



There is no reason for most of the functions whose
names begin with a double underscore ('__') to be
named like that.

In fact, the prefix is misleading and makes the code
harder to read and understand, so remove it.

The only two that we keep are __runq_assign() and
__runq_deassign() (although they are converted to a
single underscore). In those cases, it is indeed
useful to have such "raw" variants.

In the case of __runq_insert(), which is called only
once, by runq_insert(), merge the two functions.

No functional change intended.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
---
Changes from v2:
* made 'runq_elem()' inline as well, as suggested during review.
---
 xen/common/sched_credit2.c |  114 +++++++++++++++++++-------------------------
 1 file changed, 49 insertions(+), 65 deletions(-)
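
As a reference for reviewers, below is a standalone sketch (plain C with
illustrative names, not the Xen code) of the credit-ordered insertion that
the merged runq_insert() in the diff performs: a new element is placed
before the first element with lower credit, so the runqueue stays sorted
by decreasing credit and equal credits keep FIFO order.

/*
 * Standalone sketch, not Xen code: keep a singly linked list sorted by
 * decreasing credit, inserting new elements before the first one with
 * strictly lower credit, mirroring the list_for_each()/list_add_tail()
 * logic of the merged runq_insert().
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
    int credit;
    struct item *next;
};

static void insert_sorted(struct item **head, struct item *new)
{
    struct item **pp = head;

    /* Skip every element whose credit is >= the new element's credit. */
    while ( *pp != NULL && (*pp)->credit >= new->credit )
        pp = &(*pp)->next;

    new->next = *pp;
    *pp = new;
}

int main(void)
{
    int credits[] = { 500, 900, 700 };
    struct item *head = NULL;
    unsigned int i;

    for ( i = 0; i < sizeof(credits) / sizeof(credits[0]); i++ )
    {
        struct item *it = malloc(sizeof(*it));

        if ( it == NULL )
            return 1;
        it->credit = credits[i];
        insert_sorted(&head, it);
    }

    for ( ; head != NULL; head = head->next )
        printf("%d ", head->credit); /* prints: 900 700 500 */
    printf("\n");

    return 0;
}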

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 1be4385..1f57239 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -220,7 +220,7 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
  * shift all time samples to the right.
  *
  * The details of the formulas used for load tracking are explained close to
- * __update_runq_load(). Let's just say here that, with full nanosecond time
+ * update_runq_load(). Let's just say here that, with full nanosecond time
  * granularity, a 30 bits wide 'decaying window' is ~1 second long.
  *
  * We want to consider the following equations:
@@ -232,7 +232,7 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
  * Q-format fixed point arithmetic and load is the instantaneous load of a
  * runqueue, which basically is the number of runnable vcpus there are on the
  * runqueue (for the meaning of the other terms, look at the doc comment to
- *  __update_runq_load()).
+ *  update_runq_load()).
  *
  *  So, again, with full nanosecond granularity, and 1 second window, we have:
  *
@@ -593,14 +593,12 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code
  */
 
-static /*inline*/ int
-__vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static /*inline*/ struct csched2_vcpu *
-__runq_elem(struct list_head *elem)
+static inline struct csched2_vcpu * runq_elem(struct list_head *elem)
 {
     return list_entry(elem, struct csched2_vcpu, runq_elem);
 }
@@ -712,8 +710,8 @@ __runq_elem(struct list_head *elem)
  * Which, in both cases, is what we expect.
  */
 static void
-__update_runq_load(const struct scheduler *ops,
-                  struct csched2_runqueue_data *rqd, int change, s_time_t now)
+update_runq_load(const struct scheduler *ops,
+                 struct csched2_runqueue_data *rqd, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, load = rqd->load;
@@ -799,8 +797,8 @@ __update_runq_load(const struct scheduler *ops,
 }
 
 static void
-__update_svc_load(const struct scheduler *ops,
-                  struct csched2_vcpu *svc, int change, s_time_t now)
+update_svc_load(const struct scheduler *ops,
+                struct csched2_vcpu *svc, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
@@ -864,17 +862,24 @@ update_load(const struct scheduler *ops,
 {
     trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0,  NULL);
 
-    __update_runq_load(ops, rqd, change, now);
+    update_runq_load(ops, rqd, change, now);
     if ( svc )
-        __update_svc_load(ops, svc, change, now);
+        update_svc_load(ops, svc, change, now);
 }
 
-static int
-__runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
+static void
+runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 {
     struct list_head *iter;
+    unsigned int cpu = svc->vcpu->processor;
+    struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
+    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+
+    ASSERT(!vcpu_on_runq(svc));
+    ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
+
     ASSERT(&svc->rqd->runq == runq);
     ASSERT(!is_idle_vcpu(svc->vcpu));
     ASSERT(!svc->vcpu->is_running);
@@ -882,33 +887,15 @@ __runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched2_vcpu * iter_svc = __runq_elem(iter);
+        struct csched2_vcpu * iter_svc = runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
             break;
 
         pos++;
     }
-
     list_add_tail(&svc->runq_elem, iter);
 
-    return pos;
-}
-
-static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
-{
-    unsigned int cpu = svc->vcpu->processor;
-    struct list_head * runq = &c2rqd(ops, cpu)->runq;
-    int pos = 0;
-
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
-
-    ASSERT(!__vcpu_on_runq(svc));
-    ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
-
-    pos = __runq_insert(runq, svc);
-
     if ( unlikely(tb_init_done) )
     {
         struct {
@@ -922,14 +909,11 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
                     sizeof(d),
                     (unsigned char *)&d);
     }
-
-    return;
 }
 
-static inline void
-__runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_vcpu *svc)
 {
-    ASSERT(__vcpu_on_runq(svc));
+    ASSERT(vcpu_on_runq(svc));
     list_del_init(&svc->runq_elem);
 }
 
@@ -1296,8 +1280,8 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 }
 
 #ifndef NDEBUG
-static /*inline*/ void
-__csched2_vcpu_check(struct vcpu *vc)
+static inline void
+csched2_vcpu_check(struct vcpu *vc)
 {
     struct csched2_vcpu * const svc = csched2_vcpu(vc);
     struct csched2_dom * const sdom = svc->sdom;
@@ -1315,7 +1299,7 @@ __csched2_vcpu_check(struct vcpu *vc)
     }
     SCHED_STAT_CRANK(vcpu_check);
 }
-#define CSCHED2_VCPU_CHECK(_vc)  (__csched2_vcpu_check(_vc))
+#define CSCHED2_VCPU_CHECK(_vc)  (csched2_vcpu_check(_vc))
 #else
 #define CSCHED2_VCPU_CHECK(_vc)
 #endif
@@ -1361,7 +1345,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-__runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -1395,15 +1379,15 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 
     ASSERT(svc->rqd == NULL);
 
-    __runq_assign(svc, c2rqd(ops, vc->processor));
+    _runq_assign(svc, c2rqd(ops, vc->processor));
 }
 
 static void
-__runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_vcpu *svc)
 {
     struct csched2_runqueue_data *rqd = svc->rqd;
 
-    ASSERT(!__vcpu_on_runq(svc));
+    ASSERT(!vcpu_on_runq(svc));
     ASSERT(!(svc->flags & CSFLAG_scheduled));
 
     list_del_init(&svc->rqd_elem);
@@ -1422,7 +1406,7 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 
     ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
-    __runq_deassign(svc);
+    _runq_deassign(svc);
 }
 
 static void
@@ -1437,11 +1421,11 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
     {
         tickle_cpu(vc->processor, svc->rqd);
     }
-    else if ( __vcpu_on_runq(svc) )
+    else if ( vcpu_on_runq(svc) )
     {
         ASSERT(svc->rqd == c2rqd(ops, vc->processor));
         update_load(ops, svc->rqd, svc, -1, NOW());
-        __runq_remove(svc);
+        runq_remove(svc);
     }
     else if ( svc->flags & CSFLAG_delayed_runq_add )
         __clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
@@ -1464,7 +1448,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
         goto out;
     }
 
-    if ( unlikely(__vcpu_on_runq(svc)) )
+    if ( unlikely(vcpu_on_runq(svc)) )
     {
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
         goto out;
@@ -1526,7 +1510,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
     /* If someone wants it on the runqueue, put it there. */
     /*
      * NB: We can get rid of CSFLAG_scheduled by checking for
-     * vc->is_running and __vcpu_on_runq(svc) here.  However,
+     * vc->is_running and vcpu_on_runq(svc) here.  However,
      * since we're accessing the flags cacheline anyway,
      * it seems a bit pointless; especially as we have plenty of
      * bits free.
@@ -1534,7 +1518,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
     if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
          && likely(vcpu_runnable(vc)) )
     {
-        ASSERT(!__vcpu_on_runq(svc));
+        ASSERT(!vcpu_on_runq(svc));
 
         runq_insert(ops, svc);
         runq_tickle(ops, svc, now);
@@ -1766,13 +1750,13 @@ static void migrate(const struct scheduler *ops,
     {
         int on_runq = 0;
         /* It's not running; just move it */
-        if ( __vcpu_on_runq(svc) )
+        if ( vcpu_on_runq(svc) )
         {
-            __runq_remove(svc);
+            runq_remove(svc);
             update_load(ops, svc->rqd, NULL, -1, now);
             on_runq = 1;
         }
-        __runq_deassign(svc);
+        _runq_deassign(svc);
 
         cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
                     cpupool_domain_cpumask(svc->vcpu->domain));
@@ -1781,7 +1765,7 @@ static void migrate(const struct scheduler *ops,
         svc->vcpu->processor = cpumask_any(cpumask_scratch_cpu(cpu));
         ASSERT(svc->vcpu->processor < nr_cpu_ids);
 
-        __runq_assign(svc, trqd);
+        _runq_assign(svc, trqd);
         if ( on_runq )
         {
             update_load(ops, svc->rqd, NULL, 1, now);
@@ -1831,7 +1815,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
-    __update_runq_load(ops, st.lrqd, 0, now);
+    update_runq_load(ops, st.lrqd, 0, now);
 
 retry:
     if ( !read_trylock(&prv->lock) )
@@ -1849,7 +1833,7 @@ retry:
              || !spin_trylock(&st.orqd->lock) )
             continue;
 
-        __update_runq_load(ops, st.orqd, 0, now);
+        update_runq_load(ops, st.orqd, 0, now);
     
         delta = st.lrqd->b_avgload - st.orqd->b_avgload;
         if ( delta < 0 )
@@ -1950,7 +1934,7 @@ retry:
     {
         struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
 
-        __update_svc_load(ops, push_svc, 0, now);
+        update_svc_load(ops, push_svc, 0, now);
 
         if ( !vcpu_is_migrateable(push_svc, st.orqd) )
             continue;
@@ -1960,7 +1944,7 @@ retry:
             struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
             
             if ( !inner_load_updated )
-                __update_svc_load(ops, pull_svc, 0, now);
+                update_svc_load(ops, pull_svc, 0, now);
         
             if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
                 continue;
@@ -2024,12 +2008,12 @@ csched2_vcpu_migrate(
     if ( unlikely(!cpumask_test_cpu(new_cpu, cpupool_domain_cpumask(d))) )
     {
         ASSERT(system_state == SYS_STATE_suspend);
-        if ( __vcpu_on_runq(svc) )
+        if ( vcpu_on_runq(svc) )
         {
-            __runq_remove(svc);
+            runq_remove(svc);
             update_load(ops, svc->rqd, NULL, -1, now);
         }
-        __runq_deassign(svc);
+        _runq_deassign(svc);
         vc->processor = new_cpu;
         return;
     }
@@ -2321,7 +2305,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
      * run until your credit ~= his */
     if ( ! list_empty(runq) )
     {
-        struct csched2_vcpu *swait = __runq_elem(runq->next);
+        struct csched2_vcpu *swait = runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -2585,7 +2569,7 @@ csched2_schedule(
             ASSERT(snext->rqd == rqd);
             ASSERT(!snext->vcpu->is_running);
 
-            __runq_remove(snext);
+            runq_remove(snext);
             __set_bit(__CSFLAG_scheduled, &snext->flags);
         }
 
@@ -2795,7 +2779,7 @@ csched2_dump(const struct scheduler *ops)
         printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
-            struct csched2_vcpu *svc = __runq_elem(iter);
+            struct csched2_vcpu *svc = runq_elem(iter);
 
             if ( svc )
             {
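
Side note on the load tracking comment touched by the first two hunks: the
"~1 second" figure for the 30 bit wide decaying window follows directly
from the nanosecond time granularity, since 2^30 ns = 1,073,741,824 ns,
i.e. roughly 1.07 s.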

