
[Xen-devel] [PATCH 3/9] xen: credit2: tidy up function names by removing leading '__'.



There is no reason for pretty much all of the
functions whose names begin with a double
underscore ('__') to actually look like that.

In fact, that prefix is misleading and makes the
code harder to read and understand. So, remove
the '__'-s.

The only two that we keep are __runq_assign() and
__runq_deassign() (although they are converted to
single underscore versions). In those cases, it is
indeed useful to have such "raw" variants.
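
To illustrate the pattern (a minimal sketch, distilled from the
runq_assign() pair as it looks after this patch; the svc lookup
is abbreviated):

    /* "Raw" variant: do the actual work, no precondition checks. */
    static void
    _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
    {
        svc->rqd = rqd;
        /* ... */
    }

    /* Checked variant: assert the precondition, then call the raw one. */
    static void
    runq_assign(const struct scheduler *ops, struct vcpu *vc)
    {
        struct csched2_vcpu *svc = csched2_vcpu(vc);

        ASSERT(svc->rqd == NULL);
        _runq_assign(svc, c2rqd(ops, vc->processor));
    }

Callers that have already re-established the invariants themselves
(e.g., migrate()) keep calling the raw variant directly.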

In the case of __runq_insert(), which is only called
once, by runq_insert(), merge the two functions.
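
Schematically, that means going from:

    static int __runq_insert(struct list_head *runq, struct csched2_vcpu *svc);

    static void
    runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
    {
        /* asserts ... */
        pos = __runq_insert(runq, svc);
        /* tracing ... */
    }

to a single runq_insert() that does the assertions, walks the runqueue
to find the insertion point, and does the list_add_tail() itself (see
the hunk below).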

No functional change intended.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
---
 xen/common/sched_credit2.c |  114 +++++++++++++++++++-------------------------
 1 file changed, 49 insertions(+), 65 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 29973cf..b764cf9 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -221,7 +221,7 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
  * shift all time samples to the right.
  *
  * The details of the formulas used for load tracking are explained close to
- * __update_runq_load(). Let's just say here that, with full nanosecond time
+ * update_runq_load(). Let's just say here that, with full nanosecond time
  * granularity, a 30 bits wide 'decaying window' is ~1 second long.
  *
  * We want to consider the following equations:
@@ -233,7 +233,7 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
  * Q-format fixed point arithmetic and load is the instantaneous load of a
  * runqueue, which basically is the number of runnable vcpus there are on the
  * runqueue (for the meaning of the other terms, look at the doc comment to
- *  __update_runq_load()).
+ *  update_runq_load()).
  *
  *  So, again, with full nanosecond granularity, and 1 second window, we have:
  *
@@ -594,14 +594,12 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code
  */
 
-static /*inline*/ int
-__vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static /*inline*/ struct csched2_vcpu *
-__runq_elem(struct list_head *elem)
+static struct csched2_vcpu * runq_elem(struct list_head *elem)
 {
     return list_entry(elem, struct csched2_vcpu, runq_elem);
 }
@@ -713,8 +711,8 @@ __runq_elem(struct list_head *elem)
  * Which, in both cases, is what we expect.
  */
 static void
-__update_runq_load(const struct scheduler *ops,
-                  struct csched2_runqueue_data *rqd, int change, s_time_t now)
+update_runq_load(const struct scheduler *ops,
+                 struct csched2_runqueue_data *rqd, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, load = rqd->load;
@@ -800,8 +798,8 @@ __update_runq_load(const struct scheduler *ops,
 }
 
 static void
-__update_svc_load(const struct scheduler *ops,
-                  struct csched2_vcpu *svc, int change, s_time_t now)
+update_svc_load(const struct scheduler *ops,
+                struct csched2_vcpu *svc, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
@@ -865,17 +863,24 @@ update_load(const struct scheduler *ops,
 {
     trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0,  NULL);
 
-    __update_runq_load(ops, rqd, change, now);
+    update_runq_load(ops, rqd, change, now);
     if ( svc )
-        __update_svc_load(ops, svc, change, now);
+        update_svc_load(ops, svc, change, now);
 }
 
-static int
-__runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
+static void
+runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 {
     struct list_head *iter;
+    unsigned int cpu = svc->vcpu->processor;
+    struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
+    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+
+    ASSERT(!vcpu_on_runq(svc));
+    ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
+
     ASSERT(&svc->rqd->runq == runq);
     ASSERT(!is_idle_vcpu(svc->vcpu));
     ASSERT(!svc->vcpu->is_running);
@@ -883,33 +888,15 @@ __runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched2_vcpu * iter_svc = __runq_elem(iter);
+        struct csched2_vcpu * iter_svc = runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
             break;
 
         pos++;
     }
-
     list_add_tail(&svc->runq_elem, iter);
 
-    return pos;
-}
-
-static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
-{
-    unsigned int cpu = svc->vcpu->processor;
-    struct list_head * runq = &c2rqd(ops, cpu)->runq;
-    int pos = 0;
-
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
-
-    ASSERT(!__vcpu_on_runq(svc));
-    ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
-
-    pos = __runq_insert(runq, svc);
-
     if ( unlikely(tb_init_done) )
     {
         struct {
@@ -923,14 +910,11 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
                     sizeof(d),
                     (unsigned char *)&d);
     }
-
-    return;
 }
 
-static inline void
-__runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_vcpu *svc)
 {
-    ASSERT(__vcpu_on_runq(svc));
+    ASSERT(vcpu_on_runq(svc));
     list_del_init(&svc->runq_elem);
 }
 
@@ -1281,8 +1265,8 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 }
 
 #ifndef NDEBUG
-static /*inline*/ void
-__csched2_vcpu_check(struct vcpu *vc)
+static inline void
+csched2_vcpu_check(struct vcpu *vc)
 {
     struct csched2_vcpu * const svc = csched2_vcpu(vc);
     struct csched2_dom * const sdom = svc->sdom;
@@ -1300,7 +1284,7 @@ __csched2_vcpu_check(struct vcpu *vc)
     }
     SCHED_STAT_CRANK(vcpu_check);
 }
-#define CSCHED2_VCPU_CHECK(_vc)  (__csched2_vcpu_check(_vc))
+#define CSCHED2_VCPU_CHECK(_vc)  (csched2_vcpu_check(_vc))
 #else
 #define CSCHED2_VCPU_CHECK(_vc)
 #endif
@@ -1346,7 +1330,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-__runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -1380,15 +1364,15 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 
     ASSERT(svc->rqd == NULL);
 
-    __runq_assign(svc, c2rqd(ops, vc->processor));
+    _runq_assign(svc, c2rqd(ops, vc->processor));
 }
 
 static void
-__runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_vcpu *svc)
 {
     struct csched2_runqueue_data *rqd = svc->rqd;
 
-    ASSERT(!__vcpu_on_runq(svc));
+    ASSERT(!vcpu_on_runq(svc));
     ASSERT(!(svc->flags & CSFLAG_scheduled));
 
     list_del_init(&svc->rqd_elem);
@@ -1407,7 +1391,7 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 
     ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
-    __runq_deassign(svc);
+    _runq_deassign(svc);
 }
 
 static void
@@ -1420,11 +1404,11 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 
     if ( curr_on_cpu(vc->processor) == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
-    else if ( __vcpu_on_runq(svc) )
+    else if ( vcpu_on_runq(svc) )
     {
         ASSERT(svc->rqd == c2rqd(ops, vc->processor));
         update_load(ops, svc->rqd, svc, -1, NOW());
-        __runq_remove(svc);
+        runq_remove(svc);
     }
     else if ( svc->flags & CSFLAG_delayed_runq_add )
         __clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
@@ -1447,7 +1431,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
         goto out;
     }
 
-    if ( unlikely(__vcpu_on_runq(svc)) )
+    if ( unlikely(vcpu_on_runq(svc)) )
     {
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
         goto out;
@@ -1509,7 +1493,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
     /* If someone wants it on the runqueue, put it there. */
     /*
      * NB: We can get rid of CSFLAG_scheduled by checking for
-     * vc->is_running and __vcpu_on_runq(svc) here.  However,
+     * vc->is_running and vcpu_on_runq(svc) here.  However,
      * since we're accessing the flags cacheline anyway,
      * it seems a bit pointless; especially as we have plenty of
      * bits free.
@@ -1517,7 +1501,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
     if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
          && likely(vcpu_runnable(vc)) )
     {
-        ASSERT(!__vcpu_on_runq(svc));
+        ASSERT(!vcpu_on_runq(svc));
 
         runq_insert(ops, svc);
         runq_tickle(ops, svc, now);
@@ -1749,13 +1733,13 @@ static void migrate(const struct scheduler *ops,
     {
         int on_runq = 0;
         /* It's not running; just move it */
-        if ( __vcpu_on_runq(svc) )
+        if ( vcpu_on_runq(svc) )
         {
-            __runq_remove(svc);
+            runq_remove(svc);
             update_load(ops, svc->rqd, NULL, -1, now);
             on_runq = 1;
         }
-        __runq_deassign(svc);
+        _runq_deassign(svc);
 
         cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
                     cpupool_domain_cpumask(svc->vcpu->domain));
@@ -1764,7 +1748,7 @@ static void migrate(const struct scheduler *ops,
         svc->vcpu->processor = cpumask_any(cpumask_scratch_cpu(cpu));
         ASSERT(svc->vcpu->processor < nr_cpu_ids);
 
-        __runq_assign(svc, trqd);
+        _runq_assign(svc, trqd);
         if ( on_runq )
         {
             update_load(ops, svc->rqd, NULL, 1, now);
@@ -1814,7 +1798,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
-    __update_runq_load(ops, st.lrqd, 0, now);
+    update_runq_load(ops, st.lrqd, 0, now);
 
 retry:
     if ( !read_trylock(&prv->lock) )
@@ -1832,7 +1816,7 @@ retry:
              || !spin_trylock(&st.orqd->lock) )
             continue;
 
-        __update_runq_load(ops, st.orqd, 0, now);
+        update_runq_load(ops, st.orqd, 0, now);
     
         delta = st.lrqd->b_avgload - st.orqd->b_avgload;
         if ( delta < 0 )
@@ -1931,7 +1915,7 @@ retry:
     {
         struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
 
-        __update_svc_load(ops, push_svc, 0, now);
+        update_svc_load(ops, push_svc, 0, now);
 
         if ( !vcpu_is_migrateable(push_svc, st.orqd) )
             continue;
@@ -1941,7 +1925,7 @@ retry:
             struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
             
             if ( !inner_load_updated )
-                __update_svc_load(ops, pull_svc, 0, now);
+                update_svc_load(ops, pull_svc, 0, now);
         
             if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
                 continue;
@@ -2009,12 +1993,12 @@ csched2_vcpu_migrate(
     if ( unlikely(!cpumask_test_cpu(new_cpu, cpupool_domain_cpumask(d))) )
     {
         ASSERT(system_state == SYS_STATE_suspend);
-        if ( __vcpu_on_runq(svc) )
+        if ( vcpu_on_runq(svc) )
         {
-            __runq_remove(svc);
+            runq_remove(svc);
             update_load(ops, svc->rqd, NULL, -1, now);
         }
-        __runq_deassign(svc);
+        _runq_deassign(svc);
         vc->processor = new_cpu;
         return;
     }
@@ -2302,7 +2286,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
      * run until your credit ~= his */
     if ( ! list_empty(runq) )
     {
-        struct csched2_vcpu *swait = __runq_elem(runq->next);
+        struct csched2_vcpu *swait = runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -2566,7 +2550,7 @@ csched2_schedule(
             ASSERT(snext->rqd == rqd);
             ASSERT(!snext->vcpu->is_running);
 
-            __runq_remove(snext);
+            runq_remove(snext);
             __set_bit(__CSFLAG_scheduled, &snext->flags);
         }
 
@@ -2701,7 +2685,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
     loop = 0;
     list_for_each( iter, runq )
     {
-        svc = __runq_elem(iter);
+        svc = runq_elem(iter);
         if ( svc )
         {
             printk("\t%3d: ", ++loop);

