
[Xen-devel] [PATCH v2 1/5] xen/sched: Use %*pb[l] instead of cpumask_scn{, list}printf()



This removes all use of keyhandler_scratch as a bounce buffer for the rendered
string.  In some cases, adjacent printk()'s which were writing parts of the
same line are combined.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Acked-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
CC: Josh Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
CC: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>
CC: Meng Xu <mengxu@xxxxxxxxxxxxx>

v2:
 * Use ->bits for cpumasks
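
For reference, the conversion pattern in isolation (a minimal sketch, not part
of the patch; it assumes printk()/vsnprintf() already understand the
Linux-style %*pb/%*pbl bitmap specifiers, and the two dump_mask_*() helpers
are hypothetical):

    #include <xen/cpumask.h>
    #include <xen/keyhandler.h>
    #include <xen/lib.h>

    static void dump_mask_old(const cpumask_t *mask)
    {
        /* Old pattern: render into the shared scratch buffer, then print. */
        cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
        printk("mask = %s\n", keyhandler_scratch);
    }

    static void dump_mask_new(const cpumask_t *mask)
    {
        /*
         * New pattern: pass the number of valid bits and the raw bitmap
         * directly.  %*pb renders a hex bitmap; %*pbl renders a CPU list.
         */
        printk("mask = %*pb\n", nr_cpu_ids, mask->bits);
        printk("mask = %*pbl\n", nr_cpu_ids, mask->bits);
    }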
---
 xen/common/sched_credit.c  | 17 +++++------------
 xen/common/sched_credit2.c | 27 ++++++++++-----------------
 xen/common/sched_null.c    | 15 +++++----------
 xen/common/sched_rt.c      |  5 ++---
 4 files changed, 22 insertions(+), 42 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 84e744b..4560ab6 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -2044,7 +2044,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spinlock_t *lock;
     unsigned long flags;
     int loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need both locks:
@@ -2059,11 +2058,10 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%s, ",
-           cpu, spc->nr_runnable, spc->runq_sort_last, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
+           cpu, spc->nr_runnable, spc->runq_sort_last,
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu)->bits,
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu)->bits);
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
@@ -2086,7 +2084,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     pcpu_schedule_unlock(lock, cpu);
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 static void
@@ -2099,8 +2096,6 @@ csched_dump(const struct scheduler *ops)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-#define idlers_buf keyhandler_scratch
-
     printk("info:\n"
            "\tncpus              = %u\n"
            "\tmaster             = %u\n"
@@ -2127,8 +2122,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            prv->vcpu_migr_delay/ MICROSECS(1));
 
-    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
-    printk("idlers: %s\n", idlers_buf);
+    printk("idlers: %*pb\n", nr_cpu_ids, prv->idlers->bits);
 
     printk("active vcpus:\n");
     loop = 0;
@@ -2151,7 +2145,6 @@ csched_dump(const struct scheduler *ops)
             vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
-#undef idlers_buf
 
     spin_unlock_irqrestore(&prv->lock, flags);
 }
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 2b16bce..4adb6fc 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3654,12 +3654,11 @@ dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
     struct csched2_vcpu *svc;
-#define cpustr keyhandler_scratch
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(cpu), cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
+           cpu, c2r(cpu),
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu)->bits,
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu)->bits);
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = csched2_vcpu(curr_on_cpu(cpu));
@@ -3668,7 +3667,6 @@ dump_pcpu(const struct scheduler *ops, int cpu)
         printk("\trun: ");
         csched2_dump_vcpu(prv, svc);
     }
-#undef cpustr
 }
 
 static void
@@ -3678,7 +3676,6 @@ csched2_dump(const struct scheduler *ops)
     struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
     unsigned int i, j, loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need the private scheduler lock as we access global
@@ -3696,29 +3693,26 @@ csched2_dump(const struct scheduler *ops)
 
         fraction = (prv->rqd[i].avgload * 100) >> prv->load_precision_shift;
 
-        cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
         printk("Runqueue %d:\n"
                "\tncpus              = %u\n"
-               "\tcpus               = %s\n"
+               "\tcpus               = %*pbl\n"
                "\tmax_weight         = %u\n"
                "\tpick_bias          = %u\n"
                "\tinstload           = %d\n"
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
                cpumask_weight(&prv->rqd[i].active),
-               cpustr,
+               nr_cpu_ids, prv->rqd[i].active.bits,
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
                prv->rqd[i].load,
                prv->rqd[i].avgload,
                fraction);
 
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].idle);
-        printk("\tidlers: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].tickled);
-        printk("\ttickled: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].smt_idle);
-        printk("\tfully idle cores: %s\n", cpustr);
+        printk("\tidlers: %*pb\n", nr_cpu_ids, prv->rqd[i].idle.bits);
+        printk("\ttickled: %*pb\n", nr_cpu_ids, prv->rqd[i].tickled.bits);
+        printk("\tfully idle cores: %*pb\n",
+               nr_cpu_ids, prv->rqd[i].smt_idle.bits);
     }
 
     printk("Domain info:\n");
@@ -3779,7 +3773,6 @@ csched2_dump(const struct scheduler *ops)
     }
 
     read_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 static void *
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 7b039b7..fdaeab8 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -793,14 +793,13 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     struct null_vcpu *nvc;
     spinlock_t *lock;
     unsigned long flags;
-#define cpustr keyhandler_scratch
 
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] sibling=%s, ", cpu, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s", cpustr);
+    printk("CPU[%02d] sibling=%*pb, core=%*pb",
+           cpu,
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu)->bits,
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu)->bits);
     if ( per_cpu(npc, cpu).vcpu != NULL )
         printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
     printk("\n");
@@ -815,7 +814,6 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     }
 
     pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
-#undef cpustr
 }
 
 static void null_dump(const struct scheduler *ops)
@@ -824,12 +822,10 @@ static void null_dump(const struct scheduler *ops)
     struct list_head *iter;
     unsigned long flags;
     unsigned int loop;
-#define cpustr keyhandler_scratch
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->cpus_free);
-    printk("\tcpus_free = %s\n", cpustr);
+    printk("\tcpus_free = %*pbl\n", nr_cpu_ids, prv->cpus_free.bits);
 
     printk("Domain info:\n");
     loop = 0;
@@ -873,7 +869,6 @@ static void null_dump(const struct scheduler *ops)
     spin_unlock(&prv->waitq_lock);
 
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 const struct scheduler sched_null_def = {
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 59fbfa6..fffbeab 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -328,11 +328,10 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
     cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
-    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
            " \t\t priority_level=%d has_extratime=%d\n"
-           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
+           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%*pbl\n",
             svc->vcpu->domain->domain_id,
             svc->vcpu->vcpu_id,
             svc->vcpu->processor,
@@ -346,7 +345,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
-            keyhandler_scratch);
+            nr_cpu_ids, mask->bits);
 }
 
 static void
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

