
[Xen-devel] [PATCH 2/6] xen/sched: Use %*pb[l] instead of cpumask_scn{,list}printf()



This removes all use of keyhandler_scratch as a bounce-buffer for the rendered
string.  In some cases, adjacent printk()'s which were writing parts of the
same line are combined.

No functional change.
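
For reference, the conversion pattern is as follows (a minimal sketch using
names taken from the hunks below; the '*' field width supplies the number of
valid bits in the map):

    /* Before: render the mask into a scratch buffer, then print it. */
    cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                      prv->idlers);
    printk("idlers: %s\n", keyhandler_scratch);

    /* After: printk renders the bitmap directly.  %*pb prints a hex
     * bitmask, while %*pbl prints a comma-separated range list. */
    printk("idlers: %*pb\n", nr_cpu_ids, prv->idlers);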

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
CC: Josh Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
CC: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>
CC: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c  | 17 +++++------------
 xen/common/sched_credit2.c | 26 +++++++++-----------------
 xen/common/sched_null.c    | 15 +++++----------
 xen/common/sched_rt.c      |  5 ++---
 4 files changed, 21 insertions(+), 42 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 84e744b..fe48e1b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -2044,7 +2044,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spinlock_t *lock;
     unsigned long flags;
     int loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need both locks:
@@ -2059,11 +2058,10 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%s, ",
-           cpu, spc->nr_runnable, spc->runq_sort_last, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
+           cpu, spc->nr_runnable, spc->runq_sort_last,
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu),
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
@@ -2086,7 +2084,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     pcpu_schedule_unlock(lock, cpu);
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 static void
@@ -2099,8 +2096,6 @@ csched_dump(const struct scheduler *ops)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-#define idlers_buf keyhandler_scratch
-
     printk("info:\n"
            "\tncpus              = %u\n"
            "\tmaster             = %u\n"
@@ -2127,8 +2122,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            prv->vcpu_migr_delay/ MICROSECS(1));
 
-    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
-    printk("idlers: %s\n", idlers_buf);
+    printk("idlers: %*pb\n", nr_cpu_ids, prv->idlers);
 
     printk("active vcpus:\n");
     loop = 0;
@@ -2151,7 +2145,6 @@ csched_dump(const struct scheduler *ops)
             vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
-#undef idlers_buf
 
     spin_unlock_irqrestore(&prv->lock, flags);
 }
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 7438481..1df9d5f 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3650,12 +3650,11 @@ dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
     struct csched2_vcpu *svc;
-#define cpustr keyhandler_scratch
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(cpu), cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
+           cpu, c2r(cpu),
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu),
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = csched2_vcpu(curr_on_cpu(cpu));
@@ -3664,7 +3663,6 @@ dump_pcpu(const struct scheduler *ops, int cpu)
         printk("\trun: ");
         csched2_dump_vcpu(prv, svc);
     }
-#undef cpustr
 }
 
 static void
@@ -3674,7 +3672,6 @@ csched2_dump(const struct scheduler *ops)
     struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
     unsigned int i, j, loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need the private scheduler lock as we access global
@@ -3692,29 +3689,25 @@ csched2_dump(const struct scheduler *ops)
 
         fraction = (prv->rqd[i].avgload * 100) >> prv->load_precision_shift;
 
-        cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
         printk("Runqueue %d:\n"
                "\tncpus              = %u\n"
-               "\tcpus               = %s\n"
+               "\tcpus               = %*pbl\n"
                "\tmax_weight         = %u\n"
                "\tpick_bias          = %u\n"
                "\tinstload           = %d\n"
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
                cpumask_weight(&prv->rqd[i].active),
-               cpustr,
+               nr_cpu_ids, &prv->rqd[i].active,
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
                prv->rqd[i].load,
                prv->rqd[i].avgload,
                fraction);
 
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].idle);
-        printk("\tidlers: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].tickled);
-        printk("\ttickled: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].smt_idle);
-        printk("\tfully idle cores: %s\n", cpustr);
+        printk("\tidlers: %*pb\n", nr_cpu_ids, &prv->rqd[i].idle);
+        printk("\ttickled: %*pb\n", nr_cpu_ids, &prv->rqd[i].tickled);
+        printk("\tfully idle cores: %*pb\n", nr_cpu_ids, &prv->rqd[i].smt_idle);
     }
 
     printk("Domain info:\n");
@@ -3775,7 +3768,6 @@ csched2_dump(const struct scheduler *ops)
     }
 
     read_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 /* Returns the ID of the runqueue the cpu is assigned to. */
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 784db71..c2509ff 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -795,14 +795,13 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     struct null_vcpu *nvc;
     spinlock_t *lock;
     unsigned long flags;
-#define cpustr keyhandler_scratch
 
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] sibling=%s, ", cpu, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s", cpustr);
+    printk("CPU[%02d] sibling=%*pb, core=%*pb",
+           cpu,
+           nr_cpu_ids, per_cpu(cpu_sibling_mask, cpu),
+           nr_cpu_ids, per_cpu(cpu_core_mask, cpu));
     if ( per_cpu(npc, cpu).vcpu != NULL )
         printk(", vcpu=d%dv%d", per_cpu(npc, cpu).vcpu->domain->domain_id,
                per_cpu(npc, cpu).vcpu->vcpu_id);
@@ -818,7 +817,6 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     }
 
     pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
-#undef cpustr
 }
 
 static void null_dump(const struct scheduler *ops)
@@ -827,12 +825,10 @@ static void null_dump(const struct scheduler *ops)
     struct list_head *iter;
     unsigned long flags;
     unsigned int loop;
-#define cpustr keyhandler_scratch
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->cpus_free);
-    printk("\tcpus_free = %s\n", cpustr);
+    printk("\tcpus_free = %*pbl\n", nr_cpu_ids, &prv->cpus_free);
 
     printk("Domain info:\n");
     loop = 0;
@@ -876,7 +872,6 @@ static void null_dump(const struct scheduler *ops)
     spin_unlock(&prv->waitq_lock);
 
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 const struct scheduler sched_null_def = {
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index ac79f15..9784213 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -328,11 +328,10 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
     cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
-    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
            " \t\t priority_level=%d has_extratime=%d\n"
-           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
+           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%*pbl\n",
             svc->vcpu->domain->domain_id,
             svc->vcpu->vcpu_id,
             svc->vcpu->processor,
@@ -346,7 +345,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
-            keyhandler_scratch);
+            nr_cpu_ids, mask);
 }
 
 static void
-- 
2.1.4

