
Re: [Xen-devel] [PATCH 3/6] xen/common: Use %*pb[l] instead of {cpu, node}mask_scn{, list}printf()



On Thu, Sep 06, 2018 at 01:08:13PM +0100, Andrew Cooper wrote:
> This removes all use of keyhandler_scratch as a bounce-buffer for the rendered
> string.  In some cases, collapse adjacent printk()'s which are writing
> parts of the same line.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
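
For readers new to %*pb[l]: the '*' field width carries the number of
valid bits in the bitmap, and the pointer argument is the bitmap itself,
which is what lets these call sites drop the keyhandler_scratch bounce
buffer. A minimal before/after sketch of the calling convention (the
"Cpus" label and mask variable are illustrative):

    /* Before: render the mask into a scratch buffer, then print it. */
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
    printk("Cpus: %s\n", keyhandler_scratch);

    /* After: pass the bit count as the field width and the bitmap
     * pointer as the argument; no intermediate buffer is needed. */
    printk("Cpus: %*pbl\n", nr_cpu_ids, mask);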

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> CC: Juergen Gross <jgross@xxxxxxxx>
> ---
>  xen/common/cpupool.c       | 12 +++---------
>  xen/common/event_channel.c |  6 ++----
>  xen/common/keyhandler.c    | 35 +++++++++--------------------------
>  3 files changed, 14 insertions(+), 39 deletions(-)
> 
> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
> index 1e8edcb..16ca4c4 100644
> --- a/xen/common/cpupool.c
> +++ b/xen/common/cpupool.c
> @@ -732,12 +732,6 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
>      return ret;
>  }
>  
> -static void print_cpumap(const char *str, const cpumask_t *map)
> -{
> -    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), map);
> -    printk("%s: %s\n", str, keyhandler_scratch);
> -}
> -
>  void dump_runq(unsigned char key)
>  {
>      unsigned long    flags;
> @@ -751,17 +745,17 @@ void dump_runq(unsigned char key)
>              sched_smt_power_savings? "enabled":"disabled");
>      printk("NOW=%"PRI_stime"\n", now);
>  
> -    print_cpumap("Online Cpus", &cpu_online_map);
> +    printk("Online Cpus: %*pbl\n", nr_cpu_ids, &cpu_online_map);
>      if ( !cpumask_empty(&cpupool_free_cpus) )
>      {
> -        print_cpumap("Free Cpus", &cpupool_free_cpus);
> +        printk("Free Cpus: %*pbl\n", nr_cpu_ids, &cpupool_free_cpus);
>          schedule_dump(NULL);
>      }
>  
>      for_each_cpupool(c)
>      {
>          printk("Cpupool %d:\n", (*c)->cpupool_id);
> -        print_cpumap("Cpus", (*c)->cpu_valid);
> +        printk("Cpus: %*pbl\n", nr_cpu_ids, (*c)->cpu_valid);
>          schedule_dump(*c);
>      }
>  
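
The list form keeps the exact output format of cpulist_scnprintf(), so
e.g. with CPUs 0-3 and 6 online the first line above would still render
as (illustrative output):

    Online Cpus: 0-3,6
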
> diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
> index 381f30e..f34d4f0 100644
> --- a/xen/common/event_channel.c
> +++ b/xen/common/event_channel.c
> @@ -1377,11 +1377,9 @@ static void domain_dump_evtchn_info(struct domain *d)
>      unsigned int port;
>      int irq;
>  
> -    bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
> -                         d->poll_mask, d->max_vcpus);
>      printk("Event channel information for domain %d:\n"
> -           "Polling vCPUs: {%s}\n"
> -           "    port [p/m/s]\n", d->domain_id, keyhandler_scratch);
> +           "Polling vCPUs: {%*pbl}\n"
> +           "    port [p/m/s]\n", d->domain_id, d->max_vcpus, d->poll_mask);
>  
>      spin_lock(&d->event_lock);
>  
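
Worth noting: the field width here is d->max_vcpus rather than
nr_cpu_ids, since poll_mask is a per-vCPU bitmap rather than a cpumask;
the specifier handles any bitmap provided the width matches its valid
bit count. A domain polling in vCPUs 0 and 2-3 would print as
(illustrative):

    Polling vCPUs: {0,2-3}
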
> diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
> index 777c8e9..93ae738 100644
> --- a/xen/common/keyhandler.c
> +++ b/xen/common/keyhandler.c
> @@ -250,22 +250,6 @@ static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
>      machine_restart(0);
>  }
>  
> -static void cpuset_print(char *set, int size, const cpumask_t *mask)
> -{
> -    *set++ = '{';
> -    set += cpulist_scnprintf(set, size-2, mask);
> -    *set++ = '}';
> -    *set++ = '\0';
> -}
> -
> -static void nodeset_print(char *set, int size, const nodemask_t *mask)
> -{
> -    *set++ = '[';
> -    set += nodelist_scnprintf(set, size-2, mask);
> -    *set++ = ']';
> -    *set++ = '\0';
> -}
> -
>  static void periodic_timer_print(char *str, int size, uint64_t period)
>  {
>      if ( period == 0 )
> @@ -298,14 +282,14 @@ static void dump_domains(unsigned char key)
>          process_pending_softirqs();
>  
>          printk("General information for domain %u:\n", d->domain_id);
> -        cpuset_print(tmpstr, sizeof(tmpstr), d->dirty_cpumask);
>          printk("    refcnt=%d dying=%d pause_count=%d\n",
>                 atomic_read(&d->refcnt), d->is_dying,
>                 atomic_read(&d->pause_count));
>          printk("    nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
> -               "dirty_cpus=%s max_pages=%u\n", d->tot_pages, d->xenheap_pages,
> -                atomic_read(&d->shr_pages), atomic_read(&d->paged_pages),
> -                tmpstr, d->max_pages);
> +               "dirty_cpus={%*pbl} max_pages=%u\n",
> +               d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages),
> +               atomic_read(&d->paged_pages), nr_cpu_ids, d->dirty_cpumask,
> +               d->max_pages);
>          printk("    handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
>                 "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
>                 d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
> @@ -324,8 +308,8 @@ static void dump_domains(unsigned char key)
>  
>          dump_pageframe_info(d);
>  
> -        nodeset_print(tmpstr, sizeof(tmpstr), &d->node_affinity);
> -        printk("NODE affinity for domain %d: %s\n", d->domain_id, tmpstr);
> +        printk("NODE affinity for domain %d: [%*pbl]\n",
> +               d->domain_id, MAX_NUMNODES, &d->node_affinity);
>  
>          printk("VCPU information and callbacks for domain %u:\n",
>                 d->domain_id);
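
The nodemask conversion keeps the existing [] decoration by folding it
into the format string, and sizes the bitmap with MAX_NUMNODES rather
than nr_cpu_ids. Illustrative output for a domain with affinity to
nodes 0-1:

    NODE affinity for domain 0: [0-1]
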
> @@ -343,10 +327,9 @@ static void dump_domains(unsigned char key)
>              if ( vcpu_cpu_dirty(v) )
>                  printk("dirty_cpu=%u", v->dirty_cpu);
>              printk("\n");
> -            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
> -            printk("    cpu_hard_affinity=%s ", tmpstr);
> -            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
> -            printk("cpu_soft_affinity=%s\n", tmpstr);
> +            printk("    cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
> +                   nr_cpu_ids, v->cpu_hard_affinity,
> +                   nr_cpu_ids, v->cpu_soft_affinity);
>              printk("    pause_count=%d pause_flags=%lx\n",
>                     atomic_read(&v->pause_count), v->pause_flags);
>              arch_dump_vcpu_info(v);
> -- 
> 2.1.4
> 
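
As an aside, the %*pb[l] in the subject covers both forms: %*pb renders
the bitmap as hex while %*pbl renders it as a range list, and all of the
conversions above use the list form since they replace the *list*
helpers. A sketch with a hypothetical 10-bit mask that has bits 0-3 and
9 set (assuming the Linux-style semantics these specifiers mirror):

    printk("%*pb\n",  10, &mask);   /* prints "20f"   - hex form  */
    printk("%*pbl\n", 10, &mask);   /* prints "0-3,9" - list form */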
