|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3 1/2] libxc/xentrace: Replace xc_tbuf_set_cpu_mask with CPU mask with xc_cpumap_t instead of uint32_t
On Fri, 2014-06-20 at 15:33 -0400, Konrad Rzeszutek Wilk wrote:
> We replace the implementation of xc_tbuf_set_cpu_mask with
> an xc_cpumap_t instead of a uint32_t. This means we can use an
> arbitrary bitmap without being limited to the 32-bits as
> previously we were. Furthermore since there is only one
> user of xc_tbuf_set_cpu_mask we just replace it and
> its user in one go.
>
> We also add a macro which can be used by both libxc and
> xentrace.
>
> And update the man page to describe this behavior.
>
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
For the libxc bits: Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
> tools/libxc/xc_tbuf.c | 26 +++++++-----
> tools/libxc/xenctrl.h | 7 +++-
> tools/xentrace/xentrace.8 | 3 +
> tools/xentrace/xentrace.c | 97
> ++++++++++++++++++++++++++++++++++++++-------
> 4 files changed, 107 insertions(+), 26 deletions(-)
>
> diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c
> index 8777492..d54da8a 100644
> --- a/tools/libxc/xc_tbuf.c
> +++ b/tools/libxc/xc_tbuf.c
> @@ -113,15 +113,23 @@ int xc_tbuf_disable(xc_interface *xch)
> return tbuf_enable(xch, 0);
> }
>
> -int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask)
> +int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask, int bits)
> {
> DECLARE_SYSCTL;
> - DECLARE_HYPERCALL_BUFFER(uint8_t, bytemap);
> + DECLARE_HYPERCALL_BOUNCE(mask, XC_DIV_ROUND_UP(bits, 8),
> XC_HYPERCALL_BUFFER_BOUNCE_IN);
> int ret = -1;
> - uint64_t mask64 = mask;
> + int local_bits;
>
> - bytemap = xc_hypercall_buffer_alloc(xch, bytemap, sizeof(mask64));
> - if ( bytemap == NULL )
> + if ( bits <= 0 )
> + goto out;
> +
> + local_bits = xc_get_max_cpus(xch);
> + if ( bits > local_bits )
> + {
> + PERROR("Wrong amount of bits supplied: %d > %d!\n", bits,
> local_bits);
> + goto out;
> + }
> + if ( xc_hypercall_bounce_pre(xch, mask) )
> {
> PERROR("Could not allocate memory for xc_tbuf_set_cpu_mask
> hypercall");
> goto out;
> @@ -131,14 +139,12 @@ int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t
> mask)
> sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
> sysctl.u.tbuf_op.cmd = XEN_SYSCTL_TBUFOP_set_cpu_mask;
>
> - bitmap_64_to_byte(bytemap, &mask64, sizeof (mask64) * 8);
> -
> - set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
> - sysctl.u.tbuf_op.cpu_mask.nr_bits = sizeof(bytemap) * 8;
> + set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, mask);
> + sysctl.u.tbuf_op.cpu_mask.nr_bits = bits;
>
> ret = do_sysctl(xch, &sysctl);
>
> - xc_hypercall_buffer_free(xch, bytemap);
> + xc_hypercall_bounce_post(xch, mask);
>
> out:
> return ret;
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> index b55d857..b378312 100644
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -1449,6 +1449,11 @@ int xc_availheap(xc_interface *xch, int min_width, int
> max_width, int node,
> */
>
> /**
> + * Useful macro for converting byte arrays to bitmaps.
> + */
> +#define XC_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> +
> +/**
> * xc_tbuf_enable - enable tracing buffers
> *
> * @parm xch a handle to an open hypervisor interface
> @@ -1489,7 +1494,7 @@ int xc_tbuf_set_size(xc_interface *xch, unsigned long
> size);
> */
> int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
>
> -int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask);
> +int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask, int bits);
>
> int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
>
> diff --git a/tools/xentrace/xentrace.8 b/tools/xentrace/xentrace.8
> index ac18e9f..c176a96 100644
> --- a/tools/xentrace/xentrace.8
> +++ b/tools/xentrace/xentrace.8
> @@ -38,6 +38,9 @@ for new data.
> .TP
> .B -c, --cpu-mask=c
> set bitmask of CPUs to trace. It is limited to 32-bits.
> +If not specified, the cpu-mask of all of the available CPUs will be
> +constructed.
> +
> .TP
> .B -e, --evt-mask=e
> set event capture mask. If not specified the TRC_ALL will be used.
> diff --git a/tools/xentrace/xentrace.c b/tools/xentrace/xentrace.c
> index 8a38e32..ee1d021 100644
> --- a/tools/xentrace/xentrace.c
> +++ b/tools/xentrace/xentrace.c
> @@ -52,7 +52,7 @@ typedef struct settings_st {
> char *outfile;
> unsigned long poll_sleep; /* milliseconds to sleep between polls */
> uint32_t evt_mask;
> - uint32_t cpu_mask;
> + xc_cpumap_t cpu_mask;
> unsigned long tbuf_size;
> unsigned long disk_rsvd;
> unsigned long timeout;
> @@ -521,23 +521,66 @@ static struct t_struct *map_tbufs(unsigned long
> tbufs_mfn, unsigned int num,
> return &tbufs;
> }
>
> +void print_cpu_mask(xc_cpumap_t mask, int bits)
> +{
> + unsigned int v, had_printed = 0;
> + int i;
> +
> + fprintf(stderr, "change cpumask to 0x");
> +
> + for ( i = XC_DIV_ROUND_UP(bits, 8); i >= 0; i-- )
> + {
> + v = mask[i];
> + if ( v || had_printed || !i ) {
> + fprintf(stderr,"%x", v);
> + had_printed = 1;
> + }
> + }
> + fprintf(stderr, "\n");
> +}
> +
> +static void set_cpu_mask(xc_cpumap_t mask)
> +{
> + int bits, i, ret = 0;
> +
> + bits = xc_get_max_cpus(xc_handle);
> + if ( bits <= 0 )
> + goto out;
> +
> + if ( !mask )
> + {
> + mask = xc_cpumap_alloc(xc_handle);
> + if ( !mask )
> + goto out;
> +
> + /* Set it to include _all_ CPUs. */
> + for ( i = 0; i < XC_DIV_ROUND_UP(bits, 8); i++ )
> + mask[i] = 0xff;
> + }
> + /* And this will limit it to the exact amount of bits. */
> + ret = xc_tbuf_set_cpu_mask(xc_handle, mask, bits);
> + if ( ret != 0 )
> + goto out;
> +
> + print_cpu_mask(mask, bits);
> + return;
> +out:
> + PERROR("Failure to get trace buffer pointer from Xen and set the new
> mask");
> + exit(EXIT_FAILURE);
> +}
> +
> /**
> - * set_mask - set the cpu/event mask in HV
> + * set_mask - set the event mask in HV
> * @mask: the new mask
> * @type: the new mask type,0-event mask, 1-cpu mask
> *
> */
> -static void set_mask(uint32_t mask, int type)
> +static void set_evt_mask(uint32_t mask)
> {
> int ret = 0;
>
> - if (type == 1) {
> - ret = xc_tbuf_set_cpu_mask(xc_handle, mask);
> - fprintf(stderr, "change cpumask to 0x%x\n", mask);
> - } else if (type == 0) {
> - ret = xc_tbuf_set_evt_mask(xc_handle, mask);
> - fprintf(stderr, "change evtmask to 0x%x\n", mask);
> - }
> + ret = xc_tbuf_set_evt_mask(xc_handle, mask);
> + fprintf(stderr, "change evtmask to 0x%x\n", mask);
>
> if ( ret != 0 )
> {
> @@ -906,6 +949,23 @@ static int parse_evtmask(char *arg)
> return 0;
> }
>
> +static int parse_cpumask(const char *arg)
> +{
> + xc_cpumap_t map;
> + uint32_t v, i;
> +
> + map = malloc(sizeof(uint32_t));
> + if ( !map )
> + return -ENOMEM;
> +
> + v = argtol(arg, 0);
> + for ( i = 0; i < sizeof(uint32_t); i++ )
> + map[i] = (v >> (i * 8)) & 0xff;
> +
> + opts.cpu_mask = map;
> + return 0;
> +}
> +
> /* parse command line arguments */
> static void parse_args(int argc, char **argv)
> {
> @@ -937,7 +997,12 @@ static void parse_args(int argc, char **argv)
> break;
>
> case 'c': /* set new cpu mask for filtering*/
> - opts.cpu_mask = argtol(optarg, 0);
> + /* Set opts.cpu_mask later as we don't have 'xch' set yet. */
> + if ( parse_cpumask(optarg) )
> + {
> + perror("Not enough memory!");
> + exit(EXIT_FAILURE);
> + }
> break;
>
> case 'e': /* set new event mask for filtering*/
> @@ -1002,7 +1067,7 @@ int main(int argc, char **argv)
> opts.outfile = 0;
> opts.poll_sleep = POLL_SLEEP_MILLIS;
> opts.evt_mask = 0;
> - opts.cpu_mask = 0;
> + opts.cpu_mask = NULL;
> opts.disk_rsvd = 0;
> opts.disable_tracing = 1;
> opts.start_disabled = 0;
> @@ -1018,10 +1083,12 @@ int main(int argc, char **argv)
> }
>
> if ( opts.evt_mask != 0 )
> - set_mask(opts.evt_mask, 0);
> + set_evt_mask(opts.evt_mask);
> +
>
> - if ( opts.cpu_mask != 0 )
> - set_mask(opts.cpu_mask, 1);
> + set_cpu_mask(opts.cpu_mask);
> +    /* We don't use it past this point. */
> +    free(opts.cpu_mask);
>
> if ( opts.timeout != 0 )
> alarm(opts.timeout);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our infrastructure around the clock. |