
Re: [PATCH] xen/arm: Expose the PMU to the guests


  • To: Michal Orzel <Michal.Orzel@xxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Thu, 30 Sep 2021 09:43:04 +0000
  • Accept-language: en-GB, en-US
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Ian Jackson <iwj@xxxxxxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Juergen Gross <jgross@xxxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Julien Grall <jgrall@xxxxxxxxxx>
  • Delivery-date: Thu, 30 Sep 2021 09:43:29 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH] xen/arm: Expose the PMU to the guests

Hi Michal,

> On 30 Sep 2021, at 10:26, Michal Orzel <Michal.Orzel@xxxxxxx> wrote:
> 
> Add a vpmu parameter to the xl domain configuration syntax
> to enable access to the PMU registers by disabling
> the PMU traps.
> 
> This change does not expose the full PMU to the guest.
> Long term, we will want to at least expose the PMU
> interrupts and device-tree binding. For more use cases
> we will also need to save/restore the PMU context.
> 
> Signed-off-by: Michal Orzel <michal.orzel@xxxxxxx>
> Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
Reviewed-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
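
As a side note for anyone who wants to try this: below is a minimal
guest configuration sketch using the new option (the domain name,
kernel path and sizes are only illustrative):

    name = "vpmu-guest"
    kernel = "/path/to/guest-kernel"
    memory = 256
    vcpus = 1
    # Disable the PMU traps so the guest can access the PMU registers.
    # When omitted, vpmu defaults to false (traps stay enabled).
    vpmu = 1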

Cheers
Bertrand

> ---
> docs/man/xl.cfg.5.pod.in         | 12 ++++++++++++
> tools/include/libxl.h            |  7 +++++++
> tools/libs/light/libxl_arm.c     |  6 +++++-
> tools/libs/light/libxl_types.idl |  1 +
> tools/xl/xl_parse.c              |  2 ++
> xen/arch/arm/domain.c            | 10 ++++++++--
> xen/common/domain.c              |  2 +-
> xen/include/asm-arm/domain.h     |  1 +
> xen/include/public/domctl.h      |  5 ++++-
> 9 files changed, 41 insertions(+), 5 deletions(-)
> 
> diff --git a/docs/man/xl.cfg.5.pod.in b/docs/man/xl.cfg.5.pod.in
> index 4b1e3028d2..4a75203b9f 100644
> --- a/docs/man/xl.cfg.5.pod.in
> +++ b/docs/man/xl.cfg.5.pod.in
> @@ -2843,6 +2843,18 @@ Currently, only the "sbsa_uart" model is supported for ARM.
> 
> =back
> 
> +=item B<vpmu=BOOLEAN>
> +
> +Specifies whether to enable access to the PMU registers by disabling
> +the PMU traps.
> +
> +This change does not expose the full PMU to the guest.
> +Currently there is no support for virtualization, interrupts, etc.
> +
> +Enabling this option may result in security holes.
> +
> +If this option is not specified then it will default to B<false>.
> +
> =head3 x86
> 
> =over 4
> diff --git a/tools/include/libxl.h b/tools/include/libxl.h
> index b9ba16d698..c6694e977d 100644
> --- a/tools/include/libxl.h
> +++ b/tools/include/libxl.h
> @@ -502,6 +502,13 @@
>  */
> #define LIBXL_HAVE_X86_MSR_RELAXED 1
> 
> +/*
> + * LIBXL_HAVE_ARM_VPMU indicates the toolstack has support for enabling
> + * access to the PMU registers by disabling the PMU traps. This is done
> + * by setting the libxl_domain_build_info arch_arm.vpmu field.
> + */
> +#define LIBXL_HAVE_ARM_VPMU 1
> +
> /*
>  * libxl ABI compatibility
>  *
> diff --git a/tools/libs/light/libxl_arm.c b/tools/libs/light/libxl_arm.c
> index e3140a6e00..bc614dcb05 100644
> --- a/tools/libs/light/libxl_arm.c
> +++ b/tools/libs/light/libxl_arm.c
> @@ -29,6 +29,9 @@ int libxl__arch_domain_prepare_config(libxl__gc *gc,
>     uint32_t vuart_irq;
>     bool vuart_enabled = false;
> 
> +    if (libxl_defbool_val(d_config->b_info.arch_arm.vpmu))
> +        config->flags |= XEN_DOMCTL_CDF_pmu;
> +
>     /*
>      * If pl011 vuart is enabled then increment the nr_spis to allow allocation
>      * of SPI VIRQ for pl011.
> @@ -1186,8 +1189,9 @@ void libxl__arch_domain_create_info_setdefault(libxl__gc *gc,
> void libxl__arch_domain_build_info_setdefault(libxl__gc *gc,
>                                               libxl_domain_build_info *b_info)
> {
> -    /* ACPI is disabled by default */
> +    /* ACPI and vPMU are disabled by default */
>     libxl_defbool_setdefault(&b_info->acpi, false);
> +    libxl_defbool_setdefault(&b_info->arch_arm.vpmu, false);
> 
>     if (b_info->type != LIBXL_DOMAIN_TYPE_PV)
>         return;
> diff --git a/tools/libs/light/libxl_types.idl b/tools/libs/light/libxl_types.idl
> index 3f9fff653a..3524629909 100644
> --- a/tools/libs/light/libxl_types.idl
> +++ b/tools/libs/light/libxl_types.idl
> @@ -644,6 +644,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
> 
>     ("arch_arm", Struct(None, [("gic_version", libxl_gic_version),
>                                ("vuart", libxl_vuart_type),
> +                               ("vpmu", libxl_defbool),
>                               ])),
>     ("arch_x86", Struct(None, [("msr_relaxed", libxl_defbool),
>                               ])),
> diff --git a/tools/xl/xl_parse.c b/tools/xl/xl_parse.c
> index 17dddb4cd5..e15c2e64f5 100644
> --- a/tools/xl/xl_parse.c
> +++ b/tools/xl/xl_parse.c
> @@ -2750,6 +2750,8 @@ skip_usbdev:
>                     "If it fixes an issue you are having please report to "
>                     "xen-devel@xxxxxxxxxxxxxxxxxxxx.\n");
> 
> +    xlu_cfg_get_defbool(config, "vpmu", &b_info->arch_arm.vpmu, 0);
> +
>     xlu_cfg_destroy(config);
> }
> 
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 19c756ac3d..02120ff640 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -276,6 +276,8 @@ static void ctxt_switch_to(struct vcpu *n)
>      * timer. The interrupt needs to be injected into the guest. */
>     WRITE_SYSREG(n->arch.cntkctl, CNTKCTL_EL1);
>     virt_timer_restore(n);
> +
> +    WRITE_SYSREG(n->arch.mdcr_el2, MDCR_EL2);
> }
> 
> /* Update per-VCPU guest runstate shared memory area (if registered). */
> @@ -586,6 +588,10 @@ int arch_vcpu_create(struct vcpu *v)
> 
>     v->arch.hcr_el2 = get_default_hcr_flags();
> 
> +    v->arch.mdcr_el2 = HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;
> +    if ( !(v->domain->options & XEN_DOMCTL_CDF_pmu) )
> +        v->arch.mdcr_el2 |= HDCR_TPM | HDCR_TPMCR;
> +
>     if ( (rc = vcpu_vgic_init(v)) != 0 )
>         goto fail;
> 
> @@ -622,8 +628,8 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
> {
>     unsigned int max_vcpus;
> 
> -    /* HVM and HAP must be set. IOMMU may or may not be */
> -    if ( (config->flags & ~XEN_DOMCTL_CDF_iommu) !=
> +    /* HVM and HAP must be set. IOMMU and PMU may or may not be */
> +    if ( (config->flags & ~(XEN_DOMCTL_CDF_iommu | XEN_DOMCTL_CDF_pmu)) !=
>          (XEN_DOMCTL_CDF_hvm | XEN_DOMCTL_CDF_hap) )
>     {
>         dprintk(XENLOG_INFO, "Unsupported configuration %#x\n",
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 6ee5d033b0..63c4db8b9f 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -483,7 +483,7 @@ static int sanitise_domain_config(struct xen_domctl_createdomain *config)
>          ~(XEN_DOMCTL_CDF_hvm | XEN_DOMCTL_CDF_hap |
>            XEN_DOMCTL_CDF_s3_integrity | XEN_DOMCTL_CDF_oos_off |
>            XEN_DOMCTL_CDF_xs_domain | XEN_DOMCTL_CDF_iommu |
> -           XEN_DOMCTL_CDF_nested_virt) )
> +           XEN_DOMCTL_CDF_nested_virt | XEN_DOMCTL_CDF_pmu) )
>     {
>         dprintk(XENLOG_INFO, "Unknown CDF flags %#x\n", config->flags);
>         return -EINVAL;
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index c9277b5c6d..14e575288f 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -166,6 +166,7 @@ struct arch_vcpu
> 
>     /* HYP configuration */
>     register_t hcr_el2;
> +    register_t mdcr_el2;
> 
>     uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
> #ifdef CONFIG_ARM_32
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 96696e3842..a55ceb81db 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -70,9 +70,12 @@ struct xen_domctl_createdomain {
> #define XEN_DOMCTL_CDF_iommu          (1U<<_XEN_DOMCTL_CDF_iommu)
> #define _XEN_DOMCTL_CDF_nested_virt   6
> #define XEN_DOMCTL_CDF_nested_virt    (1U << _XEN_DOMCTL_CDF_nested_virt)
> +/* Should we expose the vPMU to the guest? */
> +#define _XEN_DOMCTL_CDF_pmu           7
> +#define XEN_DOMCTL_CDF_pmu            (1U << _XEN_DOMCTL_CDF_pmu)
> 
> /* Max XEN_DOMCTL_CDF_* constant.  Used for ABI checking. */
> -#define XEN_DOMCTL_CDF_MAX XEN_DOMCTL_CDF_nested_virt
> +#define XEN_DOMCTL_CDF_MAX XEN_DOMCTL_CDF_pmu
> 
>     uint32_t flags;
> 
> -- 
> 2.29.0
> 
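
For toolstack consumers, here is a hedged sketch of how the new
LIBXL_HAVE_ARM_VPMU guard and the arch_arm.vpmu field could be used
when filling in the build info (the helper name is made up and error
handling is omitted):

    #include <stdbool.h>
    #include <libxl.h>

    /* Sketch only: request PMU register access for a guest when the
     * installed libxl advertises the new field. */
    static void maybe_enable_vpmu(libxl_domain_build_info *b_info,
                                  bool want_pmu)
    {
    #ifdef LIBXL_HAVE_ARM_VPMU
        /* arch_arm.vpmu is the defbool added by this patch; when left
         * unset it defaults to false in libxl. */
        libxl_defbool_set(&b_info->arch_arm.vpmu, want_pmu);
    #else
        (void)b_info;
        (void)want_pmu;
    #endif
    }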