Re: [Xen-devel] [PATCH 04/13] xen/arm: Add ARCH_WORKAROUND_2 probing



On Tue, 22 May 2018, Julien Grall wrote:
> As with Spectre variant 2, we rely on SMCCC 1.1 to provide the discovery
> mechanism for detecting the SSBD mitigation.
> 
> A new capability and a config option are also allocated for that
> purpose.
> 
> This is part of XSA-263.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> ---
>  xen/arch/arm/Kconfig             | 10 ++++++++++
>  xen/arch/arm/cpuerrata.c         | 39 +++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-arm/cpuerrata.h  | 21 +++++++++++++++++++++
>  xen/include/asm-arm/cpufeature.h |  3 ++-
>  xen/include/asm-arm/smccc.h      |  6 ++++++
>  5 files changed, 78 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
> index 8174c0c635..0e2d027060 100644
> --- a/xen/arch/arm/Kconfig
> +++ b/xen/arch/arm/Kconfig
> @@ -73,6 +73,16 @@ config SBSA_VUART_CONSOLE
>         Allows a guest to use SBSA Generic UART as a console. The
>         SBSA Generic UART implements a subset of ARM PL011 UART.
>  
> +config ARM_SSBD
> +     bool "Speculative Store Bypass Disable" if EXPERT = "y"
> +     depends on HAS_ALTERNATIVE
> +     default y
> +     help
> +       This enables mitigation of bypassing of previous stores by speculative
> +       loads.

I would add a reference to Spectre v4. What do you think of:

  This enables the mitigation of Spectre v4 attacks based on bypassing
  of previous memory stores by speculative loads.


> +       If unsure, say Y.
> +
>  endmenu
>  
>  menu "ARM errata workaround via the alternative framework"
> diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c
> index 1baa20654b..bcea2eb6e5 100644
> --- a/xen/arch/arm/cpuerrata.c
> +++ b/xen/arch/arm/cpuerrata.c
> @@ -235,6 +235,39 @@ static int enable_ic_inv_hardening(void *data)
>  
>  #endif
>  
> +#ifdef CONFIG_ARM_SSBD
> +
> +/*
> + * Assembly code may use the variable directly, so we need to make sure
> + * it fits in a register.
> + */
> +DEFINE_PER_CPU_READ_MOSTLY(register_t, ssbd_callback_required);
> +
> +static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry)
> +{
> +    struct arm_smccc_res res;
> +    bool supported = true;
> +
> +    if ( smccc_ver < SMCCC_VERSION(1, 1) )
> +        return false;
> +
> +    /*
> +     * The probe function return value is either negative (unsupported
> +     * or mitigated), positive (unaffected), or zero (requires
> +     * mitigation). We only need to do anything in the last case.
> +     */
> +    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
> +                      ARM_SMCCC_ARCH_WORKAROUND_2_FID, &res);
> +    if ( (int)res.a0 != 0 )
> +        supported = false;
> +
> +    if ( supported )
> +        this_cpu(ssbd_callback_required) = 1;
> +
> +    return supported;
> +}
> +#endif
> +
>  #define MIDR_RANGE(model, min, max)     \
>      .matches = is_affected_midr_range,  \
>      .midr_model = model,                \
> @@ -336,6 +369,12 @@ static const struct arm_cpu_capabilities arm_errata[] = {
>          .enable = enable_ic_inv_hardening,
>      },
>  #endif
> +#ifdef CONFIG_ARM_SSBD
> +    {
> +        .capability = ARM_SSBD,
> +        .matches = has_ssbd_mitigation,
> +    },
> +#endif
>      {},
>  };
>  
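
Not a functional issue, but the "supported" local in
has_ssbd_mitigation() looks unnecessary: it is only used to decide both
the per-cpu write and the return value. Something like the following
(same behaviour, just restructured) reads more directly:

    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
                      ARM_SMCCC_ARCH_WORKAROUND_2_FID, &res);
    if ( (int)res.a0 != 0 )
        return false;

    this_cpu(ssbd_callback_required) = 1;

    return true;

Feel free to ignore this if a later patch in the series grows the
function in a way that needs the local.
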
> diff --git a/xen/include/asm-arm/cpuerrata.h b/xen/include/asm-arm/cpuerrata.h
> index 4e45b237c8..e628d3ff56 100644
> --- a/xen/include/asm-arm/cpuerrata.h
> +++ b/xen/include/asm-arm/cpuerrata.h
> @@ -27,9 +27,30 @@ static inline bool check_workaround_##erratum(void)        \
>  
>  CHECK_WORKAROUND_HELPER(766422, ARM32_WORKAROUND_766422, CONFIG_ARM_32)
>  CHECK_WORKAROUND_HELPER(834220, ARM64_WORKAROUND_834220, CONFIG_ARM_64)
> +CHECK_WORKAROUND_HELPER(ssbd, ARM_SSBD, CONFIG_ARM_SSBD)
>  
>  #undef CHECK_WORKAROUND_HELPER
>  
> +#ifdef CONFIG_ARM_SSBD
> +
> +#include <asm/current.h>
> +
> +DECLARE_PER_CPU(register_t, ssbd_callback_required);

It is becoming more common to have per-cpu capabilities and workarounds
(or at least per-MPIDR). Instead of adding this ad-hoc variable, should
we make cpu_hwcaps per-cpu, then implement this check with
cpus_have_cap (which would become per-cpu as well)?

It looks like the code would be simpler.
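
Completely untested sketch of what I mean, just to show the idea (the
PER_CPU plumbing for an array might need adjusting, so take the exact
syntax with a grain of salt):

    /* One capability bitmap per CPU instead of the global one. */
    DEFINE_PER_CPU_READ_MOSTLY(unsigned long,
                               cpu_hwcaps[BITS_TO_LONGS(ARM_NCAPS)]);

    static inline bool cpus_have_cap(unsigned int num)
    {
        if ( num >= ARM_NCAPS )
            return false;

        /* Check the bitmap of the CPU we are currently running on. */
        return test_bit(num, this_cpu(cpu_hwcaps));
    }

Then cpu_require_ssbd_mitigation() would just be
cpus_have_cap(ARM_SSBD) and the dedicated per-cpu variable could go
away.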


> +static inline bool cpu_require_ssbd_mitigation(void)
> +{
> +    return this_cpu(ssbd_callback_required);
> +}
> +
> +#else
> +
> +static inline bool cpu_require_ssbd_mitigation(void)
> +{
> +    return false;
> +}
> +
> +#endif
> +
>  #endif /* __ARM_CPUERRATA_H__ */
>  /*
>   * Local variables:
> diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h
> index e557a095af..2a5c075d3b 100644
> --- a/xen/include/asm-arm/cpufeature.h
> +++ b/xen/include/asm-arm/cpufeature.h
> @@ -43,8 +43,9 @@
>  #define SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT 5
>  #define SKIP_CTXT_SWITCH_SERROR_SYNC 6
>  #define ARM_HARDEN_BRANCH_PREDICTOR 7
> +#define ARM_SSBD 8
>  
> -#define ARM_NCAPS           8
> +#define ARM_NCAPS           9
>  
>  #ifndef __ASSEMBLY__
>  
> diff --git a/xen/include/asm-arm/smccc.h b/xen/include/asm-arm/smccc.h
> index 8342cc33fe..650744d28b 100644
> --- a/xen/include/asm-arm/smccc.h
> +++ b/xen/include/asm-arm/smccc.h
> @@ -258,6 +258,12 @@ struct arm_smccc_res {
>                        ARM_SMCCC_OWNER_ARCH,         \
>                        0x8000)
>  
> +#define ARM_SMCCC_ARCH_WORKAROUND_2_FID             \
> +    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,         \
> +                       ARM_SMCCC_CONV_32,           \
> +                       ARM_SMCCC_OWNER_ARCH,        \
> +                       0x7FFF)
> +
>  /* SMCCC error codes */
>  #define ARM_SMCCC_ERR_UNKNOWN_FUNCTION  (-1)
>  #define ARM_SMCCC_NOT_SUPPORTED         (-1)
> -- 
> 2.11.0
> 
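
One last nit on the smccc.h change: expanding ARM_SMCCC_CALL_VAL by
hand (assuming the usual encoding, i.e. the fast-call bit is bit 31,
the 32-bit convention leaves bit 30 clear, and ARM_SMCCC_OWNER_ARCH is
0 at shift 24):

    (1u << 31) | (0u << 30) | (0u << 24) | 0x7FFF = 0x80007FFF

which, if I am reading the macros right, matches the
SMCCC_ARCH_WORKAROUND_2 function ID in the spec, so the definition
looks correct to me.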
