
Re: [Xen-devel] [PATCH v2 01/14] x86/cpu: Create Hygon Dhyana architecture support file



>>> On 21.02.19 at 10:48, <puwen@xxxxxxxx> wrote:
> Add x86 architecture support for a new processor: Hygon Dhyana Family
> 18h. Carve out the initialization code needed by Dhyana from amd.c into
> a separate file, hygon.c, removing unneeded code and making the Hygon
> initialization code clearer.
> 
> To identify Hygon Dhyana CPUs, add a new vendor type, X86_VENDOR_HYGON,
> for system recognition.
> 
> As opt_cpuid_mask_l7s0_eax and opt_cpuid_mask_l7s0_ebx are used by both
> AMD and Hygon, move them to common.c.

I'm not convinced of this - it'll get in the way of introducing something
like Linux's CONFIG_CPU_SUP_*. Our command line parsing logic handles
multiple instances of an option quite fine, I think, so I'd prefer if
these were kept static to both files. Or did Andrew ask you to do this?
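
Something like this (an untested sketch of what I mean - the parser
applies every registered instance of a given option in turn), kept
private to amd.c and duplicated in hygon.c rather than moved to common.c:

    static unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
    integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);

    static unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
    integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);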

> --- a/xen/arch/x86/cpu/Makefile
> +++ b/xen/arch/x86/cpu/Makefile
> @@ -8,4 +8,5 @@ obj-y += intel.o
>  obj-y += intel_cacheinfo.o
>  obj-y += mwait-idle.o
>  obj-y += shanghai.o
> +obj-y += hygon.o
>  obj-y += vpmu.o vpmu_amd.o vpmu_intel.o

Please insert at the alphabetically correct place.
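
I.e. (assuming the earlier part of the list is already sorted) something
like:

     obj-y += common.o
    +obj-y += hygon.o
     obj-y += intel.o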

> --- a/xen/arch/x86/cpu/cpu.h
> +++ b/xen/arch/x86/cpu/cpu.h
> @@ -13,11 +13,13 @@ extern bool_t opt_arat;
>  extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
>  extern unsigned int opt_cpuid_mask_xsave_eax;
>  extern unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
> +extern unsigned int opt_cpuid_mask_l7s0_eax, opt_cpuid_mask_l7s0_ebx;
>  
>  extern int get_model_name(struct cpuinfo_x86 *c);
>  extern void display_cacheinfo(struct cpuinfo_x86 *c);
>  
>  int intel_cpu_init(void);
>  int amd_init_cpu(void);
> +int hygon_init_cpu(void);
>  int centaur_init_cpu(void);
>  int shanghai_init_cpu(void);

Whereas here you should really put the new declaration at the end.
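
I.e.:

     int intel_cpu_init(void);
     int amd_init_cpu(void);
     int centaur_init_cpu(void);
     int shanghai_init_cpu(void);
    +int hygon_init_cpu(void);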

> --- /dev/null
> +++ b/xen/arch/x86/cpu/hygon.c
> @@ -0,0 +1,248 @@
> +#include <xen/init.h>
> +#include <asm/processor.h>
> +#include <asm/hvm/support.h>
> +#include <asm/spec_ctrl.h>
> +
> +#include "cpu.h"
> +
> +/*
> + * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
> + * set caps in levelling_caps if it is found.  Returns the default value.
> + */
> +static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
> +{
> +     uint64_t value;
> +
> +     expected_levelling_cap |= caps;
> +
> +     if ((rdmsr_safe(msr, value) == 0) && (wrmsr_safe(msr, value) == 0))
> +             levelling_caps |= caps;
> +
> +     return value;
> +}
> +
> +/* Probe for the existence of the expected masking MSRs. */

Please don't drop the other part of the original comment.

> +static void __init noinline probe_masking_msrs(void)
> +{
> +     const struct cpuinfo_x86 *c = &boot_cpu_data;
> +
> +     /* Work out which masking MSRs we should have. */
> +     cpuidmask_defaults._1cd =
> +             _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
> +     cpuidmask_defaults.e1cd =
> +             _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
> +     if (c->cpuid_level >= 7)
> +             cpuidmask_defaults._7ab0 =
> +                     _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);

There's more relevant code here in the original function.

> +}
> +
> +/*
> + * Context switch CPUID masking state to the next domain.  Only called if
> + * CPUID Faulting isn't available, but masking MSRs have been detected.  A
> + * parameter of NULL is used to context switch to the default host state (by
> + * the cpu bringup-code, crash path, etc).
> + */
> +static void hygon_ctxt_switch_masking(const struct vcpu *next)
> +{
> +     struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> +     const struct domain *nextd = next ? next->domain : NULL;
> +     const struct cpuidmasks *masks =
> +             (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
> +             ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
> +
> +     if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> +             uint64_t val = masks->_1cd;
> +
> +             /*
> +              * OSXSAVE defaults to 1, which causes fast-forwarding of
> +              * Xen's real setting.  Clobber it if disabled by the guest
> +              * kernel.
> +              */
> +             if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
> +                 !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
> +                     val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
> +
> +             if (unlikely(these_masks->_1cd != val)) {
> +                     wrmsrl(MSR_K8_FEATURE_MASK, val);
> +                     these_masks->_1cd = val;
> +             }
> +     }
> +
> +#define LAZY(cap, msr, field)                                           \
> +     ({                                                              \
> +             if (unlikely(these_masks->field != masks->field) &&     \
> +                 ((levelling_caps & cap) == cap)) {                      \
> +                     wrmsrl(msr, masks->field);                      \
> +                     these_masks->field = masks->field;              \
> +             }                                                       \
> +     })
> +
> +     LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
> +     LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
> +#undef LAZY
> +}
> +
> +/*
> + * Mask the features and extended features returned by CPUID.  Parameters are
> + * set from the boot line via user-defined masks.
> + */
> +static void __init noinline hygon_init_levelling(void)
> +{
> +     probe_masking_msrs();
> +
> +     if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> +             uint32_t ecx, edx, tmp;
> +
> +             cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
> +
> +             if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
> +                     ecx &= opt_cpuid_mask_ecx;
> +                     edx &= opt_cpuid_mask_edx;
> +             }
> +
> +             /* Fast-forward bits - Must be set. */
> +             if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
> +                     ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
> +             edx |= cpufeat_mask(X86_FEATURE_APIC);
> +
> +             /* Allow the HYPERVISOR bit to be set via guest policy. */
> +             ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> +
> +             cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
> +     }
> +
> +     if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
> +             uint32_t ecx, edx, tmp;
> +
> +             cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
> +
> +             if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
> +                     ecx &= opt_cpuid_mask_ext_ecx;
> +                     edx &= opt_cpuid_mask_ext_edx;
> +             }
> +
> +             /* Fast-forward bits - Must be set. */
> +             edx |= cpufeat_mask(X86_FEATURE_APIC);
> +
> +             cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
> +     }
> +
> +     if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
> +             uint32_t eax, ebx, tmp;
> +
> +             cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
> +
> +             if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
> +                     eax &= opt_cpuid_mask_l7s0_eax;
> +                     ebx &= opt_cpuid_mask_l7s0_ebx;
> +             }
> +
> +             cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
> +     }
> +
> +     if (opt_cpu_info) {
> +             printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
> +             printk(XENLOG_INFO
> +                    "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
> +                    "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x\n",
> +                    (uint32_t)cpuidmask_defaults._1cd,
> +                    (uint32_t)(cpuidmask_defaults._1cd >> 32),
> +                    (uint32_t)cpuidmask_defaults.e1cd,
> +                    (uint32_t)(cpuidmask_defaults.e1cd >> 32),
> +                    (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
> +                    (uint32_t)cpuidmask_defaults._7ab0);
> +     }
> +
> +     if (levelling_caps)
> +             ctxt_switch_masking = hygon_ctxt_switch_masking;
> +}

This is a lot of duplicated code with only minor differences. I think
you would be better off calling into the original AMD functions.
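
As a rough sketch (untested, and assuming I recall the amd.c names
correctly; they'd need their "static" dropped and declarations added to
cpu.h):

    /* cpu.h */
    void amd_init_levelling(void);
    void amd_ctxt_switch_masking(const struct vcpu *next);

hygon.c could then drop probe_masking_msrs(), hygon_ctxt_switch_masking(),
and hygon_init_levelling() altogether and call amd_init_levelling()
instead, which, if I'm not mistaken, also takes care of installing the
context switch hook when any levelling capability was found.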

> +static void init_hygon(struct cpuinfo_x86 *c)
> +{
> +     u32 l, h;
> +     unsigned long long value;
> +
> +     /* Attempt to set lfence to be Dispatch Serialising. */
> +     if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
> +             /* Unable to read.  Assume the safer default. */
> +             __clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
> +     else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
> +             /* Already dispatch serialising. */
> +             __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);

Didn't you cut off too much from the original code (which again
may better be shared, by splitting out into a function)?
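
I.e. also keeping the attempt to actually turn the bit on when it isn't
already set. Something along these lines as a shared helper (untested
sketch; the name is only a suggestion, and it would want a declaration
in cpu.h), which init_amd() would then use as well:

    void amd_probe_lfence_dispatch(struct cpuinfo_x86 *c)
    {
            uint64_t value;

            /* Attempt to set lfence to be Dispatch Serialising. */
            if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
                    /* Unable to read.  Assume the safer default. */
                    __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
                                c->x86_capability);
            else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
                    /* Already dispatch serialising. */
                    __set_bit(X86_FEATURE_LFENCE_DISPATCH,
                              c->x86_capability);
            else if (wrmsr_safe(MSR_AMD64_DE_CFG,
                                value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
                     rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
                     !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
                    /* Attempt to set failed.  Assume the safer default. */
                    __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
                                c->x86_capability);
            else
                    /* Successfully enabled. */
                    __set_bit(X86_FEATURE_LFENCE_DISPATCH,
                              c->x86_capability);
    }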

> +     /*
> +      * If the user has explicitly chosen to disable Memory Disambiguation
> +      * to mitigate Speculative Store Bypass, poke the appropriate MSR.
> +      */
> +     if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
> +             value |= 1ull << 10;
> +             wrmsr_safe(MSR_AMD64_LS_CFG, value);
> +     }
> +
> +     display_cacheinfo(c);
> +
> +     if (cpu_has(c, X86_FEATURE_ITSC)) {
> +             __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
> +             __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
> +             __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
> +     }
> +
> +     c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
> +
> +     hygon_get_topology(c);
> +
> +     /* Hygon CPUs do not support SYSENTER outside of legacy mode. */
> +     __clear_bit(X86_FEATURE_SEP, c->x86_capability);
> +
> +     /* Hygon processors have APIC timer running in deep C states. */
> +     if ( opt_arat )

Please don't copy the bad spaces inside the parentheses here.
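I.e. simply "if (opt_arat)", matching the style used in the rest of the
file.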

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

