
[PATCH 10/16] x86/amd: Always probe and configure the masking MSRs

This allows the infrastructure to be reused for system-wide quirk/errata
adjustments.
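
As a purely hypothetical illustration of the intended reuse (the helper and
EXAMPLE_FEATURE_MASK are invented for this sketch; levelling_caps, LCAP_1cd
and cpuidmask_defaults are the existing infrastructure in amd.c):

  /* Hypothetical boot-time erratum workaround, for illustration only. */
  static void __init apply_example_erratum(void)
  {
          /* Nothing to do if the leaf 1 masking MSR wasn't probed. */
          if ((levelling_caps & LCAP_1cd) != LCAP_1cd)
                  return;

          /*
           * Clear an illustrative feature bit in the system-wide mask
           * defaults.  init_amd() subsequently calls
           * amd_ctxt_switch_masking(NULL) on each CPU, which writes the
           * adjusted defaults into the masking MSRs.
           */
          cpuidmask_defaults._1cd &= ~EXAMPLE_FEATURE_MASK;
  }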

Replace the call to ctxt_switch_levelling() with a direct call to
amd_ctxt_switch_masking().  The CPUID Faulting aspect is not interesting at
this point in boot, and we want to explicitly propagate the masking MSR
defaults into APs.
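
For context, a condensed sketch of what amd_ctxt_switch_masking(NULL) does
(simplified from the function in amd.c, which handles all of the probed
masking MSRs via a helper macro):

  void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
  {
          struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
          const struct domain *nextd = next ? next->domain : NULL;
          /* next == NULL (AP bringup, crash path) selects the defaults. */
          const struct cpuidmasks *masks =
                  (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
                  ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;

          if ((levelling_caps & LCAP_1cd) == LCAP_1cd &&
              unlikely(these_masks->_1cd != masks->_1cd)) {
                  wrmsr_amd(MSR_K8_FEATURE_MASK, masks->_1cd);
                  these_masks->_1cd = masks->_1cd;
          }
          /* ... and likewise for the other masking MSRs ... */
  }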

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Julian Vetter <julian.vetter@xxxxxxxxxx>
CC: Teddy Astie <teddy.astie@xxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c   | 15 +++++++++++----
 xen/arch/x86/cpu/cpu.h   |  1 +
 xen/arch/x86/cpu/hygon.c |  2 +-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 36fea2e0a299..e8daf7415bb0 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -162,7 +162,7 @@ static void __init noinline probe_masking_msrs(void)
  * parameter of NULL is used to context switch to the default host state (by
  * the cpu bringup-code, crash path, etc).
  */
-static void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
+void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
 {
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
        const struct domain *nextd = next ? next->domain : NULL;
@@ -242,9 +242,12 @@ static void __init amd_init_levelling(void)
            boot_cpu_has(X86_FEATURE_CPUID_USER_DIS)) {
                expected_levelling_cap |= LCAP_faulting;
                levelling_caps |= LCAP_faulting;
-               return;
        }
 
+       /*
+        * Always probe for the MSRs too.  We reuse the infrastructure for
+        * quirks/errata/etc during boot.
+        */
        probe_masking_msrs();
 
        if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
@@ -299,7 +302,7 @@ static void __init amd_init_levelling(void)
                       (uint32_t)cpuidmask_defaults._6c);
        }
 
-       if (levelling_caps)
+       if (levelling_caps && !(levelling_caps & LCAP_faulting))
                ctxt_switch_masking = amd_ctxt_switch_masking;
 }
 
@@ -1015,7 +1018,11 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
        u32 l, h;
        uint64_t value;
 
-       ctxt_switch_levelling(NULL);
+       /*
+        * Reuse amd_ctxt_switch_masking() explicitly.  This propagates
+        * quirk/errata adjustments made during early_init_amd() into the APs.
+        */
+       amd_ctxt_switch_masking(NULL);
 
        amd_init_de_cfg(c);
 
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index d2d37d1d5eec..cd93e51755af 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -20,6 +20,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 extern bool detect_extended_topology(struct cpuinfo_x86 *c);
 
 void cf_check early_init_amd(void);
+void cf_check amd_ctxt_switch_masking(const struct vcpu *next);
 void amd_log_freq(const struct cpuinfo_x86 *c);
 void amd_init_de_cfg(const struct cpuinfo_x86 *c);
 void amd_init_lfence_dispatch(void);
diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
index bb1624882499..3a04efef5028 100644
--- a/xen/arch/x86/cpu/hygon.c
+++ b/xen/arch/x86/cpu/hygon.c
@@ -32,7 +32,7 @@ static void cf_check init_hygon(struct cpuinfo_x86 *c)
 {
        unsigned long long value;
 
-       ctxt_switch_levelling(NULL);
+       amd_ctxt_switch_masking(NULL);
 
        amd_init_de_cfg(c);
 
-- 
2.39.5