
[Xen-devel] [PATCH 7/9] x86/amd: Support context switching legacy SSBD interface



Introduce amd_ctxt_switch_legacy_ssbd() as the single place which drives the
legacy SSBD interface.  It is critical that MSR_AMD64_LS_CFG is never modified
outside of this function, to avoid trampling on sibling settings.

For now, pass in NULL from the boot paths and just set Xen's default.  Later
patches will plumb in guest choices.  This supersedes the older code which
wrote to MSR_AMD64_LS_CFG once during boot.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
---
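
Purely illustrative note (not part of the patch): below is a minimal sketch of
how a later patch might plumb the call into a context-switch path.  The caller
name and placement are assumptions for illustration only; the point is that
boot paths pass NULL to force Xen's default, while a context switch would pass
the incoming vcpu so its choice can be applied.

    /* Hypothetical caller -- sketch only, name assumed for illustration. */
    static void hypothetical_ctxt_switch_spec(const struct vcpu *next)
    {
        /*
         * Only relevant when the legacy (non-architectural) SSBD interface
         * is in use.  Passing NULL restores Xen's default setting.
         */
        if ( cpu_has_legacy_ssbd )
            amd_ctxt_switch_legacy_ssbd(next);
    }
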
 xen/arch/x86/cpu/amd.c          | 89 ++++++++++++++++++++++++++++++++---------
 xen/arch/x86/smpboot.c          |  3 ++
 xen/include/asm-x86/processor.h |  2 +
 3 files changed, 75 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index ea10dbd..3a8ead9 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -442,6 +442,74 @@ static struct ssbd_ls_cfg {
 } *ssbd_ls_cfg[4];
 static unsigned int ssbd_max_cores;
 
+/*
+ * Must only be called when the LEGACY_SSBD is in use.  Called with NULL to
+ * switch back to Xen's default value.
+ */
+void amd_ctxt_switch_legacy_ssbd(const struct vcpu *next)
+{
+       static DEFINE_PER_CPU(bool, ssbd);
+       bool *this_ssbd = &this_cpu(ssbd);
+       bool disable = opt_ssbd;
+       struct cpuinfo_x86 *c = &current_cpu_data;
+       unsigned int socket = c->phys_proc_id, core = c->cpu_core_id;
+       struct ssbd_ls_cfg *cfg;
+       uint64_t val;
+
+       ASSERT(cpu_has_legacy_ssbd);
+
+       /*
+        * Update hardware lazily, as these MSRs are expensive.  However, on
+        * the boot paths which pass NULL, force a write to set a consistent
+        * initial state.
+        */
+       if (*this_ssbd == disable && next)
+               return;
+
+       if (cpu_has_virt_sc_ssbd) {
+               wrmsrl(MSR_VIRT_SPEC_CTRL,
+                      disable ? SPEC_CTRL_SSBD : 0);
+               goto done;
+       }
+
+       val = ls_cfg_base | (disable ? ls_cfg_ssbd_mask : 0);
+
+       if (c->x86 < 0x17 || c->x86_num_siblings == 1) {
+               /* No threads to be concerned with. */
+               wrmsrl(MSR_AMD64_LS_CFG, val);
+               goto done;
+       }
+
+       /* Check that we won't overflow the worst-case allocation. */
+       BUG_ON(socket >= ARRAY_SIZE(ssbd_ls_cfg));
+       BUG_ON(core   >= ssbd_max_cores);
+
+       cfg = &ssbd_ls_cfg[socket][core];
+
+       if (disable) {
+               spin_lock(&cfg->lock);
+
+               /* First sibling to disable updates hardware. */
+               if (!cfg->disable_count)
+                       wrmsrl(MSR_AMD64_LS_CFG, val);
+               cfg->disable_count++;
+
+               spin_unlock(&cfg->lock);
+       } else {
+               spin_lock(&cfg->lock);
+
+               /* Last sibling to enable updates hardware. */
+               cfg->disable_count--;
+               if (!cfg->disable_count)
+                       wrmsrl(MSR_AMD64_LS_CFG, val);
+
+               spin_unlock(&cfg->lock);
+       }
+
+ done:
+       *this_ssbd = disable;
+}
+
 static int __init amd_init_legacy_ssbd(void)
 {
        const struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -505,6 +573,8 @@ static int __init amd_init_legacy_ssbd(void)
                        spin_lock_init(&ssbd_ls_cfg[socket][core].lock);
        }
 
+       amd_ctxt_switch_legacy_ssbd(NULL);
+
        return 0;
 }
 presmp_initcall(amd_init_legacy_ssbd);
@@ -753,25 +823,6 @@ static void init_amd(struct cpuinfo_x86 *c)
        if (c == &boot_cpu_data)
                amd_probe_legacy_ssbd();
 
-       /*
-        * If the user has explicitly chosen to disable Memory Disambiguation
-        * to mitigiate Speculative Store Bypass, poke the appropriate MSR.
-        */
-       if (opt_ssbd) {
-               int bit = -1;
-
-               switch (c->x86) {
-               case 0x15: bit = 54; break;
-               case 0x16: bit = 33; break;
-               case 0x17: bit = 10; break;
-               }
-
-               if (bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
-                       value |= 1ull << bit;
-                       wrmsr_safe(MSR_AMD64_LS_CFG, value);
-               }
-       }
-
        /* MFENCE stops RDTSC speculation */
        if (!cpu_has_lfence_dispatch)
                __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 567cece..7d54201 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -376,6 +376,9 @@ void start_secondary(void *unused)
     if ( boot_cpu_has(X86_FEATURE_IBRSB) )
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
 
+    if ( cpu_has_legacy_ssbd )
+        amd_ctxt_switch_legacy_ssbd(NULL);
+
     if ( xen_guest )
         hypervisor_ap_setup();
 
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index df01ae3..e8d29a7 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -151,6 +151,8 @@ extern bool probe_cpuid_faulting(void);
 extern void ctxt_switch_levelling(const struct vcpu *next);
 extern void (*ctxt_switch_masking)(const struct vcpu *next);
 
+extern void amd_ctxt_switch_legacy_ssbd(const struct vcpu *next);
+
 extern bool_t opt_cpu_info;
 extern u32 cpuid_ext_features;
 extern u64 trampoline_misc_enable_off;
-- 
2.1.4

