[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/3] x86/spec-ctrl: Add support for modifying SSBD AMD VIA LS_CFG MSR



Adds support for modifying the LS_CFG MSR to enable SSBD on supporting
AMD CPUs.  Locking logic is needed for family 17h with SMT enabled,
since sibling threads share a single MSR; otherwise a core just needs
to write to its own LS_CFG MSR.  For more information see:
https://developer.amd.com/wp-content/resources/124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf

Signed-off-by: Brian Woods <brian.woods@xxxxxxx>
---
 xen/arch/x86/cpu/amd.c   |  10 +--
 xen/arch/x86/setup.c     |   2 +
 xen/arch/x86/smpboot.c   |   3 +
 xen/arch/x86/spec_ctrl.c | 201 ++++++++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 207 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 06c9e9661b..e7ec0d99a7 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -607,16 +607,10 @@ static void init_amd(struct cpuinfo_x86 *c)
                case 0x17: bit = 10; break;
                }
 
-               if (bit >= 0)
-                       ssbd_amd_ls_cfg_mask = 1ull << bit;
-       }
 
-       if (ssbd_amd_ls_cfg_mask && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
-               if (!boot_cpu_has(X86_FEATURE_SSBD_AMD_LS_CFG))
+               if (bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
+                       ssbd_amd_ls_cfg_mask = 1ull << bit;
                        setup_force_cpu_cap(X86_FEATURE_SSBD_AMD_LS_CFG);
-               if (opt_ssbd) {
-                       value |= ssbd_amd_ls_cfg_mask;
-                       wrmsr_safe(MSR_AMD64_LS_CFG, value);
                }
        }
 
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 419b46c033..b551852cbd 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1579,6 +1579,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     arch_init_memory();
 
+    ssbd_amd_ls_cfg_init();
+
     alternative_instructions();
 
     local_irq_enable();
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index d4478e6132..07760c920d 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -366,6 +366,9 @@ void start_secondary(void *unused)
     if ( boot_cpu_has(X86_FEATURE_IBRSB) )
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
 
+    if ( xen_ssbd_amd_ls_cfg_en )
+        ssbd_amd_ls_cfg_set(true);
+
     if ( xen_guest )
         hypervisor_ap_setup();
 
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index baef907322..006e8fb14b 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -248,7 +248,7 @@ static void __init print_details(enum ind_thunk thunk, 
uint64_t caps)
            !boot_cpu_has(X86_FEATURE_SSBD)           ? "" :
            (default_xen_spec_ctrl & SPEC_CTRL_SSBD)  ? " SSBD+" : " SSBD-",
            !boot_cpu_has(X86_FEATURE_SSBD_AMD_LS_CFG)? "" :
-           (opt_ssbd && ssbd_amd_ls_cfg_mask)        ? " SSBD+" : " SSBD-",
+           xen_ssbd_amd_ls_cfg_en                    ? " SSBD+" : " SSBD-",
            opt_ibpb                                  ? " IBPB"  : "");
 
     /*
@@ -497,6 +497,201 @@ static __init int parse_xpti(const char *s)
 }
 custom_param("xpti", parse_xpti);
 
+/*
+ * For families 15h and 16h there are no SMT-enabled processors, so no
+ * locking is needed: setting the MSR bit is enough.  For family 17h it
+ * depends on whether SMT is enabled.  With SMT, the two sibling threads
+ * share a single MSR, so a lock and a per-thread virtual bit are needed.
+ */
+
+/*
+ * Used for the non-SMT cases (no shared MSRs): families 15h/16h, or a
+ * family 17h part with SMT disabled.  No locking is required; each
+ * thread owns its LS_CFG MSR outright.
+ */
+static void ssbd_amd_ls_cfg_set_nonsmt(bool enable_ssbd)
+{
+    uint64_t ls_cfg, new_ls_cfg;
+
+    rdmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+
+    if ( enable_ssbd )
+        new_ls_cfg = ls_cfg | ssbd_amd_ls_cfg_mask;
+    else
+        new_ls_cfg = ls_cfg & ~ssbd_amd_ls_cfg_mask;
+
+    /* Skip the (expensive) MSR write when the bit is already correct. */
+    if ( new_ls_cfg != ls_cfg )
+        wrmsrl(MSR_AMD64_LS_CFG, new_ls_cfg);
+}
+
+/*
+ * Used for family 17h with SMT enabled (shared MSRs).  Both sibling
+ * threads write the same LS_CFG MSR, so track each thread's desired
+ * state in a per-core bitmask under a lock, and only clear the hardware
+ * bit once neither sibling wants SSBD any more.
+ */
+static void ssbd_amd_ls_cfg_set_smt(bool enable_ssbd)
+{
+    unsigned int socket, core, thread;
+    uint64_t enable_mask;
+    uint64_t ls_cfg;
+    struct ssbd_amd_ls_cfg_smt_status *status;
+    const struct cpuinfo_x86 *c = &current_cpu_data;
+
+    socket = c->phys_proc_id;
+    core   = c->cpu_core_id;
+    /*
+     * Thread index within the core is the low bit(s) of the APIC ID.
+     * This must be a bitwise AND - a logical '&&' here would collapse
+     * every thread to 0 or 1 based on truth values, not the APIC ID.
+     */
+    thread = c->apicid & (c->x86_num_siblings - 1);
+
+    status = ssbd_amd_smt_status[socket] + core;
+    enable_mask = 1ull << thread;
+
+    spin_lock(&status->lock);
+
+    if ( enable_ssbd )
+    {
+        if ( !(status->mask & enable_mask) )
+        {
+            status->mask |= enable_mask;
+            rdmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+            if ( !(ls_cfg & ssbd_amd_ls_cfg_mask) )
+            {
+                ls_cfg |= ssbd_amd_ls_cfg_mask;
+                wrmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+            }
+        }
+    }
+    else
+    {
+        if ( status->mask & enable_mask )
+        {
+            status->mask &= ~enable_mask;
+            rdmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+            /* Only clear the MSR bit once no sibling still wants SSBD. */
+            if ( (ls_cfg & ssbd_amd_ls_cfg_mask) && status->mask == 0 )
+            {
+                ls_cfg &= ~ssbd_amd_ls_cfg_mask;
+                wrmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+            }
+        }
+    }
+
+    spin_unlock(&status->lock);
+}
+
+/*
+ * Entry point for toggling SSBD via LS_CFG on the current CPU: dispatch
+ * to the locked (SMT) or lock-free (non-SMT) implementation.
+ */
+void ssbd_amd_ls_cfg_set(bool enable_ssbd)
+{
+    if ( !ssbd_amd_ls_cfg_mask ||
+         !boot_cpu_has(X86_FEATURE_SSBD_AMD_LS_CFG) )
+    {
+        dprintk(XENLOG_ERR, "SSBD AMD LS CFG: Invalid mask\n");
+        return;
+    }
+
+    if ( ssbd_amd_smt_en )
+        ssbd_amd_ls_cfg_set_smt(enable_ssbd);
+    else
+        ssbd_amd_ls_cfg_set_nonsmt(enable_ssbd);
+}
+
+/*
+ * Set the boot CPU's initial LS_CFG state.  No locking is needed here:
+ * this runs before any secondary CPU is brought online.
+ */
+static __init void ssbd_amd_ls_cfg_set_init(void)
+{
+    unsigned int socket, core, thread;
+    uint64_t ls_cfg;
+    struct ssbd_amd_ls_cfg_smt_status *status;
+    struct cpuinfo_x86 *c = &boot_cpu_data;
+
+    if ( ssbd_amd_smt_en )
+    {
+        socket = c->phys_proc_id;
+        core   = c->cpu_core_id;
+        /* Bitwise AND: thread index is the low bit(s) of the APIC ID. */
+        thread = c->apicid & (c->x86_num_siblings - 1);
+
+        status = ssbd_amd_smt_status[socket] + core;
+        status->mask |= 1ull << thread;
+    }
+
+    rdmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+    if ( !(ls_cfg & ssbd_amd_ls_cfg_mask) )
+    {
+        ls_cfg |= ssbd_amd_ls_cfg_mask;
+        wrmsrl(MSR_AMD64_LS_CFG, ls_cfg);
+    }
+}
+
+/*
+ * One-time setup for the LS_CFG SSBD mitigation.  For family 17h with
+ * SMT enabled, allocate and initialise the per-core status/lock table
+ * used to arbitrate the shared MSR.  On any failure, tear everything
+ * down and disable the feature entirely.
+ */
+__init void ssbd_amd_ls_cfg_init(void)
+{
+    int cores_per_socket, threads_per_core;
+    int core, socket;
+    struct cpuinfo_x86 *c = &boot_cpu_data;
+
+    if ( !ssbd_amd_ls_cfg_mask ||
+         !boot_cpu_has(X86_FEATURE_SSBD_AMD_LS_CFG) )
+        goto fail;
+
+    switch ( c->x86 )
+    {
+    case 0x15:
+    case 0x16:
+        /* No SMT parts in these families: a plain MSR write suffices. */
+        break;
+
+    case 0x17:
+        cores_per_socket = c->x86_max_cores;
+        threads_per_core = c->x86_num_siblings;
+
+        if ( cores_per_socket < 1 || threads_per_core < 1 )
+        {
+            dprintk(XENLOG_ERR,
+                    "SSBD AMD LS CFG: error in topology decoding\n");
+            goto fail;
+        }
+
+        if ( threads_per_core > 1 )
+        {
+            ssbd_amd_smt_en = true;
+
+            /* Allocate and initialise each socket's table in one pass. */
+            for ( socket = 0; socket < SSBD_AMD_MAX_SOCKET; socket++ )
+            {
+                ssbd_amd_smt_status[socket] =
+                    xmalloc_array(struct ssbd_amd_ls_cfg_smt_status,
+                                  cores_per_socket);
+                if ( ssbd_amd_smt_status[socket] == NULL )
+                {
+                    dprintk(XENLOG_ERR,
+                            "SSBD AMD LS CFG: error in status allocing\n");
+                    goto fail;
+                }
+
+                for ( core = 0; core < cores_per_socket; core++ )
+                {
+                    spin_lock_init(&ssbd_amd_smt_status[socket][core].lock);
+                    ssbd_amd_smt_status[socket][core].mask = 0;
+                }
+            }
+        }
+        break;
+
+    default:
+        goto fail;
+    }
+
+    if ( xen_ssbd_amd_ls_cfg_en )
+        ssbd_amd_ls_cfg_set_init();
+
+    return;
+
+ fail:
+    /* xfree(NULL) is a no-op; clear the slots so no dangling pointers remain. */
+    for ( socket = 0; socket < SSBD_AMD_MAX_SOCKET; socket++ )
+    {
+        xfree(ssbd_amd_smt_status[socket]);
+        ssbd_amd_smt_status[socket] = NULL;
+    }
+
+    setup_clear_cpu_cap(X86_FEATURE_SSBD_AMD_LS_CFG);
+    xen_ssbd_amd_ls_cfg_en = false;
+}
+
 void __init init_speculation_mitigations(void)
 {
     enum ind_thunk thunk = THUNK_DEFAULT;
@@ -599,6 +794,10 @@ void __init init_speculation_mitigations(void)
     if ( boot_cpu_has(X86_FEATURE_SSBD) && opt_ssbd )
         default_xen_spec_ctrl |= SPEC_CTRL_SSBD;
 
+    /* If SSBD via the AMD LS_CFG MSR is available, see whether to use it. */
+    if ( boot_cpu_has(X86_FEATURE_SSBD_AMD_LS_CFG) && opt_ssbd )
+         xen_ssbd_amd_ls_cfg_en = true;
+
     /*
      * PV guests can poison the RSB to any virtual address from which
      * they can execute a call instruction.  This is necessarily outside
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.