
[Xen-devel] [PATCH 6/8] x86: (command line option to) avoid use of secondary hyper-threads



Shared resources (L1 cache and TLB in particular) present a risk of
information leaks via side channels. By default don't bring up
secondary hyper-threads in such cases, while at the same time allowing
their use to be controlled independently via a command line option.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
An option to avoid the up/down cycle would be to not clear the sibling
(and then perhaps also core) map of parked CPUs, which would allow
bailing out early from cpu_up_helper(); a rough sketch of that
alternative follows.
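
Purely illustrative, and assuming the sibling map of a parked CPU would
be retained across the offline (names/checks taken from the hunks
below):

    long cpu_up_helper(void *data)
    {
        unsigned int cpu = (unsigned long)data;

        /*
         * Refuse to online a secondary thread up front, relying on the
         * (hypothetically retained) sibling map of the parked CPU.
         */
        if ( !opt_ht &&
             cpu_data[cpu].compute_unit_id == INVALID_CUID &&
             cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
            return -EPERM;

        return cpu_up(cpu); /* -EBUSY retry path omitted for brevity */
    }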

TBD: How to prevent the CPU from transiently becoming available for
     scheduling when being onlined at runtime?

TBD: For now the patch assumes all HT-enabled CPUs are affected by side
     channel attacks through shared resources. There are claims that AMD
     ones aren't, but it hasn't really become clear to me why that would
     be the case: I don't see how fully associative L1 TLBs alone would
     rule out other possible avenues (L2 TLB, caches).
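
For completeness, a purely illustrative usage example (path and GRUB2
entry are hypothetical, not part of this patch): with the patch as
posted, secondary threads get parked by default, and passing "ht=1"
(or just "ht") on the hypervisor command line brings them up again:

    multiboot2 /boot/xen.gz ht=1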

--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1040,6 +1040,13 @@ identical to the boot CPU will be parked
 ### hpetbroadcast (x86)
 > `= <boolean>`
 
+### ht (x86)
+> `= <boolean>`
+
+Default: `false`
+
+Control bring up of multiple hyper-threads per CPU core.
+
 ### hvm\_debug (x86)
 > `= <integer>`
 
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -62,6 +62,9 @@ boolean_param("nosmp", opt_nosmp);
 static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 
+int8_t __read_mostly opt_ht = -1;
+boolean_param("ht", opt_ht);
+
 /* opt_invpcid: If false, don't use INVPCID instruction even if available. */
 static bool __initdata opt_invpcid = true;
 boolean_param("invpcid", opt_invpcid);
@@ -1633,7 +1636,10 @@ void __init noreturn __start_xen(unsigne
                 int ret = cpu_up(i);
                 if ( ret != 0 )
                     printk("Failed to bring up CPU %u (error %d)\n", i, ret);
-                else if ( num_online_cpus() > max_cpus )
+                else if ( num_online_cpus() > max_cpus ||
+                          (!opt_ht &&
+                           cpu_data[i].compute_unit_id == INVALID_CUID &&
+                           cpumask_weight(per_cpu(cpu_sibling_mask, i)) > 1) )
                 {
                     ret = cpu_down(i);
                     if ( !ret )
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -23,6 +23,7 @@
 #include <asm/microcode.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
+#include <asm/setup.h>
 #include <asm/spec_ctrl.h>
 #include <asm/spec_ctrl_asm.h>
 
@@ -126,6 +127,9 @@ static int __init parse_spec_ctrl(const
 
             opt_eager_fpu = 0;
 
+            if ( opt_ht < 0 )
+                opt_ht = 1;
+
         disable_common:
             opt_rsb_pv = false;
             opt_rsb_hvm = false;
@@ -627,6 +631,9 @@ void __init init_speculation_mitigations
     if ( default_xen_spec_ctrl )
         setup_force_cpu_cap(X86_FEATURE_SC_MSR_IDLE);
 
+    if ( opt_ht < 0 )
+        opt_ht = 0;
+
     xpti_init_default(false);
     if ( opt_xpti == 0 )
         setup_force_cpu_cap(X86_FEATURE_NO_XPTI);
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -23,6 +23,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/processor.h>
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/numa.h>
 #include <xen/nodemask.h>
@@ -48,14 +49,27 @@ static void l3_cache_get(void *arg)
 
 long cpu_up_helper(void *data)
 {
-    int cpu = (unsigned long)data;
+    unsigned int cpu = (unsigned long)data;
     int ret = cpu_up(cpu);
+
     if ( ret == -EBUSY )
     {
         /* On EBUSY, flush RCU work and have one more go. */
         rcu_barrier();
         ret = cpu_up(cpu);
     }
+
+    if ( !ret && !opt_ht &&
+         cpu_data[cpu].compute_unit_id == INVALID_CUID &&
+         cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
+    {
+        ret = cpu_down_helper(data);
+        if ( ret )
+            printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
+        else
+            ret = -EPERM;
+    }
+
     return ret;
 }
 
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -59,6 +59,8 @@ extern uint8_t kbd_shift_flags;
 extern unsigned long highmem_start;
 #endif
 
+extern int8_t opt_ht;
+
 #ifdef CONFIG_SHADOW_PAGING
 extern bool opt_dom0_shadow;
 #else
