
[Xen-devel] [PATCH] Speculative mitigation facilities report wrong status


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jin Nan Wang <jnwang@xxxxxxxx>
  • Date: Wed, 31 Jul 2019 09:30:17 +0000
  • Accept-language: zh-CN, en-US
  • Cc: "andrew.cooper3@xxxxxxxxxx" <andrew.cooper3@xxxxxxxxxx>, Jin Nan Wang <jnwang@xxxxxxxx>, "wl@xxxxxxx" <wl@xxxxxxx>, Jan Beulich <JBeulich@xxxxxxxx>, "roger.pau@xxxxxxxxxx" <roger.pau@xxxxxxxxxx>
  • Delivery-date: Wed, 31 Jul 2019 09:46:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH] Speculative mitigation facilities report wrong status

Below is a diff of the Xen boot log with 'spec-ctrl=no' and without it:
====================================================
--- xen.dmesg.5.log     2019-07-31 14:55:38.138173874 +0800
+++ xen.dmesg.6.log     2019-07-31 14:59:50.223516313 +0800
@@ -7,7 +7,7 @@
 (XEN) Xen version 4.12.0_14-1 (abuild@xxxxxxx) (gcc (SUSE Linux) 4.8.5) debug=n  Mon Jun 17 15:08:33 UTC 2019
 (XEN) Latest ChangeSet:
 (XEN) Bootloader: GRUB2 2.02
-(XEN) Command line: vga=gfx-1024x768x16 crashkernel=251M<4G ucode=scan console=vga,com1 loglvl=all guest_loglvl=all
+(XEN) Command line: vga=gfx-1024x768x16 crashkernel=251M<4G ucode=scan spec-ctrl=no console=vga,com1 loglvl=all guest_loglvl=all
 (XEN) Xen image load base address: 0
 (XEN) Video information:
 (XEN)  VGA is graphics mode 1024x768, 16 bpp
@@ -159,12 +159,12 @@
 (XEN) Speculative mitigation facilities:
 (XEN)   Hardware features: IBRS/IBPB STIBP L1D_FLUSH SSBD MD_CLEAR
 (XEN)   Compiled-in support: INDIRECT_THUNK SHADOW_PAGING
-(XEN)   Xen settings: BTI-Thunk JMP, SPEC_CTRL: IBRS+ SSBD-, Other: IBPB L1D_FLUSH VERW
+(XEN)   Xen settings: BTI-Thunk JMP, SPEC_CTRL: IBRS- SSBD-, Other:
 (XEN)   L1TF: believed vulnerable, maxphysaddr L1D 46, CPUID 46, Safe address 300000000000
-(XEN)   Support for HVM VMs: MSR_SPEC_CTRL RSB EAGER_FPU MD_CLEAR
-(XEN)   Support for PV VMs: MSR_SPEC_CTRL RSB EAGER_FPU MD_CLEAR
-(XEN)   XPTI (64-bit PV only): Dom0 enabled, DomU enabled (with PCID)
-(XEN)   PV L1TF shadowing: Dom0 disabled, DomU enabled
+(XEN)   Support for HVM VMs: None MD_CLEAR
+(XEN)   Support for PV VMs: None MD_CLEAR
+(XEN)   XPTI (64-bit PV only): Dom0 disabled, DomU disabled (with PCID)
+(XEN)   PV L1TF shadowing: Dom0 disabled, DomU disabled
 (XEN) Using scheduler: SMP Credit Scheduler rev2 (credit2)
 (XEN) Initializing Credit2 scheduler
 (XEN)  load_precision_shift: 18
==================================================

In "Support for HVM VMs: Support for PV VMs: " lines,
Others feature is reported as "NONE", MD_CLEAR not.

code review:
xen/arch/x86/spec_ctrl.c:
    99         disable_common:
   100             opt_rsb_pv = false;
   101             opt_rsb_hvm = false;
   102             opt_md_clear_pv = 0;   <----- these are disabled when 'spec-ctrl=no' is given
   103             opt_md_clear_hvm = 0;
   104

so X86_FEATURE_SC_VERW_PV and X86_FEATURE_SC_VERW_HVM will not be enabled:

  1070     if ( opt_md_clear_pv )
  1071         setup_force_cpu_cap(X86_FEATURE_SC_VERW_PV);
  1072     if ( opt_md_clear_pv || opt_md_clear_hvm )
  1073         setup_force_cpu_cap(X86_FEATURE_SC_VERW_IDLE);
  1074     if ( opt_md_clear_hvm && !(caps & ARCH_CAPS_SKIP_L1DFL) && !opt_l1d_flush )
  1075         setup_force_cpu_cap(X86_FEATURE_SC_VERW_HVM);

But when reporting the status of MD_CLEAR, the check uses X86_FEATURE_MD_CLEAR
(the hardware CPUID bit), which does not reflect whether Xen will actually use
VERW for guests; see the standalone sketch after the quoted code below.

   360     printk("  Support for HVM VMs:%s%s%s%s%s\n",
   361            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
   362             boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
   363             opt_eager_fpu)                           ? ""               : " None",
   364            boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_SPEC_CTRL" : "",
   365            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB"           : "",
   366            opt_eager_fpu                             ? " EAGER_FPU"     : "",
   367    ---->   boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
   368
   369 #endif
   370 #ifdef CONFIG_PV
   371     printk("  Support for PV VMs:%s%s%s%s%s\n",
   372            (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
   373             boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
   374             opt_eager_fpu)                           ? ""               : " None",
   375            boot_cpu_has(X86_FEATURE_SC_MSR_PV)       ? " MSR_SPEC_CTRL" : "",
   376            boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB"           : "",
   377            opt_eager_fpu                             ? " EAGER_FPU"     : "",
   378    ---->   boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");

A patch for this issue follows.

Signed-off-by: James Wang <jnwang@xxxxxxxx>
---
 xen/arch/x86/spec_ctrl.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index cada9a058e..759eee452d 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -366,22 +366,24 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
     printk("  Support for HVM VMs:%s%s%s%s%s\n",
            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
             boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
+            boot_cpu_has(X86_FEATURE_SC_VERW_HVM) ||
             opt_eager_fpu)                           ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_SPEC_CTRL" : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB"           : "",
            opt_eager_fpu                             ? " EAGER_FPU"     : "",
-           boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
+           boot_cpu_has(X86_FEATURE_SC_VERW_HVM)     ? " MD_CLEAR"      : "");
 
 #endif
 #ifdef CONFIG_PV
     printk("  Support for PV VMs:%s%s%s%s%s\n",
            (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
             boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
+            boot_cpu_has(X86_FEATURE_SC_VERW_PV) ||
             opt_eager_fpu)                           ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_PV)       ? " MSR_SPEC_CTRL" : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB"           : "",
            opt_eager_fpu                             ? " EAGER_FPU"     : "",
-           boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
+           boot_cpu_has(X86_FEATURE_SC_VERW_PV)      ? " MD_CLEAR"      : "");
 
     printk("  XPTI (64-bit PV only): Dom0 %s, DomU %s (with%s PCID)\n",
            opt_xpti_hwdom ? "enabled" : "disabled",
-- 
2.22.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

