[Xen-changelog] [xen-unstable] x86/hvm: add SMEP support to HVM guest

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/hvm: add SMEP support to HVM guest
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 16 Jun 2011 11:12:41 +0100
Delivery-date: Thu, 16 Jun 2011 03:30:49 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307364408 -3600
# Node ID c34604d5a29336d902837542b915d3b09b27a361
# Parent  664c419b55681feb233b33e0028d0f0af371bedd
x86/hvm: add SMEP support to HVM guest

New Intel CPUs support SMEP (Supervisor Mode Execution Protection). SMEP
prevents software running with CPL < 3 (supervisor mode) from fetching
instructions from any linear address with a valid translation for which the U/S
flag (bit 2) is 1 in every paging-structure entry controlling that translation,
i.e. from user-accessible mappings.

This patch adds SMEP support to HVM guests.
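
As background, the fault condition described above can be written down as a
small self-contained sketch (illustrative only; the names below, e.g.
smep_blocks_fetch, are invented for this note and do not appear in the patch):

#include <stdbool.h>
#include <stdio.h>

/* every_entry_user: true iff the U/S flag (bit 2) is set in every
 * paging-structure entry controlling the translation. */
static bool smep_blocks_fetch(bool smep_enabled, unsigned int cpl,
                              bool insn_fetch, bool every_entry_user)
{
    /* SMEP only affects instruction fetches performed with CPL < 3
     * (supervisor mode) from user-accessible mappings. */
    return smep_enabled && insn_fetch && cpl < 3 && every_entry_user;
}

int main(void)
{
    printf("kernel fetch, user mapping:   %d\n",
           smep_blocks_fetch(true, 0, true, true));   /* 1: faults */
    printf("kernel fetch, kernel mapping: %d\n",
           smep_blocks_fetch(true, 0, true, false));  /* 0: allowed */
    return 0;
}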

Signed-off-by: Yang Wei <wei.y.yang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
Signed-off-by: Li Xin <xin.li@xxxxxxxxx>
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---


diff -r 664c419b5568 -r c34604d5a293 tools/libxc/xc_cpufeature.h
--- a/tools/libxc/xc_cpufeature.h       Wed Jun 08 13:40:46 2011 +0100
+++ b/tools/libxc/xc_cpufeature.h       Mon Jun 06 13:46:48 2011 +0100
@@ -124,5 +124,6 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
 #define X86_FEATURE_FSGSBASE     0 /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP         7 /* Supervisor Mode Execution Protection */
 
 #endif /* __LIBXC_CPUFEATURE_H */
diff -r 664c419b5568 -r c34604d5a293 tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c        Wed Jun 08 13:40:46 2011 +0100
+++ b/tools/libxc/xc_cpuid_x86.c        Mon Jun 06 13:46:48 2011 +0100
@@ -352,6 +352,14 @@
             clear_bit(X86_FEATURE_PAE, regs[3]);
         break;
 
+    case 0x00000007: /* Intel-defined CPU features */
+        if ( input[1] == 0 ) {
+            regs[1] &= bitmaskof(X86_FEATURE_SMEP);
+        } else
+            regs[1] = 0;
+        regs[0] = regs[2] = regs[3] = 0;
+        break;
+
     case 0x0000000d:
         xc_cpuid_config_xsave(xch, domid, xfeature_mask, input, regs);
         break;
diff -r 664c419b5568 -r c34604d5a293 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jun 08 13:40:46 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Mon Jun 06 13:46:48 2011 +0100
@@ -1661,8 +1661,9 @@
     v->arch.hvm_vcpu.guest_cr[4] = value;
     hvm_update_guest_cr(v, 4);
 
-    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
-    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) ) {
+    /* Modifying CR4.{PSE,PAE,PGE,SMEP} invalidates all TLB entries. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE |
+                             X86_CR4_PAE | X86_CR4_SMEP) ) {
         if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) )
             paging_update_nestedmode(v);
         else
@@ -2307,7 +2308,7 @@
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
@@ -2333,7 +2334,7 @@
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
@@ -2403,6 +2404,10 @@
             *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                      cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
         break;
+    case 0x7:
+        if ( (count == 0) && !cpu_has_smep )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+        break;
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
diff -r 664c419b5568 -r c34604d5a293 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Wed Jun 08 13:40:46 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Mon Jun 06 13:46:48 2011 +0100
@@ -134,7 +134,7 @@
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int pse;
+    int pse, smep;
 
     perfc_incr(guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -147,6 +147,15 @@
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
+    /* SMEP: kernel-mode instruction fetches from user-mode mappings
+     * should fault.  Unlike NX or invalid bits, we're looking for _all_
+     * entries in the walk to have _PAGE_USER set, so we need to do the
+     * whole walk as if it were a user-mode one and then invert the answer. */
+    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v) 
+            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
+    if ( smep )
+        mflags |= _PAGE_USER;
+
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
 
@@ -273,6 +282,10 @@
         rc |= ((gflags & mflags) ^ mflags);
     }
 
+    /* Now re-invert the user-mode requirement for SMEP. */
+    if ( smep ) 
+        rc ^= _PAGE_USER;
+
     /* Go back and set accessed and dirty bits only if the walk was a
      * success.  Although the PRMs say higher-level _PAGE_ACCESSED bits
      * get set whenever a lower-level PT is used, at least some hardware
diff -r 664c419b5568 -r c34604d5a293 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jun 08 13:40:46 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Jun 06 13:46:48 2011 +0100
@@ -212,6 +212,8 @@
     (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
 #define hvm_pae_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_smep_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -321,6 +323,7 @@
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
+        (cpu_has_smep ? X86_CR4_SMEP : 0) |             \
         (xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
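
The subtlest part of the change is in guest_walk_tables(): the walk
temporarily demands _PAGE_USER at every level, as if it were a user-mode
access, and then inverts that single bit of the result. The toy model below
(illustrative only, not Xen code) shows why this reports a failure exactly
when every entry in the walk is user-accessible:

#include <stdio.h>

#define PAGE_USER 0x4u  /* U/S flag, bit 2 of a paging-structure entry */

/* Toy two-level walk: 'mflags' are the flags every entry must carry;
 * the return value accumulates whatever was missing (0 == success). */
static unsigned int toy_walk(unsigned int l2e, unsigned int l1e,
                             unsigned int mflags)
{
    unsigned int rc = 0;
    rc |= (l2e & mflags) ^ mflags;
    rc |= (l1e & mflags) ^ mflags;
    return rc;
}

int main(void)
{
    /* Supervisor instruction fetch with SMEP enabled: temporarily demand
     * _PAGE_USER on every level, as the patch does via mflags... */
    unsigned int rc = toy_walk(PAGE_USER, PAGE_USER, PAGE_USER);
    /* ...then invert that one bit, so a fully user-accessible mapping is
     * reported as a failure (the SMEP fault)... */
    rc ^= PAGE_USER;
    printf("user-accessible mapping: rc=%#x (non-zero => fault)\n", rc);

    /* ...while a mapping with at least one supervisor-only entry passes. */
    rc = toy_walk(0, PAGE_USER, PAGE_USER) ^ PAGE_USER;
    printf("supervisor-only mapping: rc=%#x\n", rc);
    return 0;
}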

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
