
[Xen-devel] [PATCH 1/6] x86/msr: Clean up the MSR_EFER constants



The bit-position constants are only used by the trampoline asm, and even
there the code is shorter and clearer when using the mask constants.  This
halves the number of constants used.

Consistently use _AC() for the bit constants, and start to use spaces
for indentation.  Furthermore, EFER contains the NX-Enable bit, so
rename the constant to EFER_NXE to match both the AMD and Intel specs.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/boot/trampoline.S  |  2 +-
 xen/arch/x86/boot/wakeup.S      |  7 +++----
 xen/arch/x86/cpu/intel.c        |  2 +-
 xen/arch/x86/efi/efi-boot.h     |  2 +-
 xen/arch/x86/hvm/hvm.c          |  4 ++--
 xen/include/asm-x86/hvm/hvm.h   |  2 +-
 xen/include/asm-x86/msr-index.h | 33 ++++++++++++---------------------
 7 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
index 5588c79..7e77e61e 100644
--- a/xen/arch/x86/boot/trampoline.S
+++ b/xen/arch/x86/boot/trampoline.S
@@ -138,7 +138,7 @@ trampoline_protmode_entry:
         or      $EFER_LME|EFER_SCE,%eax   /* Long Mode + SYSCALL/SYSRET */
         bt      $cpufeat_bit(X86_FEATURE_NX),%edi /* No Execute? */
         jnc     1f
-        btsl    $_EFER_NX,%eax  /* No Execute     */
+        or      $EFER_NXE, %eax /* No Execute */
 1:      wrmsr
 
         mov     $(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_NE |\
diff --git a/xen/arch/x86/boot/wakeup.S b/xen/arch/x86/boot/wakeup.S
index f9632ee..a37680b 100644
--- a/xen/arch/x86/boot/wakeup.S
+++ b/xen/arch/x86/boot/wakeup.S
@@ -144,11 +144,10 @@ wakeup_32:
         jz      .Lskip_eferw
         movl    $MSR_EFER,%ecx
         rdmsr
-        btsl    $_EFER_LME,%eax /* Long Mode      */
-        btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
-        btl     $20,%edi        /* No Execute?    */
+        or      $EFER_LME | EFER_SCE, %eax /* Long Mode + SYSCALL/SYSRET */
+        bt      $cpufeat_bit(X86_FEATURE_NX), %edi /* No Execute? */
         jnc     1f
-        btsl    $_EFER_NX,%eax  /* No Execute     */
+        or      $EFER_NXE, %eax  /* No Execute */
 1:      wrmsr
 .Lskip_eferw:
 
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 9477965..f3b04fe 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -247,7 +247,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                printk(KERN_INFO "revised cpuid level: %d\n",
                       cpuid_eax(0));
        if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE) {
-               write_efer(read_efer() | EFER_NX);
+               write_efer(read_efer() | EFER_NXE);
                printk(KERN_INFO
                       "re-enabled NX (Execute Disable) protection\n");
        }
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 5789d2c..09bb3f4 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -239,7 +239,7 @@ static void __init noreturn efi_arch_post_exit_boot(void)
     rdmsrl(MSR_EFER, efer);
     efer |= EFER_SCE;
     if ( cpuid_ext_features & cpufeat_mask(X86_FEATURE_NX) )
-        efer |= EFER_NX;
+        efer |= EFER_NXE;
     wrmsrl(MSR_EFER, efer);
     write_cr0(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP |
               X86_CR0_AM | X86_CR0_PG);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c23983c..4e247d0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -916,8 +916,8 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
     if ( (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
         return "LMA/LME/CR0.PG inconsistency";
 
-    if ( (value & EFER_NX) && !p->extd.nx )
-        return "NX without feature";
+    if ( (value & EFER_NXE) && !p->extd.nx )
+        return "NXE without feature";
 
     if ( (value & EFER_SVME) && (!p->extd.svm || !nestedhvm_enabled(d)) )
         return "SVME without nested virt";
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index ef5e198..c9511c3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -299,7 +299,7 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
 /* HVM guests on Intel hardware leak Xen's NX settings into guest context. */
 #define hvm_nx_enabled(v) \
     ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && cpu_has_nx) ||    \
-     ((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
+     ((v)->arch.hvm_vcpu.guest_efer & EFER_NXE))
 #define hvm_pku_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))
 
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 8fbccc8..1b10f3e 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -4,7 +4,6 @@
 /* CPU model specific register (MSR) numbers */
 
 /* x86-64 specific MSRs */
-#define MSR_EFER               0xc0000080 /* extended feature register */
 #define MSR_STAR               0xc0000081 /* legacy mode SYSCALL target */
 #define MSR_LSTAR              0xc0000082 /* long mode SYSCALL target */
 #define MSR_CSTAR              0xc0000083 /* compat mode SYSCALL target */
@@ -14,26 +13,6 @@
 #define MSR_SHADOW_GS_BASE     0xc0000102 /* SwapGS GS shadow */
 #define MSR_TSC_AUX            0xc0000103 /* Auxiliary TSC */
 
-/* EFER bits: */
-#define _EFER_SCE              0  /* SYSCALL/SYSRET */
-#define _EFER_LME              8  /* Long mode enable */
-#define _EFER_LMA              10 /* Long mode active (read-only) */
-#define _EFER_NX               11 /* No execute enable */
-#define _EFER_SVME             12 /* AMD: SVM enable */
-#define _EFER_LMSLE            13 /* AMD: Long-mode segment limit enable */
-#define _EFER_FFXSE            14 /* AMD: Fast FXSAVE/FXRSTOR enable */
-
-#define EFER_SCE               (1<<_EFER_SCE)
-#define EFER_LME               (1<<_EFER_LME)
-#define EFER_LMA               (1<<_EFER_LMA)
-#define EFER_NX                        (1<<_EFER_NX)
-#define EFER_SVME              (1<<_EFER_SVME)
-#define EFER_LMSLE             (1<<_EFER_LMSLE)
-#define EFER_FFXSE             (1<<_EFER_FFXSE)
-
-#define EFER_KNOWN_MASK                (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
-                                EFER_SVME | EFER_LMSLE | EFER_FFXSE)
-
 /* Speculation Controls. */
 #define MSR_SPEC_CTRL                  0x00000048
 #define SPEC_CTRL_IBRS                 (_AC(1, ULL) << 0)
@@ -49,6 +28,18 @@
 #define ARCH_CAPS_RSBA                 (_AC(1, ULL) << 2)
 #define ARCH_CAPS_SSB_NO               (_AC(1, ULL) << 4)
 
+#define MSR_EFER                        0xc0000080 /* Extended Feature Enable Register */
+#define EFER_SCE                        (_AC(1, ULL) <<  0) /* SYSCALL Enable */
+#define EFER_LME                        (_AC(1, ULL) <<  8) /* Long Mode Enable */
+#define EFER_LMA                        (_AC(1, ULL) << 10) /* Long Mode Active */
+#define EFER_NXE                        (_AC(1, ULL) << 11) /* No Execute Enable */
+#define EFER_SVME                       (_AC(1, ULL) << 12) /* Secure Virtual Machine Enable */
+#define EFER_LMSLE                      (_AC(1, ULL) << 13) /* Long Mode Segment Limit Enable */
+#define EFER_FFXSE                      (_AC(1, ULL) << 14) /* Fast FXSAVE/FXRSTOR */
+
+#define EFER_KNOWN_MASK (EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE | \
+                         EFER_SVME | EFER_LMSLE | EFER_FFXSE)
+
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PERFCTR0              0x000000c1
 #define MSR_IA32_A_PERFCTR0            0x000004c1
-- 
2.1.4

