[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1/6] x86/msr: Clean up the MSR_EFER constants



On Tue, Jun 26, 2018 at 02:18:13PM +0100, Andrew Cooper wrote:
> The bit position constants are only used by the trampoline asm, but the
> code is shorter and clearer when using the mask constants.  This halves
> the number of constants used.
> 
> Consistently use _AC() for the bit constants, and start to use spaces
> for indentation.  Furthermore, EFER contains the NX-Enable bit, so
> rename the constant to EFER_NXE to match both the AMD and Intel specs.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Just a couple of comments.

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> ---
>  xen/arch/x86/boot/trampoline.S  |  2 +-
>  xen/arch/x86/boot/wakeup.S      |  7 +++----
>  xen/arch/x86/cpu/intel.c        |  2 +-
>  xen/arch/x86/efi/efi-boot.h     |  2 +-
>  xen/arch/x86/hvm/hvm.c          |  4 ++--
>  xen/include/asm-x86/hvm/hvm.h   |  2 +-
>  xen/include/asm-x86/msr-index.h | 33 ++++++++++++---------------------
>  7 files changed, 21 insertions(+), 31 deletions(-)
> 
> diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
> index 5588c79..7e77e61e 100644
> --- a/xen/arch/x86/boot/trampoline.S
> +++ b/xen/arch/x86/boot/trampoline.S
> @@ -138,7 +138,7 @@ trampoline_protmode_entry:
>          or      $EFER_LME|EFER_SCE,%eax   /* Long Mode + SYSCALL/SYSRET */
>          bt      $cpufeat_bit(X86_FEATURE_NX),%edi /* No Execute? */
>          jnc     1f
> -        btsl    $_EFER_NX,%eax  /* No Execute     */
> +        or      $EFER_NXE, %eax /* No Execute */
>  1:      wrmsr
>  
>          mov     $(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_NE |\
> diff --git a/xen/arch/x86/boot/wakeup.S b/xen/arch/x86/boot/wakeup.S
> index f9632ee..a37680b 100644
> --- a/xen/arch/x86/boot/wakeup.S
> +++ b/xen/arch/x86/boot/wakeup.S
> @@ -144,11 +144,10 @@ wakeup_32:
>          jz      .Lskip_eferw
>          movl    $MSR_EFER,%ecx
>          rdmsr
> -        btsl    $_EFER_LME,%eax /* Long Mode      */
> -        btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
> -        btl     $20,%edi        /* No Execute?    */
> +        or      $EFER_LME | EFER_SCE, %eax /* Long Mode + SYSCALL/SYSRET */

I usually do: '$(EFER_LME | EFER_SCE), ...' because I think it is clearer.

> +        bt      $cpufeat_bit(X86_FEATURE_NX), %edi /* No Execute? */
>          jnc     1f
> -        btsl    $_EFER_NX,%eax  /* No Execute     */
> +        or      $EFER_NXE, %eax  /* No Execute */
>  1:      wrmsr
>  .Lskip_eferw:
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index 8fbccc8..1b10f3e 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -4,7 +4,6 @@
>  /* CPU model specific register (MSR) numbers */
>  
>  /* x86-64 specific MSRs */
> -#define MSR_EFER             0xc0000080 /* extended feature register */
>  #define MSR_STAR             0xc0000081 /* legacy mode SYSCALL target */
>  #define MSR_LSTAR            0xc0000082 /* long mode SYSCALL target */
>  #define MSR_CSTAR            0xc0000083 /* compat mode SYSCALL target */
> @@ -14,26 +13,6 @@
>  #define MSR_SHADOW_GS_BASE   0xc0000102 /* SwapGS GS shadow */
>  #define MSR_TSC_AUX          0xc0000103 /* Auxiliary TSC */
>  
> -/* EFER bits: */
> -#define _EFER_SCE            0  /* SYSCALL/SYSRET */
> -#define _EFER_LME            8  /* Long mode enable */
> -#define _EFER_LMA            10 /* Long mode active (read-only) */
> -#define _EFER_NX             11 /* No execute enable */
> -#define _EFER_SVME           12 /* AMD: SVM enable */
> -#define _EFER_LMSLE          13 /* AMD: Long-mode segment limit enable */
> -#define _EFER_FFXSE          14 /* AMD: Fast FXSAVE/FXRSTOR enable */
> -
> -#define EFER_SCE             (1<<_EFER_SCE)
> -#define EFER_LME             (1<<_EFER_LME)
> -#define EFER_LMA             (1<<_EFER_LMA)
> -#define EFER_NX                      (1<<_EFER_NX)
> -#define EFER_SVME            (1<<_EFER_SVME)
> -#define EFER_LMSLE           (1<<_EFER_LMSLE)
> -#define EFER_FFXSE           (1<<_EFER_FFXSE)
> -
> -#define EFER_KNOWN_MASK              (EFER_SCE | EFER_LME | EFER_LMA | 
> EFER_NX | \
> -                              EFER_SVME | EFER_LMSLE | EFER_FFXSE)
> -
>  /* Speculation Controls. */
>  #define MSR_SPEC_CTRL                        0x00000048
>  #define SPEC_CTRL_IBRS                       (_AC(1, ULL) << 0)
> @@ -49,6 +28,18 @@
>  #define ARCH_CAPS_RSBA                       (_AC(1, ULL) << 2)
>  #define ARCH_CAPS_SSB_NO             (_AC(1, ULL) << 4)
>  
> +#define MSR_EFER                        0xc0000080 /* Extended Feature 
> Enable Register */
> +#define EFER_SCE                        (_AC(1, ULL) <<  0) /* SYSCALL 
> Enable */
> +#define EFER_LME                        (_AC(1, ULL) <<  8) /* Long Mode 
> Enable */
> +#define EFER_LMA                        (_AC(1, ULL) << 10) /* Long Mode 
> Active */
> +#define EFER_NXE                        (_AC(1, ULL) << 11) /* No Execute 
> Enable */
> +#define EFER_SVME                       (_AC(1, ULL) << 12) /* Secure 
> Virtual Machine Enable */
> +#define EFER_LMSLE                      (_AC(1, ULL) << 13) /* Long Mode 
> Segment Limit Enable */
> +#define EFER_FFXSE                      (_AC(1, ULL) << 14) /* Fast 
> FXSAVE/FXRSTOR */

Isn't the alignment a little bit too much?

And for clarity I would also indent the bitmasks, so:

#define MSR_EFER      0xc0000080          /* Extended Feature Enable Register */
#define   EFER_SCE    (_AC(1, ULL) <<  0) /* SYSCALL Enable */

Or some such. But maybe that doesn't match the current style of the
file.

Roger.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.