
Re: [Xen-devel] [PATCH v6 09/10] x86/hvm: Add SMAP support to HVM guest



> From: Wu, Feng
> Sent: Wednesday, May 07, 2014 4:20 PM
> New Intel CPUs support SMAP (Supervisor Mode Access Prevention).
> SMAP prevents supervisor-mode accesses to any linear address with
> a valid translation for which the U/S flag (bit 2) is 1 in every
> paging-structure entry controlling the translation for the linear
> address.
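
For readers new to SMAP, the rule described above reduces to a small
predicate.  A rough sketch in plain C (illustrative names only, not the
patch's code):

    #include <stdbool.h>

    /* Sketch of the architectural SMAP rule quoted above. */
    static bool smap_faults(bool cr4_smap, bool supervisor_access,
                            bool user_mapping, bool cpl3_or_implicit,
                            bool eflags_ac)
    {
        return cr4_smap && supervisor_access && user_mapping &&
               (cpl3_or_implicit || !eflags_ac);
    }
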
> 
> Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
>  xen/arch/x86/hvm/hvm.c        |  3 +++
>  xen/arch/x86/mm/guest_walk.c  | 40 ++++++++++++++++++++++++++++++----------
>  xen/include/asm-x86/hvm/hvm.h | 18 +++++++++++++++++-
>  3 files changed, 50 insertions(+), 11 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index ac05160..76ccd07 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3039,6 +3039,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>          if ( (count == 0) && !cpu_has_smep )
>              *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
> 
> +        if ( (count == 0) && !cpu_has_smap )
> +            *ebx &= ~cpufeat_mask(X86_FEATURE_SMAP);
> +
>          /* Don't expose MPX to hvm when VMX support is not available */
>          if ( (count == 0) &&
>               (!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
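
As background for readers: cpufeat_mask() selects the feature's bit
within its 32-bit CPUID word, so the hunk above hides SMAP from guests
on hosts that lack it, mirroring the existing SMEP handling.  A sketch
of the idiom, with illustrative names (Xen's real macro lives in
cpufeature.h; SMAP is CPUID.(EAX=7,ECX=0):EBX bit 20):

    /* Illustrative only; see cpufeat_mask() in Xen for the real thing. */
    static unsigned int hide_smap(unsigned int leaf7_ebx, int host_has_smap)
    {
        if ( !host_has_smap )
            leaf7_ebx &= ~(1u << 20);    /* clear the SMAP feature bit */
        return leaf7_ebx;
    }
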
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index 70460b6..bb38fda 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -144,7 +144,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>      guest_l4e_t *l4p;
>  #endif
>      uint32_t gflags, mflags, iflags, rc = 0;
> -    int smep;
> +    bool_t smep = 0, smap = 0;
>      bool_t pse1G = 0, pse2M = 0;
>      p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
> 
> @@ -159,13 +159,33 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>      mflags = mandatory_flags(v, pfec);
>      iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
> 
> -    /* SMEP: kernel-mode instruction fetches from user-mode mappings
> -     * should fault.  Unlike NX or invalid bits, we're looking for _all_
> -     * entries in the walk to have _PAGE_USER set, so we need to do the
> -     * whole walk as if it were a user-mode one and then invert the answer. */
> -    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
> -            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
> -    if ( smep )
> +    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
> +    {
> +        struct segment_register seg;
> +        const struct cpu_user_regs *regs = guest_cpu_user_regs();
> +
> +        hvm_get_segment_register(v, x86_seg_ss, &seg);
> +
> +        /* SMEP: kernel-mode instruction fetches from user-mode mappings
> +         * should fault.  Unlike NX or invalid bits, we're looking for _all_
> +         * entries in the walk to have _PAGE_USER set, so we need to do the
> +         * whole walk as if it were a user-mode one and then invert the answer. */
> +        smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
> +
> +        /*
> +         * SMAP: kernel-mode data accesses from user-mode mappings should fault.
> +         * A fault is considered a SMAP violation if all of the following conditions are true:
> +         *   - X86_CR4_SMAP is set in CR4
> +         *   - A user page is accessed
> +         *   - CPL = 3 or X86_EFLAGS_AC is clear
> +         *   - Page fault in kernel mode
> +         */
> +        smap = hvm_smap_enabled(v) &&
> +               ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
> +    }
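
Spelling out the interesting cases of the condition above for the
record (same logic as the patch; an illustrative walkthrough only):

    /* SS.DPL == 0, EFLAGS.AC == 0: a stray kernel dereference of a
     * user pointer -> smap is set, so the walk below will fault.
     * SS.DPL == 0, EFLAGS.AC == 1: the kernel opened a deliberate
     * user-access window (stac()) -> smap stays clear, access allowed.
     * SS.DPL == 3: AC is ignored; implicit supervisor accesses from
     * CPL 3 are always subject to SMAP. */
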
> +
> +    if ( smep || smap )
>          mflags |= _PAGE_USER;
> 
>  #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
> @@ -338,8 +358,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>  #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
>  set_ad:
>  #endif
> -    /* Now re-invert the user-mode requirement for SMEP. */
> -    if ( smep )
> +    /* Now re-invert the user-mode requirement for SMEP and SMAP. */
> +    if ( smep || smap )
>          rc ^= _PAGE_USER;
> 
>      /* Go back and set accessed and dirty bits only if the walk was a
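
The invert trick is subtle enough to restate: the walker accumulates,
per level, the mandatory flags missing from an entry, so with
_PAGE_USER made mandatory the bit ends up set in rc iff some level was
supervisor-only.  XORing it afterwards leaves the bit set iff every
level was a user mapping, which is exactly when SMEP/SMAP must fault.
Roughly (a standalone sketch, not the real walker):

    #define _PAGE_USER 0x004    /* U/S, bit 2, as in the x86 page flags */

    /* missing_user: _PAGE_USER set iff some level lacked the U/S bit. */
    static unsigned int smep_smap_fault_bits(unsigned int missing_user)
    {
        unsigned int rc = missing_user;  /* walk done as if user-mode   */
        rc ^= _PAGE_USER;                /* set iff ALL levels were user */
        return rc;                       /* bit set -> SMEP/SMAP fault  */
    }
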
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index c373930..0ab6b70 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -260,6 +260,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
>      (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
>  #define hvm_smep_enabled(v) \
>      (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
> +#define hvm_smap_enabled(v) \
> +    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
>  #define hvm_nx_enabled(v) \
>      (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
> 
> @@ -360,13 +362,26 @@ static inline bool_t hvm_vcpu_has_smep(void)
> 
>      hvm_cpuid(0, &eax, NULL, NULL, NULL);
> 
> -    if (eax < 7)
> +    if ( eax < 7 )
>          return 0;
> 
>      hvm_cpuid(7, NULL, &ebx, NULL, NULL);
>      return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
>  }
> 
> +static inline bool_t hvm_vcpu_has_smap(void)
> +{
> +    unsigned int eax, ebx;
> +
> +    hvm_cpuid(0, &eax, NULL, NULL, NULL);
> +
> +    if ( eax < 7 )
> +        return 0;
> +
> +    hvm_cpuid(7, NULL, &ebx, NULL, NULL);
> +    return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
> +}
> +
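
Not an objection to the ack, but hvm_vcpu_has_smep() and
hvm_vcpu_has_smap() are now identical apart from the feature bit; a
possible follow-up cleanup (a sketch only, not part of this patch)
could share the body:

    static inline bool_t hvm_vcpu_has_feat(uint32_t mask)
    {
        unsigned int eax, ebx;

        hvm_cpuid(0, &eax, NULL, NULL, NULL);
        if ( eax < 7 )
            return 0;

        hvm_cpuid(7, NULL, &ebx, NULL, NULL);
        return !!(ebx & mask);
    }

    #define hvm_vcpu_has_smep() hvm_vcpu_has_feat(cpufeat_mask(X86_FEATURE_SMEP))
    #define hvm_vcpu_has_smap() hvm_vcpu_has_feat(cpufeat_mask(X86_FEATURE_SMAP))
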
>  /* These reserved bits in lower 32 remain 0 after any load of CR0 */
>  #define HVM_CR0_GUEST_RESERVED_BITS             \
>      (~((unsigned long)                          \
> @@ -387,6 +402,7 @@ static inline bool_t hvm_vcpu_has_smep(void)
>          X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
>          X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
>          (hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) |      \
> +        (hvm_vcpu_has_smap() ? X86_CR4_SMAP : 0) |      \
>          (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
>          ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
>                        ? X86_CR4_VMXE : 0)  |             \
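
For completeness: this mask is what turns a guest attempt to set an
unexposed CR4 bit into #GP.  The consumer looks roughly like the
following (simplified from hvm_set_cr4(), so treat the details as a
sketch):

    if ( value & HVM_CR4_GUEST_RESERVED_BITS(v) )
    {
        /* e.g. the guest set CR4.SMAP on a host without SMAP */
        hvm_inject_hw_exception(TRAP_gp_fault, 0);
        return X86EMUL_EXCEPTION;
    }
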
> --
> 1.8.3.1

