[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH for 4.9 6/6] x86/emul: Require callers to provide LMA in the emulation context



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 31 March 2017 20:51
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Tim
> (Xen.org) <tim@xxxxxxx>; Julien Grall <julien.grall@xxxxxxx>
> Subject: [PATCH for 4.9 6/6] x86/emul: Require callers to provide LMA in the
> emulation context
> 
> Long mode (or not) influences emulation behaviour in a number of cases.
> Instead of reusing the ->read_msr() hook to obtain EFER.LMA, require callers
> to provide it directly.
> 
> This simplifies all long mode checks during emulation to a simple boolean
> read, removing embedded msr reads.  It also allows for the removal of a local
> variable in the sysenter emulation block, and removes a latent bug in the
> syscall emulation block where rc contains a non X86EMUL_* constant for a
> period of time.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

emulate parts...

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> ---
>  tools/fuzz/x86_instruction_emulator/fuzz-emul.c |  1 +
>  tools/tests/x86_emulator/test_x86_emulator.c    |  4 ++
>  xen/arch/x86/hvm/emulate.c                      |  4 +-
>  xen/arch/x86/mm.c                               |  2 +
>  xen/arch/x86/mm/shadow/common.c                 |  5 +--
>  xen/arch/x86/traps.c                            |  1 +
>  xen/arch/x86/x86_emulate/x86_emulate.c          | 51 ++++++-------------------
>  xen/arch/x86/x86_emulate/x86_emulate.h          |  3 ++
>  8 files changed, 28 insertions(+), 43 deletions(-)
> 
> diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
> index 8488816..2e42e54 100644
> --- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
> +++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
> @@ -649,6 +649,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data_p, size_t size)
>      struct cpu_user_regs regs = {};
>      struct x86_emulate_ctxt ctxt = {
>          .regs = &regs,
> +        .lma = sizeof(void *) == 8,
>          .addr_size = 8 * sizeof(void *),
>          .sp_size = 8 * sizeof(void *),
>      };
> diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
> index 5be8ddc..efeb175 100644
> --- a/tools/tests/x86_emulator/test_x86_emulator.c
> +++ b/tools/tests/x86_emulator/test_x86_emulator.c
> @@ -319,6 +319,7 @@ int main(int argc, char **argv)
>      ctxt.regs = &regs;
>      ctxt.force_writeback = 0;
>      ctxt.vendor    = X86_VENDOR_UNKNOWN;
> +    ctxt.lma       = sizeof(void *) == 8;
>      ctxt.addr_size = 8 * sizeof(void *);
>      ctxt.sp_size   = 8 * sizeof(void *);
> 
> @@ -2922,6 +2923,7 @@ int main(int argc, char **argv)
>      {
>          decl_insn(vzeroupper);
> 
> +        ctxt.lma = false;
>          ctxt.sp_size = ctxt.addr_size = 32;
> 
>          asm volatile ( "vxorps %xmm2, %xmm2, %xmm3\n"
> @@ -2949,6 +2951,7 @@ int main(int argc, char **argv)
>              goto fail;
>          printf("okay\n");
> 
> +        ctxt.lma = true;
>          ctxt.sp_size = ctxt.addr_size = 64;
>      }
>      else
> @@ -3001,6 +3004,7 @@ int main(int argc, char **argv)
>              continue;
> 
>          memcpy(res, blobs[j].code, blobs[j].size);
> +        ctxt.lma = blobs[j].bitness == 64;
>          ctxt.addr_size = ctxt.sp_size = blobs[j].bitness;
> 
>          if ( ctxt.addr_size == sizeof(void *) * CHAR_BIT )
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index efac2e9..65cb707 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2047,7 +2047,9 @@ void hvm_emulate_init_per_insn(
>      unsigned int pfec = PFEC_page_present;
>      unsigned long addr;
> 
> -    if ( hvm_long_mode_active(curr) &&
> +    hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr);
> +
> +    if ( hvmemul_ctxt->ctxt.lma &&
>           hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
>          hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
>      else
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 3918a37..eda220d 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5410,6 +5410,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>          .ctxt = {
>              .regs = regs,
>              .vendor = d->arch.cpuid->x86_vendor,
> +            .lma = true,
>              .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
>              .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
>          },
> @@ -5564,6 +5565,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>      struct x86_emulate_ctxt ctxt = {
>          .regs = regs,
>          .vendor = v->domain->arch.cpuid->x86_vendor,
> +        .lma = true,
>          .addr_size = addr_size,
>          .sp_size = addr_size,
>          .data = &mmio_ro_ctxt
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 2fc796b..e42d3fd 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -328,15 +328,14 @@ const struct x86_emulate_ops *shadow_init_emulation(
> 
>      sh_ctxt->ctxt.regs = regs;
>      sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
> +    sh_ctxt->ctxt.lma = hvm_long_mode_active(v);
> 
>      /* Segment cache initialisation. Primed with CS. */
>      creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
> 
>      /* Work out the emulation mode. */
> -    if ( hvm_long_mode_active(v) && creg->attr.fields.l )
> -    {
> +    if ( sh_ctxt->ctxt.lma && creg->attr.fields.l )
>          sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
> -    }
>      else
>      {
>          sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 0d54baf..09dc2f1 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2966,6 +2966,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
>      struct priv_op_ctxt ctxt = {
>          .ctxt.regs = regs,
>          .ctxt.vendor = currd->arch.cpuid->x86_vendor,
> +        .ctxt.lma = true,
>      };
>      int rc;
>      unsigned int eflags, ar;
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
> index 7315ca6..cc354bc 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -968,11 +968,10 @@ do {                                                    \
> 
>  #define validate_far_branch(cs, ip) ({                                  \
>      if ( sizeof(ip) <= 4 ) {                                            \
> -        ASSERT(in_longmode(ctxt, ops) <= 0);                            \
> +        ASSERT(!ctxt->lma);                                             \
>          generate_exception_if((ip) > (cs)->limit, EXC_GP, 0);           \
>      } else                                                              \
> -        generate_exception_if(in_longmode(ctxt, ops) &&                 \
> -                              (cs)->attr.fields.l                       \
> +        generate_exception_if(ctxt->lma && (cs)->attr.fields.l          \
>                                ? !is_canonical_address(ip)               \
>                                : (ip) > (cs)->limit, EXC_GP, 0);         \
>  })
> @@ -1630,20 +1629,6 @@ static bool vcpu_has(
>  #endif
> 
>  static int
> -in_longmode(
> -    struct x86_emulate_ctxt *ctxt,
> -    const struct x86_emulate_ops *ops)
> -{
> -    uint64_t efer;
> -
> -    if ( !ops->read_msr ||
> -         unlikely(ops->read_msr(MSR_EFER, &efer, ctxt) != X86EMUL_OKAY) )
> -        return -1;
> -
> -    return !!(efer & EFER_LMA);
> -}
> -
> -static int
>  realmode_load_seg(
>      enum x86_segment seg,
>      uint16_t sel,
> @@ -1767,8 +1752,7 @@ protmode_load_seg(
>           * Experimentally in long mode, the L and D bits are checked before
>           * the Present bit.
>           */
> -        if ( in_longmode(ctxt, ops) &&
> -             (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
> +        if ( ctxt->lma && (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
>              goto raise_exn;
>          sel = (sel ^ rpl) | cpl;
>          break;
> @@ -1818,10 +1802,8 @@ protmode_load_seg(
> 
>      if ( !is_x86_user_segment(seg) )
>      {
> -        int lm = (desc.b & (1u << 12)) ? 0 : in_longmode(ctxt, ops);
> +        bool lm = (desc.b & (1u << 12)) ? 0 : ctxt->lma;
> 
> -        if ( lm < 0 )
> -            return X86EMUL_UNHANDLEABLE;
>          if ( lm )
>          {
>              switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8,
> @@ -5195,7 +5177,7 @@ x86_emulate(
>                  case 0x03: /* busy 16-bit TSS */
>                  case 0x04: /* 16-bit call gate */
>                  case 0x05: /* 16/32-bit task gate */
> -                    if ( in_longmode(ctxt, ops) )
> +                    if ( ctxt->lma )
>                          break;
>                      /* fall through */
>                  case 0x02: /* LDT */
> @@ -5242,7 +5224,7 @@ x86_emulate(
>                  {
>                  case 0x01: /* available 16-bit TSS */
>                  case 0x03: /* busy 16-bit TSS */
> -                    if ( in_longmode(ctxt, ops) )
> +                    if ( ctxt->lma )
>                          break;
>                      /* fall through */
>                  case 0x02: /* LDT */
> @@ -5292,10 +5274,7 @@ x86_emulate(
>          sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
> 
>  #ifdef __x86_64__
> -        rc = in_longmode(ctxt, ops);
> -        if ( rc < 0 )
> -            goto cannot_emulate;
> -        if ( rc )
> +        if ( ctxt->lma )
>          {
>              cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
> 
> @@ -5732,9 +5711,7 @@ x86_emulate(
>              dst.val = src.val;
>          break;
> 
> -    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */ {
> -        int lm;
> -
> +    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
>          vcpu_must_have(sep);
>          generate_exception_if(mode_ring0(), EXC_GP, 0);
>          generate_exception_if(!in_protmode(ctxt, ops), EXC_GP, 0);
> @@ -5745,17 +5722,14 @@ x86_emulate(
>              goto done;
> 
>          generate_exception_if(!(msr_val & 0xfffc), EXC_GP, 0);
> -        lm = in_longmode(ctxt, ops);
> -        if ( lm < 0 )
> -            goto cannot_emulate;
> 
>          _regs.eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF | X86_EFLAGS_RF);
> 
>          cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */
>          cs.base = 0;   /* flat segment */
>          cs.limit = ~0u;  /* 4GB limit */
> -        cs.attr.bytes = lm ? 0xa9b  /* G+L+P+S+Code */
> -                           : 0xc9b; /* G+DB+P+S+Code */
> +        cs.attr.bytes = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
> +                                  : 0xc9b; /* G+DB+P+S+Code */
> 
>          sreg.sel = cs.sel + 8;
>          sreg.base = 0;   /* flat segment */
> @@ -5770,16 +5744,15 @@ x86_emulate(
>          if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_EIP,
>                                   &msr_val, ctxt)) != X86EMUL_OKAY )
>              goto done;
> -        _regs.r(ip) = lm ? msr_val : (uint32_t)msr_val;
> +        _regs.r(ip) = ctxt->lma ? msr_val : (uint32_t)msr_val;
> 
>          if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_ESP,
>                                   &msr_val, ctxt)) != X86EMUL_OKAY )
>              goto done;
> -        _regs.r(sp) = lm ? msr_val : (uint32_t)msr_val;
> +        _regs.r(sp) = ctxt->lma ? msr_val : (uint32_t)msr_val;
> 
>          singlestep = _regs.eflags & X86_EFLAGS_TF;
>          break;
> -    }
> 
>      case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
>          vcpu_must_have(sep);
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
> index 215adf6..0479e30 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.h
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.h
> @@ -457,6 +457,9 @@ struct x86_emulate_ctxt
>      /* Set this if writes may have side effects. */
>      bool force_writeback;
> 
> +    /* Long mode active? */
> +    bool lma;
> +
>      /* Caller data that can be used by x86_emulate_ops' routines. */
>      void *data;
> 
> --
> 2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.