[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 03/19] x86/emul: Simplify emulation state setup



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 28 November 2016 11:13
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; George Dunlap
> <George.Dunlap@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v2 03/19] x86/emul: Simplify emulation state setup
> 
> The current code to set up emulation state is ad-hoc and error prone.
> 
>  * Consistently zero all emulation state structures.
>  * Avoid explicitly initialising some state to 0.
>  * Explicitly identify all input and output state in x86_emulate_ctxt.  This
>    involves rearranging some fields.
>  * Have x86_decode() explicitly initialise all output state at its start.
> 
> While making the above changes, two minor tweaks:
> 
>  * Move the calculation of hvmemul_ctxt->ctxt.swint_emulate from
>    _hvm_emulate_one() to hvm_emulate_init_once().  It doesn't need
>    recalculating for each instruction.
>  * Change force_writeback to being a boolean, to match its use.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Acked-by: Tim Deegan <tim@xxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> 
> v2:
>  * Split x86_emulate_ctxt into three sections
> ---
>  xen/arch/x86/hvm/emulate.c             | 28 +++++++++++++++-------------
>  xen/arch/x86/mm.c                      | 14 ++++++++------
>  xen/arch/x86/mm/shadow/common.c        |  4 ++--
>  xen/arch/x86/x86_emulate/x86_emulate.c |  1 +
>  xen/arch/x86/x86_emulate/x86_emulate.h | 32
> ++++++++++++++++++++++----------
>  5 files changed, 48 insertions(+), 31 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index f1f6e2f..3efeead 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1770,13 +1770,6 @@ static int _hvm_emulate_one(struct
> hvm_emulate_ctxt *hvmemul_ctxt,
> 
>      vio->mmio_retry = 0;
> 
> -    if ( cpu_has_vmx )
> -        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
> -    else if ( cpu_has_svm_nrips )
> -        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_icebp;
> -    else
> -        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_all;
> -
>      rc = x86_emulate(&hvmemul_ctxt->ctxt, ops);
> 
>      if ( rc == X86EMUL_OKAY && vio->mmio_retry )
> @@ -1947,14 +1940,23 @@ void hvm_emulate_init_once(
>      struct hvm_emulate_ctxt *hvmemul_ctxt,
>      struct cpu_user_regs *regs)
>  {
> -    hvmemul_ctxt->intr_shadow =
> hvm_funcs.get_interrupt_shadow(current);
> -    hvmemul_ctxt->ctxt.regs = regs;
> -    hvmemul_ctxt->ctxt.force_writeback = 1;
> -    hvmemul_ctxt->seg_reg_accessed = 0;
> -    hvmemul_ctxt->seg_reg_dirty = 0;
> -    hvmemul_ctxt->set_context = 0;
> +    struct vcpu *curr = current;
> +
> +    memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
> +
> +    hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
>      hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
>      hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
> +
> +    hvmemul_ctxt->ctxt.regs = regs;
> +    hvmemul_ctxt->ctxt.force_writeback = true;
> +
> +    if ( cpu_has_vmx )
> +        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
> +    else if ( cpu_has_svm_nrips )
> +        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_icebp;
> +    else
> +        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_all;
>  }
> 
>  void hvm_emulate_init_per_insn(
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 5b0e9f3..d365f59 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5337,7 +5337,14 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned
> long addr,
>      struct domain *d = v->domain;
>      struct page_info *page;
>      l1_pgentry_t      pte;
> -    struct ptwr_emulate_ctxt ptwr_ctxt;
> +    struct ptwr_emulate_ctxt ptwr_ctxt = {
> +        .ctxt = {
> +            .regs = regs,
> +            .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
> +            .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
> +            .swint_emulate = x86_swint_emulate_none,
> +        },
> +    };
>      int rc;
> 
>      /* Attempt to read the PTE that maps the VA being accessed. */
> @@ -5363,11 +5370,6 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned
> long addr,
>          goto bail;
>      }
> 
> -    ptwr_ctxt.ctxt.regs = regs;
> -    ptwr_ctxt.ctxt.force_writeback = 0;
> -    ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
> -        is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
> -    ptwr_ctxt.ctxt.swint_emulate = x86_swint_emulate_none;
>      ptwr_ctxt.cr2 = addr;
>      ptwr_ctxt.pte = pte;
> 
> diff --git a/xen/arch/x86/mm/shadow/common.c
> b/xen/arch/x86/mm/shadow/common.c
> index 7e5b8b0..a4a3c4b 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -385,8 +385,9 @@ const struct x86_emulate_ops
> *shadow_init_emulation(
>      struct vcpu *v = current;
>      unsigned long addr;
> 
> +    memset(sh_ctxt, 0, sizeof(*sh_ctxt));
> +
>      sh_ctxt->ctxt.regs = regs;
> -    sh_ctxt->ctxt.force_writeback = 0;
>      sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
> 
>      if ( is_pv_vcpu(v) )
> @@ -396,7 +397,6 @@ const struct x86_emulate_ops
> *shadow_init_emulation(
>      }
> 
>      /* Segment cache initialisation. Primed with CS. */
> -    sh_ctxt->valid_seg_regs = 0;
>      creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
> 
>      /* Work out the emulation mode. */
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c
> b/xen/arch/x86/x86_emulate/x86_emulate.c
> index d82e85d..532bd32 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -1904,6 +1904,7 @@ x86_decode(
>      state->regs = ctxt->regs;
>      state->eip = ctxt->regs->eip;
> 
> +    /* Initialise output state in x86_emulate_ctxt */
>      ctxt->retire.byte = 0;
> 
>      op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt-
> >addr_size/8;
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h
> b/xen/arch/x86/x86_emulate/x86_emulate.h
> index ec824ce..ab566c0 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.h
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.h
> @@ -410,6 +410,23 @@ struct cpu_user_regs;
> 
>  struct x86_emulate_ctxt
>  {
> +    /*
> +     * Input-only state:
> +     */
> +
> +    /* Software event injection support. */
> +    enum x86_swint_emulation swint_emulate;
> +
> +    /* Set this if writes may have side effects. */
> +    bool force_writeback;
> +
> +    /* Caller data that can be used by x86_emulate_ops' routines. */
> +    void *data;
> +
> +    /*
> +     * Input/output state:
> +     */
> +
>      /* Register state before/after emulation. */
>      struct cpu_user_regs *regs;
> 
> @@ -419,14 +436,12 @@ struct x86_emulate_ctxt
>      /* Stack pointer width in bits (16, 32 or 64). */
>      unsigned int sp_size;
> 
> -    /* Canonical opcode (see below). */
> -    unsigned int opcode;
> -
> -    /* Software event injection support. */
> -    enum x86_swint_emulation swint_emulate;
> +    /*
> +     * Output-only state:
> +     */
> 
> -    /* Set this if writes may have side effects. */
> -    uint8_t force_writeback;
> +    /* Canonical opcode (see below) (valid only on X86EMUL_OKAY). */
> +    unsigned int opcode;
> 
>      /* Retirement state, set by the emulator (valid only on X86EMUL_OKAY).
> */
>      union {
> @@ -437,9 +452,6 @@ struct x86_emulate_ctxt
>          } flags;
>          uint8_t byte;
>      } retire;
> -
> -    /* Caller data that can be used by x86_emulate_ops' routines. */
> -    void *data;
>  };
> 
>  /*
> --
> 2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.