[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 10/15] x86/hvm: Extend the hvm_copy_*() API with a pagefault_info pointer



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 23 November 2016 15:39
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; Tim (Xen.org) <tim@xxxxxxx>; Jun Nakajima
> <jun.nakajima@xxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>
> Subject: [PATCH 10/15] x86/hvm: Extend the hvm_copy_*() API with a
> pagefault_info pointer
> 
> which is filled with pagefault information should one occur.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/emulate.c        |  8 ++++---
>  xen/arch/x86/hvm/hvm.c            | 49 +++++++++++++++++++++++++--------------
>  xen/arch/x86/hvm/vmx/vvmx.c       |  9 ++++---
>  xen/arch/x86/mm/shadow/common.c   |  5 ++--
>  xen/include/asm-x86/hvm/support.h | 23 +++++++++++++-----
>  5 files changed, 63 insertions(+), 31 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 3ebb200..e50ee24 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -770,6 +770,7 @@ static int __hvmemul_read(
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
>      struct vcpu *curr = current;
> +    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> @@ -790,8 +791,8 @@ static int __hvmemul_read(
>          pfec |= PFEC_user_mode;
> 
>      rc = ((access_type == hvm_access_insn_fetch) ?
> -          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
> -          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
> +          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
> +          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
> 
>      switch ( rc )
>      {
> @@ -878,6 +879,7 @@ static int hvmemul_write(
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>      struct vcpu *curr = current;
> +    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> @@ -896,7 +898,7 @@ static int hvmemul_write(
>           (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
>          pfec |= PFEC_user_mode;
> 
> -    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
> +    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
> 
>      switch ( rc )
>      {
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 804cd88..afba51f 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2859,6 +2859,7 @@ void hvm_task_switch(
>      struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
>      bool_t otd_writable, ntd_writable;
>      unsigned long eflags;
> +    pagefault_info_t pfinfo;
>      int exn_raised, rc;
>      struct {
>          u16 back_link,__blh;
> @@ -2925,7 +2926,7 @@ void hvm_task_switch(
>      }
> 
>      rc = hvm_copy_from_guest_virt(
> -        &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
> +        &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> 
> @@ -2963,12 +2964,12 @@ void hvm_task_switch(
>                                  &tss.eip,
>                                  offsetof(typeof(tss), trace) -
>                                  offsetof(typeof(tss), eip),
> -                                PFEC_page_present);
> +                                PFEC_page_present, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> 
>      rc = hvm_copy_from_guest_virt(
> -        &tss, tr.base, sizeof(tss), PFEC_page_present);
> +        &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
>      /*
>       * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
>       * functions knew we want RO access.
> @@ -3008,7 +3009,8 @@ void hvm_task_switch(
>          tss.back_link = prev_tr.sel;
> 
>          rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
> -                                    &tss.back_link, sizeof(tss.back_link), 0);
> +                                    &tss.back_link, sizeof(tss.back_link), 0,
> +                                    &pfinfo);
>          if ( rc == HVMCOPY_bad_gva_to_gfn )
>              exn_raised = 1;
>          else if ( rc != HVMCOPY_okay )
> @@ -3045,7 +3047,8 @@ void hvm_task_switch(
>                                          16 << segr.attr.fields.db,
>                                          &linear_addr) )
>          {
> -            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0);
> +            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
> +                                        &pfinfo);
>              if ( rc == HVMCOPY_bad_gva_to_gfn )
>                  exn_raised = 1;
>              else if ( rc != HVMCOPY_okay )
> @@ -3068,7 +3071,8 @@ void hvm_task_switch(
>  #define HVMCOPY_phys       (0u<<2)
>  #define HVMCOPY_virt       (1u<<2)
>  static enum hvm_copy_result __hvm_copy(
> -    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
> +    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
> +    pagefault_info_t *pfinfo)
>  {
>      struct vcpu *curr = current;
>      unsigned long gfn;
> @@ -3109,7 +3113,15 @@ static enum hvm_copy_result __hvm_copy(
>                  if ( pfec & PFEC_page_shared )
>                      return HVMCOPY_gfn_shared;
>                  if ( flags & HVMCOPY_fault )
> +                {
> +                    if ( pfinfo )
> +                    {
> +                        pfinfo->linear = addr;
> +                        pfinfo->ec = pfec;
> +                    }
> +
>                      hvm_inject_page_fault(pfec, addr);
> +                }
>                  return HVMCOPY_bad_gva_to_gfn;
>              }
>              gpa |= (paddr_t)gfn << PAGE_SHIFT;
> @@ -3279,7 +3291,7 @@ enum hvm_copy_result hvm_copy_to_guest_phys(
>  {
>      return __hvm_copy(buf, paddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0, NULL);
>  }
> 
>  enum hvm_copy_result hvm_copy_from_guest_phys(
> @@ -3287,31 +3299,34 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>  {
>      return __hvm_copy(buf, paddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0, NULL);
>  }
> 
>  enum hvm_copy_result hvm_copy_to_guest_virt(
> -    unsigned long vaddr, void *buf, int size, uint32_t pfec)
> +    unsigned long vaddr, void *buf, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo)
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec, pfinfo);
>  }
> 
>  enum hvm_copy_result hvm_copy_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec)
> +    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo)
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec, pfinfo);
>  }
> 
>  enum hvm_copy_result hvm_fetch_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec)
> +    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo)
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_insn_fetch | pfec);
> +                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
>  }
> 
>  enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
> @@ -3319,7 +3334,7 @@ enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec, NULL);
>  }
> 
>  enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
> @@ -3327,7 +3342,7 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec, NULL);
>  }
> 
>  enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
> @@ -3335,7 +3350,7 @@ enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
>  {
>      return __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_insn_fetch | pfec);
> +                      PFEC_page_present | PFEC_insn_fetch | pfec, NULL);
>  }
> 
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index bcc4a97..7342d12 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -396,6 +396,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>      struct vcpu *v = current;
>      union vmx_inst_info info;
>      struct segment_register seg;
> +    pagefault_info_t pfinfo;
>      unsigned long base, index, seg_base, disp, offset;
>      int scale, size;
> 
> @@ -451,7 +452,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>              goto gp_fault;
> 
>          if ( poperandS != NULL &&
> -             hvm_copy_from_guest_virt(poperandS, base, size, 0)
> +             hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
>                    != HVMCOPY_okay )
>              return X86EMUL_EXCEPTION;
>          decode->mem = base;
> @@ -1611,6 +1612,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
>      struct vcpu *v = current;
>      struct vmx_inst_decoded decode;
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    pagefault_info_t pfinfo;
>      unsigned long gpa = 0;
>      int rc;
> 
> @@ -1620,7 +1622,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
> 
>      gpa = nvcpu->nv_vvmcxaddr;
> 
> -    rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0);
> +    rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          return X86EMUL_EXCEPTION;
> 
> @@ -1679,6 +1681,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
>  {
>      struct vcpu *v = current;
>      struct vmx_inst_decoded decode;
> +    pagefault_info_t pfinfo;
>      u64 value = 0;
>      int rc;
> 
> @@ -1690,7 +1693,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
> 
>      switch ( decode.type ) {
>      case VMX_INST_MEMREG_TYPE_MEMORY:
> -        rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0);
> +        rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
>          if ( rc != HVMCOPY_okay )
>              return X86EMUL_EXCEPTION;
>          break;
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index c8b61b9..d28eae1 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -179,6 +179,7 @@ hvm_read(enum x86_segment seg,
>           enum hvm_access_type access_type,
>           struct sh_emulate_ctxt *sh_ctxt)
>  {
> +    pagefault_info_t pfinfo;
>      unsigned long addr;
>      int rc;
> 
> @@ -188,9 +189,9 @@ hvm_read(enum x86_segment seg,
>          return rc;
> 
>      if ( access_type == hvm_access_insn_fetch )
> -        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0);
> +        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
>      else
> -        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0);
> +        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
> 
>      switch ( rc )
>      {
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index 9938450..4aa5a36 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -83,16 +83,27 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>   *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
>   *                          ordinary machine memory.
>   *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
> - *                          mapping to a guest physical address. In this case
> - *                          a page fault exception is automatically queued
> - *                          for injection into the current HVM VCPU.
> + *                          mapping to a guest physical address.  The
> + *                          pagefault_info_t structure will be filled in if
> + *                          provided, and a page fault exception is
> + *                          automatically queued for injection into the
> + *                          current HVM VCPU.
>   */
> +typedef struct pagefault_info
> +{
> +    unsigned long linear;
> +    int ec;
> +} pagefault_info_t;
> +
>  enum hvm_copy_result hvm_copy_to_guest_virt(
> -    unsigned long vaddr, void *buf, int size, uint32_t pfec);
> +    unsigned long vaddr, void *buf, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo);
>  enum hvm_copy_result hvm_copy_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec);
> +    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo);
>  enum hvm_copy_result hvm_fetch_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec);
> +    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +    pagefault_info_t *pfinfo);
> 
>  /*
>   * As above (copy to/from a guest virtual address), but no fault is generated
> --
> 2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.