
Re: [Xen-devel] [PATCH 1/2] x86/mem_event: Deliver gla fault EPT violation information

On Thu, Aug 7, 2014 at 7:34 PM, Andrew Cooper <andrew.cooper3@xxxxxxxxxx> wrote:
On 07/08/14 18:14, Tamas Lengyel wrote:
According to the Intel manual, the gla_fault bit is only defined when bit 7 (gla_valid) is 1; when bit 7 is 0, gla_fault is reserved and cleared to 0. So the combination of gla_valid = 0 and gla_fault = 0 is safe to use as a fallback. Furthermore, there is no mem_access support for AMD NPT, so at this point this is just a placeholder in the svm code.
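
In code, the fallback amounts to something like this (just a sketch, using the EPT_GLA_* exit-qualification definitions the patch already relies on in vmx.c):

    /* Sketch only: bit 8 (gla_fault) is reserved and reads as 0 whenever
     * bit 7 (gla_valid) is clear, so gla_valid == 0 && gla_fault == 0 is
     * a safe fallback for vendors (currently SVM) that cannot report
     * this information. */
    bool_t gla_valid = !!(qualification & EPT_GLA_VALID);
    bool_t gla_fault = gla_valid ? !!(qualification & EPT_GLA_FAULT) : 0;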


That is entirely my point.  You are putting Intel-isms into a common interface which AMD currently can't use, and which might cause issues for someone trying to reintroduce feature parity.

At a minimum, I think you need a gla_fault_info_valid bit, unless you can guarantee that AMD NPT will never gain the ability to report a valid gla.  (I don't know whether it can or not.  Looking at the code, it would appear that it can't currently, but I would certainly not bet against it ever gaining this ability.)
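
Concretely, the shape I have in mind for the public struct is something like this (field name hypothetical, purely to illustrate):

    uint16_t gla_valid:1;
    uint16_t gla_fault:1;
    /* Hypothetical guard bit: only set by implementations which can
     * genuinely distinguish a linear fault from a paging-structure
     * fault; SVM would leave it clear for now. */
    uint16_t gla_fault_info_valid:1;
    uint16_t available:10;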

~Andrew

That's fair. Looking at the AMD manual, it seems they have the equivalent of the GLA_FAULT bit as well, but they use two separate bits to indicate the two conditions (instead of Intel's arguably more complex approach). So, to add forward compatibility with AMD, I'll split gla_fault into two separate fields: fault_pt, to indicate that the fault occurred during a translation or a page-table update, and fault_pa, to indicate that it's a fault on a physical address (whether by direct access, after a translation, or via a TLB hit).
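
On the VMX side that would look roughly like this (sketch only; AMD would set the two fields directly from its two separate bits):

    /* Sketch: deriving the proposed pair of fields from Intel's single
     * exit-qualification bit.  Both stay 0 when gla_valid is clear,
     * which is also the placeholder behaviour for the current SVM code. */
    bool_t fault_pa = gla_valid && !!(qualification & EPT_GLA_FAULT);
    bool_t fault_pt = gla_valid && !(qualification & EPT_GLA_FAULT);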




On Thu, Aug 7, 2014 at 6:50 PM, Andrew Cooper <andrew.cooper3@xxxxxxxxxx> wrote:
On 07/08/14 17:43, Tamas K Lengyel wrote:
> On Intel EPT the exit qualification generated by a violation also includes a bit (EPT_GLA_FAULT) which describes the following information:
> Set if the access causing the EPT violation is to a guest-physical address that is the translation of a linear address. Clear if the access causing the EPT violation is to a paging-structure entry as part of a page walk or the update of an accessed or dirty bit.
>
> For more information see Table 27-7 in the Intel SDM.
>
> This patch extends the mem_event system to deliver this extra information, which could be useful for determining the cause of a violation.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c         | 6 ++++--
>  xen/arch/x86/hvm/svm/svm.c     | 2 +-
>  xen/arch/x86/hvm/vmx/vmx.c     | 4 +++-
>  xen/arch/x86/mm/p2m.c          | 7 ++++---
>  xen/include/asm-x86/hvm/hvm.h  | 4 +++-
>  xen/include/asm-x86/p2m.h      | 6 +++---
>  xen/include/public/mem_event.h | 3 ++-
>  7 files changed, 20 insertions(+), 12 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index e834406..b09a905 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2724,6 +2724,7 @@ void hvm_inject_page_fault(int errcode, unsigned long cr2)
>
>  int hvm_hap_nested_page_fault(paddr_t gpa,
>                                bool_t gla_valid,
> +                              bool_t gla_fault,
>                                unsigned long gla,
>                                bool_t access_r,
>                                bool_t access_w,
> @@ -2832,8 +2833,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>
>          if ( violation )
>          {
> -            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
> -                                        access_w, access_x, &req_ptr) )
> +            if ( p2m_mem_access_check(gpa, gla_valid, gla_fault, gla,
> +                                        access_r, access_w, access_x,
> +                                        &req_ptr) )
>              {
>                  fall_through = 1;
>              } else {
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 76616ac..1dbb12f 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1403,7 +1403,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
>      p2m_access_t p2ma;
>      struct p2m_domain *p2m = NULL;
>
> -    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
> +    ret = hvm_hap_nested_page_fault(gpa, 0, 0, ~0ul,

How do you expect this to work?  You don't know whether it was a linear
fault or a paging-structure fault for the NPT case, so presenting it as
a paging-structure fault is certainly wrong.

~Andrew

>                                      1, /* All NPFs count as reads */
>                                      npfec & PFEC_write_access,
>                                      npfec & PFEC_insn_fetch);
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2caa04a..0bd77b3 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2375,7 +2375,9 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
>      else
>          gla = ~0ull;
>      ret = hvm_hap_nested_page_fault(gpa,
> -                                    !!(qualification & EPT_GLA_VALID), gla,
> +                                    !!(qualification & EPT_GLA_VALID),
> +                                    !!(qualification & EPT_GLA_FAULT),
> +                                    gla,
>                                      !!(qualification & EPT_READ_VIOLATION),
>                                      !!(qualification & EPT_WRITE_VIOLATION),
>                                      !!(qualification & EPT_EXEC_VIOLATION));
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index bca9f0f..303dc2b 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1323,9 +1323,9 @@ void p2m_mem_paging_resume(struct domain *d)
>      }
>  }
>
> -bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
> -                          bool_t access_r, bool_t access_w, bool_t access_x,
> -                          mem_event_request_t **req_ptr)
> +bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, bool_t gla_fault,
> +                          unsigned long gla, bool_t access_r, bool_t access_w,
> +                          bool_t access_x, mem_event_request_t **req_ptr)
>  {
>      struct vcpu *v = current;
>      unsigned long gfn = gpa >> PAGE_SHIFT;
> @@ -1404,6 +1404,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>          req->gfn = gfn;
>          req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
>          req->gla_valid = gla_valid;
> +        req->gla_fault = gla_fault;
>          req->gla = gla;
>          req->access_r = access_r;
>          req->access_w = access_w;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 0ebd478..38562ff 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -456,7 +456,9 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
>  }
>
>  int hvm_hap_nested_page_fault(paddr_t gpa,
> -                              bool_t gla_valid, unsigned long gla,
> +                              bool_t gla_valid,
> +                              bool_t gla_fault,
> +                              unsigned long gla,
>                                bool_t access_r,
>                                bool_t access_w,
>                                bool_t access_x);
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 0ddbadb..8616fda 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -597,9 +597,9 @@ void p2m_mem_paging_resume(struct domain *d);
>   * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
>   * then the caller must put the event in the ring (once having released get_gfn*
>   * locks -- caller must also xfree the request. */
> -bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
> -                          bool_t access_r, bool_t access_w, bool_t access_x,
> -                          mem_event_request_t **req_ptr);
> +bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, bool_t gla_fault,
> +                          unsigned long gla, bool_t access_r, bool_t access_w,
> +                          bool_t access_x, mem_event_request_t **req_ptr);
>  /* Resumes the running of the VCPU, restarting the last instruction */
>  void p2m_mem_access_resume(struct domain *d);
>
> diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
> index 3831b41..5785ff9 100644
> --- a/xen/include/public/mem_event.h
> +++ b/xen/include/public/mem_event.h
> @@ -62,7 +62,8 @@ typedef struct mem_event_st {
>      uint16_t access_w:1;
>      uint16_t access_x:1;
>      uint16_t gla_valid:1;
> -    uint16_t available:12;
> +    uint16_t gla_fault:1;
> +    uint16_t available:11;
>
>      uint16_t reason;
>  } mem_event_request_t, mem_event_response_t;





 

