
[Xen-devel] [PATCH for-4.7] xen/nested_p2m: Don't walk EPT tables with a regular PT walker



hostmode->p2m_ga_to_gfn() is a plain PT walker, and is not appropriate for a
general L1 p2m walk.  It is fine for AMD, as NPT shares the same format as
normal pagetables.  For Intel EPT, however, it is wrong.

The translation ends up correct (as the formats are sufficiently similar), but
the control bits in the lower 12 bits differ in meaning.  A plain PT walker
sets the A/D bits (bits 5 and 6) as it walks, but in EPT entries these are the
top bit of EMT (the caching type) and IPAT respectively.  This in turn causes
problems when the EPT tables are subsequently used.
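
For illustration, a rough sketch of the conflicting low-bit layouts (the mask
names below are illustrative, not Xen's actual definitions):

    /* Regular pagetable entry: bits a PT walker sets as it walks. */
    #define PTE_ACCESSED    (1UL << 5)   /* A bit */
    #define PTE_DIRTY       (1UL << 6)   /* D bit */

    /* Intel EPT entry: the same positions carry caching information. */
    #define EPT_EMT_MASK    (0x7UL << 3) /* bits 3-5: memory type (EMT) */
    #define EPT_IPAT        (1UL << 6)   /* bit 6: ignore PAT */

    /*
     * ORing in PTE_ACCESSED | PTE_DIRTY therefore flips the top EMT
     * bit and sets IPAT, silently changing the caching attributes
     * which the L1 hypervisor specified for the mapping.
     */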

Replace hostmode->p2m_ga_to_gfn() with nestedhap_walk_L1_p2m() in
paging_gva_to_gfn(), which is the correct function for the task.  This
involves making nestedhap_walk_L1_p2m() non-static, and adding
vmx_vmcs_enter/exit() pairs to nvmx_hap_walk_L1_p2m(), as it is now reachable
from contexts where v != current.
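
The enter/exit pair is needed because __vmread() acts on whichever VMCS is
currently loaded on the pCPU.  A minimal sketch of the pattern (error
handling elided):

    vmx_vmcs_enter(v);                        /* load v's VMCS */
    __vmread(EXIT_QUALIFICATION, &exit_qual); /* now reads v's state */
    /* ... nested EPT translation ... */
    vmx_vmcs_exit(v);                         /* restore the previous VMCS */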

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c         |  4 ++++
 xen/arch/x86/mm/hap/nested_hap.c    |  2 +-
 xen/arch/x86/mm/p2m.c               | 19 ++++++++++++++-----
 xen/include/asm-x86/hvm/nestedhvm.h |  4 ++++
 4 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 35f3687..faa8b69 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2036,6 +2036,8 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
+    vmx_vmcs_enter(v);
+
     __vmread(EXIT_QUALIFICATION, &exit_qual);
     rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
                              &exit_qual, &exit_reason);
@@ -2060,6 +2062,8 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
         break;
     }
 
+    vmx_vmcs_exit(v);
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 9cee5a0..d41bb09 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -141,7 +141,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
  * walk is successful, the translated value is returned in
  * L1_gpa. The result value tells what to do next.
  */
-static int
+int
 nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
                       unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 94eabf6..de5791b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2081,20 +2081,29 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
 
     if ( is_hvm_vcpu(v) && paging_mode_hap(v->domain) && nestedhvm_is_n2(v) )
     {
-        unsigned long gfn;
+        unsigned long l2_gfn, l1_gfn;
         struct p2m_domain *p2m;
         const struct paging_mode *mode;
-        uint32_t pfec_21 = *pfec;
         uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
+        uint8_t l1_p2ma;
+        unsigned int l1_page_order;
+        int rv;
 
         /* translate l2 guest va into l2 guest gfn */
         p2m = p2m_get_nestedp2m(v, np2m_base);
         mode = paging_get_nestedmode(v);
-        gfn = mode->gva_to_gfn(v, p2m, va, pfec);
+        l2_gfn = mode->gva_to_gfn(v, p2m, va, pfec);
+
+        if ( l2_gfn == INVALID_GFN )
+            return INVALID_GFN;
 
         /* translate l2 guest gfn into l1 guest gfn */
-        return hostmode->p2m_ga_to_gfn(v, hostp2m, np2m_base,
-                                       gfn << PAGE_SHIFT, &pfec_21, NULL);
+        rv = nestedhap_walk_L1_p2m(v, l2_gfn, &l1_gfn, &l1_page_order, &l1_p2ma,
+                                   1,
+                                   !!(*pfec & PFEC_write_access),
+                                   !!(*pfec & PFEC_insn_fetch));
+
+        return rv == NESTEDHVM_PAGEFAULT_DONE ? l1_gfn : INVALID_GFN;
     }
 
     return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index cf1a8f4..bc82425 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -56,6 +56,10 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     bool_t access_r, bool_t access_w, bool_t access_x);
 
+int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                          unsigned int *page_order, uint8_t *p2m_acc,
+                          bool_t access_r, bool_t access_w, bool_t access_x);
+
 /* IO permission map */
 unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
 
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel