
[Xen-devel] [PATCH] x86/nested-hap: Fix handling of L0_ERROR



When nestedhvm_hap_nested_page_fault() returns L0_ERROR,
hvm_hap_nested_page_fault() operates on the adjusted gpa.  However, it still
operates with the original npfec, which is no longer correct.

In particular, it is possible to get a nested fault where the translation is
not present in L12 (and therefore not present in L02), while it is present in
L01.

When handling an L0_ERROR, adjust npfec as well as gpa.
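
In code terms, the fixup amounts to the following (a sketch restating the
hunk further down; only the walk-result bits are recomputed, while the
access bits still describe the guest's original access):

    /*
     * The access bits (read/write/fetch) are unchanged, but present,
     * gla_valid and kind described the combined L02 walk, so recompute
     * them from the result of the L0 walk alone.
     */
    if ( rc == NESTEDHVM_PAGEFAULT_L0_ERROR )
    {
        npfec->present   = !mfn_eq(mfn, INVALID_MFN); /* L01 walk result */
        npfec->gla_valid = 0;            /* the gla was relative to L2 */
        npfec->kind      = npfec_kind_unknown;
    }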

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>

This is one part of fixing nested virt in light of XSA-304.  Combined with:

diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index a509a40c93..bd58a86b46 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -167,7 +167,8 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
         goto out;

     rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
-    if ( access_w && p2m_is_readonly(*p2mt) )
+    if ( (access_w && p2m_is_readonly(*p2mt)) ||
+         (access_x && *page_order) )
         goto out;

     if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )

it does resolve the issue.  However, the above check isn't correct in the
general case, and is still under development.
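
For context, my reading of the intent behind that check (an assumption on my
part, inferred from the XSA-304 mitigation rather than stated anywhere above):
with executable superpages disabled in the L0 p2m, an instruction fetch which
hits a superpage mapping can only make progress if the L0 fault handler
splits the superpage, hence forcing such faults down the L0_ERROR path:

    /*
     * Hypothetical restatement of the hunk above, not the final logic:
     * under the XSA-304 mitigation, L0 superpages are never executable,
     * so an insn fetch which hit a superpage (*page_order != 0) must be
     * handed back to the L0 fault handler to split the mapping.
     */
    if ( access_x && *page_order )
        goto out;   /* rc == NESTEDHVM_PAGEFAULT_L0_ERROR */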
---
 xen/arch/x86/hvm/hvm.c              |  6 ++----
 xen/arch/x86/mm/hap/nested_hap.c    | 27 +++++++++++++++++++--------
 xen/include/asm-x86/hvm/nestedhvm.h |  2 +-
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 818e705fd1..87eed13ee1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1729,10 +1729,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          * the same as for shadow paging.
          */
 
-         rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
-                                              npfec.read_access,
-                                              npfec.write_access,
-                                              npfec.insn_fetch);
+        rv = nestedhvm_hap_nested_page_fault(curr, &gpa, &npfec);
+
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
         case NESTEDHVM_PAGEFAULT_RETRY:
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index abe5958a52..9eba35f7e8 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -149,8 +149,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
 static int
 nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
                       p2m_type_t *p2mt, p2m_access_t *p2ma,
-                      unsigned int *page_order,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
+                      unsigned int *page_order, struct npfec *npfec)
 {
     mfn_t mfn;
     int rc;
@@ -167,7 +166,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
         goto out;
 
     rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
-    if ( access_w && p2m_is_readonly(*p2mt) )
+    if ( npfec->write_access && p2m_is_readonly(*p2mt) )
         goto out;
 
     if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )
@@ -181,6 +180,18 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
     *L0_gpa = (mfn_x(mfn) << PAGE_SHIFT) + (L1_gpa & ~PAGE_MASK);
 out:
     __put_gfn(p2m, L1_gpa >> PAGE_SHIFT);
+
+    /*
+     * When reporting L0_ERROR, rewrite npfec to match what would have occurred
+     * if hardware had walked the L0, rather than the combined L02.
+     */
+    if ( rc == NESTEDHVM_PAGEFAULT_L0_ERROR )
+    {
+        npfec->present = !mfn_eq(mfn, INVALID_MFN);
+        npfec->gla_valid = 0;
+        npfec->kind = npfec_kind_unknown;
+    }
+
     return rc;
 }
 
@@ -191,7 +202,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
  */
 int
 nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x)
+                                struct npfec *npfec)
 {
     int rv;
     paddr_t L1_gpa, L0_gpa;
@@ -206,7 +217,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
 
     /* walk the L1 P2M table */
     rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
-        access_r, access_w, access_x);
+                               npfec->read_access, npfec->write_access,
+                               npfec->insn_fetch);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -222,9 +234,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
-        &p2mt_10, &p2ma_10, &page_order_10,
-        access_r, access_w, access_x);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &p2mt_10, &p2ma_10,
+                               &page_order_10, npfec);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index 256fed733a..7b53f23e97 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -58,7 +58,7 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define NESTEDHVM_PAGEFAULT_RETRY      5
 #define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6
 int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x);
+                                    struct npfec *npfec);
 
 int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
                           unsigned int *page_order, uint8_t *p2m_acc,
-- 
2.11.0

