
[Xen-devel] [PATCH v5 02/10] nestedhap: Change nested p2m's walker to vendor-specific



From: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>

EPT and NPT adopt different formats for their per-level entries, so
change the walker functions to be vendor-specific.

Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
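
A note for reviewers (below the "---", so not part of the commit
message): the incompatibility is at the level of the individual entry
bits. A rough illustration of a 4K leaf entry in each format, abridged
from the Intel SDM and AMD APM -- the names below are illustrative
only, not Xen identifiers:

/* EPT: permissions are separate R/W/X bits; an entry is present
 * iff any of bits 2:0 is set -- there is no dedicated P bit. */
#define EPT_READ        (1UL << 0)
#define EPT_WRITE       (1UL << 1)
#define EPT_EXEC        (1UL << 2)

/* NPT reuses the ordinary x86-64 PTE layout. */
#define NPT_PRESENT     (1UL << 0)
#define NPT_RW          (1UL << 1)
#define NPT_USER        (1UL << 2)
#define NPT_NX          (1UL << 63)

/* So even "may the guest write through this entry?" needs
 * vendor-specific decoding: */
static inline int entry_writable(uint64_t e, int is_ept)
{
    return is_ept ? !!(e & EPT_WRITE)
                  : !!((e & NPT_PRESENT) && (e & NPT_RW));
}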
 xen/arch/x86/hvm/svm/nestedsvm.c        |   31 +++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c              |    1 +
 xen/arch/x86/hvm/vmx/vmx.c              |    3 +-
 xen/arch/x86/hvm/vmx/vvmx.c             |   13 +++++++++
 xen/arch/x86/mm/hap/nested_hap.c        |   46 +++++++++++--------------------
 xen/include/asm-x86/hvm/hvm.h           |    5 +++
 xen/include/asm-x86/hvm/svm/nestedsvm.h |    3 ++
 xen/include/asm-x86/hvm/vmx/vvmx.h      |    5 +++
 8 files changed, 76 insertions(+), 31 deletions(-)
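
For review convenience, the resulting call path condensed into one
place (this merely restates the hunks below): the generic nested-HAP
code now dispatches through hvm_funcs, with SVM supplying
nsvm_hap_walk_L1_p2m (the walker moved out of nested_hap.c) and VMX
supplying nvmx_hap_walk_L1_p2m, which is still a stub in this patch:

/* xen/arch/x86/mm/hap/nested_hap.c -- generic side after this patch */
static int
nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
                      unsigned int *page_order,
                      bool_t access_r, bool_t access_w, bool_t access_x)
{
    ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
        access_r, access_w, access_x);
}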

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index ed0faa6..c1c6fa7 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1171,6 +1171,37 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
     return vcpu_nestedsvm(v).ns_hap_enabled;
 }
 
+/* This function uses L2_gpa to walk the P2M page table in L1. If the
+ * walk is successful, the translated value is returned in L1_gpa. The
+ * return value tells the caller what to do next.
+ */
+int
+nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
+{
+    uint32_t pfec;
+    unsigned long nested_cr3, gfn;
+
+    nested_cr3 = nhvm_vcpu_p2m_base(v);
+
+    pfec = PFEC_user_mode | PFEC_page_present;
+    if ( access_w )
+        pfec |= PFEC_write_access;
+    if ( access_x )
+        pfec |= PFEC_insn_fetch;
+
+    /* Walk the guest-supplied NPT table, just as if it were a pagetable */
+    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
+
+    if ( gfn == INVALID_GFN )
+        return NESTEDHVM_PAGEFAULT_INJECT;
+
+    *L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK);
+    return NESTEDHVM_PAGEFAULT_DONE;
+}
+
+
 enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 2c8504a..acd2d49 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2008,6 +2008,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
     .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
     .nhvm_intr_blocked = nsvm_intr_blocked,
+    .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
 };
 
 void svm_vmexit_handler(struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 98309da..4abfa90 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1511,7 +1511,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_intr_blocked    = nvmx_intr_blocked,
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
-    .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled
+    .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+    .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
 struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 6999c25..53f6a4d 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1479,6 +1479,19 @@ int nvmx_msr_write_intercept(unsigned int msr, u64 msr_content)
     return 1;
 }
 
+/* This function uses L2_gpa to walk the P2M page table in L1. If the
+ * walk is successful, the translated value is returned in L1_gpa. The
+ * return value tells the caller what to do next.
+ */
+int
+nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
+{
+    /* TODO: implement the EPT-format walk of the L1 p2m. */
+    return 0;
+}
+
 void nvmx_idtv_handling(void)
 {
     struct vcpu *v = current;
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index f9a5edc..8787c91 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -136,6 +136,22 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
     }
 }
 
+/* This function uses L2_gpa to walk the P2M page table in L1. If the
+ * walk is successful, the translated value is returned in L1_gpa. The
+ * return value tells the caller what to do next.
+ */
+static int
+nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
+{
+    ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
+
+    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
+        access_r, access_w, access_x);
+}
+
+
 /* This function uses L1_gpa to walk the P2M table in L0 hypervisor. If the
  * walk is successful, the translated value is returned in L0_gpa. The return 
  * value tells the upper level what to do.
@@ -175,36 +191,6 @@ out:
     return rc;
 }
 
-/* This function uses L2_gpa to walk the P2M page table in L1. If the 
- * walk is successful, the translated value is returned in
- * L1_gpa. The result value tells what to do next.
- */
-static int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
-{
-    uint32_t pfec;
-    unsigned long nested_cr3, gfn;
-    
-    nested_cr3 = nhvm_vcpu_p2m_base(v);
-
-    pfec = PFEC_user_mode | PFEC_page_present;
-    if (access_w)
-        pfec |= PFEC_write_access;
-    if (access_x)
-        pfec |= PFEC_insn_fetch;
-
-    /* Walk the guest-supplied NPT table, just as if it were a pagetable */
-    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
-
-    if ( gfn == INVALID_GFN ) 
-        return NESTEDHVM_PAGEFAULT_INJECT;
-
-    *L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK);
-    return NESTEDHVM_PAGEFAULT_DONE;
-}
-
 /*
  * The following function, nestedhap_page_fault(), is for steps (3)--(10).
  *
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index d3535b6..80f07e9 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -183,6 +183,11 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+
+    /* Walk the nested p2m */
+    int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x);
 };
 
 extern struct hvm_function_table hvm_funcs;
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index fa83023..0c90f30 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -133,6 +133,9 @@ int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
 bool_t nestedsvm_gif_isset(struct vcpu *v);
+int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x);
 
 #define NSVM_INTR_NOTHANDLED     3
 #define NSVM_INTR_NOTINTERCEPTED 2
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index d97011d..422f006 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -108,6 +108,11 @@ void nvmx_domain_relinquish_resources(struct domain *d);
 
 int nvmx_handle_vmxon(struct cpu_user_regs *regs);
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
+
+int
+nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x);
 /*
  * Virtual VMCS layout
  *
-- 
1.7.1

