
[Xen-devel] [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr



From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

Fix the assertion in __virt_to_maddr that triggers when starting a
nested SVM guest in debug mode. Investigation has shown that
svm_vmsave/svm_vmload call __pa() with an invalid address.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
Changes from V4:
        - Return #GP instead of #UD when failing to map the nested VMCB
          (per Tim's suggestion)
        - Rename the function to "nsvm_get_nvmcb_page" (per Tim's and
          Christoph's suggestion)
        - Use page_to_maddr instead of page_to_mfn (per Tim's suggestion)

 xen/arch/x86/hvm/svm/svm.c        |   70 ++++++++++++++++++++++++++++---------
 xen/include/asm-x86/hvm/svm/svm.h |   11 +++---
 2 files changed, 60 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4cc4b15..40c3e15 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1779,15 +1779,15 @@ static void
 svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
                     struct vcpu *v, uint64_t vmcbaddr)
 {
-    if (!nestedhvm_enabled(v->domain)) {
+    if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) {
         gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
         hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
-    if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
-        gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
-        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+    if ( !nestedsvm_vmcb_map(v, vmcbaddr) ) {
+        gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n");
+        hvm_inject_hw_exception(TRAP_gp_fault, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
@@ -1795,6 +1795,32 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
     return;
 }
 
+static struct page_info *
+nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
+{
+    p2m_type_t p2mt;
+    struct page_info *page;
+    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+    if (!nestedsvm_vmcb_map(v, vmcbaddr))
+       return NULL;
+
+    /* Need to translate L1-GPA to MPA */
+    page = get_page_from_gfn(v->domain, 
+                            nv->nv_vvmcxaddr >> PAGE_SHIFT, 
+                            &p2mt, P2M_ALLOC | P2M_UNSHARE);
+    if ( !page )
+        return NULL;
+
+    if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) )
+    {
+        put_page(page);
+        return NULL; 
+    }
+
+    return  page;
+}
+
 static void
 svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
                      struct cpu_user_regs *regs,
@@ -1802,24 +1828,30 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
 {
     int ret;
     unsigned int inst_len;
-    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+    struct page_info *page;
 
     if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
         return;
 
-    if (!nestedhvm_enabled(v->domain)) {
+    if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) 
+    {
         gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n");
         ret = TRAP_invalid_op;
         goto inject;
     }
 
-    if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
-        gdprintk(XENLOG_ERR, "VMLOAD: mapping vmcb failed, injecting #UD\n");
-        ret = TRAP_invalid_op;
+    page = nsvm_get_nvmcb_page(v, vmcbaddr);
+    if ( !page )
+    {
+        gdprintk(XENLOG_ERR,
+            "VMLOAD: mapping failed, injecting #GP\n");
+        ret = TRAP_gp_fault;
         goto inject;
     }
 
-    svm_vmload(nv->nv_vvmcx);
+    svm_vmload_pa(page_to_maddr(page));
+    put_page(page);
+
     /* State in L1 VMCB is stale now */
     v->arch.hvm_svm.vmcb_in_sync = 0;
 
@@ -1838,25 +1870,29 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
 {
     int ret;
     unsigned int inst_len;
-    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+    struct page_info *page;
 
     if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
         return;
 
-    if (!nestedhvm_enabled(v->domain)) {
+    if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) 
+    {
         gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n");
         ret = TRAP_invalid_op;
         goto inject;
     }
 
-    if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
-        gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #UD\n");
-        ret = TRAP_invalid_op;
+    page = nsvm_get_nvmcb_page(v, vmcbaddr);
+    if ( !page )
+    {
+        gdprintk(XENLOG_ERR,
+            "VMSAVE: mapping vmcb failed, injecting #GP\n");
+        ret = TRAP_gp_fault;
         goto inject;
     }
 
-    svm_vmsave(nv->nv_vvmcx);
-
+    svm_vmsave_pa(page_to_maddr(page));
+    put_page(page);
     __update_guest_eip(regs, inst_len);
     return;
 
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 64e7e25..1ffe6d6 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -41,18 +41,21 @@
 #define SVM_REG_R14 (14)
 #define SVM_REG_R15 (15)
 
-static inline void svm_vmload(void *vmcb)
+#define svm_vmload(x)     svm_vmload_pa(__pa(x))
+#define svm_vmsave(x)     svm_vmsave_pa(__pa(x))
+
+static inline void svm_vmload_pa(paddr_t vmcb)
 {
     asm volatile (
         ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(vmcb)) : "memory" );
+        : : "a" (vmcb) : "memory" );
 }
 
-static inline void svm_vmsave(void *vmcb)
+static inline void svm_vmsave_pa(paddr_t vmcb)
 {
     asm volatile (
         ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(vmcb)) : "memory" );
+        : : "a" (vmcb) : "memory" );
 }
 
 static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
-- 
1.7.10.4
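
Note on the header change (a sketch for illustration, not part of the
patch): existing callers that pass a Xen-virtual VMCB pointer keep
working through the svm_vmload()/svm_vmsave() compatibility macros,
which simply wrap the new *_pa variants with __pa(); only the nested
path switches to passing a machine address directly. Assuming host_vmcb
is the per-vCPU host VMCB living in Xen's direct map:

    struct vmcb_struct *host_vmcb = v->arch.hvm_svm.vmcb;  /* assumption: direct-map allocation */

    /* Old-style caller: the macro expands to svm_vmsave_pa(__pa(host_vmcb)). */
    svm_vmsave(host_vmcb);

    /* Nested path: machine address taken from the guest frame, no __pa(). */
    svm_vmload_pa(page_to_maddr(page));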




