[Xen-devel] RFC: Nested VMX patch series 07: vmclear

To: "Dong, Eddie" <eddie.dong@xxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>, Keir Fraser <keir@xxxxxxx>
Subject: [Xen-devel] RFC: Nested VMX patch series 07: vmclear
From: "Dong, Eddie" <eddie.dong@xxxxxxxxx>
Date: Wed, 1 Jun 2011 11:58:52 +0800
Accept-language: en-US
Acceptlanguage: en-US
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Dong, Eddie" <eddie.dong@xxxxxxxxx>, "He, Qing" <qing.he@xxxxxxxxx>
Delivery-date: Tue, 31 May 2011 21:06:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <osstest-7468-mainreport@xxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AcwgAhjDwUdZ/2BOTBqtK+IA8ti/WgAC9edgAAAdViAAAA3MIAAAF9kwAAAdbWAAABYxcAAAC+egAAALN5A=
Thread-topic: [Xen-devel] RFC: Nested VMX patch series 07: vmclear

This patch adds VMCLEAR emulation for nested VMX: EXIT_REASON_VMCLEAR is now dispatched to nvmx_handle_vmclear(), and a new helper, nvmx_purge_vvmcs(), VMCLEARs the shadow (n2) VMCS and unmaps the virtual VMCS page and the I/O bitmaps. The purge is also performed on VMXOFF and when VMPTRLD loads a different VMCS address. (An illustrative guest-side snippet is appended after the patch.)

Thx, Eddie

        Signed-off-by: Qing He <qing.he@xxxxxxxxx>
        Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

diff -r 4c4c2256d301 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 09:10:36 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 09:13:45 2011 +0800
@@ -2445,6 +2445,11 @@
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMCLEAR:
+        if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_VMPTRLD:
         if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
             update_guest_eip();
@@ -2457,7 +2462,6 @@
 
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
-    case EXIT_REASON_VMCLEAR:
     case EXIT_REASON_VMLAUNCH:
     case EXIT_REASON_VMREAD:
     case EXIT_REASON_VMRESUME:
diff -r 4c4c2256d301 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 09:10:36 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 09:13:45 2011 +0800
@@ -410,6 +410,14 @@
     regs->eflags = eflags;
 }
 
+static void __clear_current_vvmcs(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( nvcpu->nv_n2vmcx )
+        __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
+}
+
 static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -446,6 +454,26 @@
    __map_io_bitmap (v, IO_BITMAP_B);
 }
 
+static void nvmx_purge_vvmcs(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    __clear_current_vvmcs(v);
+    if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+        unmap_domain_page_global(nvcpu->nv_vvmcx);
+    nvcpu->nv_vvmcx = NULL;
+    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    if ( nvmx->iobitmap[0] ) {
+        unmap_domain_page_global(nvmx->iobitmap[0]);
+        nvmx->iobitmap[0] = NULL;
+    }
+    if ( nvmx->iobitmap[1] ) {
+        unmap_domain_page_global(nvmx->iobitmap[1]);
+        nvmx->iobitmap[1] = NULL;
+    }
+}
+
 /*
  * VMX instructions handling
  */
@@ -494,6 +522,7 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    nvmx_purge_vvmcs(v);
     nvmx->vmxon_region_pa = 0;
 
     vmreturn(regs, VMSUCCEED);
@@ -521,6 +550,9 @@
         goto out;
     }
 
+    if ( nvcpu->nv_vvmcxaddr != gpa )
+        nvmx_purge_vvmcs(v);
+
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
@@ -558,3 +590,37 @@
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_inst_decoded decode;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long gpa = 0;
+    int rc;
+
+    rc = decode_vmx_inst(regs, &decode, &gpa, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    if ( gpa & 0xfff )
+    {
+        vmreturn(regs, VMFAIL_INVALID);
+        goto out;
+    }
+
+    if ( gpa != nvcpu->nv_vvmcxaddr && nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "vmclear gpa %lx does not match the current vmcs %lx\n",
+                 gpa, nvcpu->nv_vvmcxaddr);
+        vmreturn(regs, VMSUCCEED);
+        goto out;
+    }
+    nvmx_purge_vvmcs(v);
+
+    vmreturn(regs, VMSUCCEED);
+
+out:
+    return X86EMUL_OKAY;
+}
+
diff -r 4c4c2256d301 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 09:10:36 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 09:13:45 2011 +0800
@@ -110,6 +110,7 @@
 void nvmx_destroy_vmcs(struct vcpu *v);
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
 int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
+int nvmx_handle_vmclear(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
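
For reference (not part of the patch), below is a minimal sketch of the L1-side sequence that ends up in nvmx_handle_vmclear(). The helper names (l1_vmclear, l1_teardown_vcpu) and the flag-testing idiom are illustrative assumptions, not code from this series; it only shows that VMCLEAR takes the 64-bit physical address of a 4KB-aligned VMCS region in memory, which is why the handler rejects a gpa with the low 12 bits set.

#include <stdint.h>

/* Illustrative L1 (guest hypervisor) code, not part of this patch. */

static inline int l1_vmclear(uint64_t vmcs_pa)
{
    uint8_t fail;

    /*
     * VMCLEAR m64: the memory operand holds the physical address of the
     * (4KB-aligned) VMCS region.  VMfail is reported through CF/ZF,
     * hence the setna below.
     */
    asm volatile ( "vmclear %[pa]; setna %[fail]"
                   : [fail] "=qm" (fail)
                   : [pa] "m" (vmcs_pa)
                   : "cc", "memory" );
    return fail ? -1 : 0;
}

/*
 * Running on top of this series, the VMCLEAR below traps to L0 with
 * EXIT_REASON_VMCLEAR and is emulated by nvmx_handle_vmclear(); if
 * vmcs_pa is the currently loaded virtual VMCS, nvmx_purge_vvmcs()
 * flushes and unmaps it.
 */
void l1_teardown_vcpu(uint64_t vmcs_pa)
{
    l1_vmclear(vmcs_pa);
}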


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel