WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 13 of 20] Emulation of VMRESUME/VMLAUNCH

To: Tim.Deegan@xxxxxxxxxx
Subject: [Xen-devel] [PATCH 13 of 20] Emulation of VMRESUME/VMLAUNCH
From: Eddie Dong <eddie.dong@xxxxxxxxx>
Date: Thu, 09 Jun 2011 16:25:18 +0800
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Thu, 09 Jun 2011 01:46:03 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Eddie Dong <eddie.dong@xxxxxxxxx>
# Date 1307607849 -28800
# Node ID a6f9b015835e503474c4a258e5c03178358c68e8
# Parent  549f110b69dc235e8159079f38d8225ccd1db649
Emulation of VMRESUME/VMLAUNCH

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

diff -r 549f110b69dc -r a6f9b015835e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
@@ -2175,6 +2175,11 @@ asmlinkage void vmx_vmexit_handler(struc
     /* Now enable interrupts so it's safe to take locks. */
     local_irq_enable();
 
+    /* XXX: This looks ugly, but we need a mechanism to ensure
+     * any pending vmresume has really happened
+     */
+    vcpu_nestedhvm(v).nv_vmswitch_in_progress = 0;
+
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
 
@@ -2469,10 +2474,18 @@ asmlinkage void vmx_vmexit_handler(struc
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMLAUNCH:
+        if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
+    case EXIT_REASON_VMRESUME:
+        if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
-    case EXIT_REASON_VMLAUNCH:
-    case EXIT_REASON_VMRESUME:
     case EXIT_REASON_GETSEC:
     case EXIT_REASON_INVEPT:
     case EXIT_REASON_INVVPID:
diff -r 549f110b69dc -r a6f9b015835e xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
@@ -259,6 +259,13 @@ static void reg_write(struct cpu_user_re
     }
 }
 
+static inline u32 __n2_exec_control(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    return __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL);
+}
+
static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
@@ -484,6 +491,62 @@ int nvmx_handle_vmxoff(struct cpu_user_r
     return X86EMUL_OKAY;
 }
 
+int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    int rc;
+
+    rc = vmx_inst_check_privilege(regs, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    /* check VMCS is valid and IO BITMAP is set */
+    if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+            ((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
+            !(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
+        nvcpu->nv_vmentry_pending = 1;
+    else
+        vmreturn(regs, VMFAIL_INVALID);
+
+    return X86EMUL_OKAY;
+}
+
+int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+{
+    int launched;
+    struct vcpu *v = current;
+
+    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                           NVMX_LAUNCH_STATE);
+    if ( !launched ) {
+       vmreturn (regs, VMFAIL_VALID);
+       return X86EMUL_EXCEPTION;
+    }
+    return nvmx_vmresume(v,regs);
+}
+
+int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+{
+    int launched;
+    int rc;
+    struct vcpu *v = current;
+
+    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                           NVMX_LAUNCH_STATE);
+    if ( launched ) {
+       vmreturn (regs, VMFAIL_VALID);
+       rc = X86EMUL_EXCEPTION;
+    }
+    else {
+        rc = nvmx_vmresume(v,regs);
+        if ( rc == X86EMUL_OKAY )
+            __set_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                        NVMX_LAUNCH_STATE, 1);
+    }
+    return rc;
+}
+
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
diff -r 549f110b69dc -r a6f9b015835e xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
@@ -158,6 +158,8 @@ int nvmx_handle_vmptrst(struct cpu_user_
 int nvmx_handle_vmclear(struct cpu_user_regs *regs);
 int nvmx_handle_vmread(struct cpu_user_regs *regs);
 int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
+int nvmx_handle_vmresume(struct cpu_user_regs *regs);
+int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

[Prev in Thread] Current Thread [Next in Thread]