[Xen-devel] RFC: Nested VMX patch series 02: wrap APIs

To: "Dong, Eddie" <eddie.dong@xxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>, Keir Fraser <keir@xxxxxxx>
Subject: [Xen-devel] RFC: Nested VMX patch series 02: wrap APIs
From: "Dong, Eddie" <eddie.dong@xxxxxxxxx>
Date: Wed, 1 Jun 2011 11:51:07 +0800
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Dong, Eddie" <eddie.dong@xxxxxxxxx>, "He, Qing" <qing.he@xxxxxxxxx>

Wrap the common nested HVM APIs for VMX: add nvmx_* implementation stubs (vCPU initialise/destroy/reset, guest/host CR3, ASID, interrupt blocking) in a new vvmx.c and install them as the nhvm_* hooks in hvm_funcs.
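
For reference, a minimal sketch of how the common nested-HVM layer is expected to dispatch through these hooks via the usual hvm_funcs indirection. The wrapper below is illustrative only; the generic nhvm_* entry points live in the common nested-HVM code, not in this patch:

    /* Illustrative sketch, not part of this patch: a common-code wrapper
     * dispatching to the per-vendor hook installed in hvm_funcs
     * (the nvmx_* implementations below on Intel, nsvm_* on AMD). */
    int nhvm_vcpu_initialise(struct vcpu *v)
    {
        if ( hvm_funcs.nhvm_vcpu_initialise )
            return hvm_funcs.nhvm_vcpu_initialise(v);
        return -EOPNOTSUPP;
    }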
Thx, Eddie

        Signed-off-by: Qing He <qing.he@xxxxxxxxx>
        Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

diff -r 70ee714947fb xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Sat May 28 09:49:25 2011 +0800
+++ b/xen/arch/x86/hvm/hvm.c    Mon May 30 17:15:52 2011 +0800
@@ -3502,7 +3502,7 @@
                 /* Remove the check below once we have
                  * shadow-on-shadow.
                  */
-                if ( !paging_mode_hap(d) && a.value )
+                if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
                     rc = -EINVAL;
                 /* Set up NHVM state for any vcpus that are already up */
                 if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
diff -r 70ee714947fb xen/arch/x86/hvm/vmx/Makefile
--- a/xen/arch/x86/hvm/vmx/Makefile     Sat May 28 09:49:25 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/Makefile     Mon May 30 17:15:52 2011 +0800
@@ -4,3 +4,4 @@
 obj-y += vmcs.o
 obj-y += vmx.o
 obj-y += vpmu_core2.o
+obj-y += vvmx.o
diff -r 70ee714947fb xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Sat May 28 09:49:25 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/intr.c       Mon May 30 17:15:52 2011 +0800
@@ -109,6 +109,11 @@
     }
 }
 
+enum hvm_intblk nvmx_intr_blocked(struct vcpu *v)
+{
+    return hvm_intblk_none;
+}
+
 asmlinkage void vmx_intr_assist(void)
 {
     struct hvm_intack intack;
diff -r 70ee714947fb xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sat May 28 09:49:25 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon May 30 17:15:52 2011 +0800
@@ -1407,7 +1407,14 @@
     .invlpg_intercept     = vmx_invlpg_intercept,
     .set_uc_mode          = vmx_set_uc_mode,
     .set_info_guest       = vmx_set_info_guest,
-    .set_rdtsc_exiting    = vmx_set_rdtsc_exiting
+    .set_rdtsc_exiting    = vmx_set_rdtsc_exiting,
+    .nhvm_vcpu_initialise = nvmx_vcpu_initialise,
+    .nhvm_vcpu_destroy    = nvmx_vcpu_destroy,
+    .nhvm_vcpu_reset      = nvmx_vcpu_reset,
+    .nhvm_vcpu_guestcr3   = nvmx_vcpu_guestcr3,
+    .nhvm_vcpu_hostcr3    = nvmx_vcpu_hostcr3,
+    .nhvm_vcpu_asid       = nvmx_vcpu_asid,
+    .nhvm_intr_blocked    = nvmx_intr_blocked
 };
 
 struct hvm_function_table * __init start_vmx(void)
diff -r 70ee714947fb xen/arch/x86/hvm/vmx/vvmx.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Mon May 30 17:15:52 2011 +0800
@@ -0,0 +1,93 @@
+/*
+ * vvmx.c: Support virtual VMX for nested virtualization.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Author: Qing He <qing.he@xxxxxxxxx>
+ *         Eddie Dong <eddie.dong@xxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <xen/config.h>
+#include <asm/types.h>
+#include <asm/p2m.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/vmx/vvmx.h>
+
+int nvmx_vcpu_initialise(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    nvcpu->nv_n2vmcx = alloc_xenheap_page();
+    if ( !nvcpu->nv_n2vmcx )
+    {
+        gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
+        goto out;
+    }
+    nvmx->vmxon_region_pa = 0;
+    nvcpu->nv_vvmcx = NULL;
+    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    nvmx->intr.intr_info = 0;
+    nvmx->intr.error_code = 0;
+    nvmx->iobitmap[0] = NULL;
+    nvmx->iobitmap[1] = NULL;
+    return 0;
+out:
+    return -ENOMEM;
+}
+
+void nvmx_vcpu_destroy(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( nvcpu->nv_n2vmcx ) {
+        __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
+        free_xenheap_page(nvcpu->nv_n2vmcx);
+        nvcpu->nv_n2vmcx = NULL;
+    }
+    if ( nvcpu->nv_vvmcx ) {
+        unmap_domain_page_global(nvcpu->nv_vvmcx);
+        nvcpu->nv_vvmcx = NULL;
+    }
+    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+}
+
+int nvmx_vcpu_reset(struct vcpu *v)
+{
+    return 0;
+}
+
+uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)
+{
+    /* TODO */
+    ASSERT(0);
+    return 0;
+}
+
+uint64_t nvmx_vcpu_hostcr3(struct vcpu *v)
+{
+    /* TODO */
+    ASSERT(0);
+    return 0;
+}
+
+uint32_t nvmx_vcpu_asid(struct vcpu *v)
+{
+    /* TODO */
+    ASSERT(0);
+    return 0;
+}
+
diff -r 70ee714947fb xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Sat May 28 09:49:25 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Mon May 30 17:15:52 2011 +0800
@@ -37,5 +37,14 @@
 
 #define nvcpu_2_nvmx(nv)       ((*(nv)).u.nvmx)
 #define vcpu_2_nvmx(v) (nvcpu_2_nvmx(&vcpu_nestedhvm(v)))
+
+int nvmx_vcpu_initialise(struct vcpu *v);
+void nvmx_vcpu_destroy(struct vcpu *v);
+int nvmx_vcpu_reset(struct vcpu *v);
+uint64_t nvmx_vcpu_guestcr3(struct vcpu *v);
+uint64_t nvmx_vcpu_hostcr3(struct vcpu *v);
+uint32_t nvmx_vcpu_asid(struct vcpu *v);
+enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
+
 #endif /* __ASM_X86_HVM_VVMX_H__ */

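The vvmx.h declarations above give the full per-vendor surface added by this patch. For context, a hypothetical caller sketch (an assumption about later patches in the series, not code in this one) of how vmx_intr_assist() could consult the new blocking hook once nested injection is wired up:

    /* Hypothetical caller, assuming nestedhvm_enabled() from the common
     * nested-HVM code: hold back injection while the nested guest
     * reports interrupts as blocked. */
    if ( nestedhvm_enabled(v->domain) &&
         nvmx_intr_blocked(v) != hvm_intblk_none )
        return;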
