[Xen-devel] [PATCH] nvmx: fix resource relinquish for nested VMX



Before this patch, resources are relinquished in the following order:
relinquish_domain_resources() -> vcpu_destroy() -> nvmx_vcpu_destroy().
However, some L1 resources such as nv_vvmcx and the io_bitmaps are only
freed in nvmx_vcpu_destroy(), so relinquish_domain_resources() cannot
drop the domain's refcnt to 0, and consequently the later vcpu release
functions are never called.
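
For illustration only (a hypothetical, self-contained model, not Xen
code), the circular dependency described above looks like this: the
references can only be dropped by a destructor that itself only runs
once the references are already gone.

    /* Toy model of the teardown deadlock -- hypothetical, not Xen code. */
    #include <stdio.h>

    struct toy_domain {
        int refcnt;            /* pages still referenced by nested-VMX state */
        int vcpu_destroyed;    /* did the per-vcpu destructor run? */
    };

    /* Stands in for nvmx_vcpu_destroy(): the only place the refs are dropped. */
    static void toy_vcpu_destroy(struct toy_domain *d)
    {
        d->refcnt = 0;
        d->vcpu_destroyed = 1;
    }

    /* Stands in for relinquish_domain_resources(): it only proceeds to the
     * vcpu destruction path once the refcnt has already reached zero. */
    static void toy_relinquish_resources(struct toy_domain *d)
    {
        if ( d->refcnt == 0 )
            toy_vcpu_destroy(d);
    }

    int main(void)
    {
        struct toy_domain d = { .refcnt = 1, .vcpu_destroyed = 0 };

        toy_relinquish_resources(&d);
        /* Prints "refcnt=1 destroyed=0": teardown never completes. */
        printf("refcnt=%d destroyed=%d\n", d.refcnt, d.vcpu_destroyed);
        return 0;
    }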

To fix this issue, release nv_vvmcx and the io_bitmaps from
relinquish_domain_resources() instead, as sketched below.
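
Condensed from the hunks below (all identifiers are as in the patch),
the fix adds an optional per-implementation hook invoked at the top of
hvm_domain_relinquish_resources(), plus a VMX backend for it that purges
the vvmcs of every vcpu:

    /* hvm.c: let the nested implementation drop its L1 references first. */
    void hvm_domain_relinquish_resources(struct domain *d)
    {
        if ( hvm_funcs.nhvm_domain_relinquish_resources )
            hvm_funcs.nhvm_domain_relinquish_resources(d);

        /* ... existing ioreq page teardown continues as before ... */
    }

    /* vvmx.c: VMX backend of the new hook. */
    void nvmx_domain_relinquish_resources(struct domain *d)
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )
            nvmx_purge_vvmcs(v);
    }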

Besides, after destroying the nested vcpu, switch vmx->vmcs back to the
L1 VMCS so that the vcpu_destroy() logic frees the L1 VMCS page.
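
In code terms (condensed from the nvmx_vcpu_destroy() hunk below), that
switch is:

    /* Point the vcpu back at the L1 VMCS so that the generic
     * vcpu_destroy() path frees that page rather than the L2 one. */
    if ( nvcpu->nv_n1vmcx )
        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;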

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c             |    3 +++
 xen/arch/x86/hvm/vmx/vmx.c         |    3 ++-
 xen/arch/x86/hvm/vmx/vvmx.c        |   11 +++++++++++
 xen/include/asm-x86/hvm/hvm.h      |    1 +
 xen/include/asm-x86/hvm/vmx/vvmx.h |    1 +
 5 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7f8a025..0576a24 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -561,6 +561,9 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( hvm_funcs.nhvm_domain_relinquish_resources )
+        hvm_funcs.nhvm_domain_relinquish_resources(d);
+
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ffb86c1..3ea7012 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1547,7 +1547,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
     .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
-    .nhvm_intr_blocked    = nvmx_intr_blocked
+    .nhvm_intr_blocked    = nvmx_intr_blocked,
+    .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources
 };
 
 struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 2e0b79d..1f610eb 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -57,6 +57,9 @@ void nvmx_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
 
+    if ( nvcpu->nv_n1vmcx )
+        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;
+
     nvmx_purge_vvmcs(v);
     if ( nvcpu->nv_n2vmcx ) {
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
@@ -65,6 +68,14 @@ void nvmx_vcpu_destroy(struct vcpu *v)
     }
 }
  
+void nvmx_domain_relinquish_resources(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        nvmx_purge_vvmcs(v);
+}
+
 int nvmx_vcpu_reset(struct vcpu *v)
 {
     return 0;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7243c4e..3592a8c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -179,6 +179,7 @@ struct hvm_function_table {
     bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
 
     enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
+    void (*nhvm_domain_relinquish_resources)(struct domain *d);
 };
 
 extern struct hvm_function_table hvm_funcs;
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 995f9f4..bbc34e7 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -96,6 +96,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v);
 enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
 int nvmx_intercepts_exception(struct vcpu *v, 
                               unsigned int trap, int error_code);
+void nvmx_domain_relinquish_resources(struct domain *d);
 
 int nvmx_handle_vmxon(struct cpu_user_regs *regs);
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
-- 
1.7.1

