[Xen-devel] [PATCH v1 5/6] vvmx: add per domain vmx msr policy



Having a per-domain policy makes it possible to sensibly query which VMX
features a domain has, which unblocks some other nested virt work items.

For now, make the policy for each domain equal to hvm_max_vmx_msr_policy.
In the future it should be possible to configure the policy for each
domain independently.
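
As a purely illustrative sketch (not part of this patch), a follow-up
could use the per-domain policy along the lines below. The helper name
domain_vmx_msr_available is hypothetical, and the assumption that the
"available" field is a bitmap indexed by (msr - MSR_IA32_VMX_BASIC)
follows from the 0x20000 (bit 17, i.e. MSR_IA32_VMX_VMFUNC) mask used in
calculate_hvm_max_policy() rather than from anything stated in this mail:

    /*
     * Hypothetical example, not introduced by this patch: check whether
     * the domain's VMX MSR policy exposes a given VMX capability MSR.
     * Assumes "available" is a bitmap indexed relative to
     * MSR_IA32_VMX_BASIC.
     */
    static bool domain_vmx_msr_available(const struct domain *d,
                                         unsigned int msr)
    {
        const struct vmx_msr_policy *p = d->arch.vmx_msr;

        if ( !p || msr < MSR_IA32_VMX_BASIC || msr > MSR_IA32_VMX_VMFUNC )
            return false;

        return p->available & (1u << (msr - MSR_IA32_VMX_BASIC));
    }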

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/arch/x86/domain.c              |  6 ++++++
 xen/arch/x86/hvm/vmx/vvmx.c        | 14 +++++++++++++-
 xen/include/asm-x86/domain.h       |  2 ++
 xen/include/asm-x86/hvm/vmx/vvmx.h |  3 +++
 4 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 49388f48d7..2a3518328e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -419,6 +419,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     {
         d->arch.emulation_flags = 0;
         d->arch.cpuid = ZERO_BLOCK_PTR; /* Catch stray misuses. */
+        d->arch.vmx_msr = ZERO_BLOCK_PTR;
     }
     else
     {
@@ -464,6 +465,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         if ( (rc = init_domain_cpuid_policy(d)) )
             goto fail;
 
+        if ( (rc = init_domain_vmx_msr_policy(d)) )
+            goto fail;
+
         d->arch.ioport_caps = 
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
@@ -535,6 +539,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     cleanup_domain_irq_mapping(d);
     free_xenheap_page(d->shared_info);
     xfree(d->arch.cpuid);
+    xfree(d->arch.vmx_msr);
     if ( paging_initialised )
         paging_final_teardown(d);
     free_perdomain_mappings(d);
@@ -549,6 +554,7 @@ void arch_domain_destroy(struct domain *d)
 
     xfree(d->arch.e820);
     xfree(d->arch.cpuid);
+    xfree(d->arch.vmx_msr);
 
     free_domain_pirqs(d);
     if ( !is_idle_domain(d) )
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 657371ec69..ae24dc4680 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2078,6 +2078,18 @@ void __init calculate_hvm_max_policy(void)
     p->available &= ~0x20000;
 }
 
+int init_domain_vmx_msr_policy(struct domain *d)
+{
+    d->arch.vmx_msr = xmalloc(struct vmx_msr_policy);
+
+    if ( !d->arch.vmx_msr )
+        return -ENOMEM;
+
+    *d->arch.vmx_msr = hvm_max_vmx_msr_policy;
+
+    return 0;
+}
+
 /*
  * Capability reporting
  */
@@ -2085,7 +2097,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    struct vmx_msr_policy *p = &hvm_max_vmx_msr_policy;
+    struct vmx_msr_policy *p = d->arch.vmx_msr;
     u64 data;
     int r = 1;
 
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 924caac834..3cb753e46b 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -359,6 +359,8 @@ struct arch_domain
     /* CPUID Policy. */
     struct cpuid_policy *cpuid;
 
+    struct vmx_msr_policy *vmx_msr;
+
     struct PITState vpit;
 
     /* TSC management (emulation, pv, scaling, stats) */
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index ca2fb2535c..627112bea8 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -246,5 +246,8 @@ int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                         uint64_t *exit_qual, uint32_t *exit_reason);
 int nvmx_cpu_up_prepare(unsigned int cpu);
 void nvmx_cpu_dead(unsigned int cpu);
+
+int init_domain_vmx_msr_policy(struct domain *d);
+
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 
-- 
2.11.0

