[Xen-devel] [PATCH v3 4/6] x86/msr: add VMX MSRs into HVM_max domain policy

Currently, when nested virt is enabled, the set of L1 VMX features
is fixed and is calculated by nvmx_msr_read_intercept() as the
intersection of the full set of L1 VMX features that Xen supports,
the set of features available on the actual hardware and, for
MSR_IA32_VMX_EPT_VPID_CAP, the set of features that Xen itself uses.
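
For illustration, the per-control-MSR computation boils down to the
following (a minimal, self-contained sketch, not the actual Xen code;
"l1_ctls", "default1" and "xen_supported" are hypothetical names, the
last standing in for the mask of controls Xen can virtualise):

    #include <stdint.h>

    /*
     * A VMX control MSR reports allowed-0 settings in bits 31:0 (a
     * control may be 0 only if its bit here is 0) and allowed-1
     * settings in bits 63:32 (a control may be 1 only if its bit
     * here is 1).
     */
    static uint64_t l1_ctls(uint64_t host_msr, uint32_t default1,
                            uint32_t xen_supported)
    {
        /* Start with every control pinned to its default1 setting. */
        uint32_t allowed_0 = default1;
        uint32_t allowed_1 = default1;

        /* Expose the host's setting of each supported control. */
        allowed_0 = (allowed_0 & ~xen_supported) |
                    ((uint32_t)host_msr & xen_supported);
        allowed_1 = (allowed_1 & ~xen_supported) |
                    ((uint32_t)(host_msr >> 32) & xen_supported);

        return ((uint64_t)allowed_1 << 32) | allowed_0;
    }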

Add calculate_hvm_max_vmx_policy(), which saves the end result of
nvmx_msr_read_intercept() on the current hardware into the HVM_max
domain policy. There is no functional change to what L1 sees in the
VMX MSRs yet: the HVM_max domain policy will only be put to use
later, when VMX MSRs are handled by guest_rdmsr()/guest_wrmsr().
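
Once that switch happens, a VMX MSR read reduces to a policy lookup
along these lines (a hedged sketch only; the structure and function
names below are placeholders, not the eventual Xen interface):

    #include <stdbool.h>
    #include <stdint.h>

    /* One MSR's worth of policy: is it offered, and with what value? */
    struct vmx_msr_entry {
        bool available;
        uint64_t raw;
    };

    /* Returns false when the MSR is absent from the policy, in which
     * case the real handler would inject #GP into the guest. */
    static bool policy_read(const struct vmx_msr_entry *e, uint64_t *val)
    {
        if ( !e->available )
            return false;
        *val = e->raw;
        return true;
    }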

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/arch/x86/msr.c | 140 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)

diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 955aba0849..388f19e50d 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -129,6 +129,144 @@ static void __init calculate_host_policy(void)
     *dp = raw_msr_domain_policy;
 }
 
+#define vmx_host_allowed_cpy(dp, msr, field)                 \
+    do {                                                     \
+        dp->msr.u.allowed_1.field =                          \
+            host_msr_domain_policy.msr.u.allowed_1.field;    \
+        dp->msr.u.allowed_0.field =                          \
+            host_msr_domain_policy.msr.u.allowed_0.field;    \
+    } while (0)
+
+static void __init calculate_hvm_max_vmx_policy(struct msr_domain_policy *dp)
+{
+    if ( !cpu_has_vmx )
+        return;
+
+    dp->vmx_basic.available = true;
+    dp->vmx_basic.u.raw = host_msr_domain_policy.vmx_basic.u.raw;
+
+    dp->vmx_pinbased_ctls.available = true;
+    dp->vmx_pinbased_ctls.u.raw =
+        ((uint64_t) VMX_PINBASED_CTLS_DEFAULT1 << 32) |
+        VMX_PINBASED_CTLS_DEFAULT1;
+    vmx_host_allowed_cpy(dp, vmx_pinbased_ctls, ext_intr_exiting);
+    vmx_host_allowed_cpy(dp, vmx_pinbased_ctls, nmi_exiting);
+    vmx_host_allowed_cpy(dp, vmx_pinbased_ctls, preempt_timer);
+
+    dp->vmx_procbased_ctls.available = true;
+    dp->vmx_procbased_ctls.u.raw =
+        ((uint64_t) VMX_PROCBASED_CTLS_DEFAULT1 << 32) |
+        VMX_PROCBASED_CTLS_DEFAULT1;
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, virtual_intr_pending);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, use_tsc_offseting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, hlt_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, invlpg_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, mwait_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, rdpmc_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, rdtsc_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, cr8_load_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, cr8_store_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, tpr_shadow);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, virtual_nmi_pending);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, mov_dr_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, uncond_io_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, activate_io_bitmap);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, monitor_trap_flag);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, activate_msr_bitmap);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, monitor_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, pause_exiting);
+    vmx_host_allowed_cpy(dp, vmx_procbased_ctls, activate_secondary_controls);
+
+    dp->vmx_exit_ctls.available = true;
+    dp->vmx_exit_ctls.u.raw =
+        ((uint64_t) VMX_EXIT_CTLS_DEFAULT1 << 32) |
+        VMX_EXIT_CTLS_DEFAULT1;
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, ia32e_mode);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, load_perf_global_ctrl);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, ack_intr_on_exit);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, save_guest_pat);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, load_host_pat);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, save_guest_efer);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, load_host_efer);
+    vmx_host_allowed_cpy(dp, vmx_exit_ctls, save_preempt_timer);
+
+    dp->vmx_entry_ctls.available = true;
+    dp->vmx_entry_ctls.u.raw =
+        ((uint64_t) VMX_ENTRY_CTLS_DEFAULT1 << 32) |
+        VMX_ENTRY_CTLS_DEFAULT1;
+    vmx_host_allowed_cpy(dp, vmx_entry_ctls, ia32e_mode);
+    vmx_host_allowed_cpy(dp, vmx_entry_ctls, load_perf_global_ctrl);
+    vmx_host_allowed_cpy(dp, vmx_entry_ctls, load_guest_pat);
+    vmx_host_allowed_cpy(dp, vmx_entry_ctls, load_guest_efer);
+
+    dp->vmx_misc.available = true;
+    dp->vmx_misc.u.raw = host_msr_domain_policy.vmx_misc.u.raw;
+    /* The CR3-target feature is not currently supported. */
+    dp->vmx_misc.u.cr3_target = false;
+
+    dp->vmx_cr0_fixed0.available = true;
+    /* PG, PE bits must be 1 in VMX operation */
+    dp->vmx_cr0_fixed0.u.allowed_0.pe = true;
+    dp->vmx_cr0_fixed0.u.allowed_0.pg = true;
+
+    dp->vmx_cr0_fixed1.available = true;
+    /* Allow 0-settings for all bits. */
+    dp->vmx_cr0_fixed1.u.raw = 0xffffffff;
+
+    dp->vmx_cr4_fixed0.available = true;
+    /* VMXE bit must be 1 in VMX operation */
+    dp->vmx_cr4_fixed0.u.allowed_0.vmxe = true;
+
+    dp->vmx_cr4_fixed1.available = true;
+    /*
+     * Allowed CR4 bits will be updated during domain creation by
+     * hvm_cr4_guest_valid_bits()
+     */
+    dp->vmx_cr4_fixed1.u.raw = host_msr_domain_policy.vmx_cr4_fixed1.u.raw;
+
+    dp->vmx_vmcs_enum.available = true;
+    /* The max index of VVMCS encoding is 0x1f. */
+    dp->vmx_vmcs_enum.u.vmcs_encoding_max_idx = 0x1f;
+
+    if ( dp->vmx_procbased_ctls.u.allowed_1.activate_secondary_controls )
+    {
+        dp->vmx_procbased_ctls2.available = true;
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2,
+                             virtualize_apic_accesses);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, enable_ept);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2,
+                             descriptor_table_exiting);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, enable_vpid);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, unrestricted_guest);
+
+        if ( dp->vmx_procbased_ctls2.u.allowed_1.enable_ept ||
+             dp->vmx_procbased_ctls2.u.allowed_1.enable_vpid )
+        {
+            dp->vmx_ept_vpid_cap.available = true;
+            dp->vmx_ept_vpid_cap.u.raw = nept_get_ept_vpid_cap();
+        }
+    }
+
+    if ( dp->vmx_basic.u.default1_zero )
+    {
+        dp->vmx_true_pinbased_ctls.available = true;
+        dp->vmx_true_pinbased_ctls.u.raw = dp->vmx_pinbased_ctls.u.raw;
+
+        dp->vmx_true_procbased_ctls.available = true;
+        dp->vmx_true_procbased_ctls.u.raw = dp->vmx_procbased_ctls.u.raw;
+        vmx_host_allowed_cpy(dp, vmx_true_procbased_ctls, cr3_load_exiting);
+        vmx_host_allowed_cpy(dp, vmx_true_procbased_ctls, cr3_store_exiting);
+
+        dp->vmx_true_exit_ctls.available = true;
+        dp->vmx_true_exit_ctls.u.raw = dp->vmx_exit_ctls.u.raw;
+
+        dp->vmx_true_entry_ctls.available = true;
+        dp->vmx_true_entry_ctls.u.raw = dp->vmx_entry_ctls.u.raw;
+    }
+
+    dp->vmx_vmfunc.available = false;
+}
+
+#undef vmx_host_allowed_cpy
+
 static void __init calculate_hvm_max_policy(void)
 {
     struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
@@ -146,6 +284,8 @@ static void __init calculate_hvm_max_policy(void)
 
     /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
     vp->misc_features_enables.available = dp->plaform_info.available;
+
+    calculate_hvm_max_vmx_policy(dp);
 }
 
 static void __init calculate_pv_max_policy(void)
-- 
2.11.0