Handle the VMX capability reporting MSRs for the L1 guest.
Some features are masked so that L1 sees a rather simple
configuration.
Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
---
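A note for reviewers, not part of the patch: each capability case in
vmx_nest_msr_read_intercept() below reads the host MSR as edx:eax,
clears selected bits in edx (the allowed 1-settings of the controls),
and recombines the two halves, so a cleared bit is a feature L1 can
never enable. The following is a minimal stand-alone sketch of that
masking scheme; the mask_cap() helper and the sample MSR value are
made up purely for illustration:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define PIN_BASED_PREEMPT_TIMER 0x00000040  /* bit 6: VMX-preemption timer */

/*
 * Combine the two halves of a VMX capability MSR while hiding the
 * features named in 'removed' from the allowed 1-settings (upper half).
 */
static uint64_t mask_cap(uint32_t eax, uint32_t edx, uint32_t removed)
{
    uint64_t data = edx & ~removed;
    return (data << 32) | eax;
}

int main(void)
{
    /* pretend rdmsr(MSR_IA32_VMX_PINBASED_CTLS) returned these halves */
    uint32_t eax = 0x00000016, edx = 0x0000007f;

    printf("%#018" PRIx64 "\n",
           mask_cap(eax, edx, PIN_BASED_PREEMPT_TIMER));
    return 0;
}

Built with any C compiler this prints 0x0000003f00000016: the
preemption timer bit is no longer advertised, while the required
1-settings in the low half stay untouched.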
diff -r 694dcf6c3f06 xen/arch/x86/hvm/vmx/nest.c
--- a/xen/arch/x86/hvm/vmx/nest.c Wed Sep 08 19:47:14 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/nest.c Wed Sep 08 19:47:39 2010 +0800
@@ -1352,3 +1352,91 @@
     return bypass_l0;
 }
+
+/*
+ * Capability reporting
+ */
+int vmx_nest_msr_read_intercept(unsigned int msr, u64 *msr_content)
+{
+    u32 eax, edx;
+    u64 data = 0;
+    int r = 1;
+    u32 mask = 0;
+
+    if ( !is_nested_avail(current->domain) )
+        return 0;
+
+    switch (msr) {
+    case MSR_IA32_VMX_BASIC:
+        rdmsr(msr, eax, edx);
+        data = edx;
+        data = (data & ~0x1fff) | 0x1000;     /* request 4KB for guest VMCS */
+        data &= ~(1 << 23);                   /* disable TRUE_xxx_CTLS */
+        data = (data << 32) | VVMCS_REVISION; /* VVMCS revision */
+        break;
+    case MSR_IA32_VMX_PINBASED_CTLS:
+#define REMOVED_PIN_CONTROL_CAP (PIN_BASED_PREEMPT_TIMER)
+        rdmsr(msr, eax, edx);
+        data = edx & ~REMOVED_PIN_CONTROL_CAP;
+        data = (data << 32) | eax;
+        break;
+    case MSR_IA32_VMX_PROCBASED_CTLS:
+        rdmsr(msr, eax, edx);
+#define REMOVED_EXEC_CONTROL_CAP (CPU_BASED_TPR_SHADOW \
+                                  | CPU_BASED_ACTIVATE_MSR_BITMAP \
+                                  | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+        data = edx & ~REMOVED_EXEC_CONTROL_CAP;
+        data = (data << 32) | eax;
+        break;
+    case MSR_IA32_VMX_EXIT_CTLS:
+        rdmsr(msr, eax, edx);
+#define REMOVED_EXIT_CONTROL_CAP (VM_EXIT_SAVE_GUEST_PAT \
+                                  | VM_EXIT_LOAD_HOST_PAT \
+                                  | VM_EXIT_SAVE_GUEST_EFER \
+                                  | VM_EXIT_LOAD_HOST_EFER \
+                                  | VM_EXIT_SAVE_PREEMPT_TIMER)
+        data = edx & ~REMOVED_EXIT_CONTROL_CAP;
+        data = (data << 32) | eax;
+        break;
+    case MSR_IA32_VMX_ENTRY_CTLS:
+        rdmsr(msr, eax, edx);
+#define REMOVED_ENTRY_CONTROL_CAP (VM_ENTRY_LOAD_GUEST_PAT \
+                                   | VM_ENTRY_LOAD_GUEST_EFER)
+        data = edx & ~REMOVED_ENTRY_CONTROL_CAP;
+        data = (data << 32) | eax;
+        break;
+    case MSR_IA32_VMX_PROCBASED_CTLS2:
+        mask = 0; /* no secondary controls are exposed to L1 for now */
+
+        rdmsr(msr, eax, edx);
+        data = edx & mask;
+        data = (data << 32) | eax;
+        break;
+
+    /* pass through MSRs */
+    case IA32_FEATURE_CONTROL_MSR:
+    case MSR_IA32_VMX_MISC:
+    case MSR_IA32_VMX_CR0_FIXED0:
+    case MSR_IA32_VMX_CR0_FIXED1:
+    case MSR_IA32_VMX_CR4_FIXED0:
+    case MSR_IA32_VMX_CR4_FIXED1:
+    case MSR_IA32_VMX_VMCS_ENUM:
+        rdmsr(msr, eax, edx);
+        data = edx;
+        data = (data << 32) | eax;
+        break;
+
+    default:
+        r = 0;
+        break;
+    }
+
+    *msr_content = data;
+    return r;
+}
+
+int vmx_nest_msr_write_intercept(unsigned int msr, u64 msr_content)
+{
+    /* silently ignore for now */
+    return 1;
+}
diff -r 694dcf6c3f06 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 08 19:47:14 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 08 19:47:39 2010 +0800
@@ -1877,8 +1877,11 @@
         *msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
 #endif
         break;
-    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
-        goto gp_fault;
+    case IA32_FEATURE_CONTROL_MSR:
+    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+        if ( !vmx_nest_msr_read_intercept(msr, msr_content) )
+            goto gp_fault;
+        break;
     case MSR_IA32_MISC_ENABLE:
         rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content);
         /* Debug Trace Store is not supported. */
@@ -2043,8 +2046,11 @@
         break;
     }
-    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
-        goto gp_fault;
+    case IA32_FEATURE_CONTROL_MSR:
+    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+        if ( !vmx_nest_msr_write_intercept(msr, msr_content) )
+            goto gp_fault;
+        break;
     default:
         if ( vpmu_do_wrmsr(msr, msr_content) )
             return X86EMUL_OKAY;
diff -r 694dcf6c3f06 xen/include/asm-x86/hvm/vmx/nest.h
--- a/xen/include/asm-x86/hvm/vmx/nest.h Wed Sep 08 19:47:14 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/nest.h Wed Sep 08 19:47:39 2010 +0800
@@ -76,4 +76,9 @@
 int vmx_nest_l2_vmexit_handler(struct cpu_user_regs *regs,
                                unsigned int exit_reason);
 
+int vmx_nest_msr_read_intercept(unsigned int msr,
+                                u64 *msr_content);
+int vmx_nest_msr_write_intercept(unsigned int msr,
+                                 u64 msr_content);
+
 #endif /* __ASM_X86_HVM_NEST_H__ */
diff -r 694dcf6c3f06 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Sep 08 19:47:14 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Sep 08 19:47:39 2010 +0800
@@ -161,18 +161,23 @@
 #define PIN_BASED_EXT_INTR_MASK         0x00000001
 #define PIN_BASED_NMI_EXITING           0x00000008
 #define PIN_BASED_VIRTUAL_NMIS          0x00000020
+#define PIN_BASED_PREEMPT_TIMER         0x00000040
 extern u32 vmx_pin_based_exec_control;
 
 #define VM_EXIT_IA32E_MODE              0x00000200
 #define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
 #define VM_EXIT_SAVE_GUEST_PAT          0x00040000
 #define VM_EXIT_LOAD_HOST_PAT           0x00080000
+#define VM_EXIT_SAVE_GUEST_EFER         0x00100000
+#define VM_EXIT_LOAD_HOST_EFER          0x00200000
+#define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
 extern u32 vmx_vmexit_control;
 
 #define VM_ENTRY_IA32E_MODE             0x00000200
 #define VM_ENTRY_SMM                    0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR     0x00000800
 #define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
+#define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
 extern u32 vmx_vmentry_control;
 
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
diff -r 694dcf6c3f06 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h Wed Sep 08 19:47:14 2010 +0800
+++ b/xen/include/asm-x86/msr-index.h Wed Sep 08 19:47:39 2010 +0800
@@ -172,6 +172,7 @@
 #define MSR_IA32_VMX_CR0_FIXED1         0x487
 #define MSR_IA32_VMX_CR4_FIXED0         0x488
 #define MSR_IA32_VMX_CR4_FIXED1         0x489
+#define MSR_IA32_VMX_VMCS_ENUM          0x48a
 #define MSR_IA32_VMX_PROCBASED_CTLS2    0x48b
 #define MSR_IA32_VMX_EPT_VPID_CAP       0x48c
 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x48d