[Xen-devel] [PATCH v2 01/11] nested vmx: emulate MSR bitmaps
For nested VMX MSR bitmap emulation, the L0 hypervisor traps every MSR
access VM exit from the L2 guest by disabling the MSR_BITMAP feature.
When handling such a VM exit, L0 checks whether the L1 hypervisor has
enabled the MSR_BITMAP feature and, if it has, whether the corresponding
bit in L1's MSR bitmap is set to 1. If the bit is set (or if L1 does not
use MSR bitmaps at all), L0 injects the VM exit into the L1 hypervisor;
otherwise L0 handles the VM exit itself.
Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
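For reference, the logic described above reduces to the following
standalone sketch. It is illustrative only: msr_bitmap_bit() and
exit_goes_to_l1() are hypothetical stand-ins for the patch's
vmx_check_msr_bitmap() and its CPU_BASED_ACTIVATE_MSR_BITMAP check; the
quadrant offsets are the Intel-defined 4 KiB MSR bitmap layout.

    #include <stdint.h>

    /*
     * MSR bitmap layout (one 4 KiB page; a set bit means "this MSR
     * access causes a VM exit"):
     *   0x000-0x3ff  read  bitmap for low  MSRs 0x00000000-0x00001fff
     *   0x400-0x7ff  read  bitmap for high MSRs 0xc0000000-0xc0001fff
     *   0x800-0xbff  write bitmap for low  MSRs
     *   0xc00-0xfff  write bitmap for high MSRs
     */
    static int msr_bitmap_bit(const uint8_t *bitmap, uint32_t msr,
                              int is_write)
    {
        uint32_t base;

        if ( msr <= 0x1fff )
            base = is_write ? 0x800 : 0x000;
        else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        {
            msr &= 0x1fff;
            base = is_write ? 0xc00 : 0x400;
        }
        else
            return 1;               /* out-of-range MSRs always exit */

        return (bitmap[base + msr / 8] >> (msr % 8)) & 1;
    }

    /*
     * L0's decision for an MSR access exit taken from L2: the exit is
     * injected into L1 unless L1 enabled the MSR bitmap and left this
     * MSR's bit clear, in which case L0 handles the access itself.
     */
    static int exit_goes_to_l1(const uint8_t *l1_bitmap,
                               int l1_uses_bitmap,
                               uint32_t msr, int is_write)
    {
        return !l1_uses_bitmap || msr_bitmap_bit(l1_bitmap, msr, is_write);
    }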
xen/arch/x86/hvm/vmx/vmcs.c | 28 +++++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vvmx.c | 39 ++++++++++++++++++++++++++++++++++-
xen/include/asm-x86/hvm/vmx/vmcs.h | 1 +
xen/include/asm-x86/hvm/vmx/vvmx.h | 1 +
4 files changed, 67 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 0fbdd75..205e705 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -674,6 +674,34 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
 }
 
 /*
+ * access_type: read == 0, write == 1
+ */
+int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type)
+{
+    int ret = 1;
+    if ( !msr_bitmap )
+        return 1;
+
+    if ( msr <= 0x1fff )
+    {
+        if ( access_type == 0 )
+            ret = test_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        else if ( access_type == 1 )
+            ret = test_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        if ( access_type == 0 )
+            ret = test_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        else if ( access_type == 1 )
+            ret = test_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
+    }
+    return ret;
+}
+
+
+/*
  * Switch VMCS between layer 1 & 2 guest
  */
 void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ed47780..719bfce 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -48,6 +48,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->intr.error_code = 0;
     nvmx->iobitmap[0] = NULL;
     nvmx->iobitmap[1] = NULL;
+    nvmx->msrbitmap = NULL;
     return 0;
  out:
     return -ENOMEM;
@@ -561,6 +562,17 @@ static void __clear_current_vvmcs(struct vcpu *v)
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
+static void __map_msr_bitmap(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    unsigned long gpa;
+
+    if ( nvmx->msrbitmap )
+        hvm_unmap_guest_frame (nvmx->msrbitmap);
+    gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
+    nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+}
+
 static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -597,6 +609,10 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
             nvmx->iobitmap[i] = NULL;
         }
     }
+    if ( nvmx->msrbitmap ) {
+        hvm_unmap_guest_frame(nvmx->msrbitmap);
+        nvmx->msrbitmap = NULL;
+    }
 }
 
 u64 nvmx_get_tsc_offset(struct vcpu *v)
@@ -1153,6 +1169,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
         nvcpu->nv_vvmcxaddr = gpa;
         map_io_bitmap_all (v);
+        __map_msr_bitmap(v);
     }
 
     vmreturn(regs, VMSUCCEED);
@@ -1270,6 +1287,9 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
               vmcs_encoding == IO_BITMAP_B_HIGH )
         __map_io_bitmap (v, IO_BITMAP_B);
 
+    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
+        __map_msr_bitmap(v);
+
     vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
@@ -1320,6 +1340,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
                CPU_BASED_RDTSC_EXITING |
                CPU_BASED_MONITOR_TRAP_FLAG |
                CPU_BASED_VIRTUAL_NMI_PENDING |
+               CPU_BASED_ACTIVATE_MSR_BITMAP |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
         /* bit 1, 4-6,8,13-16,26 must be 1 (refer G4 of SDM) */
         tmp = ( (1<<26) | (0xf << 13) | 0x100 | (0x7 << 4) | 0x2);
@@ -1497,8 +1518,6 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
     case EXIT_REASON_TRIPLE_FAULT:
     case EXIT_REASON_TASK_SWITCH:
     case EXIT_REASON_CPUID:
-    case EXIT_REASON_MSR_READ:
-    case EXIT_REASON_MSR_WRITE:
     case EXIT_REASON_VMCALL:
     case EXIT_REASON_VMCLEAR:
     case EXIT_REASON_VMLAUNCH:
@@ -1514,6 +1533,22 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         /* inject to L1 */
         nvcpu->nv_vmexit_pending = 1;
         break;
+    case EXIT_REASON_MSR_READ:
+    case EXIT_REASON_MSR_WRITE:
+    {
+        int status;
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP )
+        {
+            status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->ecx,
+                         !!(exit_reason == EXIT_REASON_MSR_WRITE));
+            if ( status )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    }
     case EXIT_REASON_IO_INSTRUCTION:
         ctrl = __n2_exec_control(v);
         if ( ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index cc92f69..14ac773 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -427,6 +427,7 @@ int vmx_add_host_load_msr(u32 msr);
 void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
+int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type);
 
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index b9137b8..067fbe4 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -26,6 +26,7 @@
 struct nestedvmx {
     paddr_t vmxon_region_pa;
     void *iobitmap[2];        /* map (va) of L1 guest I/O bitmap */
+    void *msrbitmap;          /* map (va) of L1 guest MSR bitmap */
     /* deferred nested interrupt */
     struct {
         unsigned long intr_info;
--
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel