# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID a753630a6456541bc90c32a17e4b452bcece825d
# Parent 140dff9d90dca30cb8f8e258e8976ce2dafb73e2
[HVM][VMX] Clean up and audit VMX uses of the instruction-length
info field. To do: the use by the MMIO decoder still needs to be removed.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 4
xen/arch/x86/hvm/vmx/io.c | 8 +
xen/arch/x86/hvm/vmx/vmx.c | 138 ++++++++++++++++++++++------------
xen/include/asm-x86/hvm/svm/emulate.h | 32 +------
xen/include/asm-x86/hvm/vmx/vmx.h | 38 ---------
5 files changed, 109 insertions(+), 111 deletions(-)
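
The heart of the change is replacing the old unchecked __get_instruction_length() macro in vmx.c with a function that validates VM_EXIT_INSTRUCTION_LEN once and is then invoked only at VM exits where the field is architecturally defined, each call site carrying a /* Safe: ... */ annotation. As a rough, self-contained C sketch of that pattern (not Xen code: vmcs_read(), vmx_bug() and handle_cpuid_exit() are hypothetical stand-ins for __vmread(), __hvm_bug() and the real exit handlers; only the 1..15-byte range check and the audited-caller idea come from the patch itself):

    /* Sketch only: audited helper for the VM-exit instruction length. */
    #include <stdio.h>
    #include <stdlib.h>

    #define VM_EXIT_INSTRUCTION_LEN 0x440c /* VMCS field encoding */

    /* Stand-in for __vmread(): here it just returns a canned value. */
    static unsigned long vmcs_read(unsigned long field)
    {
        (void)field;
        return 2; /* e.g. the two-byte CPUID instruction */
    }

    /* Stand-in for __hvm_bug(): abort instead of crashing the domain. */
    static void vmx_bug(const char *why)
    {
        fprintf(stderr, "BUG: %s\n", why);
        abort();
    }

    /* One audited helper instead of an unchecked macro; call it only for
     * VM exits where the instruction-length field is defined. */
    static int get_instruction_length(void)
    {
        int len = (int)vmcs_read(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
        if ( (len < 1) || (len > 15) ) /* x86 instructions are 1..15 bytes */
            vmx_bug("bogus VM_EXIT_INSTRUCTION_LEN");
        return len;
    }

    /* Example audited call site: advance guest RIP past the exiting instruction. */
    static void handle_cpuid_exit(unsigned long *rip)
    {
        *rip += get_instruction_length(); /* Safe: CPUID always sets the field */
    }

    int main(void)
    {
        unsigned long rip = 0x1000;
        handle_cpuid_exit(&rip);
        printf("rip advanced to %#lx\n", rip);
        return 0;
    }

Centralising the check means an out-of-range length is caught in one place instead of being open-coded after every read of the field.
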
diff -r 140dff9d90dc -r a753630a6456 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Fri Sep 22 09:12:00 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Fri Sep 22 11:33:03 2006 +0100
@@ -1847,12 +1847,12 @@ static int svm_cr_access(struct vcpu *v,
where the prefix lives later on */
index = skip_prefix_bytes(buffer, sizeof(buffer));
- if (type == TYPE_MOV_TO_CR)
+ if ( type == TYPE_MOV_TO_CR )
{
inst_len = __get_instruction_length_from_list(
vmcb, list_a, ARR_SIZE(list_a), &buffer[index], &match);
}
- else
+ else /* type == TYPE_MOV_FROM_CR */
{
inst_len = __get_instruction_length_from_list(
vmcb, list_b, ARR_SIZE(list_b), &buffer[index], &match);
diff -r 140dff9d90dc -r a753630a6456 xen/arch/x86/hvm/vmx/io.c
--- a/xen/arch/x86/hvm/vmx/io.c Fri Sep 22 09:12:00 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/io.c Fri Sep 22 11:33:03 2006 +0100
@@ -108,11 +108,17 @@ asmlinkage void vmx_intr_assist(void)
return;
}
+ /* This could be moved earlier in the VMX resume sequence. */
__vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
- __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
+ /*
+ * Safe: the length will only be interpreted for software exceptions
+ * and interrupts. If we get here then delivery of some event caused a
+ * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
+ */
+ __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); /* Safe */
__vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
if (unlikely(idtv_info_field & 0x800)) { /* valid error code */
diff -r 140dff9d90dc -r a753630a6456 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Fri Sep 22 09:12:00 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Sep 22 11:33:03 2006 +0100
@@ -597,7 +597,7 @@ static int vmx_instruction_length(struct
{
unsigned long inst_len;
- if (__vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len))
+ if ( __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len) ) /* XXX Unsafe XXX */
return 0;
return inst_len;
}
@@ -836,11 +836,16 @@ int start_vmx(void)
/*
* Not all cases receive valid value in the VM-exit instruction length field.
+ * Callers must know what they're doing!
*/
-#define __get_instruction_length(len) \
- __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
- if ((len) < 1 || (len) > 15) \
- __hvm_bug(&regs);
+static int __get_instruction_length(void)
+{
+ int len;
+ __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
+ if ( (len < 1) || (len > 15) )
+ __hvm_bug(guest_cpu_user_regs());
+ return len;
+}
static void inline __update_guest_eip(unsigned long inst_len)
{
@@ -1051,15 +1056,18 @@ static int check_for_null_selector(unsig
int i, inst_len;
int inst_copy_from_guest(unsigned char *, unsigned long, int);
- __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
+ inst_len = __get_instruction_length(); /* Safe: INS/OUTS */
memset(inst, 0, MAX_INST_LEN);
- if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
+ if ( inst_copy_from_guest(inst, eip, inst_len) != inst_len )
+ {
printf("check_for_null_selector: get guest instruction failed\n");
domain_crash_synchronous();
}
- for (i = 0; i < inst_len; i++) {
- switch (inst[i]) {
+ for ( i = 0; i < inst_len; i++ )
+ {
+ switch ( inst[i] )
+ {
case 0xf3: /* REPZ */
case 0xf2: /* REPNZ */
case 0xf0: /* LOCK */
@@ -1184,15 +1192,14 @@ static void vmx_io_instruction(unsigned
}
}
-int
-vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
-{
- unsigned long inst_len;
+static int vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
+{
int error = 0;
- error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
+ /* NB. Skip transition instruction. */
error |= __vmread(GUEST_RIP, &c->eip);
- c->eip += inst_len; /* skip transition instruction */
+ c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
+
error |= __vmread(GUEST_RSP, &c->esp);
error |= __vmread(GUEST_RFLAGS, &c->eflags);
@@ -1249,8 +1256,7 @@ vmx_world_save(struct vcpu *v, struct vm
return !error;
}
-int
-vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
+static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
unsigned long mfn, old_cr4, old_base_mfn;
int error = 0;
@@ -1364,8 +1370,7 @@ vmx_world_restore(struct vcpu *v, struct
enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
-int
-vmx_assist(struct vcpu *v, int mode)
+static int vmx_assist(struct vcpu *v, int mode)
{
struct vmx_assist_context c;
u32 magic;
@@ -1408,8 +1413,8 @@ vmx_assist(struct vcpu *v, int mode)
break;
/*
- * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
- * above.
+ * Restore the VMXASSIST_OLD_CONTEXT that was saved by
+ * VMX_ASSIST_INVOKE above.
*/
case VMX_ASSIST_RESTORE:
/* save the old context */
@@ -1552,7 +1557,8 @@ static int vmx_set_cr0(unsigned long val
}
}
- if ( vmx_assist(v, VMX_ASSIST_INVOKE) ) {
+ if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
+ {
set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
__vmread(GUEST_RIP, &eip);
HVM_DBG_LOG(DBG_LEVEL_1,
@@ -1815,7 +1821,8 @@ static void mov_from_cr(int cr, int gp,
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}
-static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
+static int vmx_cr_access(unsigned long exit_qualification,
+ struct cpu_user_regs *regs)
{
unsigned int gp, cr;
unsigned long value;
@@ -2069,6 +2076,47 @@ void restore_cpu_user_regs(struct cpu_us
}
#endif
+static void vmx_reflect_exception(struct vcpu *v)
+{
+ int error_code, intr_info, vector;
+
+ __vmread(VM_EXIT_INTR_INFO, &intr_info);
+ vector = intr_info & 0xff;
+ if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
+ __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
+ else
+ error_code = VMX_DELIVER_NO_ERROR_CODE;
+
+#ifndef NDEBUG
+ {
+ unsigned long rip;
+
+ __vmread(GUEST_RIP, &rip);
+ HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
+ rip, error_code);
+ }
+#endif /* NDEBUG */
+
+ /*
+ * According to Intel Virtualization Technology Specification for
+ * the IA-32 Intel Architecture (C97063-002 April 2005), section
+ * 2.8.3, SW_EXCEPTION should be used for #BP and #OV, and
+ * HW_EXCEPTION used for everything else. The main difference
+ * appears to be that for SW_EXCEPTION, the EIP/RIP is incremented
+ * by VM_ENTER_INSTRUCTION_LEN bytes, whereas for HW_EXCEPTION,
+ * it is not.
+ */
+ if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_SW_EXCEPTION )
+ {
+ int ilen = __get_instruction_length(); /* Safe: software exception */
+ vmx_inject_sw_exception(v, vector, ilen);
+ }
+ else
+ {
+ vmx_inject_hw_exception(v, vector, error_code);
+ }
+}
+
asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
{
unsigned int exit_reason;
@@ -2116,7 +2164,8 @@ asmlinkage void vmx_vmexit_handler(struc
TRACE_VMEXIT(0,exit_reason);
- switch ( exit_reason ) {
+ switch ( exit_reason )
+ {
case EXIT_REASON_EXCEPTION_NMI:
{
/*
@@ -2242,43 +2291,38 @@ asmlinkage void vmx_vmexit_handler(struc
domain_crash_synchronous();
break;
case EXIT_REASON_CPUID:
+ inst_len = __get_instruction_length(); /* Safe: CPUID */
+ __update_guest_eip(inst_len);
vmx_vmexit_do_cpuid(&regs);
- __get_instruction_length(inst_len);
- __update_guest_eip(inst_len);
break;
case EXIT_REASON_HLT:
- __get_instruction_length(inst_len);
+ inst_len = __get_instruction_length(); /* Safe: HLT */
__update_guest_eip(inst_len);
vmx_vmexit_do_hlt();
break;
case EXIT_REASON_INVLPG:
{
- unsigned long va;
-
+ unsigned long va;
+ inst_len = __get_instruction_length(); /* Safe: INVLPG */
+ __update_guest_eip(inst_len);
__vmread(EXIT_QUALIFICATION, &va);
vmx_vmexit_do_invlpg(va);
- __get_instruction_length(inst_len);
+ break;
+ }
+ case EXIT_REASON_VMCALL:
+ {
+ inst_len = __get_instruction_length(); /* Safe: VMCALL */
__update_guest_eip(inst_len);
- break;
- }
- case EXIT_REASON_VMCALL:
- {
- __get_instruction_length(inst_len);
__vmread(GUEST_RIP, &rip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
-
hvm_do_hypercall(&regs);
- __update_guest_eip(inst_len);
break;
}
case EXIT_REASON_CR_ACCESS:
{
__vmread(GUEST_RIP, &rip);
- __get_instruction_length(inst_len);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
-
- HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, inst_len =%lx, exit_qualification = %lx",
- rip, inst_len, exit_qualification);
+ inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
if ( vmx_cr_access(exit_qualification, &regs) )
__update_guest_eip(inst_len);
TRACE_VMEXIT(3,regs.error_code);
@@ -2291,19 +2335,19 @@ asmlinkage void vmx_vmexit_handler(struc
break;
case EXIT_REASON_IO_INSTRUCTION:
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- __get_instruction_length(inst_len);
+ inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
vmx_io_instruction(exit_qualification, inst_len);
TRACE_VMEXIT(4,exit_qualification);
break;
case EXIT_REASON_MSR_READ:
- __get_instruction_length(inst_len);
+ inst_len = __get_instruction_length(); /* Safe: RDMSR */
+ __update_guest_eip(inst_len);
vmx_do_msr_read(&regs);
+ break;
+ case EXIT_REASON_MSR_WRITE:
+ inst_len = __get_instruction_length(); /* Safe: WRMSR */
__update_guest_eip(inst_len);
- break;
- case EXIT_REASON_MSR_WRITE:
vmx_do_msr_write(&regs);
- __get_instruction_length(inst_len);
- __update_guest_eip(inst_len);
break;
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
diff -r 140dff9d90dc -r a753630a6456 xen/include/asm-x86/hvm/svm/emulate.h
--- a/xen/include/asm-x86/hvm/svm/emulate.h Fri Sep 22 09:12:00 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/emulate.h Fri Sep 22 11:33:03 2006 +0100
@@ -94,15 +94,14 @@ static inline int __get_instruction_leng
static inline int __get_instruction_length(struct vmcb_struct *vmcb,
enum instruction_index instr, u8 *guest_eip_buf)
{
- return __get_instruction_length_from_list(vmcb, &instr, 1, guest_eip_buf,
- NULL);
+ return __get_instruction_length_from_list(
+ vmcb, &instr, 1, guest_eip_buf, NULL);
}
static inline unsigned int is_prefix(u8 opc)
{
- switch(opc)
- {
+ switch ( opc ) {
case 0x66:
case 0x67:
case 0x2E:
@@ -115,22 +114,7 @@ static inline unsigned int is_prefix(u8
case 0xF3:
case 0xF2:
#if __x86_64__
- case 0x40:
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x44:
- case 0x45:
- case 0x46:
- case 0x47:
- case 0x48:
- case 0x49:
- case 0x4a:
- case 0x4b:
- case 0x4c:
- case 0x4d:
- case 0x4e:
- case 0x4f:
+ case 0x40 ... 0x4f:
#endif /* __x86_64__ */
return 1;
}
@@ -141,15 +125,15 @@ static inline int skip_prefix_bytes(u8 *
static inline int skip_prefix_bytes(u8 *buf, size_t size)
{
int index;
- for (index = 0; index < size && is_prefix(buf[index]); index ++)
- /* do nothing */ ;
+ for ( index = 0; index < size && is_prefix(buf[index]); index++ )
+ continue;
return index;
}
-static void inline __update_guest_eip(struct vmcb_struct *vmcb,
- int inst_len)
+static void inline __update_guest_eip(
+ struct vmcb_struct *vmcb, int inst_len)
{
ASSERT(inst_len > 0);
vmcb->rip += inst_len;
diff -r 140dff9d90dc -r a753630a6456 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Fri Sep 22 09:12:00 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Fri Sep 22 11:33:03 2006 +0100
@@ -469,7 +469,7 @@ static inline void __vmx_inject_exceptio
if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
- }
+ }
if ( ilen )
__vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
@@ -499,40 +499,4 @@ static inline void vmx_inject_extint(str
__vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
}
-static inline void vmx_reflect_exception(struct vcpu *v)
-{
- int error_code, intr_info, vector;
-
- __vmread(VM_EXIT_INTR_INFO, &intr_info);
- vector = intr_info & 0xff;
- if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
- __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
- else
- error_code = VMX_DELIVER_NO_ERROR_CODE;
-
-#ifndef NDEBUG
- {
- unsigned long rip;
-
- __vmread(GUEST_RIP, &rip);
- HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
- rip, error_code);
- }
-#endif /* NDEBUG */
-
- /* According to Intel Virtualization Technology Specification for
- the IA-32 Intel Architecture (C97063-002 April 2005), section
- 2.8.3, SW_EXCEPTION should be used for #BP and #OV, and
- HW_EXCPEPTION used for everything else. The main difference
- appears to be that for SW_EXCEPTION, the EIP/RIP is incremented
- by VM_ENTER_INSTRUCTION_LEN bytes, whereas for HW_EXCEPTION,
- it is not. */
- if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_SW_EXCEPTION ) {
- int ilen;
- __vmread(VM_EXIT_INSTRUCTION_LEN, &ilen);
- vmx_inject_sw_exception(v, vector, ilen);
- } else
- vmx_inject_hw_exception(v, vector, error_code);
-}
-
#endif /* __ASM_X86_HVM_VMX_VMX_H__ */