# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1196091174 0
# Node ID 9f61a0add5b63f61a0942d9331bd448f8118e081
# Parent dc3a566f9e44153b2b9b9171948a76acdd9f4af4
x86_emulate: Emulate CPUID and HLT.

vmx realmode: Fix decode & emulate loop, add hooks for CPUID, HLT and
WBINVD. Also do not hook realmode entry off of vmentry failure any
more.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/hvm/vmx/realmode.c | 89 ++++++++++++++++++++++----
xen/arch/x86/hvm/vmx/vmx.c | 120 +++++++++++++++++-------------------
xen/arch/x86/hvm/vmx/x86_32/exits.S | 19 +++++
xen/arch/x86/hvm/vmx/x86_64/exits.S | 17 ++++-
xen/arch/x86/x86_32/asm-offsets.c | 1
xen/arch/x86/x86_64/asm-offsets.c | 1
xen/arch/x86/x86_emulate.c | 23 ++++++
xen/include/asm-x86/hvm/vmx/vmx.h | 4 +
xen/include/asm-x86/x86_emulate.h | 12 +++
9 files changed, 205 insertions(+), 81 deletions(-)
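
For orientation, below is a minimal standalone sketch (not part of the
changeset) of the hook pattern the emulator grows here: optional cpuid/hlt
callbacks on x86_emulate_ops that the host may leave NULL, with the emulator
failing cleanly when a hook is absent (cf. fail_if(ops->cpuid == NULL) in the
patch). The host_cpuid()/host_hlt() stand-ins are hypothetical; in the patch
the real implementations are realmode_cpuid()/realmode_hlt(), which forward to
vmx_cpuid_intercept() and latch a HLT flag for the realmode loop.

    /*
     * Standalone sketch of the optional-callback pattern added by this
     * patch.  Struct and callback shapes mirror x86_emulate_ops; the
     * host_* functions are hypothetical stand-ins for the VMX intercepts.
     */
    #include <stdio.h>

    struct x86_emulate_ctxt;            /* opaque in this example */

    struct x86_emulate_ops {
        int (*cpuid)(unsigned int *eax, unsigned int *ebx,
                     unsigned int *ecx, unsigned int *edx,
                     struct x86_emulate_ctxt *ctxt);
        int (*hlt)(struct x86_emulate_ctxt *ctxt);
    };

    #define X86EMUL_OKAY          0
    #define X86EMUL_UNHANDLEABLE  1

    /* Hypothetical host hooks; the real ones call vmx_cpuid_intercept()
     * and set a 'hlt' flag handled after the instruction retires. */
    static int host_cpuid(unsigned int *eax, unsigned int *ebx,
                          unsigned int *ecx, unsigned int *edx,
                          struct x86_emulate_ctxt *ctxt)
    {
        (void)ctxt;
        *eax = 0x1; *ebx = *ecx = *edx = 0;   /* canned leaf for the demo */
        return X86EMUL_OKAY;
    }

    static int host_hlt(struct x86_emulate_ctxt *ctxt)
    {
        (void)ctxt;
        return X86EMUL_OKAY;
    }

    /* Emulator side: refuse the opcode if the host did not register it. */
    static int emulate_cpuid(const struct x86_emulate_ops *ops,
                             unsigned int regs[4])
    {
        if ( ops->cpuid == NULL )
            return X86EMUL_UNHANDLEABLE;
        return ops->cpuid(&regs[0], &regs[1], &regs[2], &regs[3], NULL);
    }

    int main(void)
    {
        struct x86_emulate_ops ops = { .cpuid = host_cpuid, .hlt = host_hlt };
        unsigned int regs[4] = { 0 };

        if ( emulate_cpuid(&ops, regs) == X86EMUL_OKAY )
            printf("cpuid -> eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
                   regs[0], regs[1], regs[2], regs[3]);
        return 0;
    }
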
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/realmode.c Mon Nov 26 15:32:54 2007 +0000
@@ -29,6 +29,16 @@ struct realmode_emulate_ctxt {
unsigned long insn_buf_eip;
struct segment_register seg_reg[10];
+
+ union {
+ struct {
+ unsigned int hlt:1;
+ unsigned int mov_ss:1;
+ unsigned int sti:1;
+ unsigned int exn_raised:1;
+ } flags;
+ unsigned int flag_word;
+ };
};
static void realmode_deliver_exception(
@@ -251,14 +261,8 @@ realmode_write_segment(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
-
if ( seg == x86_seg_ss )
- {
- u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
- intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
- }
-
+ rm_ctxt->flags.mov_ss = 1;
return X86EMUL_OKAY;
}
@@ -337,13 +341,37 @@ static int realmode_write_rflags(
unsigned long val,
struct x86_emulate_ctxt *ctxt)
{
+ struct realmode_emulate_ctxt *rm_ctxt =
+ container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
- {
- u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
- intr_shadow ^= VMX_INTR_SHADOW_STI;
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
- }
-
+ rm_ctxt->flags.sti = 1;
+ return X86EMUL_OKAY;
+}
+
+static int realmode_wbinvd(
+ struct x86_emulate_ctxt *ctxt)
+{
+ vmx_wbinvd_intercept();
+ return X86EMUL_OKAY;
+}
+
+static int realmode_cpuid(
+ unsigned int *eax,
+ unsigned int *ebx,
+ unsigned int *ecx,
+ unsigned int *edx,
+ struct x86_emulate_ctxt *ctxt)
+{
+ vmx_cpuid_intercept(eax, ebx, ecx, edx);
+ return X86EMUL_OKAY;
+}
+
+static int realmode_hlt(
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct realmode_emulate_ctxt *rm_ctxt =
+ container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+ rm_ctxt->flags.hlt = 1;
return X86EMUL_OKAY;
}
@@ -354,6 +382,7 @@ static int realmode_inject_hw_exception(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+ rm_ctxt->flags.exn_raised = 1;
realmode_deliver_exception(vector, 0, rm_ctxt);
return X86EMUL_OKAY;
@@ -383,6 +412,9 @@ static struct x86_emulate_ops realmode_e
.write_io = realmode_write_io,
.read_cr = realmode_read_cr,
.write_rflags = realmode_write_rflags,
+ .wbinvd = realmode_wbinvd,
+ .cpuid = realmode_cpuid,
+ .hlt = realmode_hlt,
.inject_hw_exception = realmode_inject_hw_exception,
.inject_sw_interrupt = realmode_inject_sw_interrupt
};
@@ -393,6 +425,7 @@ int vmx_realmode(struct cpu_user_regs *r
struct realmode_emulate_ctxt rm_ctxt;
unsigned long intr_info;
int i, rc = 0;
+ u32 intr_shadow, new_intr_shadow;
rm_ctxt.ctxt.regs = regs;
@@ -410,6 +443,9 @@ int vmx_realmode(struct cpu_user_regs *r
__vmwrite(VM_ENTRY_INTR_INFO, 0);
realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
}
+
+ intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ new_intr_shadow = intr_shadow;
while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
!softirq_pending(smp_processor_id()) &&
@@ -421,7 +457,34 @@ int vmx_realmode(struct cpu_user_regs *r
(uint32_t)(rm_ctxt.seg_reg[x86_seg_cs].base + regs->eip),
sizeof(rm_ctxt.insn_buf));
+ rm_ctxt.flag_word = 0;
+
rc = x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops);
+
+ /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
+ if ( rm_ctxt.flags.mov_ss )
+ new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
+ else
+ new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;
+
+ /* STI instruction toggles STI shadow, else we just clear it. */
+ if ( rm_ctxt.flags.sti )
+ new_intr_shadow ^= VMX_INTR_SHADOW_STI;
+ else
+ new_intr_shadow &= ~VMX_INTR_SHADOW_STI;
+
+ /* Update interrupt shadow information in VMCS only if it changes. */
+ if ( intr_shadow != new_intr_shadow )
+ {
+ intr_shadow = new_intr_shadow;
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
+ }
+
+ /* HLT happens after instruction retire, if no interrupt/exception. */
+ if ( unlikely(rm_ctxt.flags.hlt) &&
+ !rm_ctxt.flags.exn_raised &&
+ !hvm_local_events_need_delivery(curr) )
+ hvm_hlt(regs->eflags);
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
{
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 26 15:32:54 2007 +0000
@@ -1055,10 +1055,7 @@ static void vmx_update_guest_cr(struct v
v->arch.hvm_vcpu.hw_cr[0] =
v->arch.hvm_vcpu.guest_cr[0] |
- X86_CR0_NE | X86_CR0_PG | X86_CR0_WP;
-#ifdef VMXASSIST
- v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PE;
-#endif
+ X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
__vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
__vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
break;
@@ -1254,10 +1251,11 @@ static void vmx_do_no_device_fault(void)
}
#define bitmaskof(idx) (1U << ((idx) & 31))
-static void vmx_do_cpuid(struct cpu_user_regs *regs)
-{
- unsigned int input = regs->eax;
- unsigned int eax, ebx, ecx, edx;
+void vmx_cpuid_intercept(
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ unsigned int input = *eax;
#ifdef VMXASSIST
if ( input == 0x40000003 )
@@ -1266,7 +1264,7 @@ static void vmx_do_cpuid(struct cpu_user
* NB. Unsupported interface for private use of VMXASSIST only.
* Note that this leaf lives at <max-hypervisor-leaf> + 1.
*/
- u64 value = ((u64)regs->edx << 32) | (u32)regs->ecx;
+ u64 value = ((u64)*edx << 32) | (u32)*ecx;
p2m_type_t p2mt;
unsigned long mfn;
struct vcpu *v = current;
@@ -1290,58 +1288,70 @@ static void vmx_do_cpuid(struct cpu_user
unmap_domain_page(p);
gdprintk(XENLOG_INFO, "Output value is 0x%"PRIx64".\n", value);
- regs->ecx = (u32)value;
- regs->edx = (u32)(value >> 32);
+ *ecx = (u32)value;
+ *edx = (u32)(value >> 32);
return;
}
#endif
- hvm_cpuid(input, &eax, &ebx, &ecx, &edx);
+ hvm_cpuid(input, eax, ebx, ecx, edx);
switch ( input )
{
case 0x00000001:
- ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
- ebx &= NUM_THREADS_RESET_MASK;
- ecx &= ~(bitmaskof(X86_FEATURE_VMXE) |
- bitmaskof(X86_FEATURE_EST) |
- bitmaskof(X86_FEATURE_TM2) |
- bitmaskof(X86_FEATURE_CID) |
- bitmaskof(X86_FEATURE_PDCM) |
- bitmaskof(X86_FEATURE_DSCPL));
- edx &= ~(bitmaskof(X86_FEATURE_HT) |
- bitmaskof(X86_FEATURE_ACPI) |
- bitmaskof(X86_FEATURE_ACC) |
- bitmaskof(X86_FEATURE_DS));
+ *ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
+ *ebx &= NUM_THREADS_RESET_MASK;
+ *ecx &= ~(bitmaskof(X86_FEATURE_VMXE) |
+ bitmaskof(X86_FEATURE_EST) |
+ bitmaskof(X86_FEATURE_TM2) |
+ bitmaskof(X86_FEATURE_CID) |
+ bitmaskof(X86_FEATURE_PDCM) |
+ bitmaskof(X86_FEATURE_DSCPL));
+ *edx &= ~(bitmaskof(X86_FEATURE_HT) |
+ bitmaskof(X86_FEATURE_ACPI) |
+ bitmaskof(X86_FEATURE_ACC) |
+ bitmaskof(X86_FEATURE_DS));
break;
case 0x00000004:
- cpuid_count(input, regs->ecx, &eax, &ebx, &ecx, &edx);
- eax &= NUM_CORES_RESET_MASK;
+ cpuid_count(input, *ecx, eax, ebx, ecx, edx);
+ *eax &= NUM_CORES_RESET_MASK;
break;
case 0x00000006:
case 0x00000009:
case 0x0000000A:
- eax = ebx = ecx = edx = 0;
+ *eax = *ebx = *ecx = *edx = 0;
break;
case 0x80000001:
/* Only a few features are advertised in Intel's 0x80000001. */
- ecx &= (bitmaskof(X86_FEATURE_LAHF_LM));
- edx &= (bitmaskof(X86_FEATURE_NX) |
- bitmaskof(X86_FEATURE_LM) |
- bitmaskof(X86_FEATURE_SYSCALL));
- break;
- }
+ *ecx &= (bitmaskof(X86_FEATURE_LAHF_LM));
+ *edx &= (bitmaskof(X86_FEATURE_NX) |
+ bitmaskof(X86_FEATURE_LM) |
+ bitmaskof(X86_FEATURE_SYSCALL));
+ break;
+ }
+
+ HVMTRACE_3D(CPUID, current, input,
+ ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx);
+}
+
+static void vmx_do_cpuid(struct cpu_user_regs *regs)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ eax = regs->eax;
+ ebx = regs->ebx;
+ ecx = regs->ecx;
+ edx = regs->edx;
+
+ vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx);
regs->eax = eax;
regs->ebx = ebx;
regs->ecx = ecx;
regs->edx = edx;
-
- HVMTRACE_3D(CPUID, current, input,
- ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
}
#define CASE_GET_REG_P(REG, reg) \
@@ -2696,19 +2706,22 @@ static void wbinvd_ipi(void *info)
wbinvd();
}
+void vmx_wbinvd_intercept(void)
+{
+ if ( list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
+ return;
+
+ if ( cpu_has_wbinvd_exiting )
+ on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+ else
+ wbinvd();
+}
+
static void vmx_failed_vmentry(unsigned int exit_reason,
struct cpu_user_regs *regs)
{
unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
-
-#ifndef VMXASSIST
- if ( (failed_vmentry_reason == EXIT_REASON_INVALID_GUEST_STATE) &&
- (exit_qualification == 0) &&
- !(current->arch.hvm_vcpu.hw_cr[0] & X86_CR0_PE) &&
- (vmx_realmode(regs) == 0) )
- return;
-#endif
printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
switch ( failed_vmentry_reason )
@@ -2976,24 +2989,7 @@ asmlinkage void vmx_vmexit_handler(struc
{
inst_len = __get_instruction_length(); /* Safe: INVD, WBINVD */
__update_guest_eip(inst_len);
- if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
- {
- if ( cpu_has_wbinvd_exiting )
- {
- on_each_cpu(wbinvd_ipi, NULL, 1, 1);
- }
- else
- {
- wbinvd();
- /* Disable further WBINVD intercepts. */
- if ( (exit_reason == EXIT_REASON_WBINVD) &&
- (vmx_cpu_based_exec_control &
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
- __vmwrite(SECONDARY_VM_EXEC_CONTROL,
- vmx_secondary_exec_control &
- ~SECONDARY_EXEC_WBINVD_EXITING);
- }
- }
+ vmx_wbinvd_intercept();
break;
}
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Nov 26 15:32:54 2007 +0000
@@ -103,7 +103,12 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- cmpl $0,VCPU_vmx_launched(%ebx)
+#ifndef VMXASSIST
+ testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%ebx)
+ jz vmx_goto_realmode
+#endif
+
+ cmpb $0,VCPU_vmx_launched(%ebx)
je vmx_launch
/*vmx_resume:*/
@@ -114,9 +119,19 @@ ENTRY(vmx_asm_do_vmentry)
ud2
vmx_launch:
- movl $1,VCPU_vmx_launched(%ebx)
+ movb $1,VCPU_vmx_launched(%ebx)
HVM_RESTORE_ALL_NOSEGREGS
VMLAUNCH
pushf
call vm_launch_fail
ud2
+
+#ifndef VMXASSIST
+vmx_goto_realmode:
+ sti
+ movl %esp,%eax
+ push %eax
+ call vmx_realmode
+ addl $4,%esp
+ jmp vmx_asm_do_vmentry
+#endif
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Nov 26 15:32:54 2007 +0000
@@ -121,7 +121,12 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- cmpl $0,VCPU_vmx_launched(%rbx)
+#ifndef VMXASSIST
+ testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%rbx)
+ jz vmx_goto_realmode
+#endif
+
+ cmpb $0,VCPU_vmx_launched(%rbx)
je vmx_launch
/*vmx_resume:*/
@@ -132,9 +137,17 @@ ENTRY(vmx_asm_do_vmentry)
ud2
vmx_launch:
- movl $1,VCPU_vmx_launched(%rbx)
+ movb $1,VCPU_vmx_launched(%rbx)
HVM_RESTORE_ALL_NOSEGREGS
VMLAUNCH
pushfq
call vm_launch_fail
ud2
+
+#ifndef VMXASSIST
+vmx_goto_realmode:
+ sti
+ movq %rsp,%rdi
+ call vmx_realmode
+ jmp vmx_asm_do_vmentry
+#endif
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/x86_32/asm-offsets.c Mon Nov 26 15:32:54 2007 +0000
@@ -83,6 +83,7 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
+ OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/x86_64/asm-offsets.c Mon Nov 26 15:32:54 2007 +0000
@@ -98,6 +98,7 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
+ OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/arch/x86/x86_emulate.c Mon Nov 26 15:32:54 2007 +0000
@@ -167,7 +167,8 @@ static uint8_t opcode_table[256] = {
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0xF0 - 0xF7 */
0, ImplicitOps, 0, 0,
- 0, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM,
+ ImplicitOps, ImplicitOps,
+ ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM,
/* 0xF8 - 0xFF */
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM
@@ -225,7 +226,8 @@ static uint8_t twobyte_table[256] = {
ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov,
ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov,
/* 0xA0 - 0xA7 */
- ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM, 0, 0, 0, 0,
+ ImplicitOps, ImplicitOps, ImplicitOps, DstBitBase|SrcReg|ModRM,
+ 0, 0, 0, 0,
/* 0xA8 - 0xAF */
ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM,
0, 0, 0, DstReg|SrcMem|ModRM,
@@ -2450,6 +2452,12 @@ x86_emulate(
src.val = EXC_DB;
goto swint;
+ case 0xf4: /* hlt */
+ fail_if(ops->hlt == NULL);
+ if ( (rc = ops->hlt(ctxt)) != 0 )
+ goto done;
+ break;
+
case 0xf5: /* cmc */
_regs.eflags ^= EFLG_CF;
break;
@@ -2783,6 +2791,17 @@ x86_emulate(
src.val = x86_seg_fs;
goto pop_seg;
+ case 0xa2: /* cpuid */ {
+ unsigned int eax = _regs.eax, ebx = _regs.ebx;
+ unsigned int ecx = _regs.ecx, edx = _regs.edx;
+ fail_if(ops->cpuid == NULL);
+ if ( (rc = ops->cpuid(&eax, &ebx, &ecx, &edx, ctxt)) != 0 )
+ goto done;
+ _regs.eax = eax; _regs.ebx = ebx;
+ _regs.ecx = ecx; _regs.edx = edx;
+ break;
+ }
+
case 0xa8: /* push %%gs */
src.val = x86_seg_gs;
goto push_seg;
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Mon Nov 26 15:32:54 2007 +0000
@@ -33,6 +33,10 @@ void vmx_do_resume(struct vcpu *);
void vmx_do_resume(struct vcpu *);
void set_guest_time(struct vcpu *v, u64 gtime);
void vmx_vlapic_msr_changed(struct vcpu *v);
+void vmx_cpuid_intercept(
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+void vmx_wbinvd_intercept(void);
int vmx_realmode(struct cpu_user_regs *regs);
int vmx_realmode_io_complete(void);
diff -r dc3a566f9e44 -r 9f61a0add5b6 xen/include/asm-x86/x86_emulate.h
--- a/xen/include/asm-x86/x86_emulate.h Mon Nov 26 13:54:45 2007 +0000
+++ b/xen/include/asm-x86/x86_emulate.h Mon Nov 26 15:32:54 2007 +0000
@@ -275,6 +275,18 @@ struct x86_emulate_ops
int (*wbinvd)(
struct x86_emulate_ctxt *ctxt);
+ /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
+ int (*cpuid)(
+ unsigned int *eax,
+ unsigned int *ebx,
+ unsigned int *ecx,
+ unsigned int *edx,
+ struct x86_emulate_ctxt *ctxt);
+
+ /* hlt: Emulate HLT. */
+ int (*hlt)(
+ struct x86_emulate_ctxt *ctxt);
+
/* inject_hw_exception */
int (*inject_hw_exception)(
uint8_t vector,