[Xen-devel] [PATCH v2 1/4] xen: Define new struct hvm_trap and cleanup vmx exception
Define a new struct hvm_trap to represent trap information, rename
hvm_inject_exception to hvm_inject_trap, and define a couple of wrappers
around that function for existing callers.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Xudong Hao <xudong.hao@xxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c              |    4 +-
 xen/arch/x86/hvm/hvm.c                  |   90 ++++++++++-------
 xen/arch/x86/hvm/io.c                   |    2 +-
 xen/arch/x86/hvm/svm/emulate.c          |    4 +-
 xen/arch/x86/hvm/svm/nestedsvm.c        |   16 ++--
 xen/arch/x86/hvm/svm/svm.c              |   66 +++++++------
 xen/arch/x86/hvm/vmx/intr.c             |    2 +-
 xen/arch/x86/hvm/vmx/vmx.c              |  169 ++++++++++++++-----------------
 xen/arch/x86/hvm/vmx/vpmu_core2.c       |    8 +-
 xen/arch/x86/hvm/vmx/vvmx.c             |    6 +-
 xen/arch/x86/mm/shadow/common.c         |    2 +-
 xen/arch/x86/mm/shadow/multi.c          |    2 +-
 xen/include/asm-x86/hvm/hvm.h           |   22 +++--
 xen/include/asm-x86/hvm/svm/nestedsvm.h |    3 +-
 xen/include/asm-x86/hvm/vcpu.h          |    7 +-
 xen/include/asm-x86/hvm/vmx/vmx.h       |    1 -
 16 files changed, 205 insertions(+), 199 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2b50670..9bfba48 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -326,7 +326,7 @@ static int hvmemul_linear_to_phys(
     {
         if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
             return X86EMUL_RETRY;
-        hvm_inject_exception(TRAP_page_fault, pfec, addr);
+        hvm_inject_page_fault(pfec, addr);
         return X86EMUL_EXCEPTION;
     }
@@ -349,7 +349,7 @@ static int hvmemul_linear_to_phys(
         ASSERT(!reverse);
         if ( npfn != INVALID_GFN )
             return X86EMUL_UNHANDLEABLE;
-        hvm_inject_exception(TRAP_page_fault, pfec, addr & PAGE_MASK);
+        hvm_inject_page_fault(pfec, addr & PAGE_MASK);
         return X86EMUL_EXCEPTION;
     }
     *reps = done;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index efd5587..f3c87bc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -347,12 +347,10 @@ void hvm_do_resume(struct vcpu *v)
     }

     /* Inject pending hw/sw trap */
-    if (v->arch.hvm_vcpu.inject_trap != -1)
+    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
-        hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
-                             v->arch.hvm_vcpu.inject_error_code,
-                             v->arch.hvm_vcpu.inject_cr2);
-        v->arch.hvm_vcpu.inject_trap = -1;
+        hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
+        v->arch.hvm_vcpu.inject_trap.vector = -1;
     }
 }
@@ -1047,7 +1045,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

-    v->arch.hvm_vcpu.inject_trap = -1;
+    v->arch.hvm_vcpu.inject_trap.vector = -1;

 #ifdef CONFIG_COMPAT
     rc = setup_compat_arg_xlat(v);
@@ -1194,18 +1192,19 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }

-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
+void hvm_inject_trap(struct hvm_trap *trap)
 {
     struct vcpu *curr = current;

     if ( nestedhvm_enabled(curr->domain) &&
          !nestedhvm_vmswitch_in_progress(curr) &&
          nestedhvm_vcpu_in_guestmode(curr) &&
-         nhvm_vmcx_guest_intercepts_trap(curr, trapnr, errcode) )
+         nhvm_vmcx_guest_intercepts_trap(
+             curr, trap->vector, trap->error_code) )
     {
         enum nestedhvm_vmexits nsret;

-        nsret = nhvm_vcpu_vmexit_trap(curr, trapnr, errcode, cr2);
+        nsret = nhvm_vcpu_vmexit_trap(curr, trap);

         switch ( nsret )
         {
@@ -1221,7 +1220,26 @@
         }
     }

-    hvm_funcs.inject_exception(trapnr, errcode, cr2);
+    hvm_funcs.inject_trap(trap);
+}
+
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode)
+{
+    struct hvm_trap trap = {
+        .vector = trapnr,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode };
+    hvm_inject_trap(&trap);
+}
+
+void hvm_inject_page_fault(int errcode, unsigned long cr2)
+{
+    struct hvm_trap trap = {
+        .vector = TRAP_page_fault,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode,
+        .cr2 = cr2 };
+    hvm_inject_trap(&trap);
 }

 int hvm_hap_nested_page_fault(unsigned long gpa,
@@ -1270,7 +1288,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
             return -1;
         case NESTEDHVM_PAGEFAULT_MMIO:
             if ( !handle_mmio() )
-                hvm_inject_exception(TRAP_gp_fault, 0, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
             return 1;
         }
     }
@@ -1337,7 +1355,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
     {
         put_gfn(p2m->domain, gfn);
         if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
     }
@@ -1380,7 +1398,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
         {
             gdprintk(XENLOG_WARNING,
                      "trying to write to read-only grant mapping\n");
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
             rc = 1;
             goto out_put_gfn;
         }
@@ -1441,7 +1459,7 @@ int hvm_handle_xsetbv(u64 new_bv)
     return 0;
 err:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return -1;
 }
@@ -1457,7 +1475,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: 0x%"PRIx64"\n", value);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
@@ -1466,7 +1484,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING,
                  "Trying to change EFER.LME with paging enabled\n");
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
@@ -1722,7 +1740,7 @@ int hvm_set_cr0(unsigned long value)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1808,7 +1826,7 @@ int hvm_set_cr4(unsigned long value)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -2104,7 +2122,7 @@ static int hvm_load_segment_selector(
 unmap_and_fail:
     hvm_unmap_entry(pdesc);
 fail:
-    hvm_inject_exception(fault_type, sel & 0xfffc, 0);
+    hvm_inject_hw_exception(fault_type, sel & 0xfffc);
 hvm_map_fail:
     return 1;
 }
@@ -2137,9 +2155,9 @@ void hvm_task_switch(
     if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
     {
-        hvm_inject_exception((taskswitch_reason == TSW_iret) ?
+        hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ?
                              TRAP_invalid_tss : TRAP_gp_fault,
-                             tss_sel & 0xfff8, 0);
+                             tss_sel & 0xfff8);
         goto out;
     }
@@ -2164,21 +2182,21 @@ void hvm_task_switch(
     if ( !tr.attr.fields.p )
     {
-        hvm_inject_exception(TRAP_no_segment, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
         goto out;
     }

     if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
-        hvm_inject_exception(
+        hvm_inject_hw_exception(
             (taskswitch_reason == TSW_iret) ?
             TRAP_invalid_tss : TRAP_gp_fault,
-            tss_sel & 0xfff8, 0);
+            tss_sel & 0xfff8);
         goto out;
     }

     if ( tr.limit < (sizeof(tss)-1) )
     {
-        hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
         goto out;
     }
@@ -2283,7 +2301,7 @@ void hvm_task_switch(
         goto out;

     if ( (tss.trace & 1) && !exn_raised )
-        hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_debug, tss_sel & 0xfff8);

     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
@@ -2362,7 +2380,7 @@ static enum hvm_copy_result __hvm_copy(
             if ( pfec == PFEC_page_shared )
                 return HVMCOPY_gfn_shared;
             if ( flags & HVMCOPY_fault )
-                hvm_inject_exception(TRAP_page_fault, pfec, addr);
+                hvm_inject_page_fault(pfec, addr);
             return HVMCOPY_bad_gva_to_gfn;
         }
     }
@@ -2849,7 +2867,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return ret;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -2962,7 +2980,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return ret;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -4267,13 +4285,13 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
         if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
             goto param_fail8;

-        if ( v->arch.hvm_vcpu.inject_trap != -1 )
+        if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
             rc = -EBUSY;
         else
         {
-            v->arch.hvm_vcpu.inject_trap = tr.trap;
-            v->arch.hvm_vcpu.inject_error_code = tr.error_code;
-            v->arch.hvm_vcpu.inject_cr2 = tr.cr2;
+            v->arch.hvm_vcpu.inject_trap.vector = tr.trap;
+            v->arch.hvm_vcpu.inject_trap.error_code = tr.error_code;
+            v->arch.hvm_vcpu.inject_trap.cr2 = tr.cr2;
         }

     param_fail8:
@@ -4431,11 +4449,9 @@
     return -EOPNOTSUPP;
 }

-int
-nhvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                      int errcode, unsigned long cr2)
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trapnr, errcode, cr2);
+    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
 }

 uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 41a2ede..31af045 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -200,7 +200,7 @@ int handle_mmio(void)
         return 0;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         break;
     default:
         break;
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 6000bff..0c72f00 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -147,7 +147,7 @@ static int fetch(struct vcpu *v, u8 *buf, unsigned long addr, int len)
         /* Not OK: fetches from non-RAM pages are not supportable. */
         gdprintk(XENLOG_WARNING, "Bad instruction fetch at %#lx (%#lx)\n",
                  (unsigned long) guest_cpu_user_regs()->eip, addr);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return 0;
     }
     return 1;
@@ -216,7 +216,7 @@ int __get_instruction_length_from_list(struct vcpu *v,
     gdprintk(XENLOG_WARNING,
              "%s: Mismatch between expected and actual instruction bytes: "
              "eip = %lx\n", __func__, (unsigned long)vmcb->rip);
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return 0;

 done:
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8714bb0..6ed3260 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -735,8 +735,8 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     default:
         gdprintk(XENLOG_ERR,
             "nsvm_vcpu_vmentry failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
-        /* Must happen after hvm_inject_exception or it doesn't work right. */
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        /* Must happen after hvm_inject_hw_exception or it doesn't work right. */
         nv->nv_vmswitch_in_progress = 0;
         return 1;
     }
@@ -796,12 +796,12 @@
 }

 int
-nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                      int errcode, unsigned long cr2)
+nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
     ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);

-    nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trapnr, errcode, cr2);
+    nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trap->vector,
+                           trap->error_code, trap->cr2);
     return NESTEDHVM_VMEXIT_DONE;
 }
@@ -1176,7 +1176,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
     }

     if ( nv->nv_vmexit_pending ) {
-        /* hvm_inject_exception() must have run before.
+        /* hvm_inject_hw_exception() must have run before.
          * exceptions have higher priority than interrupts.
          */
         return hvm_intblk_rflags_ie;
@@ -1509,7 +1509,7 @@ void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
     unsigned int inst_len;

     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
@@ -1529,7 +1529,7 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
     vintr_t intr;

     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e717dda..e568e33 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -109,7 +109,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;

     if ( regs->eflags & X86_EFLAGS_TF )
-        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
 }

 static void svm_cpu_down(void)
@@ -1066,14 +1066,14 @@ static void svm_vcpu_destroy(struct vcpu *v)
     passive_domain_destroy(v);
 }

-static void svm_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2)
+static void svm_inject_trap(struct hvm_trap *trap)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
     eventinj_t event = vmcb->eventinj;
+    struct hvm_trap _trap = *trap;

-    switch ( trapnr )
+    switch ( _trap.vector )
     {
     case TRAP_debug:
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
@@ -1081,6 +1081,9 @@ static void svm_inject_exception(
             __restore_debug_registers(curr);
             vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
         }
+        if ( cpu_has_monitor_trap_flag )
+            break;
+        /* fall through */
     case TRAP_int3:
         if ( curr->domain->debugger_attached )
         {
@@ -1093,29 +1096,30 @@
     if ( unlikely(event.fields.v) &&
          (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
     {
-        trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
-        if ( trapnr == TRAP_double_fault )
-            errcode = 0;
+        _trap.vector = hvm_combine_hw_exceptions(
+            event.fields.vector, _trap.vector);
+        if ( _trap.vector == TRAP_double_fault )
+            _trap.error_code = 0;
     }

     event.bytes = 0;
     event.fields.v = 1;
     event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
-    event.fields.vector = trapnr;
-    event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
-    event.fields.errorcode = errcode;
+    event.fields.vector = _trap.vector;
+    event.fields.ev = (_trap.error_code != HVM_DELIVER_NO_ERROR_CODE);
+    event.fields.errorcode = _trap.error_code;

     vmcb->eventinj = event;

-    if ( trapnr == TRAP_page_fault )
+    if ( _trap.vector == TRAP_page_fault )
     {
-        curr->arch.hvm_vcpu.guest_cr[2] = cr2;
-        vmcb_set_cr2(vmcb, cr2);
-        HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
+        curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
+        vmcb_set_cr2(vmcb, _trap.cr2);
+        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code, TRC_PAR_LONG(_trap.cr2));
     }
     else
     {
-        HVMTRACE_2D(INJ_EXC, trapnr, errcode);
+        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
     }
 }
@@ -1361,7 +1365,7 @@ static void svm_fpu_dirty_intercept(void)
     {
        /* Check if l1 guest must make FPU ready for the l2 guest */
        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
-           hvm_inject_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE, 0);
+           hvm_inject_hw_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE);
        else
            vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
        return;
@@ -1579,7 +1583,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1708,7 +1712,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1784,13 +1788,13 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
 {
     if (!nestedhvm_enabled(v->domain)) {
         gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }

     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
         gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
@@ -1830,7 +1834,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
     return;

 inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
     return;
 }
@@ -1864,7 +1868,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
     return;

 inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
     return;
 }
@@ -1880,11 +1884,11 @@ static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -1998,7 +2002,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
     .set_guest_pat        = svm_set_guest_pat,
     .get_guest_pat        = svm_get_guest_pat,
     .set_tsc_offset       = svm_set_tsc_offset,
-    .inject_exception     = svm_inject_exception,
+    .inject_trap          = svm_inject_trap,
     .init_hypercall_page  = svm_init_hypercall_page,
     .event_pending        = svm_event_pending,
     .do_pmu_interrupt     = svm_do_pmu_interrupt,
@@ -2212,7 +2216,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             break;
         }

-        hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
+        hvm_inject_page_fault(regs->error_code, va);
         break;
     }
@@ -2285,7 +2289,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;

     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
@@ -2293,7 +2297,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
             svm_vmexit_do_cr_access(vmcb, regs);
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;

     case VMEXIT_INVLPG:
@@ -2303,7 +2307,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             __update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;

     case VMEXIT_INVLPGA:
@@ -2349,7 +2353,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)

     case VMEXIT_MONITOR:
     case VMEXIT_MWAIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;

     case VMEXIT_VMRUN:
@@ -2368,7 +2372,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         svm_vmexit_do_clgi(regs, v);
         break;
     case VMEXIT_SKINIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;

     case VMEXIT_XSETBV:
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index d675011..590c483 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -251,7 +251,7 @@ void vmx_intr_assist(void)
     }
     else if ( intack.source == hvm_intsrc_mce )
     {
-        vmx_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
     }
     else
     {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d5cb279..c96d18b 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -268,7 +268,7 @@ long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)

 uncanonical_address:
     HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr);
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return HNDL_exception_raised;
 }
@@ -1310,10 +1310,9 @@
                nvmx->intr.intr_info, nvmx->intr.error_code);
 }

-static int nvmx_vmexit_exceptions(struct vcpu *v, unsigned int trapnr,
-                                  int errcode, unsigned long cr2)
+static int nvmx_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    nvmx_enqueue_n2_exceptions(v, trapnr, errcode);
+    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code);
     return NESTEDHVM_VMEXIT_DONE;
 }
@@ -1344,22 +1343,62 @@ static void __vmx_inject_exception(int trap, int type, int error_code)
     curr->arch.hvm_vmx.vmx_emulate = 1;
 }

-void vmx_inject_hw_exception(int trap, int error_code)
+void vmx_inject_extint(int trap)
+{
+    struct vcpu *v = current;
+    u32 pin_based_cntrl;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                                      PIN_BASED_VM_EXEC_CONTROL);
+        if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
+            nvmx_enqueue_n2_exceptions (v,
+               INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
+               HVM_DELIVER_NO_ERROR_CODE);
+            return;
+        }
+    }
+    __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
+void vmx_inject_nmi(void)
+{
+    struct vcpu *v = current;
+    u32 pin_based_cntrl;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                                      PIN_BASED_VM_EXEC_CONTROL);
+        if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
+            nvmx_enqueue_n2_exceptions (v,
+               INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
+               HVM_DELIVER_NO_ERROR_CODE);
+            return;
+        }
+    }
+    __vmx_inject_exception(2, X86_EVENTTYPE_NMI,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
+static void vmx_inject_trap(struct hvm_trap *trap)
 {
     unsigned long intr_info;
     struct vcpu *curr = current;
+    struct hvm_trap _trap = *trap;

-    int type = X86_EVENTTYPE_HW_EXCEPTION;
+    if ( (_trap.vector == TRAP_page_fault) &&
+         (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+        current->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;

     if ( nestedhvm_vcpu_in_guestmode(curr) )
         intr_info = vcpu_2_nvmx(curr).intr.intr_info;
     else
         intr_info = __vmread(VM_ENTRY_INTR_INFO);

-    switch ( trap )
+    switch ( _trap.vector )
     {
     case TRAP_debug:
-        type = X86_EVENTTYPE_SW_EXCEPTION;
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(curr);
@@ -1368,7 +1407,6 @@ void vmx_inject_hw_exception(int trap, int error_code)
         if ( cpu_has_monitor_trap_flag )
             break;
         /* fall through */
-
     case TRAP_int3:
         if ( curr->domain->debugger_attached )
         {
@@ -1376,91 +1414,34 @@ void vmx_inject_hw_exception(int trap, int error_code)
             domain_pause_for_debugger();
             return;
         }
-
-        type = X86_EVENTTYPE_SW_EXCEPTION;
-        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
-        break;
-
-    default:
-        if ( trap > TRAP_last_reserved )
-        {
-            type = X86_EVENTTYPE_SW_EXCEPTION;
-            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 2); /* int imm8 */
-        }
-        break;
     }

     if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
          (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
     {
-        trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
-        if ( trap == TRAP_double_fault )
-            error_code = 0;
+        _trap.vector = hvm_combine_hw_exceptions(
+            (uint8_t)intr_info, _trap.vector);
+        if ( _trap.vector == TRAP_double_fault )
+            _trap.error_code = 0;
     }

     if ( nestedhvm_vcpu_in_guestmode(curr) &&
-         nvmx_intercepts_exception(curr, trap, error_code) )
+         nvmx_intercepts_exception(curr, _trap.vector, _trap.error_code) )
     {
         nvmx_enqueue_n2_exceptions (curr,
-            INTR_INFO_VALID_MASK | (type<<8) | trap,
-            error_code);
+            INTR_INFO_VALID_MASK | (_trap.type<<8) | _trap.vector,
+            _trap.error_code);
         return;
     }
     else
-        __vmx_inject_exception(trap, type, error_code);
+        __vmx_inject_exception(_trap.vector, _trap.type, _trap.error_code);

-    if ( trap == TRAP_page_fault )
-        HVMTRACE_LONG_2D(PF_INJECT, error_code,
+    if ( (_trap.vector == TRAP_page_fault) &&
+         (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code,
                          TRC_PAR_LONG(current->arch.hvm_vcpu.guest_cr[2]));
     else
-        HVMTRACE_2D(INJ_EXC, trap, error_code);
-}
-
-void vmx_inject_extint(int trap)
-{
-    struct vcpu *v = current;
-    u32 pin_based_cntrl;
-
-    if ( nestedhvm_vcpu_in_guestmode(v) ) {
-        pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                                      PIN_BASED_VM_EXEC_CONTROL);
-        if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
-            nvmx_enqueue_n2_exceptions (v,
-               INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
-               HVM_DELIVER_NO_ERROR_CODE);
-            return;
-        }
-    }
-    __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
-
-void vmx_inject_nmi(void)
-{
-    struct vcpu *v = current;
-    u32 pin_based_cntrl;
-
-    if ( nestedhvm_vcpu_in_guestmode(v) ) {
-        pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                                      PIN_BASED_VM_EXEC_CONTROL);
-        if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
-            nvmx_enqueue_n2_exceptions (v,
-               INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
-               HVM_DELIVER_NO_ERROR_CODE);
-            return;
-        }
-    }
-    __vmx_inject_exception(2, X86_EVENTTYPE_NMI,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
-
-static void vmx_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2)
-{
-    if ( trapnr == TRAP_page_fault )
-        current->arch.hvm_vcpu.guest_cr[2] = cr2;
-
-    vmx_inject_hw_exception(trapnr, errcode);
+        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
 }

 static int vmx_event_pending(struct vcpu *v)
@@ -1532,7 +1513,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .set_guest_pat        = vmx_set_guest_pat,
     .get_guest_pat        = vmx_get_guest_pat,
     .set_tsc_offset       = vmx_set_tsc_offset,
-    .inject_exception     = vmx_inject_exception,
+    .inject_trap          = vmx_inject_trap,
     .init_hypercall_page  = vmx_init_hypercall_page,
     .event_pending        = vmx_event_pending,
     .do_pmu_interrupt     = vmx_do_pmu_interrupt,
@@ -1554,7 +1535,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_hostcr3    = nvmx_vcpu_hostcr3,
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
-    .nhvm_vcpu_vmexit_trap = nvmx_vmexit_exceptions,
+    .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
     .nhvm_intr_blocked    = nvmx_intr_blocked
 };
@@ -1618,7 +1599,7 @@ static void update_guest_eip(void)
     }

     if ( regs->eflags & X86_EFLAGS_TF )
-        vmx_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
 }

 static void vmx_fpu_dirty_intercept(void)
@@ -1922,7 +1903,7 @@ done:
     return X86EMUL_OKAY;

 gp_fault:
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -2030,7 +2011,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             if ( (rc < 0) ||
                  (vmx_add_host_load_msr(msr) < 0) )
-                vmx_inject_hw_exception(TRAP_machine_check, 0);
+                hvm_inject_hw_exception(TRAP_machine_check, 0);
             else
             {
                 __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
@@ -2073,7 +2054,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;

 gp_fault:
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -2222,11 +2203,11 @@ static void vmx_vmexit_ud_intercept(struct cpu_user_regs *regs)
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
-        vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -2440,7 +2421,12 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             if ( handled < 0 )
             {
-                vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
+                struct hvm_trap trap = {
+                    .vector = TRAP_int3,
+                    .type = X86_EVENTTYPE_SW_EXCEPTION,
+                    .error_code = HVM_DELIVER_NO_ERROR_CODE
+                };
+                hvm_inject_trap(&trap);
                 break;
             }
             else if ( handled )
@@ -2476,8 +2462,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
                 break;
             }

-            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
-            vmx_inject_hw_exception(TRAP_page_fault, regs->error_code);
+            hvm_inject_page_fault(regs->error_code, exit_qualification);
             break;
         case TRAP_nmi:
             if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
@@ -2658,7 +2643,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
          * as far as vmexit.
          */
         WARN_ON(exit_reason == EXIT_REASON_GETSEC);
-        vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;

     case EXIT_REASON_TPR_BELOW_THRESHOLD:
@@ -2666,7 +2651,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     case EXIT_REASON_APIC_ACCESS:
         if ( !vmx_handle_eoi_write() && !handle_mmio() )
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;

     case EXIT_REASON_IO_INSTRUCTION:
@@ -2675,7 +2660,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         {
             /* INS, OUTS */
             if ( !handle_mmio() )
-                vmx_inject_hw_exception(TRAP_gp_fault, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
         }
         else
         {
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 266d4a6..c79103e 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -421,7 +421,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
                 return 1;
             gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n");
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
             return 0;
         }
     }
@@ -437,7 +437,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
                  "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
-        vmx_inject_hw_exception(TRAP_gp_fault, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return 1;
     case MSR_IA32_PEBS_ENABLE:
         if ( msr_content & 1 )
@@ -452,7 +452,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
                 gdprintk(XENLOG_WARNING,
                          "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
                          msr_content);
-                vmx_inject_hw_exception(TRAP_gp_fault, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
                 return 1;
             }
         core2_vpmu_cxt->pmu_enable->ds_area_enable = msr_content ? 1 : 0;
@@ -544,7 +544,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             break;
         }
         if (inject_gp)
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else
             wrmsrl(msr, msr_content);
     }
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index b0ae0ee..fc733a9 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -304,12 +304,12 @@ vmexit:

 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
-    hvm_inject_exception(TRAP_invalid_op, 0, 0);
+    hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
     return X86EMUL_EXCEPTION;

 gp_fault:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: gp_fault\n");
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -386,7 +386,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     return X86EMUL_OKAY;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 59be993..dc245be 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -135,7 +135,7 @@ static int hvm_translate_linear_addr(

     if ( !okay )
     {
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9368385..4f56ae6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4825,7 +4825,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
     if ( gfn == INVALID_GFN )
     {
         if ( is_hvm_vcpu(v) )
-            hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
+            hvm_inject_page_fault(pfec, vaddr);
         else
             propagate_page_fault(vaddr, pfec);
         return _mfn(BAD_GVA_TO_GFN);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 22f9451..65f7e20 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -71,6 +71,13 @@ enum hvm_intblk {
 #define HVM_HAP_SUPERPAGE_2MB    0x00000001
 #define HVM_HAP_SUPERPAGE_1GB    0x00000002

+struct hvm_trap {
+    int           vector;
+    unsigned int  type;         /* X86_EVENTTYPE_* */
+    int           error_code;   /* HVM_DELIVER_NO_ERROR_CODE if n/a */
+    unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
+};
+
 /*
  * The hardware virtual machine (HVM) interface abstracts away from the
  * x86/x86_64 CPU virtualization assist specifics. Currently this interface
@@ -124,8 +131,7 @@ struct hvm_function_table {

     void (*set_tsc_offset)(struct vcpu *v, u64 offset);

-    void (*inject_exception)(unsigned int trapnr, int errcode,
-                             unsigned long cr2);
+    void (*inject_trap)(struct hvm_trap *trap);

     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

@@ -162,10 +168,7 @@ struct hvm_function_table {
                                 struct cpu_user_regs *regs);
     int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
                             uint64_t exitcode);
-    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v,
-                                 unsigned int trapnr,
-                                 int errcode,
-                                 unsigned long cr2);
+    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
     uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
     uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
     uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
@@ -320,7 +323,9 @@ void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);

-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2);
+void hvm_inject_trap(struct hvm_trap *trap);
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode);
+void hvm_inject_page_fault(int errcode, unsigned long cr2);

 static inline int hvm_event_pending(struct vcpu *v)
 {
@@ -479,8 +484,7 @@ int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
 /* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
  * 'trapnr' exception.
  */
-int nhvm_vcpu_vmexit_trap(struct vcpu *v,
-                          unsigned int trapnr, int errcode, unsigned long cr2);
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);

 /* returns l2 guest cr3 in l2 guest physical address space. */
 uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index f6951b3..fa83023 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -114,8 +114,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
 int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
 int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
                             uint64_t exitcode);
-int nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                          int errcode, unsigned long cr2);
+int nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
 uint64_t nsvm_vcpu_guestcr3(struct vcpu *v);
 uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
 uint32_t nsvm_vcpu_asid(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 537da96..f2da72d 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -164,10 +164,9 @@ struct hvm_vcpu {
     /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
     void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
     void *fpu_exception_callback_arg;
-    /* Pending hw/sw interrupt */
-    int           inject_trap;       /* -1 for nothing to inject */
-    int           inject_error_code;
-    unsigned long inject_cr2;
+
+    /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
+    struct hvm_trap inject_trap;

     struct viridian_vcpu viridian;
 };
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index f003f84..accfa3f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -387,7 +387,6 @@ static inline int __vmxon(u64 addr)
     return rc;
 }

-void vmx_inject_hw_exception(int trap, int error_code);
 void vmx_inject_extint(int trap);
 void vmx_inject_nmi(void);
--
1.5.5
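[Editorial note, not part of the patch: a minimal usage sketch of the three entry points the hunks above introduce. hvm_inject_hw_exception() and hvm_inject_page_fault() cover the common hardware-exception cases; anything needing a non-default event type fills in a struct hvm_trap itself, as the #BP path in vmx_vmexit_handler() now does. The pfec/addr values below stand in for whatever the faulting access produced.]

    /* Hardware exception with an explicit error code, e.g. #GP(0): */
    hvm_inject_hw_exception(TRAP_gp_fault, 0);

    /* Page fault: cr2 now travels inside struct hvm_trap rather than
     * as a third argument to the old hvm_inject_exception(): */
    hvm_inject_page_fault(pfec, addr);

    /* Any other event builds a struct hvm_trap directly: */
    struct hvm_trap trap = {
        .vector = TRAP_int3,
        .type = X86_EVENTTYPE_SW_EXCEPTION,
        .error_code = HVM_DELIVER_NO_ERROR_CODE
    };
    hvm_inject_trap(&trap);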