[Xen-devel] [PATCH 1/3] xen: Add instruction length parameter in function hvm_inject_exception
VMX exception: Pass the instruction length field down to exception emulation
by extending the hypercall argument and adding a parameter to the function
hvm_inject_exception(), so that exception emulation can set the correct
instruction length.

Signed-off-by: Xudong Hao <xudong.hao@xxxxxxxxx>
Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c       |    4 +-
 xen/arch/x86/hvm/hvm.c           |   40 +++++++++++++++++++------------------
 xen/arch/x86/hvm/io.c            |    2 +-
 xen/arch/x86/hvm/svm/emulate.c   |    4 +-
 xen/arch/x86/hvm/svm/nestedsvm.c |    6 ++--
 xen/arch/x86/hvm/svm/svm.c       |   34 ++++++++++++++++----------------
 xen/arch/x86/hvm/vmx/vmx.c       |    2 +-
 xen/arch/x86/hvm/vmx/vvmx.c      |    6 ++--
 xen/arch/x86/mm/shadow/common.c  |    2 +-
 xen/arch/x86/mm/shadow/multi.c   |    2 +-
 xen/include/asm-x86/hvm/hvm.h    |    4 +-
 xen/include/asm-x86/hvm/vcpu.h   |    1 +
 xen/include/public/hvm/hvm_op.h  |    2 +
 13 files changed, 57 insertions(+), 52 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2b50670..b6d1984 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -326,7 +326,7 @@ static int hvmemul_linear_to_phys(
     {
         if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
             return X86EMUL_RETRY;
-        hvm_inject_exception(TRAP_page_fault, pfec, addr);
+        hvm_inject_exception(TRAP_page_fault, 0, pfec, addr);
         return X86EMUL_EXCEPTION;
     }
@@ -349,7 +349,7 @@ static int hvmemul_linear_to_phys(
             ASSERT(!reverse);
             if ( npfn != INVALID_GFN )
                 return X86EMUL_UNHANDLEABLE;
-            hvm_inject_exception(TRAP_page_fault, pfec, addr & PAGE_MASK);
+            hvm_inject_exception(TRAP_page_fault, 0, pfec, addr & PAGE_MASK);
             return X86EMUL_EXCEPTION;
         }
         *reps = done;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index efd5587..fa8b220 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -350,6 +350,7 @@ void hvm_do_resume(struct vcpu *v)
     if (v->arch.hvm_vcpu.inject_trap != -1)
     {
         hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
+                             v->arch.hvm_vcpu.instruction_len,
                              v->arch.hvm_vcpu.inject_error_code,
                              v->arch.hvm_vcpu.inject_cr2);
         v->arch.hvm_vcpu.inject_trap = -1;
@@ -1194,7 +1195,7 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }

-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
+void hvm_inject_exception(unsigned int trapnr, int inslen, int errcode, unsigned long cr2)
 {
     struct vcpu *curr = current;
@@ -1221,7 +1222,7 @@ void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
         }
     }

-    hvm_funcs.inject_exception(trapnr, errcode, cr2);
+    hvm_funcs.inject_exception(trapnr, inslen, errcode, cr2);
 }

 int hvm_hap_nested_page_fault(unsigned long gpa,
@@ -1270,7 +1271,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
             return -1;
         case NESTEDHVM_PAGEFAULT_MMIO:
             if ( !handle_mmio() )
-                hvm_inject_exception(TRAP_gp_fault, 0, 0);
+                hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
             return 1;
         }
     }
@@ -1337,7 +1338,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
     {
         put_gfn(p2m->domain, gfn);
         if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         rc = 1;
         goto out;
     }
@@ -1380,7 +1381,7 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
         {
             gdprintk(XENLOG_WARNING,
                      "trying to write to read-only grant mapping\n");
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
             rc = 1;
             goto out_put_gfn;
         }
@@ -1441,7 +1442,7 @@ int hvm_handle_xsetbv(u64 new_bv)
     return 0;
 err:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return -1;
 }
@@ -1457,7 +1458,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: 0x%"PRIx64"\n", value);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         return X86EMUL_EXCEPTION;
     }
@@ -1466,7 +1467,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING,
                  "Trying to change EFER.LME with paging enabled\n");
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         return X86EMUL_EXCEPTION;
     }
@@ -1722,7 +1723,7 @@ int hvm_set_cr0(unsigned long value)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1808,7 +1809,7 @@ int hvm_set_cr4(unsigned long value)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -2104,7 +2105,7 @@ static int hvm_load_segment_selector(
 unmap_and_fail:
     hvm_unmap_entry(pdesc);
 fail:
-    hvm_inject_exception(fault_type, sel & 0xfffc, 0);
+    hvm_inject_exception(fault_type, 0, sel & 0xfffc, 0);
 hvm_map_fail:
     return 1;
 }
@@ -2139,7 +2140,7 @@ void hvm_task_switch(
     {
         hvm_inject_exception((taskswitch_reason == TSW_iret) ?
                              TRAP_invalid_tss : TRAP_gp_fault,
-                             tss_sel & 0xfff8, 0);
+                             0, tss_sel & 0xfff8, 0);
         goto out;
     }
@@ -2164,7 +2165,7 @@ void hvm_task_switch(
     if ( !tr.attr.fields.p )
     {
-        hvm_inject_exception(TRAP_no_segment, tss_sel & 0xfff8, 0);
+        hvm_inject_exception(TRAP_no_segment, 0, tss_sel & 0xfff8, 0);
         goto out;
     }
@@ -2172,13 +2173,13 @@ void hvm_task_switch(
     {
         hvm_inject_exception(
             (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
-            tss_sel & 0xfff8, 0);
+            0, tss_sel & 0xfff8, 0);
         goto out;
     }

     if ( tr.limit < (sizeof(tss)-1) )
     {
-        hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
+        hvm_inject_exception(TRAP_invalid_tss, 0, tss_sel & 0xfff8, 0);
         goto out;
     }
@@ -2283,7 +2284,7 @@ void hvm_task_switch(
         goto out;

     if ( (tss.trace & 1) && !exn_raised )
-        hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
+        hvm_inject_exception(TRAP_debug, 0, tss_sel & 0xfff8, 0);

     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
@@ -2362,7 +2363,7 @@ static enum hvm_copy_result __hvm_copy(
             if ( pfec == PFEC_page_shared )
                 return HVMCOPY_gfn_shared;
             if ( flags & HVMCOPY_fault )
-                hvm_inject_exception(TRAP_page_fault, pfec, addr);
+                hvm_inject_exception(TRAP_page_fault, 0, pfec, addr);
             return HVMCOPY_bad_gva_to_gfn;
         }
     }
@@ -2849,7 +2850,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return ret;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -2962,7 +2963,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return ret;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -4272,6 +4273,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
         else
         {
             v->arch.hvm_vcpu.inject_trap = tr.trap;
+            v->arch.hvm_vcpu.instruction_len = tr.inslen;
             v->arch.hvm_vcpu.inject_error_code = tr.error_code;
             v->arch.hvm_vcpu.inject_cr2 = tr.cr2;
         }
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 41a2ede..bd856fd 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -200,7 +200,7 @@ int handle_mmio(void)
         return 0;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_insn_len, ctxt.exn_error_code, 0);
         break;
     default:
         break;
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 6000bff..8b67f2f 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -147,7 +147,7 @@ static int fetch(struct vcpu *v, u8 *buf, unsigned long addr, int len)
         /* Not OK: fetches from non-RAM pages are not supportable. */
         gdprintk(XENLOG_WARNING, "Bad instruction fetch at %#lx (%#lx)\n",
                  (unsigned long) guest_cpu_user_regs()->eip, addr);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         return 0;
     }
     return 1;
@@ -216,7 +216,7 @@ int __get_instruction_length_from_list(struct vcpu *v,
     gdprintk(XENLOG_WARNING,
              "%s: Mismatch between expected and actual instruction bytes: "
              "eip = %lx\n", __func__, (unsigned long)vmcb->rip);
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return 0;

 done:
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8714bb0..ac04f3b 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -735,7 +735,7 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     default:
         gdprintk(XENLOG_ERR,
             "nsvm_vcpu_vmentry failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         /* Must happen after hvm_inject_exception or it doesn't work right. */
         nv->nv_vmswitch_in_progress = 0;
         return 1;
@@ -1509,7 +1509,7 @@ void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
     unsigned int inst_len;

     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         return;
     }
@@ -1529,7 +1529,7 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
     vintr_t intr;

     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         return;
     }
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e717dda..87fb8a1 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -109,7 +109,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;

     if ( regs->eflags & X86_EFLAGS_TF )
-        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_debug, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
 }

 static void svm_cpu_down(void)
@@ -1067,7 +1067,7 @@ static void svm_vcpu_destroy(struct vcpu *v)
 }

 static void svm_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2)
+    unsigned int trapnr, int inslen, int errcode, unsigned long cr2)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
@@ -1361,7 +1361,7 @@ static void svm_fpu_dirty_intercept(void)
        /* Check if l1 guest must make FPU ready for the l2 guest */
        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
-           hvm_inject_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE, 0);
+           hvm_inject_exception(TRAP_no_device, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
        else
            vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
        return;
@@ -1579,7 +1579,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1708,7 +1708,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -1784,13 +1784,13 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
 {
     if (!nestedhvm_enabled(v->domain)) {
         gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         return;
     }

     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
         gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         return;
     }
@@ -1830,7 +1830,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
     return;

 inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_exception(ret, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
     return;
 }
@@ -1864,7 +1864,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
     return;

 inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_exception(ret, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
     return;
 }
@@ -1880,11 +1880,11 @@ static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_insn_len, ctxt.exn_error_code, 0);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -2212,7 +2212,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             break;
         }

-        hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
+        hvm_inject_exception(TRAP_page_fault, 0, regs->error_code, va);
         break;
     }
@@ -2285,7 +2285,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         break;

     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
@@ -2293,7 +2293,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
             svm_vmexit_do_cr_access(vmcb, regs);
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         break;

     case VMEXIT_INVLPG:
@@ -2303,7 +2303,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             __update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         break;

     case VMEXIT_INVLPGA:
@@ -2349,7 +2349,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)

     case VMEXIT_MONITOR:
     case VMEXIT_MWAIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;

     case VMEXIT_VMRUN:
@@ -2368,7 +2368,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         svm_vmexit_do_clgi(regs, v);
         break;
     case VMEXIT_SKINIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_exception(TRAP_invalid_op, 0, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;

     case VMEXIT_XSETBV:
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d5cb279..e15b7a4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2226,7 +2226,7 @@ static void vmx_vmexit_ud_intercept(struct cpu_user_regs *regs)
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_insn_len, ctxt.exn_error_code, 0);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index b0ae0ee..6a55edb 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -304,12 +304,12 @@ vmexit:
 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
-    hvm_inject_exception(TRAP_invalid_op, 0, 0);
+    hvm_inject_exception(TRAP_invalid_op, 0, 0, 0);
     return X86EMUL_EXCEPTION;

 gp_fault:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: gp_fault\n");
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
@@ -386,7 +386,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     return X86EMUL_OKAY;

 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
     return X86EMUL_EXCEPTION;
 }
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 59be993..8b864d2 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -135,7 +135,7 @@ static int hvm_translate_linear_addr(
     if ( !okay )
     {
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0, 0);
         return X86EMUL_EXCEPTION;
     }
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9368385..199bd85 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4825,7 +4825,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
     if ( gfn == INVALID_GFN )
     {
         if ( is_hvm_vcpu(v) )
-            hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
+            hvm_inject_exception(TRAP_page_fault, 0, pfec, vaddr);
         else
             propagate_page_fault(vaddr, pfec);
         return _mfn(BAD_GVA_TO_GFN);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 22f9451..3ba1615 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -124,7 +124,7 @@ struct hvm_function_table {
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);

-    void (*inject_exception)(unsigned int trapnr, int errcode,
+    void (*inject_exception)(unsigned int trapnr, int inslen, int errcode,
                              unsigned long cr2);

     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
@@ -320,7 +320,7 @@ void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);

-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2);
+void hvm_inject_exception(unsigned int trapnr, int inslen, int errcode, unsigned long cr2);

 static inline int hvm_event_pending(struct vcpu *v)
 {
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 537da96..c325fd1 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -166,6 +166,7 @@ struct hvm_vcpu {
     void *fpu_exception_callback_arg;

     /* Pending hw/sw interrupt */
     int inject_trap;       /* -1 for nothing to inject */
+    int instruction_len;
     int inject_error_code;
     unsigned long inject_cr2;
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 6a78f75..4bafd88 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -219,6 +219,8 @@ struct xen_hvm_inject_trap {
     uint32_t vcpuid;
     /* Trap number */
     uint32_t trap;
+    /* Instruction length */
+    uint32_t inslen;
     /* Error code, or -1 to skip */
     uint32_t error_code;
     /* CR2 for page faults */
--
1.5.5
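
To illustrate the caller side of the new field, here is a minimal sketch. It
assumes the structure above is the argument of the HVMOP_inject_trap hypercall
(it lives in the public hvm_op.h this patch touches) and that inslen carries
the length of the instruction behind the injected software exception, as the
commit message describes; issue_hvm_op() is a hypothetical stand-in for the
caller's hypercall plumbing, not something this patch adds:

    /* Sketch only: issue_hvm_op() is a hypothetical wrapper for whatever
     * hypercall plumbing the caller uses (libxc, privcmd ioctl, ...). */
    #include <stdint.h>
    #include <string.h>
    #include <xen/hvm/hvm_op.h>  /* installed copy of xen/include/public/hvm/hvm_op.h */

    int issue_hvm_op(unsigned long op, void *arg);  /* hypothetical */

    static int inject_breakpoint(uint32_t vcpuid)
    {
        struct xen_hvm_inject_trap tr;

        memset(&tr, 0, sizeof(tr));      /* fields not shown in the hunk above stay 0 */
        tr.vcpuid     = vcpuid;
        tr.trap       = 3;               /* #BP */
        tr.inslen     = 1;               /* length of the INT3 being replayed (new field) */
        tr.error_code = (uint32_t)-1;    /* "-1 to skip", per the structure comment */
        tr.cr2        = 0;

        return issue_hvm_op(HVMOP_inject_trap, &tr);
    }

Note that all in-hypervisor call sites converted by this patch pass 0 for the
new argument, except the emulator paths, which forward ctxt.exn_insn_len, so a
hardware-style exception behaves exactly as before.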