# HG changeset patch
# User cegger
# Date 1287145234 -7200

Implement SVM specific interrupt handling

Emulate the STGI and CLGI instructions and track a virtual GIF
(ns_gif) for nested guests, report GIF- and RFLAGS.IF-based interrupt
blocking through the new nhvm_intr_blocked hook, dispatch pending
interrupts via nestedhvm_vcpu_interrupt() while the vcpu runs in
guest mode, and take a forced VMEXIT path out of the assembly resume
code when the l1 guest intercepts the interrupt.

Signed-off-by: Christoph Egger

diff -r 87774fa72e28 -r 8649a9cd1da9 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -56,6 +56,11 @@ ENTRY(svm_asm_do_resume)
         call svm_intr_assist
 
         get_current(bx)
+
+        movl NESTEDHVM_hostflags(r(bx)),%eax
+        and  $NESTEDHVM_forcevmexit,%eax
+        jnz  .Lsvm_trace_forcevmexit
+
         CLGI
 
         mov  VCPU_processor(r(bx)),%eax
@@ -162,6 +167,7 @@ ENTRY(svm_asm_do_resume)
         STGI
 .globl svm_stgi_label
 svm_stgi_label:
+.Lsvm_forcevmexit_label:
         call_with_regs(svm_vmexit_handler)
 
         jmp  svm_asm_do_resume
@@ -173,3 +179,8 @@ svm_stgi_label:
 .Lsvm_trace:
         call svm_trace_vmentry
         jmp  .Lsvm_trace_done
+
+.Lsvm_trace_forcevmexit:
+        call svm_trace_vmentry
+        jmp  .Lsvm_forcevmexit_label
+
diff -r 87774fa72e28 -r 8649a9cd1da9 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
 #include
 #include
 #include
@@ -58,7 +59,7 @@ static void svm_inject_nmi(struct vcpu *
      */
     vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
- 
+
 static void svm_inject_extint(struct vcpu *v, int vector)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -72,7 +73,7 @@ static void svm_inject_extint(struct vcp
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
 }
- 
+
 static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -117,15 +118,46 @@ asmlinkage void svm_intr_assist(void)
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct hvm_intack intack;
+    enum hvm_intblk intblk;
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
 
     do {
+        int rc;
+
         intack = hvm_vcpu_has_pending_irq(v);
         if ( likely(intack.source == hvm_intsrc_none) )
             return;
 
+        intblk = hvm_interrupt_blocked(v, intack);
+        if ( intblk == hvm_intblk_svm_gif ) {
+            ASSERT(nestedhvm_enabled(v->domain));
+            return;
+        }
+
+        /* Interrupts for the nested guest are already
+         * in the vmcb.
+         */
+        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
+        {
+            rc = nestedhvm_vcpu_interrupt(v, intack);
+            switch (rc) {
+            case NESTEDHVM_INTR_NOTINTERCEPTED:
+                /* Inject interrupt into 2nd level guest directly. */
+                break;
+            case NESTEDHVM_INTR_NOTHANDLED:
+            case NESTEDHVM_INTR_FORCEVMEXIT:
+                return;
+            case NESTEDHVM_INTR_MASKED:
+                /* Guest already enabled an interrupt window. */
+                return;
+            default:
+                panic("%s: nestedhvm_vcpu_interrupt can't handle value 0x%x\n",
+                    __func__, rc);
+            }
+        }
+
         /*
          * Pending IRQs must be delayed if:
          * 1. An event is already pending. This is despite the fact that SVM
@@ -140,8 +172,7 @@ asmlinkage void svm_intr_assist(void)
          *    have cleared the interrupt out of the IRR.
          * 2. The IRQ is masked.
          */
-        if ( unlikely(vmcb->eventinj.fields.v) ||
-             hvm_interrupt_blocked(v, intack) )
+        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
         {
             enable_intr_window(v, intack);
             return;
diff -r 87774fa72e28 -r 8649a9cd1da9 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -24,6 +24,30 @@
 #include
 #include
 #include <asm/paging.h> /* paging_mode_hap */
+#include <xen/event.h> /* for local_event_delivery_(en|dis)able */
+
+static int
+nestedsvm_vcpu_clgi(struct vcpu *v)
+{
+    struct nestedsvm *svm = vcpu_nestedhvm(v).nh_arch;
+
+    /* Clear the virtual GIF. */
+    svm->ns_gif = 0;
+    local_event_delivery_disable(); /* mask events for PV drivers */
+    return 0;
+}
+
+static int
+nestedsvm_vcpu_stgi(struct vcpu *v)
+{
+    struct nestedsvm *svm = vcpu_nestedhvm(v).nh_arch;
+
+    /* Always set the GIF to make hvm_interrupt_blocked work. */
+    svm->ns_gif = 1;
+
+    local_event_delivery_enable(); /* unmask events for PV drivers */
+    return 0;
+}
 
 /* Interface methods */
 int nsvm_vcpu_initialise(struct vcpu *v)
@@ -107,7 +131,7 @@ int nsvm_vcpu_destroy(struct vcpu *v)
 
 int nsvm_vcpu_reset(struct vcpu *v)
 {
-    return 0;
+    return nestedsvm_vcpu_stgi(v);
 }
 
 static void nsvm_vmcb_loadsave(struct vmcb_struct *from,
@@ -509,6 +533,8 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct c
         return ret;
     }
 
+    nestedsvm_vcpu_stgi(v);
+
     return 0;
 }
 
@@ -518,6 +544,8 @@ nsvm_vcpu_vmexit(struct vcpu *v, struct c
     struct nestedhvm *hvm = &vcpu_nestedhvm(v);
     struct vmcb_struct *ns_vmcb;
 
+    nestedsvm_vcpu_clgi(v);
+
     ns_vmcb = hvm->nh_vmcx;
     ns_vmcb->exitcode = exitcode;
 
@@ -916,6 +944,25 @@ nsvm_vcpu_vmexit_trap(struct vcpu *v, un
     return nsret;
 }
 
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
+{
+    struct nestedhvm *hvm = &vcpu_nestedhvm(v);
+    struct nestedsvm *svm = hvm->nh_arch;
+
+    ASSERT(nestedhvm_enabled(v->domain));
+
+    if ( !nestedsvm_gif_isset(v) )
+        return hvm_intblk_svm_gif;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        if ( svm->ns_hostflags.fields.vintrmask )
+            if ( !svm->ns_hostflags.fields.rflagsif )
+                return hvm_intblk_rflags_ie;
+    }
+
+    return hvm_intblk_none;
+}
+
 /* MSR handling */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
 {
@@ -964,3 +1011,63 @@ int nsvm_wrmsr(struct vcpu *v, unsigned
 
     return ret;
 }
+
+/* Virtual GIF */
+bool_t
+nestedsvm_gif_isset(struct vcpu *v)
+{
+    struct nestedsvm *svm = vcpu_nestedhvm(v).nh_arch;
+
+    return (!!svm->ns_gif);
+}
+
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    int ret;
+    unsigned int inst_len;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 )
+        return;
+
+    ret = nestedsvm_vcpu_stgi(v);
+    if (ret)
+        /* On failure, nestedsvm_vcpu_stgi injected an exception,
+         * most likely a #GP or #UD.
+         */
+        return;
+
+    __update_guest_eip(regs, inst_len);
+}
+
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    int ret;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned int inst_len;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 )
+        return;
+
+    ret = nestedsvm_vcpu_clgi(v);
+    if (ret)
+        /* On failure, nestedsvm_vcpu_clgi injected an exception,
+         * most likely a #GP or #UD.
+         */
+        return;
+
+    /* After a CLGI, no interrupts can arrive. */
+    vmcb->vintr.fields.irq = 0;
+    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
+
+    __update_guest_eip(regs, inst_len);
+}
diff -r 87774fa72e28 -r 8649a9cd1da9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -78,8 +78,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *
 
 static bool_t amd_erratum383_found __read_mostly;
 
-static void inline __update_guest_eip(
-    struct cpu_user_regs *regs, unsigned int inst_len)
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
 {
     struct vcpu *curr = current;
 
@@ -1549,6 +1548,7 @@ static struct hvm_function_table __read_
     .nhvm_vmcx_guest_intercepts_exitcode = nsvm_vmcb_guest_intercepts_exitcode,
     .nhvm_vmcx_prepare4vmexit = nsvm_vmcb_prepare4vmexit,
     .nhvm_vmcx_isvalid = nsvm_vmcb_isvalid,
+    .nhvm_intr_blocked = nsvm_intr_blocked,
 };
 
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
@@ -1816,7 +1816,11 @@ asmlinkage void svm_vmexit_handler(struc
         svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
         break;
     case VMEXIT_STGI:
+        svm_vmexit_do_stgi(regs, v);
+        break;
     case VMEXIT_CLGI:
+        svm_vmexit_do_clgi(regs, v);
+        break;
     case VMEXIT_SKINIT:
         hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;
diff -r 87774fa72e28 -r 8649a9cd1da9 xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -24,6 +24,7 @@
 #include
 
 struct nestedsvm {
+    bool_t ns_gif;
     uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */
 
     /* Cached real intercepts of the nested guest */
@@ -69,11 +70,17 @@ int nsvm_vmcb_prepare4vmexit(struct vcpu
 int nsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr);
 int nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
                           int errcode, unsigned long cr2);
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 
 /* MSRs */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
 int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 
+/* Interrupts, vGIF */
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
+bool_t nestedsvm_gif_isset(struct vcpu *v);
+
 #endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */
 
 /*
diff -r 87774fa72e28 -r 8649a9cd1da9 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -61,6 +61,7 @@ static inline void svm_vmsave(void *vmcb
 }
 
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
 
 extern u32 svm_feature_flags;
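
For reference, below is a standalone sketch (not part of the patch) of the
blocking decision that nsvm_intr_blocked implements: the virtual GIF gates
all interrupt delivery, and a vcpu running the l2 guest with V_INTR_MASKING
set additionally honours the l1 guest's RFLAGS.IF. The type and function
names (nestedsvm_sketch, intr_blocked) are invented for illustration, and
the bool fields are simplified stand-ins for ns_gif and ns_hostflags.

#include <stdio.h>
#include <stdbool.h>

enum hvm_intblk {
    hvm_intblk_none,       /* not blocked */
    hvm_intblk_rflags_ie,  /* blocked: guest RFLAGS.IF is clear */
    hvm_intblk_svm_gif,    /* blocked: virtual GIF is clear */
};

/* Simplified stand-in for the per-vcpu nested state used by the patch. */
struct nestedsvm_sketch {
    bool gif;        /* virtual global interrupt flag (ns_gif) */
    bool guestmode;  /* vcpu is currently running the l2 guest */
    bool vintrmask;  /* l1 set V_INTR_MASKING for the l2 guest */
    bool rflags_if;  /* the l1 guest's RFLAGS.IF */
};

/* Mirrors the order of checks in nsvm_intr_blocked(). */
static enum hvm_intblk intr_blocked(const struct nestedsvm_sketch *s)
{
    /* GIF clear blocks everything, regardless of guest mode. */
    if ( !s->gif )
        return hvm_intblk_svm_gif;

    /* In guest mode, V_INTR_MASKING makes RFLAGS.IF of l1 authoritative. */
    if ( s->guestmode && s->vintrmask && !s->rflags_if )
        return hvm_intblk_rflags_ie;

    return hvm_intblk_none;
}

int main(void)
{
    struct nestedsvm_sketch s = { .gif = false };
    printf("GIF clear:   %d (expect %d)\n", intr_blocked(&s), hvm_intblk_svm_gif);

    s = (struct nestedsvm_sketch){ .gif = true, .guestmode = true,
                                   .vintrmask = true, .rflags_if = false };
    printf("l1 IF clear: %d (expect %d)\n", intr_blocked(&s), hvm_intblk_rflags_ie);

    s.rflags_if = true;
    printf("deliverable: %d (expect %d)\n", intr_blocked(&s), hvm_intblk_none);
    return 0;
}

The GIF check comes first for the same reason svm_intr_assist returns as
soon as hvm_interrupt_blocked reports hvm_intblk_svm_gif: while the GIF is
clear, nothing may be injected, so the nested dispatch is never consulted.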