# HG changeset patch # User cegger # Date 1289584012 -3600 Implement SVM specific interrupt handling Signed-off-by: Christoph Egger diff -r 3bfc06e2e41a -r b18448601670 xen/arch/x86/hvm/svm/intr.c --- a/xen/arch/x86/hvm/svm/intr.c +++ b/xen/arch/x86/hvm/svm/intr.c @@ -33,6 +33,7 @@ #include #include #include +#include /* for nestedhvm_vcpu_in_guestmode */ #include #include #include @@ -58,7 +59,7 @@ static void svm_inject_nmi(struct vcpu * */ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET; } - + static void svm_inject_extint(struct vcpu *v, int vector) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; @@ -72,7 +73,7 @@ static void svm_inject_extint(struct vcp ASSERT(vmcb->eventinj.fields.v == 0); vmcb->eventinj = event; } - + static void enable_intr_window(struct vcpu *v, struct hvm_intack intack) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; @@ -80,6 +81,20 @@ static void enable_intr_window(struct vc ASSERT(intack.source != hvm_intsrc_none); + if ( nestedhvm_enabled(v->domain) ) { + struct nestedvcpu *nv = &vcpu_nestedhvm(v); + if ( nv->nv_hostflags.fields.vmentry_pending ) { + struct vmcb_struct *gvmcb = nv->nv_vmcx; + + /* check if l1 guest injects interrupt into l2 guest via vintr. + * return here or l2 guest loses interrupts, otherwise. + */ + ASSERT(gvmcb != NULL); + if ( gvmcb->vintr.fields.irq ) + return; + } + } + HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source, vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1); @@ -117,6 +132,7 @@ asmlinkage void svm_intr_assist(void) struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; struct hvm_intack intack; + enum hvm_intblk intblk; /* Crank the handle on interrupt state. 
*/ pt_update_irq(v); @@ -126,6 +142,39 @@ asmlinkage void svm_intr_assist(void) if ( likely(intack.source == hvm_intsrc_none) ) return; + intblk = hvm_interrupt_blocked(v, intack); + if ( intblk == hvm_intblk_svm_gif ) { + ASSERT(nestedhvm_enabled(v->domain)); + return; + } + + /* Interrupts for the nested guest are already + * in the vmcb. + */ + if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) ) + { + int rc; + + /* l2 guest was running when an interrupt for + * the l1 guest occurred. + */ + rc = nestedsvm_vcpu_interrupt(v, intack); + switch (rc) { + case NSVM_INTR_NOTINTERCEPTED: + /* Inject interrupt into 2nd level guest directly. */ + break; + case NSVM_INTR_NOTHANDLED: + case NSVM_INTR_FORCEVMEXIT: + return; + case NSVM_INTR_MASKED: + /* Guest already enabled an interrupt window. */ + return; + default: + panic("%s: nestedsvm_vcpu_interrupt can't handle value 0x%x\n", + __func__, rc); + } + } + /* * Pending IRQs must be delayed if: * 1. An event is already pending. This is despite the fact that SVM @@ -140,8 +189,7 @@ asmlinkage void svm_intr_assist(void) * have cleared the interrupt out of the IRR. * 2. The IRQ is masked. 
*/ - if ( unlikely(vmcb->eventinj.fields.v) || - hvm_interrupt_blocked(v, intack) ) + if ( unlikely(vmcb->eventinj.fields.v) || intblk ) { enable_intr_window(v, intack); return; diff -r 3bfc06e2e41a -r b18448601670 xen/arch/x86/hvm/svm/nestedsvm.c --- a/xen/arch/x86/hvm/svm/nestedsvm.c +++ b/xen/arch/x86/hvm/svm/nestedsvm.c @@ -24,6 +24,30 @@ #include #include #include /* paging_mode_hap */ +#include /* for local_event_delivery_(en|dis)able */ + +static int +nestedsvm_vcpu_clgi(struct vcpu *v) +{ + struct nestedsvm *svm = &vcpu_nestedsvm(v); + + /* clear gif flag */ + svm->ns_gif = 0; + local_event_delivery_disable(); /* mask events for PV drivers */ + return 0; +} + +static int +nestedsvm_vcpu_stgi(struct vcpu *v) +{ + struct nestedsvm *svm = &vcpu_nestedsvm(v); + + /* Always set the GIF to make hvm_interrupt_blocked work. */ + svm->ns_gif = 1; + + local_event_delivery_enable(); /* unmask events for PV drivers */ + return 0; +} static int nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr) @@ -134,7 +158,7 @@ int nsvm_vcpu_reset(struct vcpu *v) svm->ns_vmexit.exitinfo1 = 0; svm->ns_vmexit.exitinfo2 = 0; - return 0; + return nestedsvm_vcpu_stgi(v); } static void nsvm_vmcb_loadsave(struct vmcb_struct *from, @@ -532,6 +556,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct return ret; } + nestedsvm_vcpu_stgi(v); + return 0; } @@ -576,6 +602,8 @@ nsvm_vcpu_vmexit(struct vcpu *v, struct struct nestedsvm *svm = &vcpu_nestedsvm(v); struct vmcb_struct *ns_vmcb; + nestedsvm_vcpu_clgi(v); + ns_vmcb = nv->nv_vmcx; if (nv->nv_hostflags.fields.vmexit_pending) { @@ -942,6 +970,32 @@ nsvm_vmcb_hap_enabled(struct vcpu *v) return vcpu_nestedsvm(v).ns_hap_enabled; } +enum hvm_intblk nsvm_intr_blocked(struct vcpu *v) +{ + struct nestedsvm *svm = &vcpu_nestedsvm(v); + struct nestedvcpu *nv = &vcpu_nestedhvm(v); + + ASSERT(nestedhvm_enabled(v->domain)); + + if ( !nestedsvm_gif_isset(v) ) + return hvm_intblk_svm_gif; + + if ( nestedhvm_vcpu_in_guestmode(v) ) { + if ( 
svm->ns_hostflags.fields.vintrmask ) + if ( !svm->ns_hostflags.fields.rflagsif ) + return hvm_intblk_rflags_ie; + } + + if ( nv->nv_hostflags.fields.vmexit_pending ) { + /* hvm_inject_exception() must have run before. + * exceptions have higher priority than interrupts. + */ + return hvm_intblk_rflags_ie; + } + + return hvm_intblk_none; +} + /* MSR handling */ int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content) { @@ -1182,4 +1236,110 @@ asmlinkage void nsvm_vcpu_switch(struct } } +/* Interrupts, Virtual GIF */ +int +nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) +{ + int ret; + enum hvm_intblk intr; + uint64_t exitcode = VMEXIT_INTR; + uint64_t exitinfo2 = 0; + struct nestedvcpu *nv = &vcpu_nestedhvm(v); + struct nestedsvm *svm = &vcpu_nestedsvm(v); + ASSERT(nestedhvm_vcpu_in_guestmode(v)); + intr = nhvm_interrupt_blocked(v); + if ( intr != hvm_intblk_none ) + return NSVM_INTR_MASKED; + + switch (intack.source) { + case hvm_intsrc_pic: + case hvm_intsrc_lapic: + exitcode = VMEXIT_INTR; + exitinfo2 = intack.vector; + break; + case hvm_intsrc_nmi: + exitcode = VMEXIT_NMI; + exitinfo2 = intack.vector; + break; + case hvm_intsrc_mce: + exitcode = VMEXIT_EXCEPTION_MC; + exitinfo2 = intack.vector; + break; + case hvm_intsrc_none: + return NSVM_INTR_NOTHANDLED; + default: + BUG(); + } + + svm->ns_vmexit.exitcode = exitcode; + svm->ns_vmexit.exitinfo1 = intack.source; + svm->ns_vmexit.exitinfo2 = exitinfo2; + ret = nsvm_vmcb_guest_intercepts_exitcode(v, + guest_cpu_user_regs(), exitcode); + if (ret) { + nv->nv_hostflags.fields.vmexit_pending = 1; + return NSVM_INTR_FORCEVMEXIT; + } + + return NSVM_INTR_NOTINTERCEPTED; +} + +bool_t +nestedsvm_gif_isset(struct vcpu *v) +{ + struct nestedsvm *svm = &vcpu_nestedsvm(v); + + return (!!svm->ns_gif); +} + +void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v) +{ + int ret; + unsigned int inst_len; + + if ( !nestedhvm_enabled(v->domain) ) { + 
hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0); + return; + } + + if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 ) + return; + + ret = nestedsvm_vcpu_stgi(v); + if (ret) + /* On failure, nestedsvm_vcpu_stgi injected an exception, + * most likely a #GP or #UD. + */ + return; + + __update_guest_eip(regs, inst_len); +} + +void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v) +{ + int ret; + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; + unsigned int inst_len; + + if ( !nestedhvm_enabled(v->domain) ) { + hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0); + return; + } + + if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 ) + return; + + ret = nestedsvm_vcpu_clgi(v); + if (ret) + /* On failure, nestedsvm_vcpu_clgi injected an exception, + * most likely a #GP or #UD. + */ + return; + + /* After a CLGI no interrupts should come */ + vmcb->vintr.fields.irq = 0; + vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR; + + __update_guest_eip(regs, inst_len); +} diff -r 3bfc06e2e41a -r b18448601670 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -78,8 +78,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(void * static bool_t amd_erratum383_found __read_mostly; -static void inline __update_guest_eip( - struct cpu_user_regs *regs, unsigned int inst_len) +void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len) { struct vcpu *curr = current; @@ -1540,6 +1539,7 @@ static struct hvm_function_table __read_ .nhvm_vcpu_asid = nsvm_vcpu_asid, .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap, .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled, + .nhvm_intr_blocked = nsvm_intr_blocked, }; asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) @@ -1805,7 +1805,11 @@ asmlinkage void svm_vmexit_handler(struc svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax); break; case VMEXIT_STGI: + svm_vmexit_do_stgi(regs, v); + break; case VMEXIT_CLGI: + 
svm_vmexit_do_clgi(regs, v); + break; case VMEXIT_SKINIT: hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0); break; diff -r 3bfc06e2e41a -r b18448601670 xen/include/asm-x86/hvm/svm/nestedsvm.h --- a/xen/include/asm-x86/hvm/svm/nestedsvm.h +++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h @@ -23,7 +23,12 @@ #include #include +enum hvm_intblk_svm { + hvm_intblk_svm_gif = hvm_intblk_arch, /* GIF cleared */ +}; + struct nestedsvm { + bool_t ns_gif; uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */ /* Cached real intercepts of the nested guest */ @@ -101,11 +106,23 @@ int nsvm_vmcb_guest_intercepts_exitcode( struct cpu_user_regs *regs, uint64_t exitcode); int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr); bool_t nsvm_vmcb_hap_enabled(struct vcpu *v); +enum hvm_intblk nsvm_intr_blocked(struct vcpu *v); /* MSRs */ int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content); int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content); +/* Interrupts, vGIF */ +void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v); +void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v); +bool_t nestedsvm_gif_isset(struct vcpu *v); + +#define NSVM_INTR_NOTHANDLED 3 +#define NSVM_INTR_NOTINTERCEPTED 2 +#define NSVM_INTR_FORCEVMEXIT 1 +#define NSVM_INTR_MASKED 0 +int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack); + #endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */ /* diff -r 3bfc06e2e41a -r b18448601670 xen/include/asm-x86/hvm/svm/svm.h --- a/xen/include/asm-x86/hvm/svm/svm.h +++ b/xen/include/asm-x86/hvm/svm/svm.h @@ -61,6 +61,7 @@ static inline void svm_vmsave(void *vmcb } unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr); +void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len); extern u32 svm_feature_flags;