x86: don't hold off NMI delivery when MCE is masked

Likely through copy'n'paste, all three instances of guest MCE
processing jumped to the wrong place (the place the NMI processing
code correctly jumps to) when MCEs are temporarily masked (because
one is currently being processed by the guest). A nested, unmasked
NMI should, however, get delivered immediately.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -214,6 +214,7 @@ test_all_events:
         jnz  process_softirqs
         testb $1,VCPU_mce_pending(%ebx)
         jnz  process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%ebx)
         jnz  process_nmi
 test_guest_events:
@@ -243,7 +244,7 @@ process_softirqs:
         /* %ebx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
-        jnz  test_guest_events
+        jnz  .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%ebx)
         call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -103,6 +103,7 @@ ENTRY(compat_test_all_events)
         jnz  compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz  compat_process_mce
+.Lcompat_test_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz  compat_process_nmi
 compat_test_guest_events:
@@ -133,7 +134,7 @@ compat_process_softirqs:
         /* %rbx: struct vcpu */
 compat_process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  compat_test_guest_events
+        jnz  .Lcompat_test_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -192,6 +192,7 @@ test_all_events:
         jnz  process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz  process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz  process_nmi
 test_guest_events:
@@ -220,7 +221,7 @@ process_softirqs:
         /* %rbx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  test_guest_events
+        jnz  .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
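
Note: for readers less familiar with the entry.S flow, the dispatch
ordering the patch restores can be sketched in C roughly as below.
This is an illustrative standalone sketch, not Xen source: the struct
layout, constant values, and helper output are simplified stand-ins
for the real vcpu fields and trapbounce calls referenced by the
VCPU_* asm offsets above.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; real values/fields live in the Xen tree. */
#define VCPU_TRAP_MCE 1
#define VCPU_TRAP_NMI 2

struct vcpu {
    bool mce_pending;
    bool nmi_pending;
    unsigned char async_exception_mask; /* bit set => event masked */
};

/* One pass through test_all_events.  The bug: when a pending MCE
 * turned out to be masked, the old code jumped straight to plain
 * guest event delivery, skipping the NMI check.  The fix falls
 * through to the NMI test instead ("goto test_guest_nmi"). */
static void test_all_events(struct vcpu *v)
{
    if (v->mce_pending) {
        if (v->async_exception_mask & (1u << VCPU_TRAP_MCE))
            goto test_guest_nmi;    /* was: goto guest event delivery */
        v->mce_pending = false;
        puts("deliver MCE to guest");
        return;
    }

test_guest_nmi:
    if (v->nmi_pending &&
        !(v->async_exception_mask & (1u << VCPU_TRAP_NMI))) {
        v->nmi_pending = false;
        puts("deliver NMI to guest");
        return;
    }

    puts("deliver ordinary guest events");
}

int main(void)
{
    /* MCE pending but masked, NMI pending and unmasked: the fixed
     * flow must still deliver the NMI on this pass. */
    struct vcpu v = {
        .mce_pending = true,
        .nmi_pending = true,
        .async_exception_mask = 1u << VCPU_TRAP_MCE,
    };
    test_all_events(&v);    /* prints "deliver NMI to guest" */
    return 0;
}

With the old jump target, the sketch's masked-MCE case would bypass
the NMI test entirely, deferring an unmasked nested NMI for no reason.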