# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1259676120 0
# Node ID 14d9fb7a326211eda1dbda07eb995e0b04cf678d
# Parent 939933401650d5f5e221cfa8005d3872b16ef8f0
x86: fix MCE/NMI injection
This attempts to address all the concerns raised in
http://lists.xensource.com/archives/html/xen-devel/2009-11/msg01195.html,
but I'm nevertheless still not convinced that all aspects of the
injection handling really work reliably. In particular, while the
patch here, on top of the fixes for the problems mentioned in the
referenced mail, also adds code to keep send_guest_trap() from
injecting multiple events at a time, I don't think this is the right
mechanism - it should be possible to handle NMI/MCE nested within
each other.
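
For illustration only (not part of the patch): the claim/release pattern
that send_guest_trap() now uses boils down to roughly the user-space
sketch below. The names, the plain global standing in for the per-CPU
softirq_trap slot, and the GCC __sync builtins standing in for Xen's
cmpxchgptr()/test_and_set_bool() are all stand-ins, not the real
identifiers.

/* Minimal user-space sketch of the claim/release pattern in
 * send_guest_trap(): the per-CPU softirq_trap slot is claimed with a
 * compare-and-swap before the pending flag is set, and released again
 * if the event turns out to be already pending. */
#include <stdbool.h>
#include <stdio.h>

struct softirq_trap_slot {
    void *vcpu;               /* NULL when the slot is free */
};

static struct softirq_trap_slot this_cpu_slot;

static bool try_inject(void *v, bool *pending_flag)
{
    /* Claim the per-CPU slot; fail (the patch returns -EBUSY) if another
     * injection is still in flight on this CPU. */
    if (__sync_val_compare_and_swap(&this_cpu_slot.vcpu, NULL, v) != NULL)
        return false;

    if (!__sync_lock_test_and_set(pending_flag, true)) {
        /* First injection: keep the slot claimed until the softirq
         * handler (nmi_mce_softirq() in the patch) clears it. */
        return true;
    }

    /* Event already pending: release the slot again. */
    this_cpu_slot.vcpu = NULL;
    return false;
}

int main(void)
{
    static bool nmi_pending;
    int dummy_vcpu;

    /* First attempt claims the slot and marks the NMI pending. */
    printf("first attempt accepted:  %d\n", try_inject(&dummy_vcpu, &nmi_pending));
    /* Second attempt fails: the slot is still claimed by the first one. */
    printf("second attempt accepted: %d\n", try_inject(&dummy_vcpu, &nmi_pending));
    return 0;
}
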
Another fix on top of the ones for the earlier described problems is
that the vCPU affinity restore logic didn't account for software-injected
NMIs - these never set cpu_affinity_tmp, but since that mask would most
likely differ from cpu_affinity, the affinity would nevertheless have
been restored (to a potentially random value).
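
Roughly, the corrected restore condition in async_exception_cleanup()
behaves as in the small model below. This is a sketch only: cpumask_t is
modelled as a plain bitmap, and should_restore() is an invented name
standing in for the cpus_empty()/cpus_equal() test in the patch.

/* User-space model of the corrected affinity-restore condition: the
 * temporary mask must be non-empty (i.e. it was actually saved) and
 * differ from the current affinity before anything is restored. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_model_t;

static bool should_restore(cpumask_model_t tmp, cpumask_model_t cur)
{
    return tmp != 0 &&       /* !cpus_empty(tmp): a mask was saved      */
           tmp != cur;       /* !cpus_equal(tmp, cur): something to undo */
}

int main(void)
{
    /* Software-injected NMI: cpu_affinity_tmp was never set, so no
     * bogus restore happens, even though it differs from the current
     * affinity. */
    printf("sw NMI, tmp unset: restore=%d\n", should_restore(0x0, 0xf));

    /* MCE path: affinity was temporarily pinned to CPU 2, restore it. */
    printf("MCE, tmp saved   : restore=%d\n", should_restore(0xf, 0x4));
    return 0;
}
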
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
xen/arch/x86/cpu/mcheck/mctelem.c | 9 ---
xen/arch/x86/nmi.c | 5 +-
xen/arch/x86/traps.c | 92 ++++++++++++++++++++++++++++++++++---
xen/arch/x86/x86_32/asm-offsets.c | 5 +-
xen/arch/x86/x86_32/entry.S | 22 ++++----
xen/arch/x86/x86_32/traps.c | 11 +---
xen/arch/x86/x86_64/asm-offsets.c | 5 +-
xen/arch/x86/x86_64/compat/entry.S | 22 ++++----
xen/arch/x86/x86_64/compat/traps.c | 10 ----
xen/arch/x86/x86_64/entry.S | 22 ++++----
xen/arch/x86/x86_64/traps.c | 56 ----------------------
xen/common/domain.c | 11 ++--
xen/include/asm-x86/domain.h | 10 ++++
xen/include/asm-x86/system.h | 7 ++
xen/include/asm-x86/traps.h | 2
xen/include/xen/sched.h | 22 +++-----
16 files changed, 173 insertions(+), 138 deletions(-)
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/cpu/mcheck/mctelem.c
--- a/xen/arch/x86/cpu/mcheck/mctelem.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c Tue Dec 01 14:02:00 2009 +0000
@@ -122,15 +122,6 @@ static struct mc_telem_ctl {
/* Lock protecting all processing lists */
static DEFINE_SPINLOCK(processing_lock);
-static void *cmpxchgptr(void *ptr, void *old, void *new)
-{
- unsigned long *ulp = (unsigned long *)ptr;
- unsigned long a = (unsigned long)old;
- unsigned long b = (unsigned long)new;
-
- return (void *)cmpxchg(ulp, a, b);
-}
-
static void mctelem_xchg_head(struct mctelem_ent **headp,
struct mctelem_ent **old,
struct mctelem_ent *new)
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/nmi.c Tue Dec 01 14:02:00 2009 +0000
@@ -475,10 +475,11 @@ static void do_nmi_stats(unsigned char k
((v = d->vcpu[0]) == NULL) )
return;
- if ( v->nmi_pending || (v->trap_priority >= VCPU_TRAP_NMI) )
+ i = v->async_exception_mask & (1 << VCPU_TRAP_NMI);
+ if ( v->nmi_pending || i )
printk("dom0 vpu0: NMI %s%s\n",
v->nmi_pending ? "pending " : "",
- (v->trap_priority >= VCPU_TRAP_NMI) ? "masked " : "");
+ i ? "masked " : "");
else
printk("dom0 vcpu0: NMI neither pending nor masked\n");
}
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/traps.c Tue Dec 01 14:02:00 2009 +0000
@@ -51,6 +51,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
+#include <asm/bitops.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/smp.h>
@@ -2892,6 +2893,82 @@ static void nmi_mce_softirq(void)
* a safe (non-NMI/MCE) context.
*/
vcpu_kick(st->vcpu);
+ st->vcpu = NULL;
+}
+
+void async_exception_cleanup(struct vcpu *curr)
+{
+ int trap;
+
+ if ( !curr->async_exception_mask )
+ return;
+
+ /* Restore affinity. */
+ if ( !cpus_empty(curr->cpu_affinity_tmp) &&
+ !cpus_equal(curr->cpu_affinity_tmp, curr->cpu_affinity) )
+ {
+ vcpu_set_affinity(curr, &curr->cpu_affinity_tmp);
+ cpus_clear(curr->cpu_affinity_tmp);
+ }
+
+ if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
+ trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
+ else
+ for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
+ if ( (curr->async_exception_mask ^
+ curr->async_exception_state(trap).old_mask) == (1 << trap) )
+ break;
+ ASSERT(trap <= VCPU_TRAP_LAST);
+
+ /* inject vMCE to PV_Guest including DOM0. */
+ if ( trap == VCPU_TRAP_MCE )
+ {
+ gdprintk(XENLOG_DEBUG, "MCE: Return from vMCE# trap!\n");
+ if ( curr->vcpu_id == 0 )
+ {
+ struct domain *d = curr->domain;
+
+ if ( !d->arch.vmca_msrs.nr_injection )
+ {
+ printk(XENLOG_WARNING "MCE: ret from vMCE#, "
+ "no injection node\n");
+ goto end;
+ }
+
+ d->arch.vmca_msrs.nr_injection--;
+ if ( !list_empty(&d->arch.vmca_msrs.impact_header) )
+ {
+ struct bank_entry *entry;
+
+ entry = list_entry(d->arch.vmca_msrs.impact_header.next,
+ struct bank_entry, list);
+ gdprintk(XENLOG_DEBUG, "MCE: delete last injection node\n");
+ list_del(&entry->list);
+ }
+ else
+ printk(XENLOG_ERR "MCE: didn't found last injection node\n");
+
+ /* further injection */
+ if ( d->arch.vmca_msrs.nr_injection > 0 &&
+ guest_has_trap_callback(d, 0, TRAP_machine_check) &&
+ !test_and_set_bool(curr->mce_pending) )
+ {
+ int cpu = smp_processor_id();
+ cpumask_t affinity;
+
+ curr->cpu_affinity_tmp = curr->cpu_affinity;
+ cpus_clear(affinity);
+ cpu_set(cpu, affinity);
+ printk(XENLOG_DEBUG "MCE: CPU%d set affinity, old %d\n",
+ cpu, curr->processor);
+ vcpu_set_affinity(curr, &affinity);
+ }
+ }
+ }
+
+end:
+ /* Restore previous asynchronous exception mask. */
+ curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
}
static void nmi_dom0_report(unsigned int reason_idx)
@@ -3255,7 +3332,7 @@ int send_guest_trap(struct domain *d, ui
int send_guest_trap(struct domain *d, uint16_t vcpuid, unsigned int trap_nr)
{
struct vcpu *v;
- struct softirq_trap *st;
+ struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
BUG_ON(d == NULL);
BUG_ON(vcpuid >= d->max_vcpus);
@@ -3263,25 +3340,27 @@ int send_guest_trap(struct domain *d, ui
switch (trap_nr) {
case TRAP_nmi:
+ if ( cmpxchgptr(&st->vcpu, NULL, v) )
+ return -EBUSY;
if ( !test_and_set_bool(v->nmi_pending) ) {
- st = &per_cpu(softirq_trap, smp_processor_id());
- st->domain = dom0;
- st->vcpu = dom0->vcpu[0];
- st->processor = st->vcpu->processor;
+ st->domain = d;
+ st->processor = v->processor;
/* not safe to wake up a vcpu here */
raise_softirq(NMI_MCE_SOFTIRQ);
return 0;
}
+ st->vcpu = NULL;
break;
case TRAP_machine_check:
+ if ( cmpxchgptr(&st->vcpu, NULL, v) )
+ return -EBUSY;
/* We are called by the machine check (exception or polling) handlers
* on the physical CPU that reported a machine check error. */
if ( !test_and_set_bool(v->mce_pending) ) {
- st = &per_cpu(softirq_trap, smp_processor_id());
st->domain = d;
st->vcpu = v;
st->processor = v->processor;
@@ -3290,6 +3369,7 @@ int send_guest_trap(struct domain *d, ui
raise_softirq(NMI_MCE_SOFTIRQ);
return 0;
}
+ st->vcpu = NULL;
break;
}
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_32/asm-offsets.c Tue Dec 01 14:02:00 2009 +0000
@@ -68,8 +68,9 @@ void __dummy__(void)
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
- OFFSET(VCPU_old_trap_priority, struct vcpu, old_trap_priority);
- OFFSET(VCPU_trap_priority, struct vcpu, trap_priority);
+ OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
+ OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
+ OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_32/entry.S Tue Dec 01 14:02:00 2009 +0000
@@ -259,31 +259,33 @@ process_softirqs:
ALIGN
/* %ebx: struct vcpu */
process_mce:
- cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
- jae test_guest_events
+ testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
+ jnz test_guest_events
sti
movb $0,VCPU_mce_pending(%ebx)
call set_guest_machinecheck_trapbounce
test %eax,%eax
jz test_all_events
- movw VCPU_trap_priority(%ebx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%ebx) # iret hypercall
- movw $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
+ movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
+ movb %dl,VCPU_mce_old_mask(%ebx) # iret hypercall
+ orl $1 << VCPU_TRAP_MCE,%edx
+ movb %dl,VCPU_async_exception_mask(%ebx)
jmp process_trap
ALIGN
/* %ebx: struct vcpu */
process_nmi:
- cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
- jae test_guest_events
+ cmpw $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%ebx)
+ jnz test_guest_events
sti
movb $0,VCPU_nmi_pending(%ebx)
call set_guest_nmi_trapbounce
test %eax,%eax
jz test_all_events
- movw VCPU_trap_priority(%ebx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%ebx) # iret hypercall
- movw $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
+ movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
+ movb %dl,VCPU_nmi_old_mask(%ebx) # iret hypercall
+ orl $1 << VCPU_TRAP_NMI,%edx
+ movb %dl,VCPU_async_exception_mask(%ebx)
/* FALLTHROUGH */
process_trap:
leal VCPU_trap_bounce(%ebx),%edx
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_32/traps.c Tue Dec 01 14:02:00 2009 +0000
@@ -13,6 +13,7 @@
#include <xen/nmi.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
+#include <asm/traps.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
@@ -271,16 +272,10 @@ unsigned long do_iret(void)
goto exit_and_crash;
}
- /* Restore affinity. */
- if ((v->trap_priority >= VCPU_TRAP_NMI)
- && !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity))
- vcpu_set_affinity(v, &v->cpu_affinity_tmp);
-
- /* Restore previous trap priority */
- v->trap_priority = v->old_trap_priority;
-
/* Restore upcall mask from supplied EFLAGS.IF. */
vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
+
+ async_exception_cleanup(v);
/*
* The hypercall exit path will overwrite EAX with this return
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_64/asm-offsets.c Tue Dec 01 14:02:00 2009 +0000
@@ -93,8 +93,9 @@ void __dummy__(void)
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
- OFFSET(VCPU_old_trap_priority, struct vcpu, old_trap_priority);
- OFFSET(VCPU_trap_priority, struct vcpu, trap_priority);
+ OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
+ OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
+ OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_64/compat/entry.S
--- a/xen/arch/x86/x86_64/compat/entry.S Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_64/compat/entry.S Tue Dec 01 14:02:00 2009 +0000
@@ -132,31 +132,33 @@ compat_process_softirqs:
ALIGN
/* %rbx: struct vcpu */
compat_process_mce:
- cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
- jae compat_test_guest_events
+ testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
+ jnz compat_test_guest_events
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
testl %eax,%eax
jz compat_test_all_events
- movw VCPU_trap_priority(%rbx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
- movw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
+ movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+ movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
+ orl $1 << VCPU_TRAP_MCE,%edx
+ movb %dl,VCPU_async_exception_mask(%rbx)
jmp compat_process_trap
ALIGN
/* %rbx: struct vcpu */
compat_process_nmi:
- cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
- jae compat_test_guest_events
+ cmpw $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
+ jnz compat_test_guest_events
sti
movb $0,VCPU_nmi_pending(%rbx)
call set_guest_nmi_trapbounce
testl %eax,%eax
jz compat_test_all_events
- movw VCPU_trap_priority(%rbx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
- movw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
+ movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+ movb %dl,VCPU_nmi_old_mask(%rbx) # iret hypercall
+ orl $1 << VCPU_TRAP_NMI,%edx
+ movb %dl,VCPU_async_exception_mask(%rbx)
/* FALLTHROUGH */
compat_process_trap:
leaq VCPU_trap_bounce(%rbx),%rdx
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_64/compat/traps.c
--- a/xen/arch/x86/x86_64/compat/traps.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_64/compat/traps.c Tue Dec 01 14:02:00 2009 +0000
@@ -147,16 +147,10 @@ unsigned int compat_iret(void)
else
regs->_esp += 16;
- /* Restore affinity. */
- if ((v->trap_priority >= VCPU_TRAP_NMI)
- && !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity))
- vcpu_set_affinity(v, &v->cpu_affinity_tmp);
-
- /* Restore previous trap priority */
- v->trap_priority = v->old_trap_priority;
-
/* Restore upcall mask from supplied EFLAGS.IF. */
vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
+
+ async_exception_cleanup(v);
/*
* The hypercall exit path will overwrite EAX with this return
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_64/entry.S Tue Dec 01 14:02:00 2009 +0000
@@ -234,31 +234,33 @@ process_softirqs:
ALIGN
/* %rbx: struct vcpu */
process_mce:
- cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
- jae test_guest_events
+ testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
+ jnz test_guest_events
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
test %eax,%eax
jz test_all_events
- movw VCPU_trap_priority(%rbx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
- movw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
+ movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+ movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
+ orl $1 << VCPU_TRAP_MCE,%edx
+ movb %dl,VCPU_async_exception_mask(%rbx)
jmp process_trap
ALIGN
/* %rbx: struct vcpu */
process_nmi:
- cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
- jae test_guest_events
+ cmpw $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
+ jnz test_guest_events
sti
movb $0,VCPU_nmi_pending(%rbx)
call set_guest_nmi_trapbounce
test %eax,%eax
jz test_all_events
- movw VCPU_trap_priority(%rbx),%dx # safe priority for the
- movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
- movw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
+ movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+ movb %dl,VCPU_nmi_old_mask(%rbx) # iret hypercall
+ orl $1 << VCPU_TRAP_NMI,%edx
+ movb %dl,VCPU_async_exception_mask(%rbx)
/* FALLTHROUGH */
process_trap:
leaq VCPU_trap_bounce(%rbx),%rdx
diff -r 939933401650 -r 14d9fb7a3262 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/arch/x86/x86_64/traps.c Tue Dec 01 14:02:00 2009 +0000
@@ -270,9 +270,6 @@ unsigned long do_iret(void)
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct iret_context iret_saved;
struct vcpu *v = current;
- struct domain *d = v->domain;
- struct bank_entry *entry;
- int cpu = smp_processor_id();
if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
sizeof(iret_saved))) )
@@ -308,59 +305,10 @@ unsigned long do_iret(void)
regs->rcx = iret_saved.rcx;
}
- /* Restore affinity. */
- if ((v->trap_priority >= VCPU_TRAP_NMI)
- && !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity))
- vcpu_set_affinity(v, &v->cpu_affinity_tmp);
-
- /* inject vMCE to PV_Guest including DOM0. */
- if (v->trap_priority >= VCPU_TRAP_NMI) {
- printk(KERN_DEBUG "MCE: Return from vMCE# trap!\n");
- if ( v->vcpu_id == 0 ) {
- if ( !d->arch.vmca_msrs.nr_injection ) {
- printk(KERN_WARNING "MCE: Ret from vMCE#, "
- "No injection Node\n");
- goto end;
- }
-
- d->arch.vmca_msrs.nr_injection--;
- if (!list_empty(&d->arch.vmca_msrs.impact_header)) {
- entry = list_entry(d->arch.vmca_msrs.impact_header.next,
- struct bank_entry, list);
- printk(KERN_DEBUG "MCE: Delete last injection Node\n");
- list_del(&entry->list);
- }
- else
- printk(KERN_DEBUG "MCE: Not found last injection "
- "Node, something Wrong!\n");
-
- /* futher injection*/
- if ( d->arch.vmca_msrs.nr_injection > 0) {
- if ( d->arch.vmca_msrs.nr_injection > 0 &&
- guest_has_trap_callback(d, v->vcpu_id,
- TRAP_machine_check) &&
- !test_and_set_bool(dom0->vcpu[0]->mce_pending)) {
- cpumask_t affinity;
-
- dom0->vcpu[0]->cpu_affinity_tmp =
- dom0->vcpu[0]->cpu_affinity;
- cpus_clear(affinity);
- cpu_set(cpu, affinity);
- printk(KERN_DEBUG "MCE: CPU%d set affinity, old %d\n", cpu,
- dom0->vcpu[0]->processor);
- vcpu_set_affinity(dom0->vcpu[0], &affinity);
- vcpu_kick(dom0->vcpu[0]);
- }
- }
- }
- } /* end of outer-if */
-
-end:
- /* Restore previous trap priority */
- v->trap_priority = v->old_trap_priority;
-
/* Restore upcall mask from supplied EFLAGS.IF. */
vcpu_info(v, evtchn_upcall_mask) = !(iret_saved.rflags & X86_EFLAGS_IF);
+
+ async_exception_cleanup(v);
/* Saved %rax gets written back to regs->rax in entry.S. */
return iret_saved.rax;
diff -r 939933401650 -r 14d9fb7a3262 xen/common/domain.c
--- a/xen/common/domain.c Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/common/domain.c Tue Dec 01 14:02:00 2009 +0000
@@ -726,10 +726,11 @@ void vcpu_reset(struct vcpu *v)
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
v->is_initialised = 0;
- v->nmi_pending = 0;
- v->mce_pending = 0;
- v->old_trap_priority = VCPU_TRAP_NONE;
- v->trap_priority = VCPU_TRAP_NONE;
+#ifdef VCPU_TRAP_LAST
+ v->async_exception_mask = 0;
+ memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
+#endif
+ cpus_clear(v->cpu_affinity_tmp);
clear_bit(_VPF_blocked, &v->pause_flags);
domain_unlock(v->domain);
@@ -855,6 +856,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
break;
+#ifdef VCPU_TRAP_NMI
case VCPUOP_send_nmi:
if ( !guest_handle_is_null(arg) )
return -EINVAL;
@@ -863,6 +865,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
vcpu_kick(v);
break;
+#endif
default:
rc = arch_do_vcpu_op(cmd, v, arg);
diff -r 939933401650 -r 14d9fb7a3262 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/include/asm-x86/domain.h Tue Dec 01 14:02:00 2009 +0000
@@ -17,6 +17,16 @@
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
+
+#define VCPU_TRAP_NMI 1
+#define VCPU_TRAP_MCE 2
+#define VCPU_TRAP_LAST VCPU_TRAP_MCE
+
+#define nmi_state async_exception_state(VCPU_TRAP_NMI)
+#define mce_state async_exception_state(VCPU_TRAP_MCE)
+
+#define nmi_pending nmi_state.pending
+#define mce_pending mce_state.pending
struct trap_bounce {
uint32_t error_code;
diff -r 939933401650 -r 14d9fb7a3262 xen/include/asm-x86/system.h
--- a/xen/include/asm-x86/system.h Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/include/asm-x86/system.h Tue Dec 01 14:02:00 2009 +0000
@@ -133,6 +133,13 @@ static always_inline unsigned long __cmp
}
#define __HAVE_ARCH_CMPXCHG
+
+#define cmpxchgptr(ptr,o,n) ({ \
+ const __typeof__(**(ptr)) *__o = (o); \
+ __typeof__(**(ptr)) *__n = (n); \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)__o, \
+ (unsigned long)__n,sizeof(*(ptr)))); \
+})
/*
* Both Intel and AMD agree that, from a programmer's viewpoint:
diff -r 939933401650 -r 14d9fb7a3262 xen/include/asm-x86/traps.h
--- a/xen/include/asm-x86/traps.h Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/include/asm-x86/traps.h Tue Dec 01 14:02:00 2009 +0000
@@ -29,6 +29,8 @@ struct cpu_user_regs;
struct cpu_user_regs;
extern void machine_check_vector(struct cpu_user_regs *regs, long error_code);
+
+void async_exception_cleanup(struct vcpu *);
/**
* guest_has_trap_callback
diff -r 939933401650 -r 14d9fb7a3262 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Tue Dec 01 13:59:47 2009 +0000
+++ b/xen/include/xen/sched.h Tue Dec 01 14:02:00 2009 +0000
@@ -113,20 +113,16 @@ struct vcpu
bool_t is_initialised;
/* Currently running on a CPU? */
bool_t is_running;
- /* MCE callback pending for this VCPU? */
- bool_t mce_pending;
- /* NMI callback pending for this VCPU? */
- bool_t nmi_pending;
-
- /* Higher priorized traps may interrupt lower priorized traps,
- * lower priorized traps wait until higher priorized traps finished.
- * Note: This concept is known as "system priority level" (spl)
- * in the UNIX world. */
- uint16_t old_trap_priority;
- uint16_t trap_priority;
+
+#ifdef VCPU_TRAP_LAST
#define VCPU_TRAP_NONE 0
-#define VCPU_TRAP_NMI 1
-#define VCPU_TRAP_MCE 2
+ struct {
+ bool_t pending;
+ uint8_t old_mask;
+ } async_exception_state[VCPU_TRAP_LAST];
+#define async_exception_state(t) async_exception_state[(t)-1]
+ uint8_t async_exception_mask;
+#endif
/* Require shutdown to be deferred for some asynchronous operation? */
bool_t defer_shutdown;