
[Xen-devel] [PATCH v4 17/27] x86/traps: move some PV specific functions and struct to pv/traps.c



The PV specific functions pv_trap_init(), send_guest_trap() and
nmi_mce_softirq() depend on one another (pv_trap_init() registers
nmi_mce_softirq(), which send_guest_trap() raises), so they need to be
moved at the same time. Also move the softirq_trap structure and its
per-cpu variable, because they are only used in that one place.

Fix some coding style issues while moving.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
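Two short reviewer notes below; neither is part of the patch (git am
ignores everything between the "---" separator and the diff proper).

The reason nmi_mce_softirq() exists at all: NMI and MCE context is not
a safe place to wake a vcpu, so the handlers only record the target in
the per-cpu softirq_trap slot and raise NMI_MCE_SOFTIRQ; the actual
vcpu_kick() happens later from softirq context. A minimal user-space
sketch of that shape (illustrative names only, none of these are Xen
APIs):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu { int id; };

    static struct vcpu *pending_vcpu;    /* like the softirq_trap slot */
    static atomic_bool softirq_raised;   /* like raise_softirq()       */

    /* "NMI/MCE context": record the target and flag it, nothing more. */
    static void nmi_handler(struct vcpu *v)
    {
        pending_vcpu = v;
        atomic_store(&softirq_raised, true);
    }

    /* Runs later in a safe context, like nmi_mce_softirq(). */
    static void softirq(void)
    {
        if ( atomic_exchange(&softirq_raised, false) && pending_vcpu )
        {
            printf("kicking vcpu %d\n", pending_vcpu->id); /* vcpu_kick() */
            pending_vcpu = NULL;
        }
    }

    int main(void)
    {
        struct vcpu v = { .id = 3 };
        nmi_handler(&v);   /* unsafe context: defer the work */
        softirq();         /* safe context: do the wakeup    */
        return 0;
    }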
 xen/arch/x86/pv/traps.c     | 105 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/traps.c        |  93 ---------------------------------------
 xen/include/asm-x86/traps.h |   6 ---
 3 files changed, 105 insertions(+), 99 deletions(-)
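The other piece worth calling out is the NULL -> v cmpxchgptr() in
send_guest_trap(): it atomically claims the per-cpu softirq_trap slot,
so at most one trap injection can be in flight per physical CPU, and a
second caller gets -EBUSY. The same claim/release pattern in
standalone C11 (again purely illustrative, not Xen code):

    #include <stdatomic.h>
    #include <stdio.h>

    struct slot { _Atomic(void *) vcpu; };   /* stand-in for softirq_trap */

    /* Mirrors cmpxchgptr(&st->vcpu, NULL, v): succeeds only if empty. */
    static int claim(struct slot *s, void *v)
    {
        void *expected = NULL;
        return atomic_compare_exchange_strong(&s->vcpu, &expected, v)
               ? 0 : -1 /* -EBUSY in the real code */;
    }

    int main(void)
    {
        struct slot s = { NULL };
        int a, b;
        printf("first:  %d\n", claim(&s, &a)); /* 0: slot claimed      */
        printf("second: %d\n", claim(&s, &b)); /* -1: already in use   */
        atomic_store(&s.vcpu, NULL);           /* like st->vcpu = NULL */
        printf("third:  %d\n", claim(&s, &a)); /* 0: free again        */
        return 0;
    }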

diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index be215df57a..0c1600d886 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -237,6 +237,111 @@ bool guest_has_trap_callback(const struct domain *d, unsigned int vcpuid,
     return t->address;
 }
 
+struct softirq_trap {
+    struct domain *domain;  /* domain to inject trap */
+    struct vcpu *vcpu;      /* vcpu to inject trap */
+    int processor;          /* physical cpu to inject trap */
+};
+static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
+
+static void nmi_mce_softirq(void)
+{
+    int cpu = smp_processor_id();
+    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
+
+    BUG_ON(st->vcpu == NULL);
+
+    /*
+     * Set the tmp value unconditionally, so that
+     * the check in the iret hypercall works.
+     */
+    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
+                 st->vcpu->cpu_hard_affinity);
+
+    if ( (cpu != st->processor) ||
+         (st->processor != st->vcpu->processor) )
+    {
+        /*
+         * We are on a different physical cpu.
+         * Make sure to wakeup the vcpu on the
+         * specified processor.
+         */
+        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
+
+        /* Affinity is restored in the iret hypercall. */
+    }
+
+    /*
+     * Only used to defer wakeup of domain/vcpu to
+     * a safe (non-NMI/MCE) context.
+     */
+    vcpu_kick(st->vcpu);
+    st->vcpu = NULL;
+}
+
+void __init pv_trap_init(void)
+{
+    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
+    _set_gate(idt_table + HYPERCALL_VECTOR,
+              SYS_DESC_trap_gate, 1, entry_int82);
+
+    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
+    _set_gate(idt_table + 0x80, SYS_DESC_trap_gate, 3, &int80_direct_trap);
+
+    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
+}
+
+int send_guest_trap(struct domain *d, uint16_t vcpuid, unsigned int trap_nr)
+{
+    struct vcpu *v;
+    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
+
+    BUG_ON(d == NULL);
+    BUG_ON(vcpuid >= d->max_vcpus);
+    v = d->vcpu[vcpuid];
+
+    switch ( trap_nr )
+    {
+    case TRAP_nmi:
+        if ( cmpxchgptr(&st->vcpu, NULL, v) )
+            return -EBUSY;
+        if ( !test_and_set_bool(v->nmi_pending) )
+        {
+            st->domain = d;
+            st->processor = v->processor;
+
+            /* not safe to wake up a vcpu here */
+            raise_softirq(NMI_MCE_SOFTIRQ);
+            return 0;
+        }
+        st->vcpu = NULL;
+        break;
+
+    case TRAP_machine_check:
+        if ( cmpxchgptr(&st->vcpu, NULL, v) )
+            return -EBUSY;
+
+        /*
+         * We are called by the machine check (exception or polling) handlers
+         * on the physical CPU that reported a machine check error.
+         */
+        if ( !test_and_set_bool(v->mce_pending) )
+        {
+            st->domain = d;
+            st->processor = v->processor;
+
+            /* not safe to wake up a vcpu here */
+            raise_softirq(NMI_MCE_SOFTIRQ);
+            return 0;
+        }
+        st->vcpu = NULL;
+        break;
+    }
+
+    /* delivery failed */
+    return -EIO;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 29a83994bd..287503cd56 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1477,39 +1477,6 @@ void do_general_protection(struct cpu_user_regs *regs)
     panic("GENERAL PROTECTION FAULT\n[error_code=%04x]", regs->error_code);
 }
 
-static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
-
-static void nmi_mce_softirq(void)
-{
-    int cpu = smp_processor_id();
-    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
-
-    BUG_ON(st->vcpu == NULL);
-
-    /* Set the tmp value unconditionally, so that
-     * the check in the iret hypercall works. */
-    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
-                 st->vcpu->cpu_hard_affinity);
-
-    if ((cpu != st->processor)
-       || (st->processor != st->vcpu->processor))
-    {
-        /* We are on a different physical cpu.
-         * Make sure to wakeup the vcpu on the
-         * specified processor.
-         */
-        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
-
-        /* Affinity is restored in the iret hypercall. */
-    }
-
-    /* Only used to defer wakeup of domain/vcpu to
-     * a safe (non-NMI/MCE) context.
-     */
-    vcpu_kick(st->vcpu);
-    st->vcpu = NULL;
-}
-
 static void pci_serr_softirq(void)
 {
     printk("\n\nNMI - PCI system error (SERR)\n");
@@ -1871,18 +1838,6 @@ void __init init_idt_traps(void)
     this_cpu(compat_gdt_table) = boot_cpu_compat_gdt_table;
 }
 
-void __init pv_trap_init(void)
-{
-    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
-    _set_gate(idt_table + HYPERCALL_VECTOR,
-              SYS_DESC_trap_gate, 1, entry_int82);
-
-    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
-    _set_gate(idt_table + 0x80, SYS_DESC_trap_gate, 3, &int80_direct_trap);
-
-    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
-}
-
 extern void (*const autogen_entrypoints[NR_VECTORS])(void);
 void __init trap_init(void)
 {
@@ -1915,54 +1870,6 @@ void __init trap_init(void)
     open_softirq(PCI_SERR_SOFTIRQ, pci_serr_softirq);
 }
 
-int send_guest_trap(struct domain *d, uint16_t vcpuid, unsigned int trap_nr)
-{
-    struct vcpu *v;
-    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
-
-    BUG_ON(d == NULL);
-    BUG_ON(vcpuid >= d->max_vcpus);
-    v = d->vcpu[vcpuid];
-
-    switch (trap_nr) {
-    case TRAP_nmi:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-        if ( !test_and_set_bool(v->nmi_pending) ) {
-               st->domain = d;
-               st->processor = v->processor;
-
-               /* not safe to wake up a vcpu here */
-               raise_softirq(NMI_MCE_SOFTIRQ);
-               return 0;
-        }
-        st->vcpu = NULL;
-        break;
-
-    case TRAP_machine_check:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-
-        /* We are called by the machine check (exception or polling) handlers
-         * on the physical CPU that reported a machine check error. */
-
-        if ( !test_and_set_bool(v->mce_pending) ) {
-                st->domain = d;
-                st->processor = v->processor;
-
-                /* not safe to wake up a vcpu here */
-                raise_softirq(NMI_MCE_SOFTIRQ);
-                return 0;
-        }
-        st->vcpu = NULL;
-        break;
-    }
-
-    /* delivery failed */
-    return -EIO;
-}
-
-
 void activate_debugregs(const struct vcpu *curr)
 {
     ASSERT(curr == current);
diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h
index 26625ce5a6..8cf6105d8d 100644
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -19,12 +19,6 @@
 #ifndef ASM_TRAP_H
 #define ASM_TRAP_H
 
-struct softirq_trap {
-       struct domain *domain;  /* domain to inject trap */
-       struct vcpu *vcpu;      /* vcpu to inject trap */
-       int processor;          /* physical cpu to inject trap */
-};
-
 struct cpu_user_regs;
 
 void async_exception_cleanup(struct vcpu *);
-- 
2.11.0

