[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 07/21] x86/xen: split xen_smp_intr_init()/xen_smp_intr_free()



xen_smp_intr_init() and xen_smp_intr_free() have PV-specific code and as
a preparatory change to splitting smp.c we need to split these functions.
Create xen_smp_intr_init_pv()/xen_smp_intr_free_pv().

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/xen/enlighten_pv.c |  9 +++++++++
 arch/x86/xen/smp.c          | 29 ++++++++++++++++++-----------
 arch/x86/xen/smp.h          |  8 ++++++++
 3 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index b9ff23c..acfd896 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1478,12 +1478,21 @@ static int xen_cpu_up_prepare_pv(unsigned int cpu)
                     cpu, rc);
                return rc;
        }
+
+       rc = xen_smp_intr_init_pv(cpu);
+       if (rc) {
+               WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
+                    cpu, rc);
+               return rc;
+       }
+
        return 0;
 }
 
 static int xen_cpu_dead_pv(unsigned int cpu)
 {
        xen_smp_intr_free(cpu);
+       xen_smp_intr_free_pv(cpu);
 
        xen_teardown_timer(cpu);
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0dee6f5..ff6aaff 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -132,9 +132,10 @@ void xen_smp_intr_free(unsigned int cpu)
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
-       if (xen_hvm_domain())
-               return;
+}
 
+void xen_smp_intr_free_pv(unsigned int cpu)
+{
        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
@@ -148,11 +149,12 @@ void xen_smp_intr_free(unsigned int cpu)
                kfree(per_cpu(xen_pmu_irq, cpu).name);
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
-};
+}
+
 int xen_smp_intr_init(unsigned int cpu)
 {
        int rc;
-       char *resched_name, *callfunc_name, *debug_name, *pmu_name;
+       char *resched_name, *callfunc_name, *debug_name;
 
        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -199,12 +201,17 @@ int xen_smp_intr_init(unsigned int cpu)
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 
-       /*
-        * The IRQ worker on PVHVM goes through the native path and uses the
-        * IPI mechanism.
-        */
-       if (xen_hvm_domain())
-               return 0;
+       return 0;
+
+ fail:
+       xen_smp_intr_free(cpu);
+       return rc;
+}
+
+int xen_smp_intr_init_pv(unsigned int cpu)
+{
+       int rc;
+       char *callfunc_name, *pmu_name;
 
        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
@@ -233,7 +240,7 @@ int xen_smp_intr_init(unsigned int cpu)
        return 0;
 
  fail:
-       xen_smp_intr_free(cpu);
+       xen_smp_intr_free_pv(cpu);
        return rc;
 }
 
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 9beef33..a059adb 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -11,6 +11,8 @@ extern void xen_send_IPI_self(int vector);
 
 extern int xen_smp_intr_init(unsigned int cpu);
 extern void xen_smp_intr_free(unsigned int cpu);
+extern int xen_smp_intr_init_pv(unsigned int cpu);
+extern void xen_smp_intr_free_pv(unsigned int cpu);
 
 #else /* CONFIG_SMP */
 
@@ -19,6 +21,12 @@ static inline int xen_smp_intr_init(unsigned int cpu)
        return 0;
 }
 static inline void xen_smp_intr_free(unsigned int cpu) {}
+
+static inline int xen_smp_intr_init_pv(unsigned int cpu)
+{
+       return 0;
+}
+static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
 #endif /* CONFIG_SMP */
 
 #endif
-- 
2.9.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.