
[Xen-devel] [PATCH 2/2] xen/arm: implement smp_call_function



Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
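Note (not part of the commit message): on_selected_cpus() serialises callers
with call_lock, copies the target mask and arguments into the shared
call_data structure, sends GIC_SGI_CALL_FUNCTION to the selected CPUs and
spins until every target has cleared its bit; the SGI handler invokes
smp_call_function_interrupt(), which runs the function before clearing the
bit when wait is set, and after clearing it otherwise. smp_call_function()
simply targets every online CPU except the caller.

As a rough illustration of the interface, a hypothetical caller could look
like the sketch below (show_cpu() and example() are made up for this note
and are not part of the patch):

    #include <xen/lib.h>
    #include <xen/smp.h>

    /* Callback run on each selected CPU from the SGI handler. */
    static void show_cpu(void *unused)
    {
        printk("Hello from CPU%u\n", smp_processor_id());
    }

    static void example(void)
    {
        /* Run show_cpu() on every online CPU except the current one and
         * spin until all of them have finished (wait == 1). */
        smp_call_function(show_cpu, NULL, 1);
    }
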
 xen/arch/arm/gic.c        |    3 ++
 xen/arch/arm/smp.c        |   73 ++++++++++++++++++++++++++++++++++++++++++++-
 xen/include/asm-arm/gic.h |    1 +
 xen/include/asm-arm/smp.h |    2 ++
 4 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index c9f64f1..9fab92b 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -658,6 +658,9 @@ static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi)
     case GIC_SGI_DUMP_STATE:
         dump_execstate(regs);
         break;
+    case GIC_SGI_CALL_FUNCTION:
+        smp_call_function_interrupt();
+        break;
     default:
         panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id());
         break;
diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
index a902d84..a6d2476 100644
--- a/xen/arch/arm/smp.c
+++ b/xen/arch/arm/smp.c
@@ -4,6 +4,8 @@
 #include <asm/cpregs.h>
 #include <asm/page.h>
 #include <asm/gic.h>
+#include <xen/spinlock.h>
+#include <xen/smp.h>
 
 void flush_tlb_mask(const cpumask_t *mask)
 {
@@ -16,7 +18,76 @@ void smp_call_function(
     void *info,
     int wait)
 {
-    panic("%s not implmented\n", __func__);
+    cpumask_t allbutself;
+
+    cpumask_andnot(&allbutself, &cpu_online_map,
+                   cpumask_of(smp_processor_id()));
+    on_selected_cpus(&allbutself, func, info, wait);
+}
+
+/*
+ * Structure and data for smp_call_function()/on_selected_cpus().
+ */
+static DEFINE_SPINLOCK(call_lock);
+static struct call_data_struct {
+    void (*func) (void *info);
+    void *info;
+    int wait;
+    cpumask_t selected;
+} call_data;
+
+void on_selected_cpus(
+    const cpumask_t *selected,
+    void (*func) (void *info),
+    void *info,
+    int wait)
+{
+    unsigned int nr_cpus;
+
+    ASSERT(local_irq_is_enabled());
+
+    spin_lock(&call_lock);
+
+    cpumask_copy(&call_data.selected, selected);
+
+    nr_cpus = cpumask_weight(&call_data.selected);
+    if ( nr_cpus == 0 )
+        goto out;
+
+    call_data.func = func;
+    call_data.info = info;
+    call_data.wait = wait;
+
+    send_SGI_mask(&call_data.selected, GIC_SGI_CALL_FUNCTION);
+
+    while ( !cpumask_empty(&call_data.selected) )
+        cpu_relax();
+
+out:
+    spin_unlock(&call_lock);
+}
+
+void smp_call_function_interrupt(void)
+{
+    void (*func)(void *info) = call_data.func;
+    void *info = call_data.info;
+    unsigned int cpu = smp_processor_id();
+
+    if ( !cpumask_test_cpu(cpu, &call_data.selected) )
+        return;
+
+    if ( call_data.wait )
+    {
+        (*func)(info);
+        mb();
+        cpumask_clear_cpu(cpu, &call_data.selected);
+    }
+    else
+    {
+        mb();
+        cpumask_clear_cpu(cpu, &call_data.selected);
+        (*func)(info);
+    }
 }
 
 void smp_send_event_check_mask(const cpumask_t *mask)
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 4f2c8b8..47354dd 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -173,6 +173,7 @@ extern void gic_restore_state(struct vcpu *v);
 enum gic_sgi {
     GIC_SGI_EVENT_CHECK = 0,
     GIC_SGI_DUMP_STATE  = 1,
+    GIC_SGI_CALL_FUNCTION = 2,
 };
 extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi);
 extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi);
diff --git a/xen/include/asm-arm/smp.h b/xen/include/asm-arm/smp.h
index 1c2746b..866c2f7 100644
--- a/xen/include/asm-arm/smp.h
+++ b/xen/include/asm-arm/smp.h
@@ -24,6 +24,8 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset);
 
 extern void smp_clear_cpu_maps (void);
 extern int smp_get_max_cpus (void);
+extern void smp_call_function_interrupt(void);
+
 #endif
 /*
  * Local variables:
-- 
1.7.10.4

