
[Xen-devel] [PATCH] xen: events: do not unmask polled ipis on restore.



The Xen PV spinlock backend sets up an IPI-to-IRQ binding which is
only to be used via xen_poll_irq rather than received directly.
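
For illustration, the slow path waits for a spinlock kick with
xen_poll_irq() instead of taking a normal interrupt. A minimal sketch
(xen_spin_wait is a hypothetical name; the real code in
arch/x86/xen/spinlock.c also records the spinner and retries the
lock, elided here):

    static void xen_spin_wait(void)
    {
            int irq = __get_cpu_var(lock_kicker_irq);

            /* Drop any stale kick left over from a previous wait. */
            xen_clear_irq_pending(irq);

            /* Block in the hypervisor until the event is pending; the
             * IRQ handler bound to this IPI is never actually invoked. */
            xen_poll_irq(irq);
    }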

Unfortunately restore_cpu_ipis unconditionally unmasks each IPI on
restore (the offending pre-patch code is sketched after the trace),
leading to:

[   21.970432] ------------[ cut here ]------------
[   21.970432] kernel BUG at /local/scratch/ianc/devel/kernels/linux-2.6/arch/x86/xen/spinlock.c:343!
[   21.970432] invalid opcode: 0000 [#1] SMP
[   21.970432] last sysfs file: /sys/devices/virtual/net/lo/operstate
[   21.970432] Modules linked in:
[   21.970432]
[   21.970432] Pid: 0, comm: swapper Not tainted (2.6.32.24-x86_32p-xen-01034-g787c727 #34)
[   21.970432] EIP: 0061:[<c102e209>] EFLAGS: 00010046 CPU: 3
[   21.970432] EIP is at dummy_handler+0x3/0x7
[   21.970432] EAX: 0000021c EBX: dfc16880 ECX: 0000001a EDX: 00000000
[   21.970432] ESI: dfc02c00 EDI: 00000001 EBP: dfc47e10 ESP: dfc47e10
[   21.970432]  DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069
[   21.970432] Process swapper (pid: 0, ti=dfc46000 task=dfc39440 task.ti=dfc46000)
[   21.970432] Stack:
[   21.970432]  dfc47e30 c10a39f0 0000021c 00000000 00000000 dfc16880 0000021c 00000001
[   21.970432] <0> dfc47e40 c10a4f08 0000021c 00000000 dfc47e78 c12240a7 c1839284 c1839284
[   21.970432] <0> 00000200 00000000 00000000 f5720000 c1f3d028 c1f3d02c 00000180 dfc47e90
[   21.970432] Call Trace:
[   21.970432]  [<c10a39f0>] ? handle_IRQ_event+0x5f/0x122
[   21.970432]  [<c10a4f08>] ? handle_percpu_irq+0x2f/0x55
[   21.970432]  [<c12240a7>] ? __xen_evtchn_do_upcall+0xdb/0x15f
[   21.970432]  [<c122481e>] ? xen_evtchn_do_upcall+0x20/0x30
[   21.970432]  [<c1030d47>] ? xen_do_upcall+0x7/0xc
[   21.970432]  [<c102007b>] ? apic_reg_read+0xd3/0x22d
[   21.970432]  [<c1002227>] ? hypercall_page+0x227/0x1005
[   21.970432]  [<c102d30b>] ? xen_force_evtchn_callback+0xf/0x14
[   21.970432]  [<c102da7c>] ? check_events+0x8/0xc
[   21.970432]  [<c102da3b>] ? xen_irq_enable_direct_end+0x0/0x1
[   21.970432]  [<c105e485>] ? finish_task_switch+0x62/0xba
[   21.970432]  [<c14e3f84>] ? schedule+0x808/0x89d
[   21.970432]  [<c1084dc5>] ? hrtimer_start_expires+0x1a/0x22
[   21.970432]  [<c1085154>] ? tick_nohz_restart_sched_tick+0x15a/0x162
[   21.970432]  [<c102f43a>] ? cpu_idle+0x6d/0x6f
[   21.970432]  [<c14db29e>] ? cpu_bringup_and_idle+0xd/0xf
[   21.970432] Code: 5d 0f 95 c0 0f b6 c0 c3 55 66 83 78 02 00 89 e5 5d 0f 95 c0 0f b6 c0 c3 55 b2 01 86 10 31 c0 84 d2 89 e5 0f 94 c0 5d c3 55 89 e5 <0f> 0b eb fe 55 80 3d 4c ce 84 c1 00 89 e5 57 56 89 c6 53 74 15
[   21.970432] EIP: [<c102e209>] dummy_handler+0x3/0x7 SS:ESP 0069:dfc47e10
[   21.970432] ---[ end trace c0b71f7e12cf3011 ]---
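
The BUG() here is the body of dummy_handler, which exists only to
satisfy bind_ipi_to_irqhandler and must never run. Before this patch
the restore path unmasked every IPI event channel regardless;
abridged from drivers/xen/events.c:

    static void restore_cpu_ipis(unsigned int cpu)
    {
            ...
            for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                    ... /* re-bind the IPI to a fresh event channel */

                    /* Ready for use. */
                    unmask_evtchn(evtchn);
            }
    }

Once unmasked, a pending spinlock kick is delivered as an ordinary
interrupt and lands in dummy_handler.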

Add a new bind function which explicitly binds a polled-only IPI and
track this state in the event channel core so that such IPIs are left
masked, rather than unmasked, on restore.
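
With this in place a polled-only binding needs no dummy handler or
manual disable_irq() at the call site; the spinlock hunk below
reduces to:

    irq = bind_polled_ipi_to_irq(XEN_SPIN_UNLOCK_VECTOR, cpu,
                                 IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                 name);
    if (irq >= 0)
            per_cpu(lock_kicker_irq, cpu) = irq;

and the event channel core keeps such channels masked across
save/restore.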

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 arch/x86/xen/spinlock.c |   16 +++-------------
 drivers/xen/events.c    |   45 ++++++++++++++++++++++++++++++++++++++++-----
 include/xen/events.h    |    4 ++++
 3 files changed, 47 insertions(+), 18 deletions(-)

diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141..09655ca 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -338,29 +338,19 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
                xen_spin_unlock_slow(xl);
 }
 
-static irqreturn_t dummy_handler(int irq, void *dev_id)
-{
-       BUG();
-       return IRQ_HANDLED;
-}
-
 void __cpuinit xen_init_lock_cpu(int cpu)
 {
        int irq;
        const char *name;
 
        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-       irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+       irq = bind_polled_ipi_to_irq(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
-                                    dummy_handler,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-                                    name,
-                                    NULL);
+                                    name);
 
-       if (irq >= 0) {
-               disable_irq(irq); /* make sure it's never delivered */
+       if (irq >= 0)
                per_cpu(lock_kicker_irq, cpu) = irq;
-       }
 
        printk("cpu %d spinlock event irq %d\n", cpu, irq);
 }
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7b29ae1..f8b53b5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -94,7 +94,10 @@ struct irq_info
 
        union {
                unsigned short virq;
-               enum ipi_vector ipi;
+               struct {
+                       enum ipi_vector vector;
+                       unsigned char polled;
+               } ipi;
                struct {
                        unsigned short gsi;
                        unsigned char vector;
@@ -148,7 +151,7 @@ static struct irq_info mk_evtchn_info(unsigned short evtchn)
 static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
 {
        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
-                       .cpu = 0, .u.ipi = ipi };
+                       .cpu = 0, .u.ipi.vector = ipi, .u.ipi.polled = 0 };
 }
 
 static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
@@ -191,7 +194,7 @@ static enum ipi_vector ipi_from_irq(unsigned irq)
        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);
 
-       return info->u.ipi;
+       return info->u.ipi.vector;
 }
 
 static unsigned virq_from_irq(unsigned irq)
@@ -971,6 +974,33 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
        return irq;
 }
 
+
+static irqreturn_t polled_ipi_handler(int irq, void *dev_id)
+{
+       BUG();
+       return IRQ_HANDLED;
+}
+
+int bind_polled_ipi_to_irq(enum ipi_vector ipi,
+                          unsigned int cpu,
+                          unsigned long irqflags,
+                          const char *devname)
+{
+       int irq, retval;
+
+       irq = bind_ipi_to_irqhandler(ipi, cpu, polled_ipi_handler,
+                                    irqflags, devname, NULL);
+       if (irq < 0)
+               return irq;
+
+       info_for_irq(irq)->u.ipi.polled = 1;
+
+       disable_irq(irq); /* make sure it's never delivered */
+
+       return irq;
+
+}
+
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
        free_irq(irq, dev_id);
@@ -1290,6 +1320,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 static void restore_cpu_ipis(unsigned int cpu)
 {
        struct evtchn_bind_ipi bind_ipi;
+       int polled;
        int ipi, irq, evtchn;
 
        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
@@ -1306,12 +1337,16 @@ static void restore_cpu_ipis(unsigned int cpu)
                evtchn = bind_ipi.port;
 
                /* Record the new mapping. */
+               polled = info_for_irq(irq)->u.ipi.polled; /* save: mk_ipi_info() resets it */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
 
-               /* Ready for use. */
-               unmask_evtchn(evtchn);
+               /* Ready for use; polled IPIs remain masked. */
+               if (polled)
+                       info_for_irq(irq)->u.ipi.polled = 1;
+               else
+                       unmask_evtchn(evtchn);
 
        }
 }
diff --git a/include/xen/events.h b/include/xen/events.h
index 7e17e2a..b2f09ad 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -24,6 +24,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id);
+int bind_polled_ipi_to_irq(enum ipi_vector ipi,
+                          unsigned int cpu,
+                          unsigned long irqflags,
+                          const char *devname);
 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          irq_handler_t handler,
-- 
1.5.6.5

