On Thu, 2011-02-03 at 18:29 +0000, Anirban Sinha wrote:
> > Something like the following against konrad's stable/irq.rework branch.
>
> Can you please rediff the patch against stock 2.6.37 and send it over?
> I would have done it myself but I am busy with something else.
The branch merges cleanly into 2.6.37, you can find it at:
git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen.git
stable/irq.rework
I don't think the patch applies conceptually to v2.6.37 since it has
other logic on the irq allocation path outside of irq_alloc_desc* which
is protected by irq_mapping_update_lock and this prevents reducing the
scope of the lock in the trivial way, that code goes away in the
irq.rework branch.
However I think reducing the scope of the lock is unnecessarily
complicating things. Simply switching from a spinlock to a mutex is the
simplest option, I don't think any of the interrupt setup/teardown code
paths happen in a context which would make this a problem.
I'm a little bit concerned that I have never seen this lockdep warning (I
habitually run with the various lock debugging options on), but I think the
analysis is correct. The patch below works for me in so much as it
doesn't make anything worse, but I wasn't seeing the issue to start
with. Does it work for you?
Ian.
8<-----------------------------
>From c9319d37767827f2f9260607eda4c61bae01ce4e Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Fri, 4 Feb 2011 08:59:23 +0000
Subject: [PATCH] xen: events: switch irq_mapping_update_lock to a mutex
xen_allocate_irq_{dynamic,msi} call irq_desc_alloc* which may sleep.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Reported-by: Anirban Sinha <ani@xxxxxxxxxxx>
Cc: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
---
drivers/xen/events.c | 38 +++++++++++++++++++-------------------
1 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 267b6a1..625a387 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
* This lock protects updates to the following mapping and
reference-count
* arrays. The lock does not need to be acquired to read the mapping
tables.
*/
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ...
NR_VIRQS-1] = -1};
@@ -603,7 +603,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi,
int shareable, char *name)
int irq = 0;
struct physdev_irq irq_op;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
@@ -642,7 +642,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi,
int shareable, char *name)
pirq_to_irq[pirq] = irq;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -670,7 +670,7 @@ static int find_unbound_pirq(int type)
void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
if (alloc & XEN_ALLOC_IRQ) {
*irq = xen_allocate_irq_dynamic();
@@ -694,7 +694,7 @@ void xen_allocate_pirq_msi(char *name, int *irq, int
*pirq, int alloc)
pirq_to_irq[*pirq] = *irq;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
}
int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int type)
@@ -724,7 +724,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct
msi_desc *msidesc, int type)
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
}
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
@@ -747,7 +747,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct
msi_desc *msidesc, int type)
(type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
#endif
@@ -759,7 +759,7 @@ int xen_destroy_irq(int irq)
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
desc = irq_to_desc(irq);
if (!desc)
@@ -780,7 +780,7 @@ int xen_destroy_irq(int irq)
xen_free_irq(irq);
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return rc;
}
@@ -803,7 +803,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = evtchn_to_irq[evtchn];
@@ -817,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq_info[irq] = mk_evtchn_info(evtchn);
}
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -828,7 +828,7 @@ static int bind_ipi_to_irq(unsigned int ipi,
unsigned int cpu)
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
@@ -854,7 +854,7 @@ static int bind_ipi_to_irq(unsigned int ipi,
unsigned int cpu)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -864,7 +864,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int
cpu)
struct evtchn_bind_virq bind_virq;
int evtchn, irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(virq_to_irq, cpu)[virq];
@@ -889,7 +889,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int
cpu)
bind_evtchn_to_cpu(evtchn, cpu);
}
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -899,7 +899,7 @@ static void unbind_from_irq(unsigned int irq)
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
@@ -931,7 +931,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
}
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1181,7 +1181,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
will also be masked. */
disable_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1192,7 +1192,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
/* new event channels are always bound to cpu 0 */
irq_set_affinity(irq, cpumask_of(0));
--
1.5.6.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|