|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v6 1/5] arm/irq: Keep track of irq affinities
Currently, on Arm, the desc->affinity mask of an IRQ is never updated,
which makes it hard to determine the actual affinity of an interrupt.
Fix this by updating the field in irq_set_affinity().
Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
v5->v6:
* add missing locking around irq_set_affinity calls
v4->v5:
* add locking
v3->v4:
* patch introduced
---
xen/arch/arm/gic-vgic.c | 2 ++
xen/arch/arm/irq.c | 9 +++++++--
xen/arch/arm/vgic.c | 14 ++++++++++++--
xen/arch/arm/vgic/vgic-mmio-v2.c | 11 +++++------
xen/arch/arm/vgic/vgic.c | 15 ++++++++-------
5 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
index ea48c5375a..5253caf002 100644
--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -232,7 +232,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
{
struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(v_target->processor));
+ spin_unlock(&p->desc->lock);
clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
}
}
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 73e58a5108..7204bc2b68 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -216,10 +216,15 @@ static inline struct domain *irq_get_domain(struct
irq_desc *desc)
return irq_get_guest_info(desc)->d;
}
+/* Must be called with desc->lock held */
void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
- if ( desc != NULL )
- desc->handler->set_affinity(desc, mask);
+ if ( desc == NULL )
+ return;
+
+ ASSERT(spin_is_locked(&desc->lock));
+ cpumask_copy(desc->affinity, mask);
+ desc->handler->set_affinity(desc, mask);
}
int request_irq(unsigned int irq, unsigned int irqflags,
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 6647071ad4..c59f6873db 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -445,7 +445,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new,
unsigned int irq)
if ( list_empty(&p->inflight) )
{
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(new->processor));
+ spin_unlock(&p->desc->lock);
spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
return true;
}
@@ -453,7 +455,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new,
unsigned int irq)
if ( !list_empty(&p->lr_queue) )
{
vgic_remove_irq_from_queues(old, p);
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(new->processor));
+ spin_unlock(&p->desc->lock);
spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
vgic_inject_irq(new->domain, new, irq, true);
return true;
@@ -473,6 +477,7 @@ void arch_move_irqs(struct vcpu *v)
struct domain *d = v->domain;
struct pending_irq *p;
struct vcpu *v_target;
+ unsigned long flags;
int i;
/*
@@ -494,7 +499,13 @@ void arch_move_irqs(struct vcpu *v)
p = irq_to_pending(v_target, virq);
if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+ {
+ if ( !p->desc )
+ continue;
+ spin_lock_irqsave(&p->desc->lock, flags);
irq_set_affinity(p->desc, cpu_mask);
+ spin_unlock_irqrestore(&p->desc->lock, flags);
+ }
}
}
@@ -574,8 +585,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, unsigned
int n)
spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
if ( p->desc != NULL )
{
- irq_set_affinity(p->desc, cpumask_of(v_target->processor));
spin_lock_irqsave(&p->desc->lock, flags);
+ irq_set_affinity(p->desc, cpumask_of(v_target->processor));
/*
* The irq cannot be a PPI, we only support delivery of SPIs
* to guests.
@@ -944,4 +955,3 @@ void vgic_check_inflight_irqs_pending(struct vcpu *v,
unsigned int rank, uint32_
* indent-tabs-mode: nil
* End:
*/
-
diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index b7c2d7ce99..fc04741ca1 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -159,24 +159,23 @@ static void vgic_mmio_write_target(struct vcpu *vcpu,
for ( i = 0; i < len; i++ )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);
+ struct irq_desc *desc = irq_to_desc(irq->hwintid);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
irq->targets = (val >> (i * 8)) & cpu_mask;
if ( irq->targets )
{
irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
if ( irq->hw )
- {
- struct irq_desc *desc = irq_to_desc(irq->hwintid);
-
irq_set_affinity(desc,
cpumask_of(irq->target_vcpu->processor));
- }
}
else
irq->target_vcpu = NULL;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
vgic_put_irq(vcpu->domain, irq);
}
}
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index b2c0e1873a..81ba4099ef 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -812,21 +812,22 @@ void arch_move_irqs(struct vcpu *v)
{
struct vgic_irq *irq = vgic_get_irq(d, NULL, i + VGIC_NR_PRIVATE_IRQS);
unsigned long flags;
+ irq_desc_t *desc;
if ( !irq )
continue;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ desc = irq_to_desc(irq->hwintid);
- /* Only hardware mapped vIRQs that are targeting this vCPU. */
- if ( irq->hw && irq->target_vcpu == v)
- {
- irq_desc_t *desc = irq_to_desc(irq->hwintid);
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
+ /* Only hardware mapped vIRQs that are targeting this vCPU. */
+ if ( irq->hw && irq->target_vcpu == v )
irq_set_affinity(desc, cpumask_of(v->processor));
- }
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
vgic_put_irq(d, irq);
}
}
--
2.51.2
|
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.