[Xen-devel] [PATCH] xen/arm: fix rank/vgic locks inversion bug

The locking order is: first the rank lock, then the vgic lock. This
order is respected everywhere except in gic_update_one_lr.

gic_update_one_lr is called with the vgic lock held, but it calls
vgic_get_target_vcpu, which tries to obtain the rank lock. This can
cause deadlocks.
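
For illustration, a minimal userspace sketch of the inversion (pthread
mutexes stand in for Xen's spinlocks; the two thread functions are
hypothetical stand-ins for the MMIO-write path and the LR-update path,
not the real Xen code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rank_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vgic_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Correct order, used everywhere else: rank lock, then vgic lock. */
    static void *mmio_write_path(void *arg)
    {
        pthread_mutex_lock(&rank_lock);
        pthread_mutex_lock(&vgic_lock);   /* e.g. via vgic_migrate_irq */
        pthread_mutex_unlock(&vgic_lock);
        pthread_mutex_unlock(&rank_lock);
        return NULL;
    }

    /* Inverted order, as in gic_update_one_lr before this patch. */
    static void *lr_update_path(void *arg)
    {
        pthread_mutex_lock(&vgic_lock);
        pthread_mutex_lock(&rank_lock);   /* via vgic_get_target_vcpu */
        pthread_mutex_unlock(&rank_lock);
        pthread_mutex_unlock(&vgic_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        /* With unlucky timing each thread acquires its first lock and
         * then blocks forever on the other one: an AB-BA deadlock. */
        pthread_create(&t1, NULL, mmio_write_path, NULL);
        pthread_create(&t2, NULL, lr_update_path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("no deadlock on this run\n");
        return 0;
    }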

We already have a version of vgic_get_target_vcpu that doesn't take the
rank lock: __vgic_get_target_vcpu. Also, the only routines that modify
the target vcpu are vgic_store_itargetsr and vgic_store_irouter. They
call vgic_migrate_irq, which already takes the vgic lock.
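
The two helpers follow the usual locked/unlocked split; roughly (with
illustrative types, not the real Xen structures):

    #include <pthread.h>

    struct rank {
        pthread_mutex_t lock;
        int vcpu[32];        /* target vCPU id for each vIRQ in the rank */
    };

    /* Unlocked variant: the caller must already hold rank->lock, or
     * hold another lock that serializes all writers of rank->vcpu[]. */
    static int __get_target(struct rank *r, unsigned int idx)
    {
        return r->vcpu[idx];
    }

    /* Locked variant: takes and releases the rank lock itself. */
    static int get_target(struct rank *r, unsigned int idx)
    {
        int t;

        pthread_mutex_lock(&r->lock);
        t = __get_target(r, idx);
        pthread_mutex_unlock(&r->lock);
        return t;
    }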

Solve the lock inversion problem by not taking the rank lock in
gic_update_one_lr (calling __vgic_get_target_vcpu instead of
vgic_get_target_vcpu). We make this safe by placing all modifications
to rank->vcpu within regions protected by the vgic lock.
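
With the patch applied, the protocol looks roughly like this
(continuing the sketch above, reusing struct rank and __get_target; a
single mutex stands in for the per-vCPU vgic lock of the old target):

    /* Writer (vgic_store_itargetsr/vgic_store_irouter): already holds
     * the rank lock, and now updates rank->vcpu[] with the vgic lock
     * held as well. */
    static void store_target(struct rank *r, pthread_mutex_t *vgic_lock,
                             unsigned int idx, int new_target)
    {
        pthread_mutex_lock(&r->lock);    /* rank lock first ... */
        pthread_mutex_lock(vgic_lock);   /* ... then vgic lock */
        /* vgic_migrate_irq(old, new, virq) would run here */
        r->vcpu[idx] = new_target;
        pthread_mutex_unlock(vgic_lock);
        pthread_mutex_unlock(&r->lock);
    }

    /* Reader (gic_update_one_lr): holds only the vgic lock and uses
     * the unlocked accessor. This is safe because every write to
     * rank->vcpu[] is now also serialized by the vgic lock, so no
     * rank lock is needed and the inversion disappears. */
    static int update_one_lr(struct rank *r, pthread_mutex_t *vgic_lock,
                             unsigned int idx)
    {
        int t;

        pthread_mutex_lock(vgic_lock);
        t = __get_target(r, idx);
        pthread_mutex_unlock(vgic_lock);
        return t;
    }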

Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>

---
There are supposed to be 2 Coverity IDs associated with this, but the
system is currently down.

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index a5348f2..3483192 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -506,7 +506,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
             list_del_init(&p->inflight);
             if ( test_and_clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
             {
-                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+                struct vcpu *v_target = __vgic_get_target_vcpu(v, irq);
                 irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             }
         }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 3dbcfe8..666ebdb 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -95,6 +95,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
 {
     unsigned int i;
     unsigned int virq;
+    unsigned long flags;
 
     ASSERT(spin_is_locked(&rank->lock));
 
@@ -116,6 +117,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
     {
         unsigned int new_target, old_target;
         uint8_t new_mask;
+        struct vcpu *old;
 
         /*
          * Don't need to mask as we rely on new_mask to fit for only one
@@ -153,16 +155,18 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         new_target--;
 
         old_target = rank->vcpu[offset];
+        old = d->vcpu[old_target];
 
+        spin_lock_irqsave(&old->arch.vgic.lock, flags);
         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
         {
-            vgic_migrate_irq(d->vcpu[old_target],
+            vgic_migrate_irq(old,
                              d->vcpu[new_target],
                              virq);
         }
-
         rank->vcpu[offset] = new_target;
+        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
     }
 }
 
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index d61479d..880c643 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -122,6 +122,7 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
 {
     struct vcpu *new_vcpu, *old_vcpu;
     unsigned int virq;
+    unsigned long flags;
 
     /* There is 1 vIRQ per IROUTER */
     virq = offset / NR_BYTES_PER_IROUTER;
@@ -150,11 +151,13 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     if ( !new_vcpu )
         return;
 
+    spin_lock_irqsave(&old_vcpu->arch.vgic.lock, flags);
     /* Only migrate the IRQ if the target vCPU has changed */
     if ( new_vcpu != old_vcpu )
         vgic_migrate_irq(old_vcpu, new_vcpu, virq);
 
     rank->vcpu[offset] = new_vcpu->vcpu_id;
+    spin_unlock_irqrestore(&old_vcpu->arch.vgic.lock, flags);
 }
 
 static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 364d5f0..4f7971f 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -219,7 +219,7 @@ int vcpu_vgic_free(struct vcpu *v)
 }
 
 /* The function should be called with the rank lock taken. */
-static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
+struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 {
     struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
 
@@ -257,9 +257,11 @@ static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
 
 void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 {
-    unsigned long flags;
     struct pending_irq *p = irq_to_pending(old, irq);
 
+    ASSERT(spin_is_locked(&old->arch.vgic.lock));
+    ASSERT(!local_irq_is_enabled());
+
     /* nothing to do for virtual interrupts */
     if ( p->desc == NULL )
         return;
@@ -270,12 +272,9 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 
     perfc_incr(vgic_irq_migrates);
 
-    spin_lock_irqsave(&old->arch.vgic.lock, flags);
-
     if ( list_empty(&p->inflight) )
     {
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         return;
     }
     /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
@@ -285,7 +284,6 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         list_del_init(&p->lr_queue);
         list_del_init(&p->inflight);
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         vgic_vcpu_inject_irq(new, irq);
         return;
     }
@@ -293,8 +291,6 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
      * and wait for the EOI */
     if ( !list_empty(&p->inflight) )
         set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
-
-    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
 }
 
 void arch_move_irqs(struct vcpu *v)
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 672f649..c911b33 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -293,6 +293,7 @@ extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
 extern void domain_vgic_free(struct domain *d);
 extern int vcpu_vgic_init(struct vcpu *v);
 extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
+extern struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
 extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
 extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
 extern void vgic_clear_pending_irqs(struct vcpu *v);
