
[Xen-devel] [PATCH v6 2/2] vgic: refuse irq migration when one is already in progress



When an irq migration is already in progress but not yet complete
(GIC_IRQ_GUEST_MIGRATING is set), refuse any further irq migration
request for the same irq.

This patch implements this by making vgic_migrate_irq return success or
failure, and by leaving the irq target unchanged on failure. A warning is
printed when an irq migration request is refused.

It also moves the clearing of GIC_IRQ_GUEST_MIGRATING to after the
physical irq affinity has been changed, so that the bit is only cleared
once all operations related to the irq migration have completed.

Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/arm/gic.c         |  3 ++-
 xen/arch/arm/vgic-v2.c     |  7 +++----
 xen/arch/arm/vgic-v3.c     |  7 ++++---
 xen/arch/arm/vgic.c        | 14 +++++++++-----
 xen/include/asm-arm/vgic.h |  2 +-
 5 files changed, 19 insertions(+), 14 deletions(-)
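
Note for reviewers: below is a small standalone model of the behaviour this
patch introduces, in case it helps review. It is not Xen code; the names and
the simplified "migrating" flag are illustrative stand-ins for
GIC_IRQ_GUEST_MIGRATING and rank->vcpu[offset], and the completion path is
reduced to clearing the flag by hand.

/* Standalone model of the migration contract introduced by this patch.
 * Everything here is simplified and hypothetical; it only illustrates the
 * caller-side pattern: refuse a second migration while one is pending, and
 * commit the stored target only when the request is accepted. */
#include <stdbool.h>
#include <stdio.h>

struct pending_irq_model {
    bool migrating;      /* stands in for GIC_IRQ_GUEST_MIGRATING */
    int  stored_target;  /* stands in for rank->vcpu[offset] */
};

static bool migrate_irq_model(struct pending_irq_model *p, int new_target)
{
    /* Refuse a new migration while a previous one is still pending. */
    if (p->migrating) {
        fprintf(stderr, "migration to vcpu%d refused: already in progress\n",
                new_target);
        return false;
    }
    /* In the real code the bit is only set when the irq sits in an LR;
     * here we always set it to model the "in progress" window. */
    p->migrating = true;
    return true;
}

/* Caller pattern after this patch (as in vgic_store_itargetsr and
 * vgic_store_irouter): commit the new target only on success. */
static void store_target_model(struct pending_irq_model *p, int new_target)
{
    if (new_target != p->stored_target && migrate_irq_model(p, new_target))
        p->stored_target = new_target;
}

int main(void)
{
    struct pending_irq_model p = { .migrating = false, .stored_target = 0 };

    store_target_model(&p, 1);   /* accepted: stored target becomes 1 */
    store_target_model(&p, 2);   /* refused: stored target stays 1 */
    printf("stored target: %d (expected 1)\n", p.stored_target);

    /* Completion path: the flag is cleared only after the physical affinity
     * change, mirroring the clear_bit move in gic_update_one_lr. */
    p.migrating = false;
    store_target_model(&p, 2);   /* accepted now */
    printf("stored target: %d (expected 2)\n", p.stored_target);
    return 0;
}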

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index de996d9..dc07df1 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -508,10 +508,11 @@ static void gic_update_one_lr(struct vcpu *v, int i)
              * next pcpu, inflight is already cleared. No concurrent
              * accesses to inflight. */
             smp_wmb();
-            if ( test_and_clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+            if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
             {
                 struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
                 irq_set_affinity(p->desc, cpumask_of(v_target->processor));
+                clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
             }
         }
     }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 0674f7b..dc9f95b 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -156,12 +156,11 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
         {
-            vgic_migrate_irq(d->vcpu[old_target],
+            if ( vgic_migrate_irq(d->vcpu[old_target],
                              d->vcpu[new_target],
-                             virq);
+                             virq) )
+                write_atomic(&rank->vcpu[offset], new_target);
         }
-
-        write_atomic(&rank->vcpu[offset], new_target);
     }
 }
 
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 0679e76..1e9890b 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -151,9 +151,10 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
 
     /* Only migrate the IRQ if the target vCPU has changed */
     if ( new_vcpu != old_vcpu )
-        vgic_migrate_irq(old_vcpu, new_vcpu, virq);
-
-    write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
+    {
+        if ( vgic_migrate_irq(old_vcpu, new_vcpu, virq) )
+            write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
+    }
 }
 
 static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 67d75a6..5eef359 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -237,18 +237,21 @@ static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
     return priority;
 }
 
-void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
+bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 {
     unsigned long flags;
     struct pending_irq *p = irq_to_pending(old, irq);
 
     /* nothing to do for virtual interrupts */
     if ( p->desc == NULL )
-        return;
+        return true;
 
     /* migration already in progress, no need to do anything */
     if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
-        return;
+    {
+        gdprintk(XENLOG_WARNING, "irq %d migration failed: requested while in progress\n", irq);
+        return false;
+    }
 
     perfc_incr(vgic_irq_migrates);
 
@@ -258,7 +261,7 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     {
         irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return;
+        return true;
     }
     /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
     if ( !list_empty(&p->lr_queue) )
@@ -269,7 +272,7 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         vgic_vcpu_inject_irq(new, irq);
-        return;
+        return true;
     }
     /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
      * and wait for the EOI */
@@ -277,6 +280,7 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
 
     spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
+    return true;
 }
 
 void arch_move_irqs(struct vcpu *v)
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 894c3f1..544867a 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -314,7 +314,7 @@ extern int vcpu_vgic_free(struct vcpu *v);
 extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
                         enum gic_sgi_mode irqmode, int virq,
                         const struct sgi_target *target);
-extern void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
+extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
 
 /* Reserve a specific guest vIRQ */
 extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);
-- 
1.9.1

