
[Xen-devel] [PATCH 11/57] ARM: VGIC: change to level-IRQ compatible IRQ injection interface



At the moment vgic_vcpu_inject_irq() is the interface for Xen internal
code and virtual devices to inject IRQs into a guest. This interface has
two shortcomings:
1) It requires a VCPU pointer, which we may not know (and don't need!)
for shared interrupts. A second function (vgic_vcpu_inject_spi()) was
added to work around this issue.
2) This interface only really supports edge triggered IRQs, which is
all the Xen VGIC emulates so far anyway. However this needs to and
will change, so we need to add the desired level (high or low) to the
interface.
This replaces the existing injection call (taking a VCPU and an IRQ
parameter) with a new one, taking domain, VCPU, IRQ and level parameters.
The VCPU can be NULL in case we don't know and don't care.
We change all call sites to use this new interface. This still doesn't
give us the missing level IRQ handling, but at least prepares the callers
to do the right thing later automatically.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
---
Changelog RFC ... v1:
- no change
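
For illustration only (not part of the commit message), a minimal sketch of
how a virtual device model could drive an interrupt line through the new
call; the helper and its name are hypothetical, only the vgic_inject_irq()
signature and the NULL-vCPU/SPI behaviour are taken from this patch:

    /* Hypothetical device model helper raising or lowering its SPI. */
    static void vdev_set_irq_level(struct domain *d, unsigned int spi,
                                   bool asserted)
    {
        /*
         * Passing NULL as the vCPU lets the VGIC look up the SPI's target
         * vCPU itself (the IRQ must be an SPI in that case). The last
         * parameter is the new line level; with this patch a "false"
         * (low level / falling edge) is still ignored.
         */
        vgic_inject_irq(d, NULL, spi, asserted);
    }

Callers that already know the vCPU (PPIs, the event channel IRQ) keep
passing it explicitly, as the converted call sites in the diff below do.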

 xen/arch/arm/domain.c      |  4 ++--
 xen/arch/arm/gic-v3-lpi.c  |  2 +-
 xen/arch/arm/irq.c         |  2 +-
 xen/arch/arm/time.c        |  2 +-
 xen/arch/arm/vgic.c        | 43 +++++++++++++++++++++++++------------------
 xen/arch/arm/vpl011.c      |  2 +-
 xen/arch/arm/vtimer.c      |  4 ++--
 xen/include/asm-arm/vgic.h |  4 ++--
 8 files changed, 35 insertions(+), 28 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 8546443bad..a7bba3ad44 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -952,14 +952,14 @@ void vcpu_mark_events_pending(struct vcpu *v)
     if ( already_pending )
         return;
 
-    vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+    vgic_inject_irq(v->domain, v, v->domain->arch.evtchn_irq, true);
 }
 
 /* The ARM spec declares that even if local irqs are masked in
  * the CPSR register, an irq should wake up a cpu from WFI anyway.
  * For this reason we need to check for irqs that need delivery,
  * ignoring the CPSR register, *after* calling SCHEDOP_block to
- * avoid races with vgic_vcpu_inject_irq.
+ * avoid races with vgic_inject_irq.
  */
 void vcpu_block_unless_event_pending(struct vcpu *v)
 {
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index 84582157b8..efd5cd62fb 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -153,7 +153,7 @@ void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq)
     if ( vcpu_id >= d->max_vcpus )
           return;
 
-    vgic_vcpu_inject_irq(d->vcpu[vcpu_id], virq);
+    vgic_inject_irq(d, d->vcpu[vcpu_id], virq, true);
 }
 
 /*
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 29af10e82c..aa4e832cae 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -225,7 +225,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
          * The irq cannot be a PPI, we only support delivery of SPIs to
          * guests.
         */
-        vgic_vcpu_inject_spi(info->d, info->virq);
+        vgic_inject_irq(info->d, NULL, info->virq, true);
         goto out_no_end;
     }
 
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 36f640f0c1..c11fcfeadd 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -260,7 +260,7 @@ static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 
     current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
     WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
-    vgic_vcpu_inject_irq(current, current->arch.virt_timer.irq);
+    vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, true);
 }
 
 /*
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index c3fdcebbde..3c77d5fef6 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -291,7 +291,7 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         vgic_remove_irq_from_queues(old, p);
         irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        vgic_vcpu_inject_irq(new, irq);
+        vgic_inject_irq(new->domain, new, irq, true);
         return true;
     }
     /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
@@ -450,7 +450,7 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
                         sgir, target->list);
                 continue;
             }
-            vgic_vcpu_inject_irq(d->vcpu[vcpuid], virq);
+            vgic_inject_irq(d, d->vcpu[vcpuid], virq, true);
         }
         break;
     case SGI_TARGET_OTHERS:
@@ -459,12 +459,12 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
         {
             if ( i != current->vcpu_id && d->vcpu[i] != NULL &&
                  is_vcpu_online(d->vcpu[i]) )
-                vgic_vcpu_inject_irq(d->vcpu[i], virq);
+                vgic_inject_irq(d, d->vcpu[i], virq, true);
         }
         break;
     case SGI_TARGET_SELF:
         perfc_incr(vgic_sgi_self);
-        vgic_vcpu_inject_irq(d->vcpu[current->vcpu_id], virq);
+        vgic_inject_irq(d, current, virq, true);
         break;
     default:
         gprintk(XENLOG_WARNING,
@@ -524,13 +524,29 @@ void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p)
     gic_remove_from_lr_pending(v, p);
 }
 
-void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
+int vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq,
+                    bool level)
 {
     uint8_t priority;
     struct pending_irq *iter, *n;
     unsigned long flags;
     bool running;
 
+    /*
+     * For edge triggered interrupts we always ignore a "falling edge".
+     * For level triggered interrupts we shouldn't, but do anyway.
+     */
+    if ( !level )
+        return 0;
+
+    if ( !v )
+    {
+        /* The IRQ needs to be an SPI if no vCPU is specified. */
+        ASSERT(virq >= 32 && virq <= vgic_num_irqs(d));
+
+        v = vgic_get_target_vcpu(d->vcpu[0], virq);
+    }
+
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
     n = irq_to_pending(v, virq);
@@ -538,14 +554,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
     if ( unlikely(!n) )
     {
         spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
-        return;
+        return 0;
     }
 
     /* vcpu offline */
     if ( test_bit(_VPF_down, &v->pause_flags) )
     {
         spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
-        return;
+        return 0;
     }
 
     set_bit(GIC_IRQ_GUEST_QUEUED, &n->status);
@@ -582,22 +598,13 @@ out:
         perfc_incr(vgic_cross_cpu_intr_inject);
         smp_send_event_check_mask(cpumask_of(v->processor));
     }
-}
-
-void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq)
-{
-    struct vcpu *v;
 
-    /* the IRQ needs to be an SPI */
-    ASSERT(virq >= 32 && virq <= vgic_num_irqs(d));
-
-    v = vgic_get_target_vcpu(d->vcpu[0], virq);
-    vgic_vcpu_inject_irq(v, virq);
+    return 0;
 }
 
 void arch_evtchn_inject(struct vcpu *v)
 {
-    vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+    vgic_inject_irq(v->domain, v, v->domain->arch.evtchn_irq, true);
 }
 
 bool vgic_evtchn_irq_pending(struct vcpu *v)
diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
index 7788c2fc32..5dcf4bec18 100644
--- a/xen/arch/arm/vpl011.c
+++ b/xen/arch/arm/vpl011.c
@@ -68,7 +68,7 @@ static void vpl011_update_interrupt_status(struct domain *d)
      * status bit has been set since the last time.
      */
     if ( uartmis & ~vpl011->shadow_uartmis )
-        vgic_vcpu_inject_spi(d, GUEST_VPL011_SPI);
+        vgic_inject_irq(d, NULL, GUEST_VPL011_SPI, true);
 
     vpl011->shadow_uartmis = uartmis;
 }
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
index f52a723a5f..8164f6c7f1 100644
--- a/xen/arch/arm/vtimer.c
+++ b/xen/arch/arm/vtimer.c
@@ -46,7 +46,7 @@ static void phys_timer_expired(void *data)
     if ( !(t->ctl & CNTx_CTL_MASK) )
     {
         perfc_incr(vtimer_phys_inject);
-        vgic_vcpu_inject_irq(t->v, t->irq);
+        vgic_inject_irq(t->v->domain, t->v, t->irq, true);
     }
     else
         perfc_incr(vtimer_phys_masked);
@@ -56,7 +56,7 @@ static void virt_timer_expired(void *data)
 {
     struct vtimer *t = data;
     t->ctl |= CNTx_CTL_MASK;
-    vgic_vcpu_inject_irq(t->v, t->irq);
+    vgic_inject_irq(t->v->domain, t->v, t->irq, true);
     perfc_incr(vtimer_virt_inject);
 }
 
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index d03298e12c..b75fdeb068 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -202,8 +202,8 @@ extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
 extern void domain_vgic_free(struct domain *d);
 extern int vcpu_vgic_init(struct vcpu *v);
 extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
-extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
-extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
+extern int vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq,
+                           bool level);
 extern void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p);
 extern void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p);
 extern void vgic_clear_pending_irqs(struct vcpu *v);
-- 
2.14.1

