
Re: [Xen-devel] [PATCH v5 13/30] ARM: GICv3: forward pending LPIs to guests



Hi Andre,

On 06/04/17 00:19, Andre Przywara wrote:
diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index 270a136..f4d7949 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -1217,6 +1217,12 @@ static int __init gicv2_init(void)
     return 0;
 }

+void gicv2_do_LPI(unsigned int lpi)

This should be static.

+{
+    /* No LPIs in a GICv2 */
+    BUG();
+}
+
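A minimal sketch of the static version (assuming the do_LPI hook in
gic_hw_operations that this series introduces elsewhere — the global
prototype would be dropped and the function reached through the table
instead):

static void gicv2_do_LPI(unsigned int lpi)
{
    /* No LPIs in a GICv2 */
    BUG();
}

static const struct gic_hw_operations gicv2_ops = {
    [...]
    .do_LPI              = gicv2_do_LPI,
};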

[...]

diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index 0785701..d8baebc 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -136,6 +136,62 @@ uint64_t gicv3_get_redist_address(unsigned int cpu, bool use_pta)
         return per_cpu(lpi_redist, cpu).redist_id << 16;
 }

+/*
+ * Handle incoming LPIs, which are a bit special: they are potentially
+ * numerous and only ever get injected into guests. Treat them specially
+ * here, by just looking up the target vCPU and virtual LPI number and
+ * handing them over to the injection function.
+ * Note that LPIs are edge-triggered only and have no active state, so
+ * spurious interrupts on the host side are no issue (we can just ignore
+ * them).
+ * Also a guest cannot expect that interrupts which fire before they have
+ * been fully configured will reach the CPU, so we don't need to care
+ * about that special case.
+ */
+void gicv3_do_LPI(unsigned int lpi)

Ditto.

+{
+    struct domain *d;
+    union host_lpi *hlpip, hlpi;
+    struct vcpu *vcpu;
+

As mentioned on the previous version, you will need an irq_enter() on entry and a matching irq_exit() on every return path, so the common code knows you are in an interrupt handler (see in_irq()).
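For illustration, a minimal sketch of the shape I have in mind (the
lookup and injection code elided below is the one from this hunk):

void gicv3_do_LPI(unsigned int lpi)
{
    irq_enter();

    /* ... look up the host LPI and translate it to (domain, vLPI) ... */

    /* ... hand the virtual LPI over to the injection function ... */

    irq_exit();
}

Any early return in the lookup path would need to go through irq_exit()
as well, e.g. via a goto out label.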

[...]

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 5128f13..2a14305 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1548,12 +1548,24 @@ struct pending_irq *vgic_v3_lpi_to_pending(struct domain *d, unsigned int lpi)
     return pirq;
 }

+/* Retrieve the priority of an LPI from its struct pending_irq. */
+int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi)

This should be static.

+{
+    struct pending_irq *p = vgic_v3_lpi_to_pending(d, vlpi);
+
+    if ( !p )
+        return GIC_PRI_IRQ;
+
+    return p->lpi_priority;
+}
+
 static const struct vgic_ops v3_ops = {
     .vcpu_init   = vgic_v3_vcpu_init,
     .domain_init = vgic_v3_domain_init,
     .domain_free = vgic_v3_domain_free,
     .emulate_reg  = vgic_v3_emulate_reg,
     .lpi_to_pending = vgic_v3_lpi_to_pending,
+    .lpi_get_priority = vgic_v3_lpi_get_priority,
     /*
      * We use both AFF1 and AFF0 in (v)MPIDR. Thus, the max number of CPU
      * that can be supported is up to 4096(==256*16) in theory.

[...]

diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 7c86f5b..08d6294 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -66,12 +66,14 @@ struct pending_irq
 #define GIC_IRQ_GUEST_VISIBLE  2
 #define GIC_IRQ_GUEST_ENABLED  3
 #define GIC_IRQ_GUEST_MIGRATING   4
+#define GIC_IRQ_GUEST_LPI_PENDING 5     /* Caches the pending bit of an LPI. */

Please use the big comment above to describe GUEST_LPI_PENDING.
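Something like the below added to that comment block (the wording is
just a suggestion, based only on the short comment in this hunk):

 * GIC_IRQ_GUEST_LPI_PENDING: caches the pending bit of an LPI, as an
 *     LPI's pending state is not held in any emulated register.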

     unsigned long status;
     struct irq_desc *desc; /* only set if the irq corresponds to a physical irq */
     unsigned int irq;
 #define GIC_INVALID_LR         (uint8_t)~0
     uint8_t lr;
     uint8_t priority;
+    uint8_t lpi_priority;       /* Caches the priority if this is an LPI. */
     /* inflight is used to append instances of pending_irq to
      * vgic.inflight_irqs */
     struct list_head inflight;
@@ -136,6 +138,7 @@ struct vgic_ops {
     bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
     /* lookup the struct pending_irq for a given LPI interrupt */
     struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
+    int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
     /* Maximum number of vCPU supported */
     const unsigned int max_vcpus;
 };
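For reference, I would expect common code to reach the new hook through
the per-domain handler, along these lines (sketch only, the wrapper name
is assumed):

/* In common vgic code: dispatch to the GIC-model specific callback. */
static int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
{
    return d->arch.vgic.handler->lpi_get_priority(d, vlpi);
}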


Cheers,

--
Julien Grall
