
[Xen-devel] [RFC PATCH 30/49] ARM: new VGIC: Add ENABLE registers handlers



As the enable register handlers are shared between the v2 and v3
emulation, their implementation goes into vgic-mmio.c, so that the
v3 emulation can easily reference them later as well.
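
For illustration only (not part of this patch): with the handlers
declared in vgic-mmio.h, a later vgic-mmio-v3.c could reuse them
verbatim in its distributor register table, along the lines of the
sketch below. The v3 table name and layout are assumptions that
merely mirror the v2 file touched here.

    /*
     * Hypothetical excerpt from a future vgic-mmio-v3.c; the macro and
     * handler names are taken from the v2 table in this patch, while the
     * v3 table itself is an assumption.
     */
    static const struct vgic_register_region vgic_v3_dist_registers[] = {
        REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
            vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
            VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
            vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
            VGIC_ACCESS_32bit),
    };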

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
---
 xen/arch/arm/vgic/vgic-mmio-v2.c |   4 +-
 xen/arch/arm/vgic/vgic-mmio.c    | 114 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic-mmio.h    |  11 ++++
 3 files changed, 127 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index 0926b3243e..eca6840ff9 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -74,10 +74,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
         vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
-        vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
+        vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
-        vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
+        vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
         vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
index 59703a6909..3d9fa02a10 100644
--- a/xen/arch/arm/vgic/vgic-mmio.c
+++ b/xen/arch/arm/vgic/vgic-mmio.c
@@ -39,6 +39,120 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
     /* Ignore */
 }
 
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
+ * of the enabled bit, so there is only one function for both here.
+ */
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+                    paddr_t addr, unsigned int len)
+{
+    u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+    u32 value = 0;
+    int i;
+
+    /* Loop over all IRQs affected by this read */
+    for ( i = 0; i < len * 8; i++ )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        if ( irq->enabled )
+            value |= (1U << i);
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    return value;
+}
+
+static void vgic_handle_hardware_irq(irq_desc_t *desc, int irq_type,
+                     bool enable)
+{
+    unsigned long flags;
+
+    /* irq_set_affinity(desc, cpumask_of(v_target->processor)); */
+    spin_lock_irqsave(&desc->lock, flags);
+    if ( enable )
+    {
+        gic_set_irq_type(desc, irq_type == VGIC_CONFIG_LEVEL ?
+                 IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING);
+        desc->handler->enable(desc);
+    }
+    else
+        desc->handler->disable(desc);
+    spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+                 paddr_t addr, unsigned int len,
+                 unsigned long val)
+{
+    u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+    irq_desc_t *desc;
+    int i;
+    unsigned long flags;
+    enum vgic_irq_config config;
+
+    for_each_set_bit( i, &val, len * 8 )
+    {
+        struct vgic_irq *irq;
+
+        irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->enabled = true;
+        if ( irq->hw )
+        {
+            /*
+             * The irq cannot be a PPI, we only support delivery
+             * of SPIs to guests.
+             */
+            ASSERT(irq->hwintid >= 32);
+
+            desc = irq_to_desc(irq->hwintid);
+            config = irq->config;
+        }
+        else
+            desc = NULL;
+        vgic_queue_irq_unlock(vcpu->domain, irq, flags);
+
+        vgic_put_irq(vcpu->domain, irq);
+
+        if ( desc )
+            vgic_handle_hardware_irq(desc, config, true);
+    }
+}
+
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+                 paddr_t addr, unsigned int len,
+                 unsigned long val)
+{
+    u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+    int i;
+
+    for_each_set_bit( i, &val, len * 8 )
+    {
+        struct vgic_irq *irq;
+        unsigned long flags;
+        irq_desc_t *desc;
+
+        irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+        spin_lock_irqsave(&irq->irq_lock, flags);
+
+        irq->enabled = false;
+
+        if ( irq->hw )
+            desc = irq_to_desc(irq->hwintid);
+        else
+            desc = NULL;
+
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+        vgic_put_irq(vcpu->domain, irq);
+
+        if ( desc )
+            vgic_handle_hardware_irq(desc, 0, false);
+    }
+}
+
 static int match_region(const void *key, const void *elt)
 {
     const unsigned int offset = (unsigned long)key;
diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
index 10ac682296..9f34bd1aec 100644
--- a/xen/arch/arm/vgic/vgic-mmio.h
+++ b/xen/arch/arm/vgic/vgic-mmio.h
@@ -137,6 +137,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu,
 void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
                         unsigned int len, unsigned long val);
 
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+                    paddr_t addr, unsigned int len);
+
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+                 paddr_t addr, unsigned int len,
+                 unsigned long val);
+
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+                 paddr_t addr, unsigned int len,
+                 unsigned long val);
+
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
 
 /* Find the proper register handler entry given a certain address offset */
-- 
2.14.1

