
[Xen-devel] [RFC PATCH 25/49] ARM: new VGIC: Add GICv2 world switch backend



Processing maintenance interrupts and accessing the list registers
depend on the host's GIC version.
Introduce vgic-v2.c to contain the GICv2-specific functions and
implement the GICv2-specific code for syncing the emulation state
into the VGIC list registers.
This also adds the hook to let Xen set up the host GIC addresses.

This is based on Linux commit 140b086dd197, written by Marc Zyngier.
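
As a rough illustration of the list register encoding implemented
below (a sketch only, using the macros this patch introduces): a
pending, level triggered, purely virtual IRQ 42 at priority 0xa0
would end up in an LR as:

    u32 val = 42;                                 /* GICH_LR_VIRTUALID */
    val |= GICH_LR_PENDING_BIT;
    val |= GICH_LR_EOI;               /* maintenance interrupt on EOI */
    val |= (0xa0 >> 3) << GICH_LR_PRIORITY_SHIFT; /* 5 priority bits */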

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
---
 xen/arch/arm/vgic/vgic-v2.c | 261 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic.c    |  20 ++++
 xen/arch/arm/vgic/vgic.h    |   8 ++
 3 files changed, 289 insertions(+)
 create mode 100644 xen/arch/arm/vgic/vgic-v2.c

diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
new file mode 100644
index 0000000000..10fc467ffa
--- /dev/null
+++ b/xen/arch/arm/vgic/vgic-v2.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ * Imported from Linux ("new" KVM VGIC) and heavily adapted to Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/arm_vgic.h>
+#include <asm/bug.h>
+#include <asm/io.h>
+#include <xen/sched.h>
+#include <xen/sizes.h>
+
+#include "vgic.h"
+
+#define GICH_ELRSR0                     0x30
+#define GICH_ELRSR1                     0x34
+#define GICH_LR0                        0x100
+
+#define GICH_LR_VIRTUALID               (0x3ff << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT      (10)
+#define GICH_LR_PHYSID_CPUID            (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PRIORITY_SHIFT          23
+#define GICH_LR_STATE                   (3 << 28)
+#define GICH_LR_PENDING_BIT             (1 << 28)
+#define GICH_LR_ACTIVE_BIT              (1 << 29)
+#define GICH_LR_EOI                     (1 << 19)
+#define GICH_LR_HW                      (1U << 31)
+
+static struct {
+    bool enabled;
+    paddr_t dbase;          /* Distributor interface address */
+    paddr_t cbase;          /* CPU interface address & size */
+    paddr_t csize;
+    paddr_t vbase;          /* Virtual CPU interface address */
+    void __iomem *hbase;        /* Hypervisor control interface */
+
+    /* Offset to add to get an 8kB contiguous region if GIC is aliased */
+    uint32_t aliased_offset;
+} gic_v2_hw_data;
+
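+/* Record the host GICv2 hardware addresses for the code below to use. */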
+void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize,
+                      paddr_t vbase, void __iomem *hbase,
+                      uint32_t aliased_offset)
+{
+    gic_v2_hw_data.enabled = true;
+    gic_v2_hw_data.dbase = dbase;
+    gic_v2_hw_data.cbase = cbase;
+    gic_v2_hw_data.csize = csize;
+    gic_v2_hw_data.vbase = vbase;
+    gic_v2_hw_data.hbase = hbase;
+    gic_v2_hw_data.aliased_offset = aliased_offset;
+}
+
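+/*
+ * Set GICH_HCR.UIE, to request a maintenance interrupt once the list
+ * registers run (almost) empty, giving us a chance to refill them.
+ */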
+void vgic_v2_set_underflow(struct vcpu *vcpu)
+{
+    gic_hw_ops->update_hcr_status(GICH_HCR_UIE, 1);
+}
+
+/*
+ * transfer the content of the LRs back into the corresponding ap_list:
+ * - active bit is transferred as is
+ * - pending bit is
+ *   - transferred as is in case of edge sensitive IRQs
+ *   - set to the line-level (resample time) for level sensitive IRQs
+ */
+void vgic_v2_fold_lr_state(struct vcpu *vcpu)
+{
+    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+    struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
+    int lr;
+    unsigned long flags;
+
+    cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
+    for ( lr = 0; lr < vgic_cpu->used_lrs; lr++ )
+    {
+        u32 val = cpuif->vgic_lr[lr];
+        u32 intid = val & GICH_LR_VIRTUALID;
+        struct vgic_irq *irq;
+
+        irq = vgic_get_irq(vcpu->domain, vcpu, intid);
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+
+        /* Always preserve the active bit */
+        irq->active = !!(val & GICH_LR_ACTIVE_BIT);
+
+        /* Edge is the only case where we preserve the pending bit */
+        if ( irq->config == VGIC_CONFIG_EDGE && (val & GICH_LR_PENDING_BIT) )
+        {
+            irq->pending_latch = true;
+
+            if ( vgic_irq_is_sgi(intid) )
+            {
+                u32 cpuid = val & GICH_LR_PHYSID_CPUID;
+
+                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+                irq->source |= (1 << cpuid);
+            }
+        }
+
+        if ( irq->hw && irq->config == VGIC_CONFIG_LEVEL &&
+            (val & GICH_LR_PENDING_BIT) )
+        {
+            irq->line_level = gic_read_pending_state(irq->hwintid);
+
+            if ( !irq->line_level )
+                gic_set_active_state(irq->hwintid, true);
+        }
+
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    vgic_cpu->used_lrs = 0;
+}
+
+/*
+ * Populates the particular LR with the state of a given IRQ:
+ * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
+ * - for a level sensitive IRQ the pending state value is unchanged;
+ *   it is dictated directly by the input level
+ *
+ * If @irq describes an SGI with multiple sources, we choose the
+ * lowest-numbered source VCPU and clear that bit in the source bitmap.
+ *
+ * The irq_lock must be held by the caller.
+ */
+void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+    u32 val = irq->intid;
+
+    if ( irq_is_pending(irq) )
+    {
+        val |= GICH_LR_PENDING_BIT;
+
+        if ( irq->config == VGIC_CONFIG_EDGE )
+            irq->pending_latch = false;
+
+        if ( vgic_irq_is_sgi(irq->intid) )
+        {
+            u32 src = ffs(irq->source);
+
+            BUG_ON(!src);
+            val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+            irq->source &= ~(1 << (src - 1));
+            if ( irq->source )
+                irq->pending_latch = true;
+        }
+    }
+
+    if ( irq->active )
+        val |= GICH_LR_ACTIVE_BIT;
+
+    if ( irq->hw )
+    {
+        val |= GICH_LR_HW;
+        val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+        /*
+         * Never set pending+active on a HW interrupt, as the
+         * pending state is kept at the physical distributor
+         * level.
+         */
+        if ( irq->active && irq_is_pending(irq) )
+            val &= ~GICH_LR_PENDING_BIT;
+    }
+    else
+    {
+        if ( irq->config == VGIC_CONFIG_LEVEL )
+            val |= GICH_LR_EOI;
+    }
+
+    /*
+     * Level-triggered mapped IRQs are special because we only observe
+     * rising edges as input to the VGIC.  We therefore lower the line
+     * level here, so that we can take new virtual IRQs.  See
+     * vgic_v2_fold_lr_state for more info.
+     */
+    if ( irq->hw && irq->config == VGIC_CONFIG_LEVEL &&
+        (val & GICH_LR_PENDING_BIT) )
+        irq->line_level = false;
+
+    /* The GICv2 LR only holds five bits of priority. */
+    val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
+
+    vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
+}
+
+void vgic_v2_clear_lr(struct vcpu *vcpu, int lr)
+{
+    vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
+}
+
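+/*
+ * Read the hardware LRs back into our shadow copy, skipping those the
+ * GIC reports as empty in ELRSR (their state bits are just cleared),
+ * then zero all used hardware LRs.
+ */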
+static void save_lrs(struct vcpu *vcpu, void __iomem *base)
+{
+    struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+    u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+    u64 elrsr;
+    int i;
+
+    elrsr = readl_relaxed(base + GICH_ELRSR0);
+    if ( unlikely(used_lrs > 32) )
+        elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;
+
+    for ( i = 0; i < used_lrs; i++ )
+    {
+        if ( elrsr & (1ULL << i) )
+            cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
+        else
+            cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+
+        writel_relaxed(0, base + GICH_LR0 + (i * 4));
+    }
+}
+
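+/* On guest exit: save the LRs and disable the virtual CPU interface. */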
+void vgic_v2_save_state(struct vcpu *vcpu)
+{
+    u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+
+    if ( used_lrs )
+    {
+        save_lrs(vcpu, gic_v2_hw_data.hbase);
+        writel_relaxed(0, gic_v2_hw_data.hbase + GICH_HCR);
+    }
+}
+
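+/* On guest entry: write GICH_HCR and the shadow LRs back to the GIC. */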
+void vgic_v2_restore_state(struct vcpu *vcpu)
+{
+    struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+    u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+    int i;
+
+    if ( used_lrs )
+    {
+        writel_relaxed(cpu_if->vgic_hcr,
+                       gic_v2_hw_data.hbase + GICH_HCR);
+        for ( i = 0; i < used_lrs; i++ )
+            writel_relaxed(cpu_if->vgic_lr[i],
+                           gic_v2_hw_data.hbase + GICH_LR0 + (i * 4));
+    }
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index a1f77130d4..f4f2a04a60 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -488,6 +488,7 @@ retry:
 
 static inline void vgic_fold_lr_state(struct vcpu *vcpu)
 {
+    vgic_v2_fold_lr_state(vcpu);
 }
 
 /* Requires the irq_lock to be held. */
@@ -495,14 +496,18 @@ static inline void vgic_populate_lr(struct vcpu *vcpu,
                                     struct vgic_irq *irq, int lr)
 {
     ASSERT(spin_is_locked(&irq->irq_lock));
+
+    vgic_v2_populate_lr(vcpu, irq, lr);
 }
 
 static inline void vgic_clear_lr(struct vcpu *vcpu, int lr)
 {
+    vgic_v2_clear_lr(vcpu, lr);
 }
 
 static inline void vgic_set_underflow(struct vcpu *vcpu)
 {
+    vgic_v2_set_underflow(vcpu);
 }
 
 /* Requires the ap_list_lock to be held. */
@@ -573,6 +578,11 @@ next:
         vgic_clear_lr(vcpu, count);
 }
 
+static inline void vgic_save_state(struct vcpu *vcpu)
+{
+    vgic_v2_save_state(vcpu);
+}
+
 /*
  * gic_clear_lrs() - Update the VGIC state from hardware after a guest's run.
  * @vcpu: the VCPU.
@@ -592,11 +602,18 @@ void gic_clear_lrs(struct vcpu *vcpu)
     if ( list_empty(&vcpu->arch.vgic_cpu.ap_list_head) )
         return;
 
+    vgic_save_state(vcpu);
+
     if ( vgic_cpu->used_lrs )
         vgic_fold_lr_state(vcpu);
     vgic_prune_ap_list(vcpu);
 }
 
+static inline void vgic_restore_state(struct vcpu *vcpu)
+{
+    vgic_v2_restore_state(vcpu);
+}
+
 /*
  * gic_inject() - flush the emulation state into the hardware on guest entry
  *
@@ -625,7 +642,10 @@ void gic_inject(void)
     spin_lock(&current->arch.vgic_cpu.ap_list_lock);
     vgic_flush_lr_state(current);
     spin_unlock(&current->arch.vgic_cpu.ap_list_lock);
+
+    vgic_restore_state(current);
 }
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
index 47fc58b81e..771ca6f046 100644
--- a/xen/arch/arm/vgic/vgic.h
+++ b/xen/arch/arm/vgic/vgic.h
@@ -41,4 +41,12 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
     atomic_inc(&irq->refcount);
 }
 
+void vgic_v2_fold_lr_state(struct vcpu *vcpu);
+void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr);
+void vgic_v2_clear_lr(struct vcpu *vcpu, int lr);
+void vgic_v2_set_underflow(struct vcpu *vcpu);
+
+void vgic_v2_save_state(struct vcpu *vcpu);
+void vgic_v2_restore_state(struct vcpu *vcpu);
+
 #endif
-- 
2.14.1

