[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [RFC PATCH v1 08/10] xen/arm: Add support for GIC v3



On Wed, 19 Mar 2014, vijay.kilari@xxxxxxxxx wrote:
> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> 
> Add support for GIC v3 specification.
> This driver assumes that ARE and SRE
> is enable by default.
> 
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> ---
>  xen/arch/arm/Makefile             |    2 +-
>  xen/arch/arm/gic-v3.c             |  944 
> +++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/gic.c                |   12 +
>  xen/include/asm-arm/domain.h      |    4 +
>  xen/include/asm-arm/gic.h         |    9 +
>  xen/include/asm-arm/gic_v3_defs.h |  211 +++++++++
>  6 files changed, 1181 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index 20f59f4..a11c699 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -10,7 +10,7 @@ obj-y += vpsci.o
>  obj-y += domctl.o
>  obj-y += sysctl.o
>  obj-y += domain_build.o
> -obj-y += gic.o gic-v2.o
> +obj-y += gic.o gic-v2.o gic-v3.o
>  obj-y += io.o
>  obj-y += irq.o
>  obj-y += kernel.o
> diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
> new file mode 100644
> index 0000000..81a36ba
> --- /dev/null
> +++ b/xen/arch/arm/gic-v3.c
> @@ -0,0 +1,944 @@
> +/*
> + * xen/arch/arm/gic-v3.c
> + *
> + * ARM Generic Interrupt Controller support v3 version
> + * based on xen/arch/arm/gic-v2.c
> + * 
> + * Vijaya Kumar K <vijaya.kumar@xxxxxxxxxxxxxxxxxx>
> + * Copyright (c) 2014 Cavium Inc.
> + * 
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +#include <xen/config.h>
> +#include <xen/lib.h>
> +#include <xen/init.h>
> +#include <xen/cpu.h>
> +#include <xen/mm.h>
> +#include <xen/irq.h>
> +#include <xen/sched.h>
> +#include <xen/errno.h>
> +#include <xen/serial.h>
> +#include <xen/softirq.h>
> +#include <xen/list.h>
> +#include <xen/device_tree.h>
> +#include <asm/p2m.h>
> +#include <asm/domain.h>
> +#include <asm/platform.h>
> +
> +#include <asm/gic_v3_defs.h>
> +#include <asm/gic.h>
> +#include <asm/io.h>
> +
> +struct rdist_region {
> +    paddr_t rdist_base;
> +    paddr_t rdist_base_size;
> +    void __iomem *map_rdist_base;
> +};
> +     
> +/* Global state */
> +static struct {
> +    paddr_t dbase;            /* Address of distributor registers */
> +    paddr_t dbase_size;
> +    void __iomem *map_dbase;  /* Mapped address of distributor registers */
> +    struct rdist_region *rdist_regions;
> +    u32  rdist_stride;
> +    unsigned int rdist_count; /* Number of rdist regions count */
> +    unsigned int lines;       /* Number of interrupts (SPIs + PPIs + SGIs) */
> +    struct dt_irq maintenance;
> +    unsigned int cpus;
> +    int hw_version;
> +    spinlock_t lock;
> +} gic;
> +
> +struct gic_state_data {
> +    uint32_t gic_hcr, gic_vmcr;
> +    uint32_t gic_apr0[4];
> +    uint32_t gic_apr1[4];
> +    uint64_t gic_lr[16];
> +};
> +
> +#define GICD ((volatile unsigned char *) gic.map_dbase)
> +/* Only one region is implemented which is enough for 0-31 cpus */
> +#define GICR ((volatile unsigned char *) gic.rdist_regions[0].map_rdist_base)
> +
> +/* per-cpu re-distributor base */
> +static DEFINE_PER_CPU(paddr_t, rbase);
> +static DEFINE_PER_CPU(paddr_t, phy_rbase);
> +
> +static unsigned nr_lrs;
> +static uint32_t nr_priorities;
> +
> +/* The GIC mapping of CPU interfaces does not necessarily match the
> + * logical CPU numbering. Let's use mapping as returned by the GIC
> + * itself
> + */
> +
> +#define gic_data_rdist_rd_base()        (this_cpu(rbase))
> +#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
> +
> +static inline u64 read_cpuid_mpidr(void)
> +{
> +   return READ_SYSREG(MPIDR_EL1);
> +}
> +
> +static u64 gich_read_lr(int lr)
> +{
> +    switch (lr) 
> +    {
> +        case 0: /* ICH_LRn is 64 bit */
> +            return READ_SYSREG(ICH_LR0_EL2);
> +            break;
> +        case 1:
> +            return READ_SYSREG(ICH_LR1_EL2);
> +            break;
> +        case 2:
> +            return READ_SYSREG(ICH_LR2_EL2);
> +            break;
> +        case 3:
> +            return READ_SYSREG(ICH_LR3_EL2);
> +            break;
> +        case 4:
> +            return READ_SYSREG(ICH_LR4_EL2);
> +            break;
> +        case 5:
> +            return READ_SYSREG(ICH_LR5_EL2);
> +            break;
> +        case 6:
> +            return READ_SYSREG(ICH_LR6_EL2);
> +            break;
> +        case 7:
> +            return READ_SYSREG(ICH_LR7_EL2);
> +            break;
> +        case 8:
> +            return READ_SYSREG(ICH_LR8_EL2);
> +            break;
> +        case 9:
> +            return READ_SYSREG(ICH_LR9_EL2);
> +            break;
> +        case 10:
> +            return READ_SYSREG(ICH_LR10_EL2);
> +            break;
> +        case 11:
> +            return READ_SYSREG(ICH_LR11_EL2);
> +            break;
> +        case 12:
> +            return READ_SYSREG(ICH_LR12_EL2);
> +            break;
> +        case 13:
> +            return READ_SYSREG(ICH_LR13_EL2);
> +            break;
> +        case 14:
> +            return READ_SYSREG(ICH_LR14_EL2);
> +            break;
> +        case 15:
> +            return READ_SYSREG(ICH_LR15_EL2);
> +            break;
> +        default:
> +            return 0;
> +    }
> +}
> +
> +static void gich_write_lr(int lr, u64 val)
> +{
> +    switch (lr) 
> +    {
> +        case 0:
> +           WRITE_SYSREG(val, ICH_LR0_EL2);
> +           break;
> +        case 1:
> +           WRITE_SYSREG(val, ICH_LR1_EL2);
> +           break;
> +        case 2:
> +           WRITE_SYSREG(val, ICH_LR2_EL2);
> +           break;
> +        case 3:
> +           WRITE_SYSREG(val, ICH_LR3_EL2);
> +           break;
> +        case 4:
> +           WRITE_SYSREG(val, ICH_LR4_EL2);
> +           break;
> +        case 5:
> +           WRITE_SYSREG(val, ICH_LR5_EL2);
> +           break;
> +        case 6:
> +           WRITE_SYSREG(val, ICH_LR6_EL2);
> +           break;
> +        case 7:
> +           WRITE_SYSREG(val, ICH_LR7_EL2);
> +           break;
> +        case 8:
> +           WRITE_SYSREG(val, ICH_LR8_EL2);
> +           break;
> +        case 9:
> +           WRITE_SYSREG(val, ICH_LR9_EL2);
> +           break;
> +        case 10:
> +           WRITE_SYSREG(val, ICH_LR10_EL2);
> +           break;
> +        case 11:
> +           WRITE_SYSREG(val, ICH_LR11_EL2);
> +           break;
> +        case 12:
> +           WRITE_SYSREG(val, ICH_LR12_EL2);
> +           break;
> +        case 13:
> +           WRITE_SYSREG(val, ICH_LR13_EL2);
> +           break;
> +        case 14:
> +           WRITE_SYSREG(val, ICH_LR14_EL2);
> +           break;
> +        case 15:
> +           WRITE_SYSREG(val, ICH_LR15_EL2);
> +           break;
> +        default:
> +           return;
> +    }
> +}
> +
> +static void gic_enable_sre(void)
> +{
> +    uint32_t val;
> +
> +    val = READ_SYSREG32(ICC_SRE_EL2);
> +    val |= GICC_SRE_EL2_SRE | GICC_SRE_EL2_ENEL1 | GICC_SRE_EL2_DFB | 
> GICC_SRE_EL2_DIB;
> +    WRITE_SYSREG32(val, ICC_SRE_EL2);
> +    isb();
> +}
> +
> +/* Wait for completion of a distributor change */
> +static void gic_do_wait_for_rwp(paddr_t base)
> +{
> +    u32 val;
> +    do {
> +        val = readl_relaxed((void *)base + GICD_CTLR);
> +        val = readl_relaxed(GICD + GICD_CTLR);
> +        val = GICD[GICD_CTLR];
> +        cpu_relax();
> +    } while (val & GICD_CTLR_RWP);
> +}

As much as I think that this busy loop is terrible, unfortunately it is
part of the spec :-(
It might be worth adding a comment on the function to explain why and when
it is required.
It is also worth considering whether it makes sense to use the
notification of command completion by interrupt instead.



> +static void gic_dist_wait_for_rwp(void)
> +{
> +    gic_do_wait_for_rwp((paddr_t)GICD);
> +}
> +
> +static void gic_redist_wait_for_rwp(void)
> +{
> +    gic_do_wait_for_rwp(gic_data_rdist_rd_base());
> +}
> +
> +static void gic_wait_for_rwp(int irq)
> +{
> +    if (irq < 32)
> +         gic_redist_wait_for_rwp();
> +    else
> +         gic_dist_wait_for_rwp();
> +}
> +
> +static unsigned int gic_mask_cpu(const cpumask_t *cpumask)
> +{
> +    unsigned int cpu;
> +    cpumask_t possible_mask;
> +
> +    cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
> +    cpu = cpumask_any(&possible_mask);
> +    return cpu;
> +}
> +
> +static unsigned int gic_nr_lines(void)
> +{
> +    return gic.lines;
> +}
> +
> +static unsigned int gic_nr_lrs(void)
> +{
> +    return nr_lrs;
> +}
> +
> +static void write_aprn_regs(struct gic_state_data *d)
> +{
> +    switch(nr_priorities)
> +    {
> +        case 7:
> +            WRITE_SYSREG32(d->gic_apr0[2], ICH_AP0R2_EL2);
> +            WRITE_SYSREG32(d->gic_apr1[2], ICH_AP1R2_EL2);
> +        case 6:
> +            WRITE_SYSREG32(d->gic_apr0[1], ICH_AP0R1_EL2);
> +            WRITE_SYSREG32(d->gic_apr1[1], ICH_AP1R1_EL2);
> +        case 5:
> +            WRITE_SYSREG32(d->gic_apr0[0], ICH_AP0R0_EL2);
> +            WRITE_SYSREG32(d->gic_apr1[0], ICH_AP1R0_EL2);
> +            break;
> +        default:
> +          panic("Write Undefined active priorities \n");
> +    }
> +}
> +
> +static void read_aprn_regs(struct gic_state_data *d)
> +{
> +    switch(nr_priorities)
> +    {
> +        case 7:
> +            d->gic_apr0[2] = READ_SYSREG32(ICH_AP0R2_EL2);
> +            d->gic_apr1[2] = READ_SYSREG32(ICH_AP1R2_EL2);
> +        case 6:
> +            d->gic_apr0[1] = READ_SYSREG32(ICH_AP0R1_EL2);
> +            d->gic_apr1[1] = READ_SYSREG32(ICH_AP1R1_EL2);
> +        case 5:
> +            d->gic_apr0[0] = READ_SYSREG32(ICH_AP0R0_EL2);
> +            d->gic_apr1[0] = READ_SYSREG32(ICH_AP1R0_EL2);
> +            break;
> +        default:
> +          panic("Read Undefined active priorities \n");
> +    }
> +}
> +
> +static int gic_state_init(struct vcpu *v)
> +{
> +     v->arch.gic_state = (struct gic_state_data *)xzalloc(struct 
> gic_state_data);
> +     if(!v->arch.gic_state)
> +        return -ENOMEM;
> +     return 0; 
> +}
> +
> +static void save_state(struct vcpu *v)
> +{
> +    int i;
> +    struct gic_state_data *d;
> +    d = (struct gic_state_data *)v->arch.gic_state;
> +
> +    /* No need for spinlocks here because interrupts are disabled around
> +     * this call and it only accesses struct vcpu fields that cannot be
> +     * accessed simultaneously by another pCPU.
> +     */
> +    for ( i=0; i<nr_lrs; i++)
> +        d->gic_lr[i] = gich_read_lr(i);
> +
> +    read_aprn_regs(d); 
> +
> +    d->gic_vmcr = READ_SYSREG32(ICH_VMCR_EL2);
> +}
> +
> +static void restore_state(struct vcpu *v)
> +{
> +    int i;
> +    struct gic_state_data *d;
> +    d = (struct gic_state_data *)v->arch.gic_state;
> +
> +    for ( i=0; i<nr_lrs; i++)
> +        gich_write_lr(i, d->gic_lr[i]);
> +
> +    write_aprn_regs(d);
> +
> +    WRITE_SYSREG32(d->gic_vmcr, ICH_VMCR_EL2);
> +}
> +
> +static void gic_dump_state(struct vcpu *v)
> +{
> +    int i;
> +    struct gic_state_data *d;
> +    d = (struct gic_state_data *)v->arch.gic_state;
> +    if ( v == current )
> +    {
> +        for ( i = 0; i < nr_lrs; i++ )
> +            printk("   HW_LR[%d]=%lx\n", i, gich_read_lr(i));
> +    }
> +    else
> +    {
> +        for ( i = 0; i < nr_lrs; i++ )
> +            printk("   VCPU_LR[%d]=%lx\n", i, d->gic_lr[i]);
> +    }
> +}
> + 
> +static void gic_enable_irq(int irq)
> +{
> +    uint32_t enabler;
> +
> +    /* Enable routing */
> +    if(irq > 31)
> +    {
> +        enabler = readl_relaxed(GICD + GICD_ISENABLER + (irq / 32) * 4);
> +        writel_relaxed(enabler | (1u << (irq % 32)), GICD + GICD_ISENABLER + 
> (irq / 32) * 4);
> +    }
> +    else
> +    {
> +        enabler = readl_relaxed((void *)gic_data_rdist_sgi_base() + 
> GICR_ISENABLER0);
> +        writel_relaxed(enabler | (1u << irq), (void 
> *)gic_data_rdist_sgi_base() + GICR_ISENABLER0);
> +    }
> +    gic_wait_for_rwp(irq);
> +}
> +
> +static void gic_disable_irq(int irq)
> +{
> +    /* Disable routing */
> +    if(irq > 31)
> +        writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + ((irq / 32) 
> * 4));
> +    else
> +        writel_relaxed(1u << irq, (void *)gic_data_rdist_sgi_base() + 
> GICR_ICENABLER0);
> +}
> +
> +static void gic_eoi_irq(int irq)
> +{
> +    /* Lower the priority */
> +    WRITE_SYSREG32(irq, ICC_EOIR1_EL1);
> +}
> +
> +static void gic_dir_irq(int irq)
> +{
> +    /* Deactivate */
> +    WRITE_SYSREG32(irq, ICC_DIR_EL1);
> +}
> +
> +static unsigned int gic_ack_irq(void)
> +{
> +    return (READ_SYSREG32(ICC_IAR1_EL1) & GICC_IA_IRQ);
> +}
> +
> +static u64 gic_mpidr_to_affinity(u64 mpidr)
> +{
> +    /* Make sure we don't broadcast the interrupt */
> +    return mpidr & ~GICD_IROUTER_SPI_MODE_ANY;
> +}
> +
> +/*
> + * - needs to be called with gic.lock held
> + * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
> + * already called gic_cpu_init
> + */
> +static void gic_set_irq_property(unsigned int irq, bool_t level,
> +                                   const cpumask_t *cpu_mask,
> +                                   unsigned int priority)
> +{
> +    uint32_t cfg, edgebit;
> +    u64 affinity;
> +    unsigned int cpu = gic_mask_cpu(cpu_mask);
> +    paddr_t rebase;
> +
> +
> +    /* Set edge / level */
> +    if (irq < 16)
> +     /* SGI's are always edge-triggered not need to call GICD_ICFGR0 */
> +       cfg = readl_relaxed((void *)gic_data_rdist_sgi_base() + GICR_ICFGR0);
> +    else if (irq < 32)
> +       cfg = readl_relaxed((void *)gic_data_rdist_sgi_base() + GICR_ICFGR1);
> +    else
> +       cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
> +
> +    edgebit = 2u << (2 * (irq % 16));
> +    if ( level )
> +        cfg &= ~edgebit;
> +    else
> +        cfg |= edgebit;
> +
> +    if (irq < 16)
> +       writel_relaxed(cfg, (void *)gic_data_rdist_sgi_base() + GICR_ICFGR0);
> +    else if (irq < 32)
> +       writel_relaxed(cfg, (void *)gic_data_rdist_sgi_base() + GICR_ICFGR1);
> +    else
> +       writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
> +
> +
> +    /* need to check if ARE is set to access IROUTER */
> +    affinity = gic_mpidr_to_affinity(cpu_logical_map(cpu));
> +    if (irq > 31)
> +        writeq_relaxed(affinity, (GICD + GICD_IROUTER + irq * 8));
> +
> +    /* Set priority */
> +    if (irq < 32)
> +    {
> +     rebase = gic_data_rdist_sgi_base();

code style: the indentation is wrong

> +        writeb_relaxed(priority, (void *)rebase + GICR_IPRIORITYR0 + irq);
> +    }
> +    else 
> +    {
> +        writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
> +
> +    }
> +}
> +
> +static void __init gic_dist_init(void)
> +{
> +    uint32_t type;
> +    u64 affinity;
> +    int i;
> +
> +    /* Disable the distributor */
> +    writel_relaxed(0, GICD + GICD_CTLR);
> +
> +    type = readl_relaxed(GICD + GICD_TYPER);
> +    gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
> +
> +    printk("GIC: %d lines, (IID %8.8x).\n",
> +           gic.lines, readl_relaxed(GICD + GICD_IIDR));
> +
> +    /* Default all global IRQs to level, active low */
> +    for ( i = 32; i < gic.lines; i += 16 )
> +        writel_relaxed(0, GICD + GICD_ICFGR + (i / 16) * 4);
> +
> +    /* Default priority for global interrupts */
> +    for ( i = 32; i < gic.lines; i += 4 )
> +        writel_relaxed((GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | 
> GIC_PRI_IRQ), GICD + GICD_IPRIORITYR + (i / 4) * 4);
> +
> +    /* Disable all global interrupts */
> +    for ( i = 32; i < gic.lines; i += 32 )
> +        writel_relaxed(0xffffffff, GICD + GICD_ICENABLER + (i / 32) * 4);
> +
> +    gic_dist_wait_for_rwp();
> +
> +    /* Turn on the distributor */
> +    writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A 
> | GICD_CTLR_ENABLE_G1, GICD + GICD_CTLR);
> + 
> +    /* Route all global IRQs to this CPU */
> +    affinity = gic_mpidr_to_affinity(read_cpuid_mpidr());
> +    for ( i = 31; i < gic.lines; i++ )
> +        writeq_relaxed(affinity, GICD + GICD_IROUTER + i * 8);
> +}
> +
> +static void gic_enable_redist(void)
> +{
> +    paddr_t rbase;
> +    u32 val;
> +
> +    rbase = this_cpu(rbase);
> +
> +    /* Wake up this CPU redistributor */
> +    val = readl_relaxed((void *)rbase + GICR_WAKER);
> +    val &= ~GICR_WAKER_ProcessorSleep;
> +    writel_relaxed(val, (void *)rbase + GICR_WAKER);
> +
> +    do {
> +         val = readl_relaxed((void *)rbase + GICR_WAKER);
> +         cpu_relax();
> +    } while (val & GICR_WAKER_ChildrenAsleep);
> +}
> +
> +static int __init gic_populate_rdist(void)
> +{
> +    u64 mpidr = cpu_logical_map(smp_processor_id());
> +    u64 typer;
> +    u64 aff;
> +    int i;
> +    uint32_t reg;
> +
> +    aff  = mpidr & ((1 << 24) - 1);
> +    aff |= (mpidr >> 8) & (0xffUL << 24);
> +
> +    for (i = 0; i < gic.rdist_count; i++) {
> +        uint32_t ptr = 0;
> +
> +        reg = readl_relaxed(GICR + ptr + GICR_PIDR0);
> +        if ((reg & 0xff) != GICR_PIDR0_GICv3) { /* We're in trouble... */
> +            printk("No redistributor present @%x\n", ptr);
> +            break;
> +        }
> +
> +        do {
> +            typer = readq_relaxed(GICR + ptr + GICR_TYPER);
> +            if ((typer >> 32) == aff) {
> +                              
> +                this_cpu(rbase) = (u64)GICR + ptr;
> +                this_cpu(phy_rbase) = gic.rdist_regions[i].rdist_base + ptr;
> +
> +                printk("CPU%d: found redistributor %llx region %d\n",
> +                            smp_processor_id(), (unsigned long long) mpidr, 
> i);
> +                return 0;
> +            }
> +
> +            if (gic.rdist_stride) {
> +                ptr += gic.rdist_stride;
> +            } else {
> +                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
> +                if (typer & GICR_TYPER_VLPIS)
> +                    ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
> +                }
> +            } while (!(typer & GICR_TYPER_LAST));

code style: the indentation is wrong


> +        }
> +
> +        /* We couldn't even deal with ourselves... */
> +        printk("CPU%d: mpidr %lx has no re-distributor!\n",
> +                  smp_processor_id(), (unsigned long)mpidr);
> +        return -ENODEV;
> +}
> +
> +static void __cpuinit gic_cpu_init(void)
> +{
> +    int i;
> +    paddr_t rbase_sgi;
> +
> +    /* Register ourselves with the rest of the world */
> +    if (gic_populate_rdist())
> +        return;
> +
> +    gic_enable_redist();
> +
> +    rbase_sgi = gic_data_rdist_sgi_base();
> +
> +    /*
> +     * Set priority on PPI and SGI interrupts
> +     */
> +    for (i = 0; i < 16; i += 4)
> +        writel_relaxed((GIC_PRI_IPI<<24 | GIC_PRI_IPI<<16 | GIC_PRI_IPI<<8 | 
> GIC_PRI_IPI), (void *)rbase_sgi + GICR_IPRIORITYR0 + (i / 4) * 4);
> +        //writel_relaxed(0x0, (void *)rbase + GICR_IPRIORITYR0 + (i / 4) * 
> 4);
> +        //writel_relaxed(0xa0a0a0a0, (void *)rbase + GICR_IPRIORITYR0 + (i / 
> 4) * 4);

What?


> +
> +    for (i = 16; i < 32; i += 4)
> +        writel_relaxed((GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | 
> GIC_PRI_IRQ), (void *)rbase_sgi + GICR_IPRIORITYR0 + (i / 4) * 4);
> +
> +    /*
> +     * Disable all PPI interrupts, ensure all SGI interrupts are
> +     * enabled.
> +     */
> +    writel_relaxed(0xffff0000, (void *)rbase_sgi + GICR_ICENABLER0);
> +    writel_relaxed(0x0000ffff, (void *)rbase_sgi + GICR_ISENABLER0);
> +
> +    gic_redist_wait_for_rwp();
> +
> +    /* Enable system registers */
> +    gic_enable_sre();
> +
> +    WRITE_SYSREG32(0, ICC_BPR1_EL1);
> +    /* Set priority mask register */
> +    WRITE_SYSREG32(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
> +
> +    /* EOI drops priority too (mode 0) */
> +    WRITE_SYSREG32(GICC_CTLR_EL1_EOImode_drop, ICC_CTLR_EL1);
> +
> +    WRITE_SYSREG32(1, ICC_IGRPEN1_EL1);
> +}
> +
> +static void gic_cpu_disable(void)
> +{
> +    WRITE_SYSREG32(0, ICC_CTLR_EL1);
> +}
> +
> +static void __cpuinit gic_hyp_init(void)
> +{
> +    uint32_t vtr;
> +
> +    vtr = READ_SYSREG32(ICH_VTR_EL2);
> +    nr_lrs  = (vtr & GICH_VTR_NRLRGS) + 1;
> +    nr_priorities = ((vtr >> GICH_VTR_PRIBITS_SHIFT) & 
> GICH_VTR_PRIBITS_MASK) + 1;
> +
> +    WRITE_SYSREG32(GICH_VMCR_EOI | GICH_VMCR_VENG1, ICH_VMCR_EL2);
> +    WRITE_SYSREG32(GICH_HCR_VGRP1EIE | GICH_HCR_EN, ICH_HCR_EL2);
> +
> +    update_cpu_lr_mask();
> +    vtr = READ_SYSREG32(ICH_HCR_EL2);
> +}
> +
> +/* Set up the per-CPU parts of the GIC for a secondary CPU */
> +static int __cpuinit gic_init_secondary_cpu(struct notifier_block *nfb,
> +                                       unsigned long action, void *hcpu)
> +{
> +    if (action == CPU_STARTING)
> +    {
> +        spin_lock(&gic.lock);
> +        gic_cpu_init();
> +        gic_hyp_init();
> +        spin_unlock(&gic.lock);
> +    }
> +    return NOTIFY_DONE;
> +}
> +
> +static struct notifier_block gic_cpu_nb = {
> +    .notifier_call = gic_init_secondary_cpu,
> +    .priority = 100
> +};
> +
> +static void gic_smp_init(void)
> +{
> +   register_cpu_notifier(&gic_cpu_nb);
> +}
> +
> +static void __cpuinit gic_hyp_disable(void)
> +{
> +    uint32_t vtr;
> +    vtr = READ_SYSREG32(ICH_HCR_EL2);
> +    vtr &= ~0x1;
> +    WRITE_SYSREG32( vtr, ICH_HCR_EL2);
> +}
> +
> +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
> +                                   u64 cluster_id)
> +{
> +    int cpu = *base_cpu;
> +    u64 mpidr = cpu_logical_map(cpu);
> +    u16 tlist = 0;
> +
> +    while (cpu < nr_cpu_ids) {
> +        /*
> +         * If we ever get a cluster of more than 16 CPUs, just
> +         * scream and skip that CPU.
> +         */
> +        tlist |= 1 << (mpidr & 0xf);
> +
> +        cpu = cpumask_next(cpu, mask);
> +        mpidr = cpu_logical_map(cpu);
> +
> +        if (cluster_id != (mpidr & ~0xffUL)) {
> +            cpu--;
> +            goto out;
> +        }
> +    }
> +out:
> +    *base_cpu = cpu;
> +    return tlist;
> +}
> +
> +static void send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
> +{
> +    u64 val;
> +
> +    val  = (cluster_id & 0xff00ff0000UL) << 16; /* Aff3 + Aff2 */
> +    val |= (cluster_id & 0xff00) << 8;          /* Aff1 */
> +    val |= irq << 24;
> +    val |= tlist;
> +
> +    WRITE_SYSREG(val, ICC_SGI1R_EL1);   
> +}
> +
> +static void gic_send_sgi(const cpumask_t *cpumask, enum gic_sgi sgi)
> +{
> +    int cpu = 0;
> +
> +    dsb();
> +
> +    for_each_cpu(cpu, cpumask) {
> +        u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
> +        u16 tlist;
> +
> +        tlist = gic_compute_target_list(&cpu, cpumask, cluster_id);
> +        send_sgi(cluster_id, tlist, sgi);
> +    }
> +}
> +
> +/* Shut down the per-CPU GIC interface */
> +static void gic_disable_interface(void)
> +{
> +    ASSERT(!local_irq_is_enabled());
> +
> +    spin_lock(&gic.lock);
> +    gic_cpu_disable();
> +    gic_hyp_disable();
> +    spin_unlock(&gic.lock);
> +}
> +
> +static void gic_update_lr(int lr, unsigned int virtual_irq,
> +        unsigned int state, unsigned int priority)
> +{
> +    u64 maintenance_int = GICH_LR_MAINTENANCE_IRQ;
> +    u64 grp = GICH_LR_GRP1;
> +    u64 val = 0;
> +
> +    BUG_ON(lr >= nr_lrs);
> +    BUG_ON(lr < 0);
> +
> +    val =  ((((u64)state) & 0x3) << GICH_LR_STATE_SHIFT) | grp | 
> maintenance_int |
> +        ((((u64)priority) & 0xff) << GICH_LR_PRIORITY_SHIFT) |
> +        (((u64)virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT) 
> |
> +        (((u64)virtual_irq & GICH_LR_PHYSICAL_MASK) << 
> GICH_LR_PHYSICAL_SHIFT);
> +
> +    gich_write_lr(lr, val);
> +}

This function has to change after 

http://marc.info/?l=xen-devel&m=139523241201086


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.