
Re: [Xen-devel] [PATCH v3 13/16] xen/arm: Add support for GIC v3



On 04/23/2014 06:01 PM, Ian Campbell wrote:
> On Tue, 2014-04-15 at 16:47 +0530, vijay.kilari@xxxxxxxxx wrote:
> 
>> +#include <xen/config.h>
>> +#include <xen/lib.h>
>> +#include <xen/init.h>
>> +#include <xen/cpu.h>
>> +#include <xen/mm.h>
>> +#include <xen/irq.h>
>> +#include <xen/sched.h>
>> +#include <xen/errno.h>
>> +#include <xen/serial.h>
>> +#include <xen/softirq.h>
>> +#include <xen/list.h>
>> +#include <xen/device_tree.h>
>> +#include <xen/delay.h>
>> +#include <asm/p2m.h>
>> +#include <asm/domain.h>
>> +#include <asm/platform.h>
> 
> Do you really need all of these? serial.h for example?
> 
>> +
>> +#include <asm/gic_v3_defs.h>
>> +#include <asm/gic.h>
>> +#include <asm/io.h>
>> +#include <asm/device.h>
>> +
>> +static struct gic_hw_operations gic_ops;
>> +
>> +struct rdist_region {
>> +    paddr_t rdist_base;
>> +    paddr_t rdist_base_size;
>> +    void __iomem *map_rdist_base;
> 
> "base", "size" and "map" would be adequate field names I think.
> 
>> +};
>> +
>> +/* Global state */
>> +static struct {
>> +    paddr_t dbase;            /* Address of distributor registers */
>> +    paddr_t dbase_size;
>> +    void __iomem *map_dbase;  /* Mapped address of distributor registers */
>> +    struct rdist_region *rdist_regions;
>> +    u32  rdist_stride;
>> +    unsigned int rdist_count; /* Number of rdist regions count */
>> +    unsigned int lines;       /* Number of interrupts (SPIs + PPIs + SGIs) */
>> +    struct dt_irq maintenance;
>> +}gic;
>> +
>> +/* per-cpu re-distributor base */
>> +static DEFINE_PER_CPU(void __iomem*, rbase);
> 
> Does this end up containing one of the gic.rdist_regions[i].map entries?
> Any reason to duplicate this in that map field as well? Can't each
> processor map it as it is initialised and store it here directly?
> 
> I suppose we have plenty of ioremap space on v8, so nr_cpus*2*64k isn't
> too bad and there's no need to go for per-pcpu pagetables with a
> dedicated virtual address region for the redistributors.
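
For what it's worth, one reading of that suggestion as a minimal sketch in
the per-CPU init path (the per-cpu 'rbase' and the rdist_regions fields are
from the patch; the region index 'i' for the matching redistributor is an
assumption):

    /* Once this CPU has identified its redistributor region i, reuse
     * the existing mapping rather than keeping a duplicate copy of the
     * 'map' field around. */
    this_cpu(rbase) = gic.rdist_regions[i].map_rdist_base;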
> 
>> +static u64 gich_read_lr(int lr)
>> +{
>> +    switch ( lr )
>> +    {
>> +    case 0: /* ICH_LRn is 64 bit */
>> +        return READ_SYSREG(ICH_LR0_EL2);
>> +        break;
> 
> All of these breaks are redundant. I think you could get away with
>       case N: return READ_(..._LRn_EL2);
> for brevity even.
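
As a sketch of that compact form (ICH_LR0_EL2 is from the quoted hunk; the
remaining register names are assumed to follow the same pattern):

    static u64 gich_read_lr(int lr)
    {
        switch ( lr )
        {
        case 0: return READ_SYSREG(ICH_LR0_EL2);
        case 1: return READ_SYSREG(ICH_LR1_EL2);
        case 2: return READ_SYSREG(ICH_LR2_EL2);
        case 3: return READ_SYSREG(ICH_LR3_EL2);
        /* ... and so on for the remaining list registers ... */
        default:
            BUG();
        }
    }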
>> +
>> +/* Wait for completion of a distributor change */
>> +static void gicv3_do_wait_for_rwp(void __iomem *base)
>> +{
>> +    u32 val;
>> +    u32 count = 1000000;
>> +
>> +    do {
>> +        val = readl_relaxed(base + GICD_CTLR);
>> +        if ( !(val & GICD_CTLR_RWP) )
>> +           break;
>> +        cpu_relax();
>> +        udelay(1);
> 
> Ick. Is there no event when rwp changes, so we could do wfe here?
> 
> Could we at least use NOW() and MILLISECS() to construct a delay of a
> known length?
> 
>> +    } while ( count-- );
>> +
>> +    if ( !count )
>> +        dprintk(XENLOG_WARNING, "RWP timeout\n");
> 
> Shouldn't we panic here?
> 
> And if we are going to panic, we might as well wait forever? (Perhaps
> with a warning after some number of iterations.)
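
A rough sketch of the NOW()/MILLISECS() form, assuming a 1s bound (register
accessors and names as in the quoted hunk; the dprintk could just as well
be a panic, per the comment above):

    static void gicv3_do_wait_for_rwp(void __iomem *base)
    {
        s_time_t deadline = NOW() + MILLISECS(1000);

        while ( readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP )
        {
            if ( NOW() > deadline )
            {
                dprintk(XENLOG_WARNING, "RWP timeout\n");
                return;
            }
            cpu_relax();
            udelay(1);
        }
    }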
> 
>> +static unsigned int gicv3_mask_cpu(const cpumask_t *cpumask)
> 
> This returns an arbitrary CPU from cpumask? Can we name it as such,
> please.
> 
> The v2 equivalent returns a value suitable for GICD_ITARGETSR, which
> might contain multiple valid target CPUs. Does GICD_IROUTER not have the
> same property?
> 
>> +{
>> +    unsigned int cpu;
>> +    cpumask_t possible_mask;
>> +
>> +    cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
>> +    cpu = cpumask_any(&possible_mask);
>> +    return cpu;
>> +}
>> +
>> +static void write_aprn_regs(union gic_state_data *d)
> 
> Given the usage I think this should be restore_aprn_regs.
> 
>> +{
>> +    ASSERT((nr_priorities > 4 && nr_priorities < 8));
>> +    /* Write APRn register based on number of priorities
>> +       plaform has implemented */
> 
> "platform"
> 
>> +    switch ( nr_priorities )
>> +    {
>> +    case 7:
>> +        WRITE_SYSREG32(d->v3.apr0[2], ICH_AP0R2_EL2);
>> +        WRITE_SYSREG32(d->v3.apr1[2], ICH_AP1R2_EL2);
>> +        /* Fall through */
>> +    case 6:
>> +        WRITE_SYSREG32(d->v3.apr0[1], ICH_AP0R1_EL2);
>> +        WRITE_SYSREG32(d->v3.apr1[1], ICH_AP1R1_EL2);
>> +        /* Fall through */
>> +    case 5:
>> +        WRITE_SYSREG32(d->v3.apr0[0], ICH_AP0R0_EL2);
>> +        WRITE_SYSREG32(d->v3.apr1[0], ICH_AP1R0_EL2);
>> +        break;
>> +    default:
> 
> BUG_ON?
> 
>> +        break;
>> +    }
>> +}
>> +
>> +static void read_aprn_regs(union gic_state_data *d)
> 
> The same three comments as write_* apply here too.
> 
>> +static void gicv3_save_state(struct vcpu *v)
>> +{
>> +    int i;
>> +
>> +    /* No need for spinlocks here because interrupts are disabled around
>> +     * this call and it only accesses struct vcpu fields that cannot be
>> +     * accessed simultaneously by another pCPU.
>> +     */
>> +    for ( i = 0; i < nr_lrs; i++)
>> +        v->arch.gic.v3.lr[i] = gich_read_lr(i);
>> +
>> +    read_aprn_regs(&v->arch.gic); 
>> +    v->arch.gic.v3.vmcr = READ_SYSREG32(ICH_VMCR_EL2);
>> +}
>> +
>> +static void gicv3_restore_state(struct vcpu *v)
>> +{
>> +    int i;
>> +
>> +    for ( i = 0; i < nr_lrs; i++)
>> +        gich_write_lr(i, v->arch.gic.v3.lr[i]);
> 
> I wonder if the compiler could do a better job of this using the same
> switch and fallthrough method you used for aprn regs?
> 
>> +
>> +    write_aprn_regs(&v->arch.gic);
>> +
>> +    WRITE_SYSREG32(v->arch.gic.v3.vmcr, ICH_VMCR_EL2);
>> +}
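
Purely as an illustration of that fall-through idea, assuming (for brevity)
a maximum of four list registers:

    switch ( nr_lrs )
    {
    case 4: gich_write_lr(3, v->arch.gic.v3.lr[3]); /* Fall through */
    case 3: gich_write_lr(2, v->arch.gic.v3.lr[2]); /* Fall through */
    case 2: gich_write_lr(1, v->arch.gic.v3.lr[1]); /* Fall through */
    case 1: gich_write_lr(0, v->arch.gic.v3.lr[0]);
        break;
    default:
        BUG();
    }

Whether this actually compiles to anything better than the loop would need
checking, of course.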
> 
>> +static void gicv3_enable_irq(struct irq_desc *irqd)
>> +{
>> +    int irq = irqd->irq;
>> +    uint32_t enabler;
>> +
>> +    /* Enable routing */
>> +    if ( irq < NR_GIC_LOCAL_IRQS )
>> +    {
>> +        enabler = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ISENABLER0);
>> +        enabler |= (1u << irq);
>> +        writel_relaxed(enabler, GICD_RDIST_SGI_BASE + GICR_ISENABLER0);
> 
> I don't think the enable registers need read-modify-write, do they? You
> just write the bit you want to enable, like you do with the clear
> ICENABLER registers.
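
For illustration, that could look roughly like the following sketch
(GICR_ISENABLER0 and the local IRQ check are from the quoted hunk; the SPI
branch assumes a GICD_ISENABLER offset definition):

    /* ISENABLER is write-1-to-set, so a plain write of the bit is
     * enough; no read-modify-write needed. */
    if ( irq < NR_GIC_LOCAL_IRQS )
        writel_relaxed(1u << irq, GICD_RDIST_SGI_BASE + GICR_ISENABLER0);
    else
        writel_relaxed(1u << (irq % 32),
                       GICD + GICD_ISENABLER + (irq / 32) * 4);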
> 
>> +static u64 gic_mpidr_to_affinity(u64 mpidr)
>> +{
>> +    u64 aff;
>> +    /* Make sure we don't broadcast the interrupt */
>> +     aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
> 
> Indentation here is squiffy.
> 
>> +            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
>> +            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
>> +            MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY;
>> +    return aff;
>> +}
>> +
>> +/*
>> + * - needs to be called with gic.lock held
>> + * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
>> + * already called gic_cpu_init
>> + */
>> +static void gicv3_set_irq_property(unsigned int irq, bool_t level,
>> +                                   const cpumask_t *cpu_mask,
>> +                                   unsigned int priority)
>> +{
>> +    uint32_t cfg, edgebit;
>> +    u64 affinity;
>> +    unsigned int cpu = gicv3_mask_cpu(cpu_mask);
>> +
>> +
>> +    /* Set edge / level */
>> +    if ( irq < NR_GIC_SGI )
>> +        /* SGI's are always edge-triggered not need to call GICD_ICFGR0 */
> 
> s/not/no/ I think.
> 
> But then, given the comment, you read GICR_ICFGR0 anyway?
> 
>> +        cfg = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ICFGR0);
>> +    else if ( irq < NR_GIC_LOCAL_IRQS )
>> +        cfg = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ICFGR1);
>> +    else
>> +        cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
>> +
>> +    edgebit = 2u << (2 * (irq % 16));
>> +    if ( level )
>> +        cfg &= ~edgebit;
>> +    else
>> +        cfg |= edgebit;
>> +
>> +    if ( irq < NR_GIC_SGI )
>> +       writel_relaxed(cfg, GICD_RDIST_SGI_BASE + GICR_ICFGR0);
>> +    else if ( irq < NR_GIC_LOCAL_IRQS )
>> +       writel_relaxed(cfg, GICD_RDIST_SGI_BASE + GICR_ICFGR1);
>> +    else
>> +       writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
>> +
>> +    affinity = gic_mpidr_to_affinity(cpu_logical_map(cpu));
>> +    if ( irq >= NR_GIC_LOCAL_IRQS )
>> +        writeq_relaxed(affinity, (GICD + GICD_IROUTER + irq * 8));
>> +
>> +    /* Set priority */
>> +    if ( irq < NR_GIC_LOCAL_IRQS )
>> +    {
> 
> The {}s here aren't needed.
> 
>> +        writeb_relaxed(priority, GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + irq);
>> +    }
>> +    else 
>> +    {
>> +        writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
>> +    }
>> +}
>> +
>> +static void gicv3_enable_redist(void)
>> +{
>> +    u32 val;
>> +    /* Wait for 1s */
>> +    u32 count = 1000000;
> 
> NOW() + MILLISECS(...) based again?
> 
>> +
>> +    /* Wake up this CPU redistributor */
>> +    val = readl_relaxed(GICD_RDIST_BASE + GICR_WAKER);
>> +    val &= ~GICR_WAKER_ProcessorSleep;
>> +    writel_relaxed(val, GICD_RDIST_BASE + GICR_WAKER);
>> +
>> +    do {
>> +         val = readl_relaxed(GICD_RDIST_BASE + GICR_WAKER);
>> +         if ( !(val & GICR_WAKER_ChildrenAsleep) )
>> +           break;
>> +         cpu_relax();
>> +         udelay(1);
>> +    } while ( count-- );
>> +
>> +    if ( !count )
>> +        gdprintk(XENLOG_WARNING, "Redist enable RWP timeout\n");
>> +}
>> +
>> +static int __init gicv3_populate_rdist(void)
>> +{
>> +    u64 mpidr = cpu_logical_map(smp_processor_id());
>> +    u64 typer;
>> +    uint32_t aff;
> 
> You have an interesting mix of u64 and uint32_t. Please stick to one or
> the other, preferably uintXX_t.
> 
>> +    int i;
>> +    uint32_t reg;
>> +
>> +    aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
>> +           MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
>> +           MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
>> +           MPIDR_AFFINITY_LEVEL(mpidr, 0));
> 
> Is this not gic_mpidr_to_affinity?

Actually it's not the same: the "level 3" field is shifted by 24 rather
than 32, so that the value matches the GICR_TYPER register layout.

The Linux code (where this function came from) has a comment above it:
"Convert affinity to a 32bit value that can be matched to GICR_TYPER
bits [63:32]."
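
In other words, the walk over the redistributor regions can compare this
value directly against the top 32 bits of GICR_TYPER, roughly as in the
sketch below ('ptr', standing for the candidate redistributor's mapped
base, is a placeholder name, and readq_relaxed is assumed to exist as the
64-bit counterpart of the accessors used above):

    typer = readq_relaxed(ptr + GICR_TYPER);
    if ( (typer >> 32) == aff )
    {
        /* This redistributor belongs to the current CPU; record it. */
        this_cpu(rbase) = ptr;
    }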

Regards,

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
