
Re: [Xen-devel] [RFC v01 1/3] arm: omap: introduce iommu module



On 01/22/2014 03:52 PM, Andrii Tseglytskyi wrote:
> omap IOMMU module is designed to handle access to external
> omap MMUs, connected to the L3 bus.

Hello Andrii,

Thanks for the patch. See my comments inline (I won't repeat the
comments Stefano already made).

> Change-Id: I96bbf2738e9dd2e21662e0986ca15c60183e669e
> Signed-off-by: Andrii Tseglytskyi <andrii.tseglytskyi@xxxxxxxxxxxxxxx>
> ---

[..]

> +struct mmu_info {
> +     const char      *name;
> +     paddr_t         mem_start;
> +     u32             mem_size;
> +     u32             *pagetable;
> +     void __iomem    *mem_map;
> +};
> +
> +static struct mmu_info omap_ipu_mmu = {

static const?

> +     .name           = "IPU_L2_MMU",
> +     .mem_start      = 0x55082000,
> +     .mem_size       = 0x1000,
> +     .pagetable      = NULL,
> +};
> +
> +static struct mmu_info omap_dsp_mmu = {

static const?

> +     .name           = "DSP_L2_MMU",
> +     .mem_start      = 0x4a066000,
> +     .mem_size       = 0x1000,
> +     .pagetable      = NULL,
> +};
> +
> +static struct mmu_info *mmu_list[] = {

static const?

> +     &omap_ipu_mmu,
> +     &omap_dsp_mmu,
> +};
> +
> +#define mmu_for_each(pfunc, data)                              \
> +({                                                             \
> +     u32 __i;                                                  \
> +     int __res = 0;                                            \
> +                                                               \
> +     for (__i = 0; __i < ARRAY_SIZE(mmu_list); __i++) {        \
> +             __res |= pfunc(mmu_list[__i], data);              \

Your __res |= will result in a "wrong" errno if there are multiple
failures. Would it be better to have:

__res = pfunc(...);
if ( __res )
    break;

> +     }                                                          \
> +     __res;                                                     \
> +})
> +
> +static int mmu_check_mem_range(struct mmu_info *mmu, paddr_t addr)
> +{
> +     if ((addr >= mmu->mem_start) && (addr < (mmu->mem_start + mmu->mem_size)))
> +             return 1;
> +
> +     return 0;
> +}
> +
> +static inline struct mmu_info *mmu_lookup(u32 addr)
> +{
> +     u32 i;
> +
> +     for (i = 0; i < ARRAY_SIZE(mmu_list); i++) {
> +             if (mmu_check_mem_range(mmu_list[i], addr))
> +                     return mmu_list[i];
> +     }
> +
> +     return NULL;
> +}
> +
> +static inline u32 mmu_virt_to_phys(u32 reg, u32 va, u32 mask)
> +{
> +     return (reg & mask) | (va & (~mask));
> +}
> +
> +static inline u32 mmu_phys_to_virt(u32 reg, u32 pa, u32 mask)
> +{
> +     return (reg & ~mask) | pa;
> +}
> +
> +static int mmu_mmio_check(struct vcpu *v, paddr_t addr)
> +{
> +     return mmu_for_each(mmu_check_mem_range, addr);
> +}

As I understand your cover letter, the device (and therefore the MMU) is
only passed through to a single guest, right?

If so, your mmu_mmio_check should check whether the domain actually owns
the device. With your current code any guest can write to this range and
rewrite the MMU page table.
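
Something along these lines (only a sketch; it assumes an owner field,
e.g. mmu->domain, recording the guest the device is assigned to, which
the current patch doesn't have):

static int mmu_mmio_check(struct vcpu *v, paddr_t addr)
{
    struct mmu_info *mmu = mmu_lookup(addr);

    /* only the domain owning the device may touch its MMU registers */
    if ( !mmu || v->domain != mmu->domain )
        return 0;

    return 1;
}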

> +
> +static int mmu_copy_pagetable(struct mmu_info *mmu)
> +{
> +     void __iomem *pagetable = NULL;
> +     u32 pgaddr;
> +
> +     ASSERT(mmu);
> +
> +     /* read address where kernel MMU pagetable is stored */
> +     pgaddr = readl(mmu->mem_map + MMU_TTB);
> +     pagetable = ioremap(pgaddr, IOPGD_TABLE_SIZE);
> +     if (!pagetable) {
> +             printk("%s: %s failed to map pagetable\n",
> +                        __func__, mmu->name);
> +             return -EINVAL;
> +     }
> +
> +     /*
> +      * pagetable can be changed since last time
> +      * we accessed it therefore we need to copy it each time
> +      */
> +     memcpy(mmu->pagetable, pagetable, IOPGD_TABLE_SIZE);
> +
> +     iounmap(pagetable);
> +
> +     return 0;
> +}

I'm confused: this should copy from the guest MMU pagetable, right? In
that case you should use map_domain_page.
ioremap *MUST* only be used on device memory, otherwise memory
coherency is not guaranteed.
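
Roughly something like this (only a sketch, not a drop-in replacement: it
assumes the owning domain is available here, that the MMU_TTB value has
been translated from a guest address to a machine address, e.g. via
p2m_lookup, and that IOPGD_TABLE_SIZE is a multiple of PAGE_SIZE):

    paddr_t maddr = p2m_lookup(d, readl(mmu->mem_map + MMU_TTB));
    unsigned int i;

    for ( i = 0; i < IOPGD_TABLE_SIZE / PAGE_SIZE; i++ )
    {
        /* map one page of the guest pagetable at a time */
        void *va = map_domain_page((maddr >> PAGE_SHIFT) + i);

        memcpy((u8 *)mmu->pagetable + i * PAGE_SIZE, va, PAGE_SIZE);
        unmap_domain_page(va);
    }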

[..]

> +static int mmu_mmio_write(struct vcpu *v, mmio_info_t *info)
> +{
> +     struct domain *dom = v->domain;
> +     struct mmu_info *mmu = NULL;
> +    struct cpu_user_regs *regs = guest_cpu_user_regs();
> +    register_t *r = select_user_reg(regs, info->dabt.reg);
> +     int res;
> +
> +     mmu = mmu_lookup(info->gpa);
> +     if (!mmu) {
> +             printk("%s: can't get mmu for addr 0x%08x\n", __func__, (u32)info->gpa);
> +             return -EINVAL;
> +     }
> +
> +     /*
> +      * make sure that user register is written first in this function
> +      * following calls may expect valid data in it
> +      */
> +    writel(*r, mmu->mem_map + ((u32)(info->gpa) - mmu->mem_start));

Hmmm ... I think this is very confusing: you should only write to the
device memory if mmu_trap_write_access has not failed, and use "*r" where
it's needed.

Writing to the device memory could have side effects (for instance
updating the page table with a wrong translation...).
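
I.e. something along these lines (sketch only):

    /* validate/translate the access first... */
    res = mmu_trap_write_access(dom, mmu, info);
    if ( res )
        return res;

    /* ...and only then forward the write to the device */
    writel(*r, mmu->mem_map + ((u32)(info->gpa) - mmu->mem_start));

    return 1;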

> +
> +     res = mmu_trap_write_access(dom, mmu, info);
> +     if (res)
> +             return res;
> +
> +    return 1;
> +}
> +
> +static int mmu_init(struct mmu_info *mmu, u32 data)
> +{
> +     ASSERT(mmu);
> +     ASSERT(!mmu->mem_map);
> +     ASSERT(!mmu->pagetable);
> +
> +    mmu->mem_map = ioremap(mmu->mem_start, mmu->mem_size);

Can you use ioremap_nocache instead of ioremap? The behavior is the same
but the former name is less confusing.
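
I.e. the quoted line would simply become:

    mmu->mem_map = ioremap_nocache(mmu->mem_start, mmu->mem_size);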

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel