[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v6 13/14] x86: add iommu_ops to modify and flush IOMMU mappings



>>> On 23.08.18 at 11:47, <paul.durrant@xxxxxxxxxx> wrote:
> --- a/xen/common/iommu_op.c
> +++ b/xen/common/iommu_op.c
> @@ -123,6 +123,156 @@ static int iommu_op_enable_modification(
>      return 0;
>  }
>  
> +static int iommuop_map(struct xen_iommu_op_map *op)
> +{
> +    struct domain *d, *currd = current->domain;
> +    struct domain_iommu *iommu = dom_iommu(currd);

const (also in unmap)?

> +static int iommuop_unmap(struct xen_iommu_op_unmap *op)
> +{
> +    struct domain *d, *currd = current->domain;
> +    struct domain_iommu *iommu = dom_iommu(currd);
> +    bfn_t bfn = _bfn(op->bfn);
> +    mfn_t mfn;
> +    bool readonly;
> +    unsigned int prot;
> +    struct page_info *page;
> +    int rc;
> +
> +    if ( op->pad ||
> +         (op->flags & ~XEN_IOMMUOP_unmap_all) )
> +        return -EINVAL;
> +
> +    if ( !iommu->iommu_op_ranges )
> +        return -EOPNOTSUPP;
> +
> +    /* Per-device unmapping not yet supported */
> +    if ( !(op->flags & XEN_IOMMUOP_unmap_all) )
> +        return -EINVAL;
> +
> +    if ( !rangeset_contains_singleton(iommu->iommu_op_ranges, bfn_x(bfn)) ||

Again the question about a malicious multi-vCPU guest trying to
utilize the gap between the check here and ...

> +         iommu_lookup_page(currd, bfn, &mfn, &prot) ||
> +         !mfn_valid(mfn) )
> +        return -ENOENT;
> +
> +    readonly = !(prot & IOMMUF_writable);
> +
> +    d = rcu_lock_domain_by_any_id(op->domid);
> +    if ( !d )
> +        return -ESRCH;
> +
> +    rc = get_paged_gfn(d, _gfn(op->gfn), !(prot & IOMMUF_writable), NULL,
> +                       &page);
> +    if ( rc )
> +        goto unlock;
> +
> +    put_page(page); /* release extra reference just taken */
> +
> +    rc = -EINVAL;
> +    if ( !mfn_eq(page_to_mfn(page), mfn) )
> +        goto unlock;
> +
> +    /* release reference taken in map */
> +    if ( !readonly )
> +        put_page_type(page);
> +    put_page(page);
> +
> +    rc = rangeset_remove_singleton(iommu->iommu_op_ranges, bfn_x(bfn));

... the actual removal here.

> +    if ( rc )
> +        goto unlock;

You've already put the page ref - failure to remove needs to be fatal
to the guest, or you'd need to re-obtain refs.

> +    if ( iommu_unmap_page(currd, bfn) )
> +        rc = -EIO;

Similarly here: All records of the page having a mapping are gone.

> @@ -135,6 +285,22 @@ static void iommu_op(xen_iommu_op_t *op)
>          op->status = 
> iommu_op_enable_modification(&op->u.enable_modification);
>          break;
>  
> +    case XEN_IOMMUOP_map:
> +        this_cpu(iommu_dont_flush_iotlb) = 1;
> +        op->status = iommuop_map(&op->u.map);
> +        this_cpu(iommu_dont_flush_iotlb) = 0;
> +        break;
> +
> +    case XEN_IOMMUOP_unmap:
> +        this_cpu(iommu_dont_flush_iotlb) = 1;
> +        op->status = iommuop_unmap(&op->u.unmap);
> +        this_cpu(iommu_dont_flush_iotlb) = 0;
> +        break;

How is this flush suppression secure?

> --- a/xen/include/public/iommu_op.h
> +++ b/xen/include/public/iommu_op.h
> @@ -80,6 +80,113 @@ struct xen_iommu_op_enable_modification {
>  #define XEN_IOMMU_CAP_per_device_mappings (1u << 
> _XEN_IOMMU_CAP_per_device_mappings)
>  };
>  
> +/*
> + * XEN_IOMMUOP_map: Map a guest page in the IOMMU.
> + */

Single line comment please (also below).

> +#define XEN_IOMMUOP_map 3
> +
> +struct xen_iommu_op_map {
> +    /* IN - The domid of the guest */
> +    domid_t domid;
> +    /*
> +     * IN - flags controlling the mapping. This should be a bitwise OR of the
> +     *      flags defined below.
> +     */
> +    uint16_t flags;
> +
> +    /*
> +     * Should the mapping be created for all initiators?
> +     *
> +     * NOTE: This flag is currently required as the implementation does not 
> yet
> +     *       support per-device mappings.
> +     */
> +#define _XEN_IOMMUOP_map_all 0
> +#define XEN_IOMMUOP_map_all (1 << (_XEN_IOMMUOP_map_all))

Stray extra parentheses (also further down).

> +/*
> + * XEN_IOMMUOP_flush: Flush the IOMMU TLB.
> + */
> +#define XEN_IOMMUOP_flush 5
> +
> +struct xen_iommu_op_flush {
> +    /*
> +     * IN - flags controlling flushing. This should be a bitwise OR of the
> +     *      flags defined below.
> +     */
> +    uint16_t flags;
> +
> +    /*
> +     * Should the mappings be flushed for all initiators?
> +     *
> +     * NOTE: This flag is currently required as the implementation does not 
> yet
> +     *       support per-device mappings.
> +     */
> +#define _XEN_IOMMUOP_flush_all 0
> +#define XEN_IOMMUOP_flush_all (1 << (_XEN_IOMMUOP_flush_all))
> +
> +    uint16_t pad0;
> +    uint32_t pad1;
> +    /*
> +     * IN - Segment/Bus/Device/Function of the initiator.
> +     *
> +     * NOTE: This is ignored if XEN_IOMMUOP_flush_all is set.
> +     */
> +    uint64_t sbdf;
> +};

No means to flush a single mapping, or a sub-range of the entire
address space?

> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -80,7 +80,10 @@
>  !    iommu_op                        iommu_op.h
>  !    iommu_op_buf                    iommu_op.h
>  !    iommu_op_enable_modification    iommu_op.h
> +!    iommu_op_flush                  iommu_op.h

As long as there's no bfn_t in there, this could again be ? instead of ! ?

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.