
[Xen-devel] [PATCH 7/7] x86: add iommu_ops to map and unmap pages, and also to flush the IOTLB



This patch adds iommu_ops that allow a domain with control_iommu privilege
to map and unmap, in the IOMMU, pages of any guest over which it has
mapping privilege.
These operations implicitly disable IOTLB flushing so that the caller can
batch them and then explicitly flush the IOTLB using the flush iommu_op,
which is also added by this patch.
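
For illustration, a caller wishing to batch mappings might do something
along the lines of the sketch below. Note that the hypercall wrapper name
and signature shown here are assumptions for illustration only; the actual
entry point is provided earlier in this series and may differ:

    /*
     * Hypothetical guest-side sketch (not part of this patch): batch two
     * map operations and follow them with a single explicit IOTLB flush.
     * HYPERVISOR_iommu_op() is assumed to take an array of ops and a
     * count; the real wrapper may differ.
     */
    extern int HYPERVISOR_iommu_op(xen_iommu_op_t *ops, unsigned int count);

    static int map_two_pages(domid_t domid, xen_bfn_t bfn, xen_pfn_t gfn)
    {
        xen_iommu_op_t ops[3] = {
            { .op = XEN_IOMMUOP_map,
              .u.map = { .bfn = bfn,     .gfn = gfn,     .domid = domid } },
            { .op = XEN_IOMMUOP_map,
              .u.map = { .bfn = bfn + 1, .gfn = gfn + 1, .domid = domid } },
            { .op = XEN_IOMMUOP_flush }, /* one flush covers the whole batch */
        };
        unsigned int i, count = sizeof(ops) / sizeof(ops[0]);
        int rc = HYPERVISOR_iommu_op(ops, count);

        if ( rc )
            return rc;

        /* Each op carries its own completion status. */
        for ( i = 0; i < count; i++ )
            if ( ops[i].status )
                return ops[i].status;

        return 0;
    }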

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/iommu_op.c       | 186 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/iommu_op.h |  37 +++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 225 insertions(+)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index ac81b98b7a..b10c916279 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -24,6 +24,174 @@
 #include <xen/hypercall.h>
 #include <xen/iommu.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(page) _mfn(__page_to_mfn(page))
+
+struct check_rdm_ctxt {
+    bfn_t bfn;
+};
+
+static int check_rdm(xen_pfn_t start, xen_ulong_t nr, u32 id, void *arg)
+{
+    struct check_rdm_ctxt *ctxt = arg;
+
+    if ( bfn_x(ctxt->bfn) >= start &&
+         bfn_x(ctxt->bfn) < start + nr )
+        return -EINVAL;
+
+    return 1;
+}
+
+static int iommuop_map(struct xen_iommu_op_map *op, unsigned int flags)
+{
+    struct domain *d, *od, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+    domid_t domid = op->domid;
+    gfn_t gfn = _gfn(op->gfn);
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    struct check_rdm_ctxt ctxt = {
+        .bfn = bfn,
+    };
+    p2m_type_t p2mt;
+    p2m_query_t p2mq;
+    struct page_info *page;
+    unsigned int prot;
+    int rc;
+
+    if ( op->pad0 != 0 || op->pad1 != 0 )
+        return -EINVAL;
+
+    /*
+     * Both map_page and lookup_page operations must be implemented.
+     * The lookup_page method is not used here but is relied upon by
+     * iommuop_unmap() to drop the page reference taken here.
+     */
+    if ( !ops->map_page || !ops->lookup_page )
+        return -ENOSYS;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    rc = iommu_get_reserved_device_memory(check_rdm, &ctxt);
+    if ( rc )
+        return rc;
+
+    d = rcu_lock_domain_by_any_id(domid);
+    if ( !d )
+        return -ESRCH;
+
+    p2mq = (flags & XEN_IOMMUOP_map_readonly) ?
+        P2M_UNSHARE : P2M_ALLOC;
+    page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, p2mq);
+
+    rc = -ENOENT;
+    if ( !page )
+        goto unlock;
+
+    if ( p2m_is_paged(p2mt) )
+    {
+        p2m_mem_paging_populate(d, gfn_x(gfn));
+        goto release;
+    }
+
+    if ( (p2mq & P2M_UNSHARE) && p2m_is_shared(p2mt) )
+        goto release;
+
+    /*
+     * Make sure the page is RAM and, if it is read-only, that the
+     * read-only flag is present.
+     */
+    rc = -EPERM;
+    if ( !p2m_is_any_ram(p2mt) ||
+         (p2m_is_readonly(p2mt) && !(flags & XEN_IOMMUOP_map_readonly)) )
+        goto release;
+
+    /*
+     * If the calling domain does not own the page then make sure it
+     * has mapping privilege over the page owner.
+     */
+    od = page_get_owner(page);
+    if ( od != currd )
+    {
+        rc = xsm_domain_memory_map(XSM_TARGET, od);
+        if ( rc )
+            goto release;
+    }
+
+    prot = IOMMUF_readable;
+    if ( !(flags & XEN_IOMMUOP_map_readonly) )
+        prot |= IOMMUF_writable;
+
+    mfn = page_to_mfn(page);
+
+    rc = 0;
+    if ( !ops->map_page(currd, bfn, mfn, prot) )
+        goto unlock; /* keep the page ref */
+
+    rc = -EIO;
+
+ release:
+    put_page(page);
+
+ unlock:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
+static int iommuop_unmap(struct xen_iommu_op_unmap *op)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    struct check_rdm_ctxt ctxt = {
+        .bfn = bfn,
+    };
+    unsigned int flags;
+    struct page_info *page;
+    int rc;
+
+    /*
+     * Both unmap_page and lookup_page operations must be implemented.
+     */
+    if ( !ops->unmap_page || !ops->lookup_page )
+        return -ENOSYS;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    rc = iommu_get_reserved_device_memory(check_rdm, &ctxt);
+    if ( rc )
+        return rc;
+
+    if ( ops->lookup_page(currd, bfn, &mfn, &flags) ||
+         !mfn_valid(mfn) )
+        return -ENOENT;
+
+    page = mfn_to_page(mfn);
+
+    if ( ops->unmap_page(currd, bfn) )
+        return -EIO;
+
+    put_page(page);
+    return 0;
+}
+
+static int iommuop_flush(void)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+
+    if ( ops->iotlb_flush_all(currd) )
+        return -EIO;
+
+    return 0;
+}
+
 struct get_rdm_ctxt {
     unsigned int max_entries;
     unsigned int nr_entries;
@@ -101,6 +269,22 @@ static void iommu_op(xen_iommu_op_t *op)
         op->status = iommuop_query_reserved(&op->u.query_reserved);
         break;
 
+    case XEN_IOMMUOP_map:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_map(&op->u.map, op->flags);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_unmap:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_unmap(&op->u.unmap);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_flush:
+        op->status = iommuop_flush();
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -195,6 +379,8 @@ int compat_iommu_op(XEN_GUEST_HANDLE_PARAM(compat_iommu_op_t) uops,
          * we need to fix things up here.
          */
 #define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+#define XLAT_iommu_op_u_map XEN_IOMMUOP_map
+#define XLAT_iommu_op_u_unmap XEN_IOMMUOP_unmap
         u = cmp.op;
 
 #define XLAT_iommu_op_query_reserved_HNDL_regions(_d_, _s_) \
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index 24b8b9e0cc..9a782603de 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -57,13 +57,50 @@ struct xen_iommu_op_query_reserved {
     XEN_GUEST_HANDLE(xen_iommu_reserved_region_t) regions;
 };
 
+/*
+ * XEN_IOMMUOP_map: Map a page in the IOMMU.
+ */
+#define XEN_IOMMUOP_map 2
+
+struct xen_iommu_op_map {
+    /* IN - The IOMMU frame number which will hold the new mapping */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page to be mapped */
+    xen_pfn_t gfn;
+    /* IN - The domid of the guest */
+    domid_t domid;
+    uint16_t pad0;
+    uint32_t pad1;
+};
+
+/*
+ * XEN_IOMMUOP_unmap: Remove a mapping in the IOMMU.
+ */
+#define XEN_IOMMUOP_unmap 3
+
+struct xen_iommu_op_unmap {
+    /* IN - The IOMMU frame number holding the mapping to be cleared */
+    xen_bfn_t bfn;
+};
+
+/*
+ * XEN_IOMMUOP_flush: Flush the IOMMU TLB.
+ */
+#define XEN_IOMMUOP_flush 4
+
 struct xen_iommu_op {
     uint16_t op;
     uint16_t flags; /* op specific flags */
+
+#define _XEN_IOMMUOP_map_readonly 0
+#define XEN_IOMMUOP_map_readonly (1 << (_XEN_IOMMUOP_map_readonly))
+
     int32_t status; /* op completion status: */
                     /* 0 for success otherwise, negative errno */
     union {
         struct xen_iommu_op_query_reserved query_reserved;
+        struct xen_iommu_op_map map;
+        struct xen_iommu_op_unmap unmap;
     } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index a2070b6d7d..dddafc7422 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -78,6 +78,8 @@
 ?      vcpu_hvm_x86_64                 hvm/hvm_vcpu.h
 !      iommu_reserved_region           iommu_op.h
 !      iommu_op_query_reserved         iommu_op.h
+!      iommu_op_map                    iommu_op.h
+!      iommu_op_unmap                  iommu_op.h
 !      iommu_op                        iommu_op.h
 ?      kexec_exec                      kexec.h
 !      kexec_image                     kexec.h
-- 
2.11.0

