
[Xen-devel] [PATCH 4/7] vtd: add lookup_page method to iommu_ops



This patch adds a new lookup_page method to iommu_ops and implements it
for VT-d. The method finds the MFN (and the mapping flags) currently
mapped by the specified BFN. This functionality will be used by a
subsequent patch.
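
For illustration, here is a sketch of how common code might invoke the
new method via the ops table. The wrapper name and the guard conditions
below are assumptions on my part, for illustration only, and are not
part of this patch:

    /* Hypothetical caller sketch; not part of this patch. */
    int iommu_lookup_page(struct domain *d, bfn_t bfn, mfn_t *mfn,
                          unsigned int *flags)
    {
        const struct domain_iommu *hd = dom_iommu(d);

        /* Bail if no IOMMU, or if the platform lacks the new method. */
        if ( !iommu_enabled || !hd->platform_ops ||
             !hd->platform_ops->lookup_page )
            return -EOPNOTSUPP;

        return hd->platform_ops->lookup_page(d, bfn, mfn, flags);
    }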

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c | 40 ++++++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/vtd/iommu.h |  2 ++
 xen/include/xen/iommu.h             |  2 ++
 3 files changed, 44 insertions(+)

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index a27529412a..bc4fc36d5f 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1827,6 +1827,45 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
     return dma_pte_clear_one(d, (paddr_t)bfn_x(bfn) << PAGE_SHIFT_4K);
 }
 
+static int intel_iommu_lookup_page(struct domain *d, bfn_t bfn, mfn_t *mfn,
+                                   unsigned int *flags)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    struct dma_pte *page = NULL, *pte = NULL, val;
+    u64 pg_maddr;
+
+    spin_lock(&hd->arch.mapping_lock);
+
+    pg_maddr =
+        addr_to_dma_page_maddr(d, (paddr_t)bfn_x(bfn) << PAGE_SHIFT_4K, 0);
+    if ( pg_maddr == 0 )
+    {
+        spin_unlock(&hd->arch.mapping_lock);
+        return -ENOENT;
+    }
+    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
+    pte = page + (bfn_x(bfn) & LEVEL_MASK);
+    val = *pte;
+    if ( !dma_pte_present(val) )
+    {
+        unmap_vtd_domain_page(page);
+        spin_unlock(&hd->arch.mapping_lock);
+        return -ENOENT;
+    }
+    unmap_vtd_domain_page(page);
+    spin_unlock(&hd->arch.mapping_lock);
+
+    *mfn = _mfn(dma_get_pte_addr(val) >> PAGE_SHIFT_4K);
+
+    *flags = 0;
+    if ( dma_get_pte_prot(val) & DMA_PTE_READ )
+        *flags |= IOMMUF_readable;
+    if ( dma_get_pte_prot(val) & DMA_PTE_WRITE )
+        *flags |= IOMMUF_writable;
+
+    return 0;
+}
+
 int iommu_pte_flush(struct domain *d, u64 bfn, u64 *pte,
                     int order, int present)
 {
@@ -2652,6 +2691,7 @@ const struct iommu_ops intel_iommu_ops = {
     .teardown = iommu_domain_teardown,
     .map_page = intel_iommu_map_page,
     .unmap_page = intel_iommu_unmap_page,
+    .lookup_page = intel_iommu_lookup_page,
     .free_page_table = iommu_free_page_table,
     .reassign_device = reassign_device_ownership,
     .get_device_group_id = intel_iommu_group_id,
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 72c1a2e3cd..5eda66868e 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -272,9 +272,11 @@ struct dma_pte {
 #define dma_set_pte_prot(p, prot) do { \
         (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
     } while (0)
+#define dma_get_pte_prot(p) ((p).val & DMA_PTE_PROT)
 #define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_set_pte_addr(p, addr) do {\
             (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+#define dma_get_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0)
 #define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0)
 
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index fd6f6fb05a..40099e8f32 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -179,6 +179,8 @@ struct iommu_ops {
     int __must_check (*map_page)(struct domain *d, bfn_t bfn, mfn_t mfn,
                                  unsigned int flags);
     int __must_check (*unmap_page)(struct domain *d, bfn_t bfn);
+    int __must_check (*lookup_page)(struct domain *d, bfn_t bfn, mfn_t *mfn,
+                                    unsigned int *flags);
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
-- 
2.11.0

