
[Xen-devel] [PATCH v3 03/13] iommu: make use of type-safe BFN and MFN in exported functions



This patch modifies the declarations of the entry points to the IOMMU
sub-system to use bfn_t and mfn_t in place of unsigned long. A subsequent
patch will similarly modify the methods in the iommu_ops structure.
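
For reference, the type safety here comes from Xen's TYPE_SAFE() macro
(xen/include/xen/typesafe.h). A simplified sketch of the pattern, together
with the bfn_add() helper introduced below, looks roughly like this (not the
exact macro expansion):

    /* Each frame-number space gets its own single-member struct, so the
     * compiler rejects accidental mixing of bfn/mfn/gfn values. */
    typedef struct { uint64_t bfn; } bfn_t;

    /* Encapsulation and extraction are always explicit. */
    static inline bfn_t _bfn(uint64_t n) { return (bfn_t) { n }; }
    static inline uint64_t bfn_x(bfn_t b) { return b.bfn; }

    /* Helper (added to xen/include/xen/iommu.h by this patch) for
     * stepping through a contiguous range of BFNs. */
    static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
    {
        return _bfn(bfn_x(bfn) + i);
    }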

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v3:
 - Removed most of the uses of an intermediate 'frame' variable.

v2:
 - Addressed comments from Jan.
 - Use intermediate 'frame' variable to avoid directly encapsulating
   mfn or gfn values as bfns.
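
To illustrate the two styles mentioned above, a hypothetical call site would
change roughly as follows (sketch only; the real hunks follow below):

    /* v2 style: go via an untyped intermediate 'frame' variable. */
    unsigned long frame = mfn_x(mfn);
    rc = iommu_map_page(d, _bfn(frame), mfn, IOMMUF_readable | IOMMUF_writable);

    /* v3 style (this patch): encapsulate the typed value directly. */
    rc = iommu_map_page(d, _bfn(mfn_x(mfn)), mfn, IOMMUF_readable | IOMMUF_writable);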
---
 xen/arch/arm/p2m.c                    |  3 ++-
 xen/arch/x86/mm.c                     | 10 ++++----
 xen/arch/x86/mm/p2m-ept.c             | 10 +++++---
 xen/arch/x86/mm/p2m-pt.c              | 45 ++++++++++++++++++++---------------
 xen/arch/x86/mm/p2m.c                 | 16 ++++++++-----
 xen/arch/x86/x86_64/mm.c              |  5 ++--
 xen/common/grant_table.c              | 12 +++++-----
 xen/common/memory.c                   |  4 ++--
 xen/drivers/passthrough/iommu.c       | 25 ++++++++++---------
 xen/drivers/passthrough/vtd/x86/vtd.c |  3 ++-
 xen/include/xen/iommu.h               | 14 +++++++----
 11 files changed, 85 insertions(+), 62 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 14791388ad..d719db1e30 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -953,7 +953,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
 
     if ( need_iommu(p2m->domain) &&
          (lpae_valid(orig_pte) || lpae_valid(*entry)) )
-        rc = iommu_iotlb_flush(p2m->domain, gfn_x(sgfn), 1UL << page_order);
+        rc = iommu_iotlb_flush(p2m->domain, _bfn(gfn_x(sgfn)),
+                               1UL << page_order);
     else
         rc = 0;
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a1a1f5f3c3..99070f916d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2729,14 +2729,14 @@ static int _get_page_type(struct page_info *page, unsigned long type,
         struct domain *d = page_get_owner(page);
         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
-            gfn_t gfn = _gfn(mfn_to_gmfn(d, mfn_x(page_to_mfn(page))));
+            mfn_t mfn = page_to_mfn(page);
 
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap_page(d, gfn_x(gfn));
+                iommu_ret = iommu_unmap_page(d, _bfn(mfn_x(mfn)));
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_map_page(d, gfn_x(gfn),
-                                           mfn_x(page_to_mfn(page)),
-                                           IOMMUF_readable|IOMMUF_writable);
+                iommu_ret = iommu_map_page(d, _bfn(mfn_x(mfn)), mfn,
+                                           IOMMUF_readable |
+                                           IOMMUF_writable);
         }
     }
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 14b593923b..2089b5232d 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -870,15 +870,19 @@ out:
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else
         {
+            bfn_t bfn = _bfn(gfn);
+
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
                 {
-                    rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                    rc = iommu_map_page(d, bfn_add(bfn, i),
+                                        mfn_add(mfn, i), iommu_flags);
                     if ( unlikely(rc) )
                     {
                         while ( i-- )
                             /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain, gfn + i) )
+                            if ( iommu_unmap_page(p2m->domain,
+                                                  bfn_add(bfn, i)) )
                                 continue;
 
                         break;
@@ -887,7 +891,7 @@ out:
             else
                 for ( i = 0; i < (1 << order); i++ )
                 {
-                    ret = iommu_unmap_page(d, gfn + i);
+                    ret = iommu_unmap_page(d, bfn_add(bfn, i));
                     if ( !rc )
                         rc = ret;
                 }
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2ed26..a441af388a 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -687,29 +687,36 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             if ( iommu_old_flags )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
         }
-        else if ( iommu_pte_flags )
-            for ( i = 0; i < (1UL << page_order); i++ )
-            {
-                rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                                    iommu_pte_flags);
-                if ( unlikely(rc) )
+        else
+        {
+            bfn_t bfn = _bfn(gfn);
+
+            if ( iommu_pte_flags )
+                for ( i = 0; i < (1UL << page_order); i++ )
                 {
-                    while ( i-- )
-                        /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(p2m->domain, gfn + i) )
-                            continue;
+                    rc = iommu_map_page(p2m->domain, bfn_add(bfn, i),
+                                        mfn_add(mfn, i), iommu_pte_flags);
+                    if ( unlikely(rc) )
+                    {
+                        while ( i-- )
+                            /* If statement to satisfy __must_check. */
+                            if ( iommu_unmap_page(p2m->domain,
+                                                  bfn_add(bfn, i)) )
+                                continue;
 
-                    break;
+                        break;
+                    }
                 }
-            }
-        else
-            for ( i = 0; i < (1UL << page_order); i++ )
-            {
-                int ret = iommu_unmap_page(p2m->domain, gfn + i);
+            else
+                for ( i = 0; i < (1UL << page_order); i++ )
+                {
+                    int ret = iommu_unmap_page(p2m->domain,
+                                               bfn_add(bfn, i));
 
-                if ( !rc )
-                    rc = ret;
-            }
+                    if ( !rc )
+                        rc = ret;
+                }
+        }
     }
 
     /*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c53cab44d9..e8a3c04d09 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -714,9 +714,11 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
 
         if ( need_iommu(p2m->domain) )
         {
+            bfn_t bfn = _bfn(mfn);
+
             for ( i = 0; i < (1 << page_order); i++ )
             {
-                int ret = iommu_unmap_page(p2m->domain, mfn + i);
+                int ret = iommu_unmap_page(p2m->domain, bfn_add(bfn, i));
 
                 if ( !rc )
                     rc = ret;
@@ -773,16 +775,17 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     {
         if ( need_iommu(d) && t == p2m_ram_rw )
         {
+            bfn_t bfn = _bfn(mfn_x(mfn));
+
             for ( i = 0; i < (1 << page_order); i++ )
             {
-                rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
-                                    mfn_x(mfn_add(mfn, i)),
+                rc = iommu_map_page(d, bfn_add(bfn, i), mfn_add(mfn, i),
                                     IOMMUF_readable|IOMMUF_writable);
                 if ( rc != 0 )
                 {
                     while ( i-- > 0 )
                         /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
+                        if ( iommu_unmap_page(d, bfn_add(bfn, i)) )
                             continue;
 
                     return rc;
@@ -1157,7 +1160,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu(d) )
             return 0;
-        return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable);
+        return iommu_map_page(d, _bfn(gfn_l), _mfn(gfn_l),
+                              IOMMUF_readable | IOMMUF_writable);
     }
 
     gfn_lock(p2m, gfn, 0);
@@ -1247,7 +1251,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu(d) )
             return 0;
-        return iommu_unmap_page(d, gfn_l);
+        return iommu_unmap_page(d, _bfn(gfn_l));
     }
 
     gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cca4ae926e..cc58e4cef4 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1429,13 +1429,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+            if ( iommu_map_page(hardware_domain, _bfn(i), _mfn(i),
+                                IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap_page(hardware_domain, i) )
+                if ( iommu_unmap_page(hardware_domain, _bfn(i)) )
                     continue;
 
             goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index d2610e320c..261e3b17a0 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1144,13 +1144,13 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
-                                     IOMMUF_readable|IOMMUF_writable);
+                err = iommu_map_page(ld, _bfn(mfn_x(mfn)), mfn,
+                                     IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
+                err = iommu_map_page(ld, _bfn(mfn_x(mfn)), mfn,
                                      IOMMUF_readable);
         }
         if ( err )
@@ -1413,10 +1413,10 @@ unmap_common(
 
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap_page(ld, mfn_x(op->mfn));
+            err = iommu_unmap_page(ld, _bfn(mfn_x(op->mfn)));
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_map_page(ld, mfn_x(op->mfn),
-                                 mfn_x(op->mfn), IOMMUF_readable);
+            err = iommu_map_page(ld, _bfn(mfn_x(op->mfn)), op->mfn,
+                                 IOMMUF_readable);
 
         double_gt_unlock(lgt, rgt);
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 75010b78a5..e1616eda32 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -833,11 +833,11 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
 
         this_cpu(iommu_dont_flush_iotlb) = 0;
 
-        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+        ret = iommu_iotlb_flush(d, _bfn(xatp->idx - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
 
-        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+        ret = iommu_iotlb_flush(d, _bfn(xatp->gpfn - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
     }
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 46db9d0d04..44797f9e92 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -255,7 +255,7 @@ void iommu_domain_destroy(struct domain *d)
     arch_iommu_domain_destroy(d);
 }
 
-int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
+int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
                    unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
@@ -264,13 +264,13 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, bfn, mfn, flags);
+    rc = hd->platform_ops->map_page(d, bfn_x(bfn), mfn_x(mfn), flags);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU mapping bfn %#lx to mfn %#lx failed: %d\n",
-                   d->domain_id, bfn, mfn, rc);
+                   "d%d: IOMMU mapping bfn %"PRI_bfn" to mfn %"PRI_mfn" failed: %d\n",
+                   d->domain_id, bfn_x(bfn), mfn_x(mfn), rc);
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
@@ -279,7 +279,7 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
     return rc;
 }
 
-int iommu_unmap_page(struct domain *d, unsigned long bfn)
+int iommu_unmap_page(struct domain *d, bfn_t bfn)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
@@ -287,13 +287,13 @@ int iommu_unmap_page(struct domain *d, unsigned long bfn)
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, bfn);
+    rc = hd->platform_ops->unmap_page(d, bfn_x(bfn));
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU unmapping bfn %#lx failed: %d\n",
-                   d->domain_id, bfn, rc);
+                   "d%d: IOMMU unmapping bfn %"PRI_bfn" failed: %d\n",
+                   d->domain_id, bfn_x(bfn), rc);
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
@@ -319,8 +319,7 @@ static void iommu_free_pagetables(unsigned long unused)
                             cpumask_cycle(smp_processor_id(), &cpu_online_map));
 }
 
-int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
-                      unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, bfn_t bfn, unsigned int page_count)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
@@ -328,13 +327,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    rc = hd->platform_ops->iotlb_flush(d, bfn, page_count);
+    rc = hd->platform_ops->iotlb_flush(d, bfn_x(bfn), page_count);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU IOTLB flush failed: %d, bfn %#lx, page count %u\n",
-                   d->domain_id, rc, bfn, page_count);
+                   "d%d: IOMMU IOTLB flush failed: %d, bfn %"PRI_bfn", page count %u\n",
+                   d->domain_id, rc, bfn_x(bfn), page_count);
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index cc2bfea162..dc37dce4b6 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -155,7 +155,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
         {
-            int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+            int ret = iommu_map_page(d, _bfn(pfn * tmp + j),
+                                     _mfn(pfn * tmp + j),
                                      IOMMUF_readable|IOMMUF_writable);
 
             if ( !rc )
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 14ada0c14e..a3c36c1148 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -24,6 +24,7 @@
 #include <xen/spinlock.h>
 #include <xen/pci.h>
 #include <xen/typesafe.h>
+#include <xen/mm.h>
 #include <public/hvm/ioreq.h>
 #include <public/domctl.h>
 #include <asm/device.h>
@@ -42,6 +43,11 @@ TYPE_SAFE(uint64_t, bfn);
 #undef bfn_x
 #endif
 
+static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
+{
+    return _bfn(bfn_x(bfn) + i);
+}
+
 #define IOMMU_PAGE_SHIFT 12
 #define IOMMU_PAGE_SIZE  (_AC(1,L) << IOMMU_PAGE_SHIFT)
 #define IOMMU_PAGE_MASK  (~(IOMMU_PAGE_SIZE - 1))
@@ -86,9 +92,9 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long bfn,
-                                unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long bfn);
+int __must_check iommu_map_page(struct domain *d, bfn_t bfn,
+                                mfn_t mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, bfn_t bfn);
 
 enum iommu_feature
 {
@@ -213,7 +219,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
 int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
 
-int __must_check iommu_iotlb_flush(struct domain *d, unsigned long bfn,
+int __must_check iommu_iotlb_flush(struct domain *d, bfn_t bfn,
                                    unsigned int page_count);
 int __must_check iommu_iotlb_flush_all(struct domain *d);
 
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

