[Xen-devel] [PATCH] iommu: specify page_count rather than page_order to iommu_map/unmap()...



...and remove alignment assertions.

Testing shows that certain callers of iommu_legacy_map/unmap() specify
order > 0 ranges that are not order-aligned, thus causing one of the
IS_ALIGNED() assertions to fire.
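
For illustration, the failure mode looks like this (the caller and the
argument values below are hypothetical, not taken from an observed
crash):

    /* iommu_map() asserts order alignment of both dfn and mfn: */
    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
    ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));

    /*
     * An order 9 (2MB) request whose starting dfn is not 512-page
     * aligned, reaching iommu_map() via iommu_legacy_map()...
     */
    iommu_legacy_map(d, _dfn(0x1001), _mfn(0x2001), PAGE_ORDER_2M,
                     IOMMUF_readable | IOMMUF_writable);

    /* ...fires the first ASSERT(): 0x1001 is not a multiple of 0x200. */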

This patch removes those assertions and modifies iommu_map/unmap() and
iommu_legacy_map/unmap() to take a page_count argument rather than a
page_order. Using a count makes more sense because the valid set of
mapping orders is specific to the IOMMU implementation, so it should be
up to the implementation-specific code to translate a mapping count
into an optimal set of mapping orders (when the code is eventually
modified to support orders > 0).
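
As a sketch, that implementation-specific translation could eventually
look something like the following (illustrative only: IOMMU_MAX_ORDER
and map_one() are made-up names, not part of this patch):

    /* Split the count into the largest naturally aligned chunks. */
    while ( page_count )
    {
        unsigned long mask = dfn_x(dfn) | mfn_x(mfn);
        unsigned int order = mask ? ffsl(mask) - 1 : IOMMU_MAX_ORDER;

        order = min_t(unsigned int, order, flsl(page_count) - 1);
        order = min_t(unsigned int, order, IOMMU_MAX_ORDER);

        rc = map_one(d, dfn, mfn, order, flags); /* hypothetical */
        if ( rc )
            break;

        dfn = dfn_add(dfn, 1ul << order);
        mfn = mfn_add(mfn, 1ul << order);
        page_count -= 1ul << order;
    }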

Reported-by: Chao Gao <chao.gao@xxxxxxxxx>
Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm.c                   |  6 ++----
 xen/arch/x86/mm/p2m-ept.c           |  5 +++--
 xen/arch/x86/mm/p2m-pt.c            |  4 ++--
 xen/arch/x86/mm/p2m.c               |  9 +++++----
 xen/arch/x86/x86_64/mm.c            |  6 ++----
 xen/common/grant_table.c            |  8 ++++----
 xen/drivers/passthrough/iommu.c     | 29 +++++++++++------------------
 xen/drivers/passthrough/x86/iommu.c |  4 ++--
 xen/include/xen/iommu.h             |  8 ++++----
 9 files changed, 35 insertions(+), 44 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 7ec5954b03..caccfe3f79 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2801,11 +2801,9 @@ static int _get_page_type(struct page_info *page, unsigned long type,
             mfn_t mfn = page_to_mfn(page);
 
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)),
-                                               PAGE_ORDER_4K);
+                iommu_ret = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)), 1);
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn,
-                                             PAGE_ORDER_4K,
+                iommu_ret = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, 1,
                                              IOMMUF_readable |
                                              IOMMUF_writable);
         }
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 2b2bf31aad..56341ca678 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -885,8 +885,9 @@ out:
            rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else if ( need_iommu_pt_sync(d) )
             rc = iommu_flags ?
-                iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
-                iommu_legacy_unmap(d, _dfn(gfn), order);
+                iommu_legacy_map(d, _dfn(gfn), mfn, 1u << order,
+                                 iommu_flags) :
+                iommu_legacy_unmap(d, _dfn(gfn), 1u << order);
     }
 
     unmap_domain_page(table);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 12f92cf1f0..ac86a895a0 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -694,9 +694,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( need_iommu_pt_sync(p2m->domain) )
             rc = iommu_pte_flags ?
-                iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
+                iommu_legacy_map(d, _dfn(gfn), mfn, 1u << page_order,
                                  iommu_pte_flags) :
-                iommu_legacy_unmap(d, _dfn(gfn), page_order);
+                iommu_legacy_unmap(d, _dfn(gfn), 1u << page_order);
         else if ( iommu_use_hap_pt(d) && iommu_old_flags )
             amd_iommu_flush_pages(p2m->domain, gfn, page_order);
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index d14ce57dd5..ae3d2acd36 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -780,7 +780,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
 
     if ( !paging_mode_translate(p2m->domain) )
         return need_iommu_pt_sync(p2m->domain) ?
-            iommu_legacy_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
+            iommu_legacy_unmap(p2m->domain, _dfn(mfn), 1u << page_order) :
+            0;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -827,7 +828,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
 
     if ( !paging_mode_translate(d) )
         return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
-            iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+            iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, 1u << page_order,
                              IOMMUF_readable | IOMMUF_writable) : 0;
 
     /* foreign pages are added thru p2m_add_foreign */
@@ -1308,7 +1309,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), 1,
                                 IOMMUF_readable | IOMMUF_writable);
     }
 
@@ -1399,7 +1400,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
+        return iommu_legacy_unmap(d, _dfn(gfn_l), 1);
     }
 
     gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d8f558bc3a..a5afab402f 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1436,16 +1436,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
          !need_iommu_pt_sync(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_legacy_map(hardware_domain, _dfn(i), _mfn(i),
-                                  PAGE_ORDER_4K,
+            if ( iommu_legacy_map(hardware_domain, _dfn(i), _mfn(i), 1,
                                   IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_legacy_unmap(hardware_domain, _dfn(i),
-                                        PAGE_ORDER_4K) )
+                if ( iommu_legacy_unmap(hardware_domain, _dfn(i), 1) )
                     continue;
 
             goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index fd099a8f25..4bd0b46166 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1134,13 +1134,13 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 1,
                                        IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 1,
                                        IOMMUF_readable);
         }
         if ( err )
@@ -1389,9 +1389,9 @@ unmap_common(
 
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
+            err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 1);
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+            err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 1,
                                    IOMMUF_readable);
 
         double_gt_unlock(lgt, rgt);
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index bd1af35a13..b7a08d105d 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -226,7 +226,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
 
-            ret = iommu_map(d, _dfn(dfn), _mfn(mfn), 0, mapping,
+            ret = iommu_map(d, _dfn(dfn), _mfn(mfn), 1, mapping,
                             &flush_flags);
 
             if ( !rc )
@@ -311,7 +311,7 @@ void iommu_domain_destroy(struct domain *d)
 }
 
 int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-              unsigned int page_order, unsigned int flags,
+              unsigned int page_count, unsigned int flags,
               unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
@@ -321,10 +321,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
-    ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
-
-    for ( i = 0; i < (1ul << page_order); i++ )
+    for ( i = 0; i < page_count; i++ )
     {
         rc = hd->platform_ops->map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
                                         flags, flush_flags);
@@ -354,15 +351,14 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
 }
 
 int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-                     unsigned int page_order, unsigned int flags)
+                     unsigned int page_count, unsigned int flags)
 {
     unsigned int flush_flags = 0;
-    int rc = iommu_map(d, dfn, mfn, page_order, flags, &flush_flags);
+    int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
     {
-        int err = iommu_iotlb_flush(d, dfn, (1u << page_order),
-                                    flush_flags);
+        int err = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
 
         if ( !rc )
             rc = err;
@@ -371,7 +367,7 @@ int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
     return rc;
 }
 
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_count,
                 unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
@@ -381,9 +377,7 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
-
-    for ( i = 0; i < (1ul << page_order); i++ )
+    for ( i = 0; i < page_count; i++ )
     {
         int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i),
                                                flush_flags);
@@ -409,15 +403,14 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
     return rc;
 }
 
-int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_count)
 {
     unsigned int flush_flags = 0;
-    int rc = iommu_unmap(d, dfn, page_order, &flush_flags);
+    int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
     {
-        int err = iommu_iotlb_flush(d, dfn, (1u << page_order),
-                                    flush_flags);
+        int err = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
 
         if ( !rc )
             rc = err;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index e40d7a7d7b..53d4dbc60c 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -67,7 +67,7 @@ int arch_iommu_populate_page_table(struct domain *d)
             {
                 ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                 BUG_ON(SHARED_M2P(gfn));
-                rc = iommu_map(d, _dfn(gfn), _mfn(mfn), PAGE_ORDER_4K,
+                rc = iommu_map(d, _dfn(gfn), _mfn(mfn), 1,
                                IOMMUF_readable | IOMMUF_writable,
                                &flush_flags);
             }
@@ -245,7 +245,7 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
             rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), 1,
                            IOMMUF_readable | IOMMUF_writable, &flush_flags);
 
         if ( rc )
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index cdc8021cbd..82fb86c7ff 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -111,17 +111,17 @@ enum
 #define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
 
 int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-                           unsigned int page_order, unsigned int flags,
+                           unsigned int page_count, unsigned int flags,
                            unsigned int *flush_flags);
 int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
-                             unsigned int page_order,
+                             unsigned int page_count,
                              unsigned int *flush_flags);
 
 int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-                                  unsigned int page_order,
+                                  unsigned int page_count,
                                   unsigned int flags);
 int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
-                                    unsigned int page_order);
+                                    unsigned int page_count);
 
 int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);
-- 
2.20.1.2.gb21ebb671

