[Xen-devel] [PATCH] x86/iommu: remove usage of {set/clear}_identity_p2m_entry against PV domains



Switch rmrr_identity_mapping to use iommu_{un}map in order to
establish RMRR mappings for PV domains, as is done in
arch_iommu_hwdom_init. This fixes a PV hardware domain not getting
RMRR mappings, because {set/clear}_identity_p2m_entry was not
properly updating the IOMMU page tables for such domains.

As rmrr_identity_mapping was the last user of
{set/clear}_identity_p2m_entry against PV domains, modify those
functions so they are only usable against translated domains, like
the other p2m-related functions.
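
In other words, callers must now check paging_mode_translate() before
using these helpers, and non-translated (PV) domains must go through
iommu_map()/iommu_unmap() directly. A minimal sketch of the resulting
calling convention (the helper name identity_map_pfn() is illustrative
only; the two branches mirror the rmrr_identity_mapping() hunk below):

/* Illustrative only: pick the p2m path for translated guests, the
 * IOMMU path for PV. */
static int identity_map_pfn(struct domain *d, unsigned long pfn,
                            unsigned int flag, unsigned int *flush_flags)
{
    if ( paging_mode_translate(d) )
        return set_identity_p2m_entry(d, pfn, p2m_access_rw, flag);

    return iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
                     IOMMUF_readable | IOMMUF_writable, flush_flags);
}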

Reported-by: Roman Shaposhnik <roman@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wl@xxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Roman Shaposhnik <roman@xxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c               | 11 ++++-------
 xen/drivers/passthrough/vtd/iommu.c | 23 ++++++++++++++++++-----
 2 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fef97c82f6..d36a58b1a6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1341,10 +1341,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
 
     if ( !paging_mode_translate(p2m->domain) )
     {
-        if ( !need_iommu_pt_sync(d) )
-            return 0;
-        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
-                                IOMMUF_readable | IOMMUF_writable);
+        ASSERT_UNREACHABLE();
+        return -ENXIO;
     }
 
     gfn_lock(p2m, gfn, 0);
@@ -1432,9 +1430,8 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu_pt_sync(d) )
-            return 0;
-        return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
+        ASSERT_UNREACHABLE();
+        return -ENXIO;
     }
 
     gfn_lock(p2m, gfn, 0);
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 5d72270c5b..62df5ca5aa 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1969,6 +1969,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K;
     struct mapped_rmrr *mrmrr;
     struct domain_iommu *hd = dom_iommu(d);
+    unsigned int flush_flags = 0;
 
     ASSERT(pcidevs_locked());
     ASSERT(rmrr->base_address < rmrr->end_address);
@@ -1982,7 +1983,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
         {
-            int ret = 0;
+            int ret = 0, err;
 
             if ( map )
             {
@@ -1995,13 +1996,20 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
 
             while ( base_pfn < end_pfn )
             {
-                if ( clear_identity_p2m_entry(d, base_pfn) )
-                    ret = -ENXIO;
+                if ( paging_mode_translate(d) )
+                    ret = clear_identity_p2m_entry(d, base_pfn);
+                else
+                    ret = iommu_unmap(d, _dfn(base_pfn), PAGE_ORDER_4K,
+                                      &flush_flags);
                 base_pfn++;
             }
 
             list_del(&mrmrr->list);
             xfree(mrmrr);
+            /* Keep the previous error code if there's one. */
+            err = iommu_iotlb_flush_all(d, flush_flags);
+            if ( !ret )
+                ret = err;
             return ret;
         }
     }
@@ -2011,8 +2019,13 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
 
     while ( base_pfn < end_pfn )
     {
-        int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
+        int err;
 
+        if ( paging_mode_translate(d) )
+            err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
+        else
+            err = iommu_map(d, _dfn(base_pfn), _mfn(base_pfn), PAGE_ORDER_4K,
+                            IOMMUF_readable | IOMMUF_writable, &flush_flags);
         if ( err )
             return err;
         base_pfn++;
@@ -2026,7 +2039,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     mrmrr->count = 1;
     list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
 
-    return 0;
+    return iommu_iotlb_flush_all(d, flush_flags);
 }
 
 static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
-- 
2.20.1 (Apple Git-117)

