
[Xen-devel] [PATCH v2 09/30] x86/vtd: fix and simplify mapping RMRR regions



The current Intel VT-d code only maps RMRR regions for the hardware
domain, and fails to map RMRR regions for unprivileged domains unless
the page tables are shared between EPT and IOMMU. Fix this and simplify
the code by removing the {set/clear}_identity_p2m_entry helpers and
using the normal MMIO mapping functions instead. Introduce a new MMIO
mapping/unmapping helper that takes care of checking for pending IRQs
if the mapped region is big enough that it cannot be done in one shot.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Feng Wu <feng.wu@xxxxxxxxx>
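
Note on the return convention the new helper relies on (a sketch based
on the code added below, not new behaviour): map_mmio_regions and
unmap_mmio_regions return 0 once the whole range has been processed, a
negative errno value on failure, or the number of pages processed so
far when bailing out early for preemption. modify_mmio_11 therefore
loops on partial progress and services softirqs between batches:

    rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
    if ( rc > 0 )
    {
        /* Partial progress: rc pages done, retry the remainder. */
        nr_pages -= rc;
        pfn += rc;
        process_pending_softirqs();
    }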
---
 xen/arch/x86/mm/p2m.c               | 86 -------------------------------------
 xen/drivers/passthrough/vtd/iommu.c | 21 +++++----
 xen/include/asm-x86/p2m.h           |  5 ---
 xen/include/xen/p2m-common.h        | 30 +++++++++++++
 4 files changed, 42 insertions(+), 100 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9526fff..44492ae 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1029,56 +1029,6 @@ int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     return set_typed_p2m_entry(d, gfn, mfn, order, p2m_mmio_direct, access);
 }
 
-int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
-                           p2m_access_t p2ma, unsigned int flag)
-{
-    p2m_type_t p2mt;
-    p2m_access_t a;
-    mfn_t mfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int ret;
-
-    if ( !paging_mode_translate(p2m->domain) )
-    {
-        if ( !need_iommu(d) )
-            return 0;
-        return iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
-    }
-
-    gfn_lock(p2m, gfn, 0);
-
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
-
-    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
-        ret = p2m_set_entry(p2m, gfn, _mfn(gfn), PAGE_ORDER_4K,
-                            p2m_mmio_direct, p2ma);
-    else if ( mfn_x(mfn) == gfn && p2mt == p2m_mmio_direct && a == p2ma )
-    {
-        ret = 0;
-        /*
-         * PVH fixme: during Dom0 PVH construction, p2m entries are being set
-         * but iomem regions are not mapped with IOMMU. This makes sure that
-         * RMRRs are correctly mapped with IOMMU.
-         */
-        if ( is_hardware_domain(d) && !iommu_use_hap_pt(d) )
-            ret = iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
-    }
-    else
-    {
-        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
-            ret = 0;
-        else
-            ret = -EBUSY;
-        printk(XENLOG_G_WARNING
-               "Cannot setup identity map d%d:%lx,"
-               " gfn already mapped to %lx.\n",
-               d->domain_id, gfn, mfn_x(mfn));
-    }
-
-    gfn_unlock(p2m, gfn, 0);
-    return ret;
-}
-
 /*
  * Returns:
  *    0        for success
@@ -1127,42 +1077,6 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     return rc;
 }
 
-int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
-{
-    p2m_type_t p2mt;
-    p2m_access_t a;
-    mfn_t mfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int ret;
-
-    if ( !paging_mode_translate(d) )
-    {
-        if ( !need_iommu(d) )
-            return 0;
-        return iommu_unmap_page(d, gfn);
-    }
-
-    gfn_lock(p2m, gfn, 0);
-
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
-    if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
-    {
-        ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
-                            p2m_invalid, p2m->default_access);
-        gfn_unlock(p2m, gfn, 0);
-    }
-    else
-    {
-        gfn_unlock(p2m, gfn, 0);
-        printk(XENLOG_G_WARNING
-               "non-identity map d%d:%lx not cleared (mapped to %lx)\n",
-               d->domain_id, gfn, mfn_x(mfn));
-        ret = 0;
-    }
-
-    return ret;
-}
-
 /* Returns: 0 for success, -errno for failure */
 int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 919993e..714a19e 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1896,6 +1896,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K;
     struct mapped_rmrr *mrmrr;
     struct domain_iommu *hd = dom_iommu(d);
+    int ret = 0;
 
     ASSERT(pcidevs_locked());
     ASSERT(rmrr->base_address < rmrr->end_address);
@@ -1909,8 +1910,6 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
         {
-            int ret = 0;
-
             if ( map )
             {
                 ++mrmrr->count;
@@ -1920,9 +1919,10 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
             if ( --mrmrr->count )
                 return 0;
 
-            while ( base_pfn < end_pfn )
+            ret = modify_mmio_11(d, base_pfn, end_pfn - base_pfn, false);
+            while ( !iommu_use_hap_pt(d) && base_pfn < end_pfn )
             {
-                if ( clear_identity_p2m_entry(d, base_pfn) )
+                if ( iommu_unmap_page(d, base_pfn) )
                     ret = -ENXIO;
                 base_pfn++;
             }
@@ -1936,12 +1936,15 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     if ( !map )
         return -ENOENT;
 
-    while ( base_pfn < end_pfn )
+    ret = modify_mmio_11(d, base_pfn, end_pfn - base_pfn, true);
+    if ( ret )
+        return ret;
+    while ( !iommu_use_hap_pt(d) && base_pfn < end_pfn )
     {
-        int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
-
-        if ( err )
-            return err;
+        ret = iommu_map_page(d, base_pfn, base_pfn,
+                             IOMMUF_readable|IOMMUF_writable);
+        if ( ret )
+            return ret;
         base_pfn++;
     }
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 7035860..ccf19e5 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -602,11 +602,6 @@ int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                          unsigned int order);
 
-/* Set identity addresses in the p2m table (for pass-through) */
-int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
-                           p2m_access_t p2ma, unsigned int flag);
-int clear_identity_p2m_entry(struct domain *d, unsigned long gfn);
-
 /* Add foreign mapping to the guest's p2m table. */
 int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
                     unsigned long gpfn, domid_t foreign_domid);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 3be1e91..5f6b4ef 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -2,6 +2,7 @@
 #define _XEN_P2M_COMMON_H
 
 #include <public/vm_event.h>
+#include <xen/softirq.h>
 
 /*
  * Additional access types, which are used to further restrict
@@ -46,6 +47,35 @@ int unmap_mmio_regions(struct domain *d,
                        mfn_t mfn);
 
 /*
+ * Preemptible helper for mapping/unmapping MMIO regions.
+ */
+static inline int modify_mmio_11(struct domain *d, unsigned long pfn,
+                                 unsigned long nr_pages, bool map)
+{
+    int rc = 0;
+
+    while ( nr_pages > 0 )
+    {
+        rc = (map ? map_mmio_regions : unmap_mmio_regions)
+             (d, _gfn(pfn), nr_pages, _mfn(pfn));
+        if ( rc == 0 )
+            break;
+        if ( rc < 0 )
+        {
+            printk(XENLOG_ERR
+                   "Failed to %smap %#lx - %#lx into domain %d memory map: %d\n",
+                   map ? "" : "un", pfn, pfn + nr_pages, d->domain_id, rc);
+            return rc;
+        }
+        nr_pages -= rc;
+        pfn += rc;
+        process_pending_softirqs();
+    }
+
+    return rc;
+}
+
+/*
  * Set access type for a region of gfns.
  * If gfn == INVALID_GFN, sets the default access type.
  */
-- 
2.7.4 (Apple Git-66)

