
[Xen-devel] [PATCH 1/6] xen: extend XEN_DOMCTL_memory_mapping to handle cacheability



Reuse the existing padding field to pass cacheability information about
the memory mapping: specifically, whether the memory should be mapped as
normal memory or as device memory (the latter being the only option
today).

Add a cacheability parameter to map_mmio_regions. 0 means device
memory, which matches today's behaviour.

On ARM, map device memory as p2m_mmio_direct_dev (as is already done
today) and normal memory as p2m_ram_rw.

On x86, return an error if the requested cacheability is not device memory.
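
For illustration only (not part of the patch): a minimal sketch of how a
toolstack-side caller might fill in the extended domctl to request a
normal-memory mapping. It assumes the public domctl.h definitions are on
the include path; do_domctl_hypercall() is a hypothetical stand-in for
whatever mechanism the caller uses to issue the domctl (e.g. via libxc).

    #include <xen/domctl.h>  /* public header; include path is an assumption */

    static int example_map_as_memory(uint32_t domid, uint64_t gfn,
                                     uint64_t mfn, uint64_t nr)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_memory_mapping,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain = domid,
        };

        domctl.u.memory_mapping.first_gfn    = gfn;
        domctl.u.memory_mapping.first_mfn    = mfn;
        domctl.u.memory_mapping.nr_mfns      = nr;
        domctl.u.memory_mapping.add_mapping  = DPCI_ADD_MAPPING;
        /* New field, reusing the former padding slot. */
        domctl.u.memory_mapping.cache_policy = CACHEABILITY_MEMORY;

        return do_domctl_hypercall(&domctl);  /* hypothetical helper */
    }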

Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>
CC: JBeulich@xxxxxxxx
CC: andrew.cooper3@xxxxxxxxxx
---
 xen/arch/arm/gic-v2.c            |  3 ++-
 xen/arch/arm/p2m.c               | 19 +++++++++++++++++--
 xen/arch/arm/platforms/exynos5.c |  4 ++--
 xen/arch/arm/platforms/omap5.c   |  8 ++++----
 xen/arch/arm/vgic-v2.c           |  2 +-
 xen/arch/arm/vgic/vgic-v2.c      |  2 +-
 xen/arch/x86/hvm/dom0_build.c    |  7 +++++--
 xen/arch/x86/mm/p2m.c            |  6 +++++-
 xen/common/domctl.c              |  8 +++++---
 xen/drivers/vpci/header.c        |  3 ++-
 xen/include/public/domctl.h      |  4 +++-
 xen/include/xen/p2m-common.h     |  3 ++-
 12 files changed, 49 insertions(+), 20 deletions(-)

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index e7eb01f..1ea3da2 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -690,7 +690,8 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d)
 
         ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
                                PFN_UP(v2m_data->size),
-                               maddr_to_mfn(v2m_data->addr));
+                               maddr_to_mfn(v2m_data->addr),
+                               CACHEABILITY_DEVMEM);
         if ( ret )
         {
             printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 30cfb01..5b8fcc5 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1068,9 +1068,24 @@ int unmap_regions_p2mt(struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
-    return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev);
+    p2m_type_t t;
+
+    switch ( cache_policy )
+    {
+    case CACHEABILITY_MEMORY:
+        t = p2m_ram_rw;
+        break;
+    case CACHEABILITY_DEVMEM:
+        t = p2m_mmio_direct_dev;
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    return p2m_insert_mapping(d, start_gfn, nr, mfn, t);
 }
 
 int unmap_mmio_regions(struct domain *d,
diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c
index 6560507..3af5fd3 100644
--- a/xen/arch/arm/platforms/exynos5.c
+++ b/xen/arch/arm/platforms/exynos5.c
@@ -83,11 +83,11 @@ static int exynos5250_specific_mapping(struct domain *d)
 {
     /* Map the chip ID */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
-                     maddr_to_mfn(EXYNOS5_PA_CHIPID));
+                     maddr_to_mfn(EXYNOS5_PA_CHIPID), CACHEABILITY_DEVMEM);
 
     /* Map the PWM region */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
-                     maddr_to_mfn(EXYNOS5_PA_TIMER));
+                     maddr_to_mfn(EXYNOS5_PA_TIMER), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c
index aee24e4..4899332 100644
--- a/xen/arch/arm/platforms/omap5.c
+++ b/xen/arch/arm/platforms/omap5.c
@@ -99,19 +99,19 @@ static int omap5_specific_mapping(struct domain *d)
 {
     /* Map the PRM module */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
-                     maddr_to_mfn(OMAP5_PRM_BASE));
+                     maddr_to_mfn(OMAP5_PRM_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the PRM_MPU */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
-                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE));
+                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the Wakeup Gen */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
-                     maddr_to_mfn(OMAP5_WKUPGEN_BASE));
+                     maddr_to_mfn(OMAP5_WKUPGEN_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the on-chip SRAM */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
-                     maddr_to_mfn(OMAP5_SRAM_PA));
+                     maddr_to_mfn(OMAP5_SRAM_PA), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index bf77899..2c9c15c 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -691,7 +691,7 @@ static int vgic_v2_domain_init(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
         return ret;
 
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
index b5ba4ac..bb305a2 100644
--- a/xen/arch/arm/vgic/vgic-v2.c
+++ b/xen/arch/arm/vgic/vgic-v2.c
@@ -309,7 +309,7 @@ int vgic_v2_map_resources(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
     {
         gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 3e29cd3..3058560 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -67,8 +67,11 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
 
     for ( ; ; )
     {
-        rc = (map ? map_mmio_regions : unmap_mmio_regions)
-             (d, _gfn(pfn), nr_pages, _mfn(pfn));
+        if ( map )
+            rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
+                                  CACHEABILITY_DEVMEM);
+        else
+            rc = unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
         if ( rc == 0 )
             break;
         if ( rc < 0 )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4bdc5e3..0bbb2a4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2110,12 +2110,16 @@ static unsigned int mmio_order(const struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
     int ret = 0;
     unsigned long i;
     unsigned int iter, order;
 
+    if ( cache_policy != CACHEABILITY_DEVMEM )
+        return -ENOSYS;
+
     if ( !paging_mode_translate(d) )
         return 0;
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b294881..f4a7c16 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -935,6 +935,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
         unsigned long mfn_end = mfn + nr_mfns - 1;
         int add = op->u.memory_mapping.add_mapping;
+        uint32_t cache_policy = op->u.memory_mapping.cache_policy;
 
         ret = -EINVAL;
         if ( mfn_end < mfn || /* wrap? */
@@ -961,10 +962,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( add )
         {
             printk(XENLOG_G_DEBUG
-                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
-                   d->domain_id, gfn, mfn, nr_mfns);
+                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx cache=%u\n",
+                   d->domain_id, gfn, mfn, nr_mfns, cache_policy);
 
-            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
+            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn),
+                                   cache_policy);
             if ( ret < 0 )
                 printk(XENLOG_G_WARNING
                        "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx 
ret:%ld\n",
diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index 4573cca..f968c2d 100644
--- a/xen/drivers/vpci/header.c
+++ b/xen/drivers/vpci/header.c
@@ -52,7 +52,8 @@ static int map_range(unsigned long s, unsigned long e, void *data,
          * - {un}map_mmio_regions doesn't support preemption.
          */
 
-        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s))
+        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s),
+                                         CACHEABILITY_DEVMEM)
                       : unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
         if ( rc == 0 )
         {
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4a46c28..82704dd 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -571,12 +571,14 @@ struct xen_domctl_bind_pt_irq {
 */
 #define DPCI_ADD_MAPPING         1
 #define DPCI_REMOVE_MAPPING      0
+#define CACHEABILITY_DEVMEM      0 /* device memory, the default */
+#define CACHEABILITY_MEMORY      1 /* normal memory */
 struct xen_domctl_memory_mapping {
     uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
     uint64_aligned_t first_mfn; /* first page (machine page) in range */
     uint64_aligned_t nr_mfns;   /* number of pages in range (>0) */
     uint32_t add_mapping;       /* add or remove mapping */
-    uint32_t padding;           /* padding for 64-bit aligned structure */
+    uint32_t cache_policy;      /* cacheability of the memory mapping */
 };
 
 
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 58031a6..1c945a1 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -14,7 +14,8 @@ guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn);
+                     mfn_t mfn,
+                     uint32_t cache_policy);
 int unmap_mmio_regions(struct domain *d,
                        gfn_t start_gfn,
                        unsigned long nr,
-- 
1.9.1

