
[Xen-devel] [For Xen-4.10 RFC PATCH 3/3] Prevent redundant icache flushes in populate_physmap()

populate_physmap() calls alloc_heap_pages() per requested extent. As
alloc_heap_pages() performs icache maintenance operations affecting the
entire instruction cache, this leads to redundant cache flushes when
allocating multiple extents in populate_physmap().

To alleviate this problem, introduce a new flag "MEMF_no_icache_flush"
which can be used to prevent alloc_heap_pages() from performing
unnecessary icache maintenance operations. Use the flag in
populate_physmap() and perform the required icache maintenance once at
the end of the operation.
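
For illustration, the arch-specific invalidate_icache() helper relied on
here (the x86 variant added below is an empty stub; an ARM counterpart
is assumed to be introduced elsewhere in this series) would invalidate
the entire instruction cache in one go. A minimal AArch64-style sketch,
assuming standard cache maintenance instructions, and not the exact code
from that series:

    /* Sketch only: invalidate all instruction caches to the Point of
     * Unification across the Inner Shareable domain, wait for the
     * invalidation to complete, then resynchronise the instruction
     * stream on the local CPU. */
    static inline void invalidate_icache(void)
    {
        asm volatile ("ic ialluis\n\tdsb ish\n\tisb" : : : "memory");
    }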

Signed-off-by: Punit Agrawal <punit.agrawal@xxxxxxx>
---
 xen/common/memory.c        | 6 ++++++
 xen/common/page_alloc.c    | 2 +-
 xen/include/asm-x86/page.h | 4 ++++
 xen/include/xen/mm.h       | 2 ++
 4 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index ad0b33ceb6..507f363924 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -162,6 +162,8 @@ static void populate_physmap(struct memop_args *a)
     if ( unlikely(!d->creation_finished) )
         a->memflags |= MEMF_no_tlbflush;
 
+    a->memflags |= MEMF_no_icache_flush;
+
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
         if ( i != a->nr_done && hypercall_preempt_check() )
@@ -253,6 +255,10 @@ static void populate_physmap(struct memop_args *a)
 out:
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    if ( a->memflags & MEMF_no_icache_flush )
+        invalidate_icache();
+
     a->nr_done = i;
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 15450a3b6d..1a51bc6b15 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -838,7 +838,7 @@ static struct page_info *alloc_heap_pages(
         /* Ensure cache and RAM are consistent for platforms where the
          * guest can control its own visibility of/through the cache.
          */
-        flush_page_to_ram(page_to_mfn(&pg[i]), true);
+        flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
     }
 
     spin_unlock(&heap_lock);
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index bc5946b9d2..13dc9e2299 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -375,6 +375,10 @@ perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
 
 #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
 
+static inline void invalidate_icache(void)
+{
+}
+
 #endif /* __X86_PAGE_H__ */
 
 /*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 88de3c1fa6..ee50d4cd7b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -224,6 +224,8 @@ struct npfec {
 #define  MEMF_no_owner    (1U<<_MEMF_no_owner)
 #define _MEMF_no_tlbflush 6
 #define  MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
+#define _MEMF_no_icache_flush 7
+#define  MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
 #define _MEMF_node        8
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
-- 
2.11.0

