
[Xen-devel] [PATCH v4 2/6] xen: arm: consolidate body of flush_xen_data_tlb_range_va_local



The body of flush_xen_data_tlb_range_va_local() is almost identical on both
sub-architectures (arm32 and arm64), so consolidate it into the common
asm-arm/page.h, with each sub-architecture providing a
__flush_xen_data_tlb_one_local() helper which flushes a single entry.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v4: New patch
---
 xen/include/asm-arm/arm32/page.h |   19 +++----------------
 xen/include/asm-arm/arm64/page.h |   19 +++----------------
 xen/include/asm-arm/page.h       |   18 ++++++++++++++++++
 3 files changed, 24 insertions(+), 32 deletions(-)
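
Not part of the patch, just a minimal usage sketch for reviewers. The range
flush keeps its existing semantics after this change, so a hypothetical
caller that has just rewritten some hypervisor PTEs would still look like
the following (xen_remap_example() and the PTE update are illustrative
only, not real Xen code):

    /* Hypothetical caller: update a hypervisor mapping, then flush the
     * stale data TLB entries for that range on the local CPU.  The common
     * flush_xen_data_tlb_range_va_local() loop added below calls the
     * per-arch __flush_xen_data_tlb_one_local() once per PAGE_SIZE step,
     * bracketed by dsb(sy) before and dsb(sy); isb() after. */
    static void xen_remap_example(unsigned long va, unsigned long size)
    {
        /* ... write the new PTEs for [va, va + size) here ... */
        flush_xen_data_tlb_range_va_local(va, size);
    }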

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index b0a2025..d839d03 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -63,23 +63,10 @@ static inline void flush_xen_data_tlb_local(void)
                  : : "r" (r0) /* dummy */: "memory");
 }
 
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
-                                                     unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
 {
-    unsigned long end = va + size;
-    dsb(sy); /* Ensure preceding are visible */
-    while ( va < end ) {
-        asm volatile(STORE_CP32(0, TLBIMVAH)
-                     : : "r" (va) : "memory");
-        va += PAGE_SIZE;
-    }
-    dsb(sy); /* Ensure completion of the TLB flush */
-    isb();
+    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
 }
 
 /* Ask the MMU to translate a VA for us */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 65332a3..897d79b 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -55,23 +55,10 @@ static inline void flush_xen_data_tlb_local(void)
         : : : "memory");
 }
 
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
-                                                     unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
 {
-    unsigned long end = va + size;
-    dsb(sy); /* Ensure preceding are visible */
-    while ( va < end ) {
-        asm volatile("tlbi vae2, %0;"
-                     : : "r" (va>>PAGE_SHIFT) : "memory");
-        va += PAGE_SIZE;
-    }
-    dsb(sy); /* Ensure completion of the TLB flush */
-    isb();
+    asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
 }
 
 /* Ask the MMU to translate a VA for us */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index d18ec2a..bbecacf 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -306,6 +306,24 @@ static inline void clean_and_invalidate_xen_dcache_va_range
             : : "r" (_p), "m" (*_p));                                   \
 } while (0)
 
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
+                                                     unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(sy); /* Ensure preceding are visible */
+    while ( va < end ) {
+        __flush_xen_data_tlb_one_local(va);
+        va += PAGE_SIZE;
+    }
+    dsb(sy); /* Ensure completion of the TLB flush */
+    isb();
+}
+
 /* Flush the dcache for an entire page. */
 void flush_page_to_ram(unsigned long mfn);
 
-- 
1.7.10.4

