
[Xen-devel] [PATCH 1/4] xen: arm: clarify naming of the Xen TLB flushing functions



All of the flush_xen_*_tlb functions operate on the local processor only. Add
_local to their names and update the comments to make this clear.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/mm.c                |   22 +++++++++++-----------
 xen/include/asm-arm/arm32/page.h |   21 +++++++++++++--------
 xen/include/asm-arm/arm64/page.h |   20 ++++++++++++--------
 3 files changed, 36 insertions(+), 27 deletions(-)
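
For context: this rename also frees the unsuffixed names for possible
inner-shareable variants which would operate on all processors rather than
just the local one. As a rough sketch (not part of this patch), such a
variant could use the architectural broadcast operations, TLBIALLHIS on
ARMv7 and "tlbi alle2is" on ARMv8. The function name below is illustrative
only, and TLBIALLHIS is assumed to be defined in cpregs.h alongside the
existing CP15 encodings:

#ifdef CONFIG_ARM_32
/* Sketch: flush all hypervisor TLB entries on every processor in the
 * inner shareable domain (broadcast variant of TLBIALLH). */
static inline void flush_xen_data_tlb(void)
{
    register unsigned long r0 asm ("r0");
    asm volatile("dsb;"                    /* Ensure preceding are visible */
                 STORE_CP32(0, TLBIALLHIS) /* Broadcast hyp TLB flush */
                 "dsb;"                    /* Ensure completion of the flush */
                 "isb;"
                 : : "r" (r0) /* dummy */ : "memory");
}
#else /* CONFIG_ARM_64 */
/* Sketch: same, using the inner-shareable form of "tlbi alle2". */
static inline void flush_xen_data_tlb(void)
{
    asm volatile (
        "dsb    sy;"                    /* Ensure visibility of PTE writes */
        "tlbi   alle2is;"               /* Broadcast flush of hyp TLB */
        "dsb    sy;"                    /* Ensure completion of the flush */
        "isb;"
        : : : "memory");
}
#endif

The text TLB and range flushes would get matching inner-shareable variants
in the same way.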

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 305879f..fb1bbf2 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -217,7 +217,7 @@ void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
     pte.pt.ai = attributes;
     pte.pt.xn = 1;
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_xen_data_tlb_local_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 /* Remove a mapping from a fixmap entry */
@@ -225,7 +225,7 @@ void clear_fixmap(unsigned map)
 {
     lpae_t pte = {0};
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_xen_data_tlb_local_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
@@ -303,7 +303,7 @@ void *map_domain_page(unsigned long mfn)
      * We may not have flushed this specific subpage at map time,
      * since we only flush the 4k page not the superpage
      */
-    flush_xen_data_tlb_range_va(va, PAGE_SIZE);
+    flush_xen_data_tlb_local_range_va(va, PAGE_SIZE);
 
     return (void *)va;
 }
@@ -404,7 +404,7 @@ void __init remove_early_mappings(void)
 {
     lpae_t pte = {0};
     write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
-    flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
+    flush_xen_data_tlb_local_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
 }
 
 extern void relocate_xen(uint64_t ttbr, void *src, void *dst, size_t len);
@@ -468,7 +468,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     dest_va = BOOT_RELOC_VIRT_START;
     pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
     write_pte(boot_second + second_table_offset(dest_va), pte);
-    flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);
+    flush_xen_data_tlb_local_range_va(dest_va, SECOND_SIZE);
 #ifdef CONFIG_ARM_64
     ttbr = (uintptr_t) xen_pgtable + phys_offset;
 #else
@@ -516,7 +516,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     /* From now on, no mapping may be both writable and executable. */
     WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
     /* Flush everything after setting WXN bit. */
-    flush_xen_text_tlb();
+    flush_xen_text_tlb_local();
 
 #ifdef CONFIG_ARM_32
     per_cpu(xen_pgtable, 0) = cpu0_pgtable;
@@ -589,7 +589,7 @@ void __cpuinit mmu_init_secondary_cpu(void)
 {
     /* From now on, no mapping may be both writable and executable. */
     WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
-    flush_xen_text_tlb();
+    flush_xen_text_tlb_local();
 }
 
 /* Create Xen's mappings of memory.
@@ -617,7 +617,7 @@ static void __init create_32mb_mappings(lpae_t *second,
         write_pte(p + i, pte);
         pte.pt.base += 1 << LPAE_SHIFT;
     }
-    flush_xen_data_tlb();
+    flush_xen_data_tlb_local();
 }
 
 #ifdef CONFIG_ARM_32
@@ -696,7 +696,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
         vaddr += FIRST_SIZE;
     }
 
-    flush_xen_data_tlb();
+    flush_xen_data_tlb_local();
 }
 #endif
 
@@ -841,7 +841,7 @@ static int create_xen_entries(enum xenmap_operation op,
                 BUG();
         }
     }
-    flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+    flush_xen_data_tlb_local_range_va(virt, PAGE_SIZE * nr_mfns);
 
     rc = 0;
 
@@ -904,7 +904,7 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
         }
         write_pte(xen_xenmap + i, pte);
     }
-    flush_xen_text_tlb();
+    flush_xen_text_tlb_local();
 }
 
 /* Release all __init and __initdata ranges to be reused */
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index b8221ca..daa4931 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -27,13 +27,15 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 #define __clean_and_invalidate_xen_dcache_one(R) STORE_CP32(R, DCCIMVAC)
 
 /*
- * Flush all hypervisor mappings from the TLB and branch predictor.
+ * Flush all hypervisor mappings from the TLB and branch predictor of
+ * the local processor.
+ *
  * This is needed after changing Xen code mappings.
  *
  * The caller needs to issue the necessary DSB and D-cache flushes
  * before calling flush_xen_text_tlb.
  */
-static inline void flush_xen_text_tlb(void)
+static inline void flush_xen_text_tlb_local(void)
 {
     register unsigned long r0 asm ("r0");
     asm volatile (
@@ -47,10 +49,11 @@ static inline void flush_xen_text_tlb(void)
 }
 
 /*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
  */
-static inline void flush_xen_data_tlb(void)
+static inline void flush_xen_data_tlb_local(void)
 {
     register unsigned long r0 asm ("r0");
     asm volatile("dsb;" /* Ensure preceding are visible */
@@ -61,10 +64,12 @@ static inline void flush_xen_data_tlb(void)
 }
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
  */
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_local_range_va(unsigned long va,
+                                                     unsigned long size)
 {
     unsigned long end = va + size;
     dsb(); /* Ensure preceding are visible */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 3352821..9bea476 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -22,13 +22,14 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 #define __clean_and_invalidate_xen_dcache_one(R) "dc  civac, %" #R ";"
 
 /*
- * Flush all hypervisor mappings from the TLB
+ * Flush all hypervisor mappings from the TLB of the local processor.
+ *
  * This is needed after changing Xen code mappings.
  *
  * The caller needs to issue the necessary DSB and D-cache flushes
  * before calling flush_xen_text_tlb.
  */
-static inline void flush_xen_text_tlb(void)
+static inline void flush_xen_text_tlb_local(void)
 {
     asm volatile (
         "isb;"       /* Ensure synchronization with previous changes to text */
@@ -40,10 +41,11 @@ static inline void flush_xen_text_tlb(void)
 }
 
 /*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
  */
-static inline void flush_xen_data_tlb(void)
+static inline void flush_xen_data_tlb_local(void)
 {
     asm volatile (
         "dsb    sy;"                    /* Ensure visibility of PTE writes */
@@ -54,10 +56,12 @@ static inline void flush_xen_data_tlb(void)
 }
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
  */
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_local_range_va(unsigned long va,
+                                                     unsigned long size)
 {
     unsigned long end = va + size;
     dsb(); /* Ensure preceding are visible */
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

