
[Xen-devel] [PATCH 10 of 11] arm: Actually free __init/__initdata ranges on boot



# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1331311595 0
# Node ID ec6712d0af2786332af7dd6f6caab50878bc96d0
# Parent  80b8756718af745fae2778c625a8eea6235974fe
arm: Actually free __init/__initdata ranges on boot.

Signed-off-by: Tim Deegan <tim@xxxxxxx>
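
[Editorial note, not part of the patch: the sketch below restates the new
free path with comments on why the steps are ordered as they are. All
identifiers come from the hunks that follow; the ordering rationale is an
assumption drawn from the code, not from the commit message.]

    /*
     * Sketch only: free_init_memory(), as added to xen/arch/arm/mm.c below.
     */
    void free_init_memory(void)
    {
        paddr_t pa = virt_to_maddr(__init_begin);
        unsigned long len = __init_end - __init_begin;

        /* Make the init region writable first: it may have been mapped
         * read-only/executable by earlier setup, in which case the
         * poisoning memset below would fault. */
        set_pte_flags_on_range(__init_begin, len, mg_rw);
        /* Poison the freed code/data so any stray late reference to
         * __init text or data fails loudly rather than silently reusing
         * recycled memory. */
        memset(__init_begin, 0xcc, len);
        /* Drop the mapping entirely before giving the pages away. */
        set_pte_flags_on_range(__init_begin, len, mg_clear);
        /* Hand the underlying frames back to the domheap allocator. */
        init_domheap_pages(pa, pa + len);
        printk("Freed %ldkB init memory.\n", (long)len >> 10);
    }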

diff -r 80b8756718af -r ec6712d0af27 xen/arch/arm/mm.c
--- a/xen/arch/arm/mm.c Fri Mar 09 16:46:35 2012 +0000
+++ b/xen/arch/arm/mm.c Fri Mar 09 16:46:35 2012 +0000
@@ -48,6 +48,8 @@ unsigned long frametable_virt_end;
 
 unsigned long max_page;
 
+extern char __init_begin[], __init_end[];
+
 /* Map a 4k page in a fixmap entry */
 void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
 {
@@ -205,17 +207,7 @@ void __init setup_pagetables(unsigned lo
     /* Undo the temporary map */
     pte.bits = 0;
     write_pte(xen_second + second_table_offset(dest_va), pte);
-    /*
-     * Have removed a mapping previously used for .text. Flush everything
-     * for safety.
-     */
-    asm volatile (
-        "dsb;"                        /* Ensure visibility of PTE write */
-        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
-        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
-        "dsb;"                        /* Ensure completion of TLB+BP flush */
-        "isb;"
-        : : "r" (i /*dummy*/) : "memory");
+    flush_xen_text_tlb();
 
     /* Link in the fixmap pagetable */
     pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset)
@@ -251,13 +243,7 @@ void __init setup_pagetables(unsigned lo
     pte.pt.table = 1;
     write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
     /* Have changed a mapping used for .text. Flush everything for safety. */
-    asm volatile (
-        "dsb;"                        /* Ensure visibility of PTE write */
-        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
-        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
-        "dsb;"                        /* Ensure completion of TLB+BP flush */
-        "isb;"
-        : : "r" (i /*dummy*/) : "memory");
+    flush_xen_text_tlb();
 
     /* From now on, no mapping may be both writable and executable. */
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
@@ -328,6 +314,64 @@ void __init setup_frametable_mappings(pa
     frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pages * sizeof(struct page_info));
 }
 
+enum mg { mg_clear, mg_ro, mg_rw, mg_rx };
+static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
+{
+    lpae_t pte;
+    int i;
+
+    ASSERT(is_kernel(p) && is_kernel(p + l));
+
+    /* Can only guard in page granularity */
+    ASSERT(!((unsigned long) p & ~PAGE_MASK));
+    ASSERT(!(l & ~PAGE_MASK));
+
+    for ( i = (p - _start) / PAGE_SIZE; 
+          i < (p + l - _start) / PAGE_SIZE; 
+          i++ )
+    {
+        pte = xen_xenmap[i];
+        switch ( mg )
+        {
+        case mg_clear:
+            pte.pt.valid = 0;
+            break;
+        case mg_ro:
+            pte.pt.valid = 1;
+            pte.pt.pxn = 1;
+            pte.pt.xn = 1;
+            pte.pt.ro = 1;
+            break;
+        case mg_rw:
+            pte.pt.valid = 1;
+            pte.pt.pxn = 1;
+            pte.pt.xn = 1;
+            pte.pt.ro = 0;
+            break;
+        case mg_rx:
+            pte.pt.valid = 1;
+            pte.pt.pxn = 0;
+            pte.pt.xn = 0;
+            pte.pt.ro = 1;
+            break;
+        }
+        write_pte(xen_xenmap + i, pte);
+    }
+    flush_xen_text_tlb();
+}
+
+/* Release all __init and __initdata ranges to be reused */
+void free_init_memory(void)
+{
+    paddr_t pa = virt_to_maddr(__init_begin);
+    unsigned long len = __init_end - __init_begin;
+    set_pte_flags_on_range(__init_begin, len, mg_rw);
+    memset(__init_begin, 0xcc, len);
+    set_pte_flags_on_range(__init_begin, len, mg_clear);
+    init_domheap_pages(pa, pa + len);
+    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
+}
+
 void arch_dump_shared_mem_info(void)
 {
 }
diff -r 80b8756718af -r ec6712d0af27 xen/arch/arm/setup.c
--- a/xen/arch/arm/setup.c      Fri Mar 09 16:46:35 2012 +0000
+++ b/xen/arch/arm/setup.c      Fri Mar 09 16:46:35 2012 +0000
@@ -38,8 +38,6 @@
 #include <asm/setup.h>
 #include "gic.h"
 
-extern const char __init_begin[], __init_end[], __bss_start[];
-
 /* Spinlock for serializing CPU bringup */
 unsigned long __initdata boot_gate = 1;
 /* Number of non-boot CPUs ready to enter C */
@@ -47,12 +45,7 @@ unsigned long __initdata ready_cpus = 0;
 
 static __attribute_used__ void init_done(void)
 {
-    /* TODO: free (or page-protect) the init areas.
-       memset(__init_begin, 0xcc, __init_end - __init_begin);
-       free_xen_data(__init_begin, __init_end);
-    */
-    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
-
+    free_init_memory();
     startup_cpu_idle_loop();
 }
 
diff -r 80b8756718af -r ec6712d0af27 xen/include/asm-arm/mm.h
--- a/xen/include/asm-arm/mm.h  Fri Mar 09 16:46:35 2012 +0000
+++ b/xen/include/asm-arm/mm.h  Fri Mar 09 16:46:35 2012 +0000
@@ -278,6 +278,10 @@ extern struct domain *dom_xen, *dom_io, 
 #define memguard_guard_stack(_p)       ((void)0)
 #define memguard_guard_range(_p,_l)    ((void)0)
 #define memguard_unguard_range(_p,_l)  ((void)0)
+
+/* Release all __init and __initdata ranges to be reused */
+void free_init_memory(void);
+
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                           unsigned int order);
 
diff -r 80b8756718af -r ec6712d0af27 xen/include/asm-arm/page.h
--- a/xen/include/asm-arm/page.h        Fri Mar 09 16:46:35 2012 +0000
+++ b/xen/include/asm-arm/page.h        Fri Mar 09 16:46:35 2012 +0000
@@ -203,6 +203,22 @@ static inline void write_pte(lpae_t *p, 
 }
 
 /*
+ * Flush all hypervisor mappings from the TLB and branch predictor.
+ * This is needed after changing Xen code mappings. 
+ */
+static inline void flush_xen_text_tlb(void)
+{
+    register unsigned long r0 asm ("r0");
+    asm volatile (
+        "dsb;"                        /* Ensure visibility of PTE writes */
+        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
+        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
+        "dsb;"                        /* Ensure completion of TLB+BP flush */
+        "isb;"
+        : : "r" (r0) /*dummy*/ : "memory");
+}
+
+/*
  * Flush all hypervisor mappings from the data TLB. This is not
  * sufficient when changing code mappings or for self modifying code.
  */
