
[Xen-devel] [PATCH v3 28/28] xen/arm32: head: Use a page mapping for the 1:1 mapping in create_page_tables()



At the moment, create_page_tables() will use a 1GB/2MB mapping for the
identity mapping. As we don't know what is present before and after Xen
in memory, we may end up mapping device/reserved-memory with cacheable
attributes. This may result in mismatched attributes, as other users
may access the same region differently.

To prevent any issues, we should only map the strict minimum in the
1:1 mapping. A check in xen.lds.S already guarantees that anything
necessary for turning on the MMU fits in a page (currently 4KB).

As only one page will be mapped for the 1:1 mapping, it is necessary
to pre-allocate a page for the 3rd level table.

For simplicity, all the tables that may be necessary for setting up the
1:1 mapping are linked together in advance. They will then be linked to
the boot page tables at the correct level.
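
For clarity, the linking decision implemented in assembly below can be
summarised in C. This is a minimal sketch only: link_table() and
map_page() are hypothetical stand-ins for the create_table_entry /
create_mapping_entry macros, and the shift/mask values mirror the arm32
LPAE layout used in head.S.

    #include <stdint.h>

    #define FIRST_SHIFT      30        /* 1GB (1st-level) granularity */
    #define SECOND_SHIFT     21        /* 2MB (2nd-level) granularity */
    #define LPAE_ENTRY_MASK  0x1ffUL   /* 512 entries per table */

    typedef uint64_t lpae_t;
    extern lpae_t boot_pgtable[], boot_second[];
    extern lpae_t boot_second_id[], boot_third_id[];

    /* Hypothetical C equivalents of the assembly helpers. */
    extern void link_table(lpae_t *tbl, lpae_t *next,
                           uintptr_t paddr, unsigned int shift);
    extern void map_page(lpae_t *third, uintptr_t va, uintptr_t pa);

    void setup_identity_map(uintptr_t xen_paddr /* paddr of Xen, r9 */)
    {
        unsigned int first_slot, second_slot;

        /* Always pre-link boot_second_id -> boot_third_id -> 4K page. */
        link_table(boot_second_id, boot_third_id, xen_paddr, SECOND_SHIFT);
        map_page(boot_third_id, xen_paddr, xen_paddr);

        first_slot = (xen_paddr >> FIRST_SHIFT) & LPAE_ENTRY_MASK;
        if ( first_slot != 0 )
        {
            /* No clash with Xen's runtime slot 0: hook the whole chain
             * into the 1st-level table. */
            link_table(boot_pgtable, boot_second_id, xen_paddr, FIRST_SHIFT);
            return;
        }

        second_slot = (xen_paddr >> SECOND_SHIFT) & LPAE_ENTRY_MASK;
        if ( second_slot == 1 )
            return; /* head.S branches to virtphys_clash in this case */

        /* Reuse boot_second and hook only boot_third_id at level 2. */
        link_table(boot_second, boot_third_id, xen_paddr, SECOND_SHIFT);
    }

For example, if Xen were loaded at physical address 0x48200000, the
1st-level slot would be (0x48200000 >> 30) & 0x1ff = 1, so
boot_second_id is linked into boot_pgtable and the runtime mapping in
slot 0 is left untouched.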

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

---
    Changes in v3:
        - Patch added
---
 xen/arch/arm/arm32/head.S | 119 ++++++++++++++++++----------------------------
 xen/arch/arm/mm.c         |   2 +-
 2 files changed, 48 insertions(+), 73 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 6d03fecaf2..dec6266803 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -444,73 +444,13 @@ ENDPROC(cpu_init)
  *   r6 : Identity map in place
  */
 create_page_tables:
-        /*
-         * If Xen is loaded at exactly XEN_VIRT_START then we don't
-         * need an additional 1:1 mapping, the virtual mapping will
-         * suffice.
-         */
-        cmp   r9, #XEN_VIRT_START
-        moveq r6, #1                 /* r6 := identity map now in place */
-        movne r6, #0                 /* r6 := identity map not yet in place */
-
-        ldr   r4, =boot_pgtable
-        add   r4, r4, r10            /* r4 := paddr (boot_pagetable) */
-
-        /* Setup boot_pgtable: */
-        ldr   r1, =boot_second
-        add   r1, r1, r10            /* r1 := paddr (boot_second) */
-
-        /* ... map boot_second in boot_pgtable[0] */
-        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_second */
-        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
-        mov   r3, #0x0
-        strd  r2, r3, [r4, #0]       /* Map it in slot 0 */
-
-        /* ... map of paddr(start) in boot_pgtable */
-        lsrs  r1, r9, #FIRST_SHIFT   /* Offset of base paddr in boot_pgtable */
-        beq   1f                     /* If it is in slot 0 then map in boot_second
-                                      * later on */
-        lsl   r2, r1, #FIRST_SHIFT   /* Base address for 1GB mapping */
-        orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
-        orr   r2, r2, #PT_LOWER(MEM)
-        lsl   r1, r1, #3             /* r1 := Slot offset */
-        mov   r3, #0x0
-        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
-        mov   r6, #1                 /* r6 := identity map now in place */
-
-1:      /* Setup boot_second: */
-        ldr   r4, =boot_second
-        add   r4, r4, r10            /* r4 := paddr (boot_second) */
-
-        ldr   r1, =boot_third
-        add   r1, r1, r10            /* r1 := paddr (boot_third) */
-
-        /* ... map boot_third in boot_second[1] */
-        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_third */
-        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
-        mov   r3, #0x0
-        strd  r2, r3, [r4, #8]       /* Map it in slot 1 */
-
-        /* ... map of paddr(start) in boot_second */
-        cmp   r6, #1                 /* r6 is set if already created */
-        beq   1f
-        lsr   r2, r9, #SECOND_SHIFT  /* Offset of base paddr in boot_second */
-        ldr   r3, =LPAE_ENTRY_MASK
-        and   r1, r2, r3
-        cmp   r1, #1
-        beq   virtphys_clash         /* It's in slot 1, which we cannot handle */
-
-        lsl   r2, r2, #SECOND_SHIFT  /* Base address for 2MB mapping */
-        orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
-        orr   r2, r2, #PT_LOWER(MEM)
-        mov   r3, #0x0
-        lsl   r1, r1, #3             /* r1 := Slot offset */
-        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
-        mov   r6, #1                 /* r6 := identity map now in place */
+        /* Prepare the page-tables for mapping Xen */
+        ldr   r0, =XEN_VIRT_START
+        create_table_entry boot_pgtable, boot_second, r0, FIRST_SHIFT
+        create_table_entry boot_second, boot_third, r0, SECOND_SHIFT
 
         /* Setup boot_third: */
-1:      ldr   r4, =boot_third
-        add   r4, r4, r10            /* r4 := paddr (boot_third) */
+        adr_l r4, boot_third, mmu=0
 
         lsr   r2, r9, #THIRD_SHIFT  /* Base address for 4K mapping */
         lsl   r2, r2, #THIRD_SHIFT
@@ -527,16 +467,51 @@ create_page_tables:
         blo   1b
 
         /*
-         * Defer fixmap and dtb mapping until after paging enabled, to
-         * avoid them clashing with the 1:1 mapping.
+         * If Xen is loaded at exactly XEN_VIRT_START then we don't
+         * need an additional 1:1 mapping, the virtual mapping will
+         * suffice.
          */
+        cmp   r9, #XEN_VIRT_START
+        moveq pc, lr
 
-        /* boot pagetable setup complete */
+1:
+        /*
+         * Only the first page of Xen will be part of the 1:1 mapping.
+         * All the boot_*_id tables are linked together even if they may
+         * not be all used. They will then be linked to the boot page
+         * tables at the correct level.
+         */
+        create_table_entry boot_second_id, boot_third_id, r9, SECOND_SHIFT
+        create_mapping_entry boot_third_id, r9, r9
+
+        /*
+         * Find the first slot used. Link boot_second_id into boot_pgtable
+         * if the slot is not 0. For slot 0, the tables associated with
+         * the 1:1 mapping will need to be linked in boot_second.
+         */
+        lsr   r0, r9, #FIRST_SHIFT
+        mov_w r1, LPAE_ENTRY_MASK
+        ands  r0, r0, r1             /* r0 := first slot */
+        beq   1f
+        /* It is not in slot 0, Link boot_second_id into boot_first */
+        create_table_entry boot_pgtable, boot_second_id, r9, FIRST_SHIFT
+        mov   pc, lr
+
+1:
+        /*
+         * Find the second slot used. Link boot_third_id into boot_second
+         * if the slot is not 1 (runtime Xen mapping is 2M - 4M).
+         * For slot 1, Xen is not yet able to handle it.
+         */
+        lsr   r0, r9, #SECOND_SHIFT
+        mov_w r1, LPAE_ENTRY_MASK
+        and   r0, r0, r1             /* r0 := second slot */
+        cmp   r0, #1
+        beq   virtphys_clash
+        /* It is not in slot 1, link boot_third_id into boot_second */
+        create_table_entry boot_second, boot_third_id, r9, SECOND_SHIFT
+        mov   pc, lr
 
-        cmp   r6, #1                /* Did we manage to create an identity mapping ? */
-        moveq pc, lr
-        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
-        b     fail
 virtphys_clash:
         /* Identity map clashes with boot_third, which we cannot handle yet */
         PRINT("- Unable to build boot page tables - virt and phys addresses 
clash. -\r\n")
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 72ffea7472..9e0fdc39f9 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -105,9 +105,9 @@ DEFINE_BOOT_PAGE_TABLE(boot_pgtable);
 #ifdef CONFIG_ARM_64
 DEFINE_BOOT_PAGE_TABLE(boot_first);
 DEFINE_BOOT_PAGE_TABLE(boot_first_id);
+#endif
 DEFINE_BOOT_PAGE_TABLE(boot_second_id);
 DEFINE_BOOT_PAGE_TABLE(boot_third_id);
-#endif
 DEFINE_BOOT_PAGE_TABLE(boot_second);
 DEFINE_BOOT_PAGE_TABLE(boot_third);
 
-- 
2.11.0

