
[PATCH 15/16] xen/arm: mm: Rework setup_xenheap_mappings()



From: Julien Grall <julien.grall@xxxxxxx>

The current implementation of setup_xenheap_mappings() uses 1GB
mappings. This can lead to unexpected results because the mapping
may alias a non-cacheable region (such as a device or reserved region).
For more details, see B2.8 in ARM DDI 0487H.a.

map_pages_to_xen() was recently reworked to allow superpage mappings,
support contiguous mappings, and deal with the use of page-tables before
they are mapped.

Most of the code in setup_xenheap_mappings() is now replaced with a
single call to map_pages_to_xen().
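
To illustrate the alignment constraint addressed by the new comment in the
hunk below, here is a minimal, self-contained sketch (not Xen code): the
constants assume 4KB pages with a 1GB first-level size, and
DIRECTMAP_VIRT_START is an arbitrary placeholder value. It shows how
offsetting the xenheap virtual start keeps the virtual and machine
addresses congruent modulo the first-level size, which is what allows
map_pages_to_xen() to use block (superpage) mappings:

/*
 * Minimal sketch (not Xen code). PAGE_SHIFT/FIRST_SIZE assume 4KB pages
 * and a 1GB first-level size; DIRECTMAP_VIRT_START is a placeholder.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT           12UL
#define PAGE_SIZE            (1UL << PAGE_SHIFT)
#define FIRST_SIZE           (1UL << 30)             /* 1GB */
#define DIRECTMAP_VIRT_START 0x0000008000000000UL    /* placeholder */

int main(void)
{
    /* Example RAM bank starting at 0x80200000 (not 1GB aligned). */
    unsigned long base_mfn = 0x80200000UL >> PAGE_SHIFT;

    /* Round down to the previous first-level (1GB) boundary, in MFNs. */
    unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);

    /*
     * Offset the virtual start by the same distance, so that the virtual
     * and machine addresses of base_mfn share their low 30 bits and the
     * region remains eligible for 1GB/2MB block mappings.
     */
    unsigned long xenheap_virt_start =
        DIRECTMAP_VIRT_START + (base_mfn - mfn_gb) * PAGE_SIZE;

    unsigned long pa = base_mfn << PAGE_SHIFT;

    printf("phys %#lx, virt %#lx, low 30 bits equal: %s\n",
           pa, xenheap_virt_start,
           ((pa & (FIRST_SIZE - 1)) ==
            (xenheap_virt_start & (FIRST_SIZE - 1))) ? "yes" : "no");

    return 0;
}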

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>

---
    Changes in v4:
        - Fix typo
        - Add Stefano's reviewed-by

    Changes in v3:
        - Don't use 1GB mapping
        - Re-order code in setup_mm() in a separate patch

    Changes in v2:
        - New patch
---
 xen/arch/arm/mm.c | 87 ++++++++++-------------------------------------
 1 file changed, 18 insertions(+), 69 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index b019e4b35b55..65af44f42232 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -138,17 +138,6 @@ static DEFINE_PAGE_TABLE(cpu0_pgtable);
 static DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES);
 #endif
 
-#ifdef CONFIG_ARM_64
-/* The first page of the first level mapping of the xenheap. The
- * subsequent xenheap first level pages are dynamically allocated, but
- * we need this one to bootstrap ourselves. */
-static DEFINE_PAGE_TABLE(xenheap_first_first);
-/* The zeroeth level slot which uses xenheap_first_first. Used because
- * setup_xenheap_mappings otherwise relies on mfn_to_virt which isn't
- * valid for a non-xenheap mapping. */
-static __initdata int xenheap_first_first_slot = -1;
-#endif
-
 /* Common pagetable leaves */
 /* Second level page tables.
  *
@@ -831,77 +820,37 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
 void __init setup_xenheap_mappings(unsigned long base_mfn,
                                    unsigned long nr_mfns)
 {
-    lpae_t *first, pte;
-    unsigned long mfn, end_mfn;
-    vaddr_t vaddr;
-
-    /* Align to previous 1GB boundary */
-    mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1);
+    int rc;
 
     /* First call sets the xenheap physical and virtual offset. */
     if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
     {
+        unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
+
         xenheap_mfn_start = _mfn(base_mfn);
         xenheap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
+        /*
+         * The base address may not be aligned to the first level
+         * size (e.g. 1GB when using 4KB pages). This would prevent
+         * superpage mappings for all the regions because the virtual
+         * address and machine address should both be suitably aligned.
+         *
+         * Prevent that by offsetting the start of the xenheap virtual
+         * address.
+         */
         xenheap_virt_start = DIRECTMAP_VIRT_START +
-            (base_mfn - mfn) * PAGE_SIZE;
+            (base_mfn - mfn_gb) * PAGE_SIZE;
     }
 
     if ( base_mfn < mfn_x(xenheap_mfn_start) )
         panic("cannot add xenheap mapping at %lx below heap start %lx\n",
               base_mfn, mfn_x(xenheap_mfn_start));
 
-    end_mfn = base_mfn + nr_mfns;
-
-    /*
-     * Virtual address aligned to previous 1GB to match physical
-     * address alignment done above.
-     */
-    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
-
-    while ( mfn < end_mfn )
-    {
-        int slot = zeroeth_table_offset(vaddr);
-        lpae_t *p = &xen_pgtable[slot];
-
-        if ( p->pt.valid )
-        {
-            /* mfn_to_virt is not valid on the 1st 1st mfn, since it
-             * is not within the xenheap. */
-            first = slot == xenheap_first_first_slot ?
-                xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p));
-        }
-        else if ( xenheap_first_first_slot == -1)
-        {
-            /* Use xenheap_first_first to bootstrap the mappings */
-            first = xenheap_first_first;
-
-            pte = pte_of_xenaddr((vaddr_t)xenheap_first_first);
-            pte.pt.table = 1;
-            write_pte(p, pte);
-
-            xenheap_first_first_slot = slot;
-        }
-        else
-        {
-            mfn_t first_mfn = alloc_boot_pages(1, 1);
-
-            clear_page(mfn_to_virt(first_mfn));
-            pte = mfn_to_xen_entry(first_mfn, MT_NORMAL);
-            pte.pt.table = 1;
-            write_pte(p, pte);
-            first = mfn_to_virt(first_mfn);
-        }
-
-        pte = mfn_to_xen_entry(_mfn(mfn), MT_NORMAL);
-        /* TODO: Set pte.pt.contig when appropriate. */
-        write_pte(&first[first_table_offset(vaddr)], pte);
-
-        mfn += FIRST_SIZE>>PAGE_SHIFT;
-        vaddr += FIRST_SIZE;
-    }
-
-    flush_xen_tlb_local();
+    rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
+                          _mfn(base_mfn), nr_mfns,
+                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+    if ( rc )
+        panic("Unable to setup the xenheap mappings.\n");
 }
 #endif
 
-- 
2.32.0