
[PATCH v3 05/19] xen/arm: mm: Add support for the contiguous bit



From: Julien Grall <jgrall@xxxxxxxxxx>

In follow-up patches, we will use xen_pt_update() (or its callers) to
handle large mappings (e.g. the frametable, the xenheap). Those
mappings are also not going to be modified once created.

The page-table entries have a hint to indicate whether an entry is
part of a contiguous set of 16 entries (assuming a 4KB granule). When
the processor supports the hint, only one TLB entry will be created
per contiguous region.
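
With the 4KB granule used by Xen, a contiguous run therefore covers
16 x 4KB = 64KB at level 3 and 16 x 2MB = 32MB at level 2, and both
the virtual and machine addresses of the run need to be aligned to
that size for the hint to be usable.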

For now this is tied to _PAGE_BLOCK. We can untie it in the future
if there are use-cases where we may want to use _PAGE_BLOCK without
setting the contiguous bit (I couldn't think of any yet).
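
To give an idea of the expected use, a follow-up caller such as the
frametable mapping could request it along the lines below. This is
only an illustration (not part of this patch); the call site,
variables and flags are an assumption based on the rest of the series:

    /*
     * Hypothetical follow-up caller: map the frametable, allowing block
     * mappings (and hence the contiguous bit where alignment permits).
     */
    rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
                          frametable_size >> PAGE_SHIFT,
                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
    if ( rc )
        panic("Unable to map the frametable\n");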

Note that to avoid extra complexity, mappings with the contiguous
bit set cannot be removed. Given the expected use, this restriction
ought to be fine.

Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>

---
    Changes in v3:
        - New patch
---
 xen/arch/arm/include/asm/page.h |  4 ++
 xen/arch/arm/mm.c               | 80 ++++++++++++++++++++++++++++++---
 2 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index 07998df47bac..e7cd62190c7f 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -70,6 +70,7 @@
  * [5]   Page present
  * [6]   Only populate page tables
  * [7]   Superpage mappings is allowed
+ * [8]   Set contiguous bit (internal flag)
  */
 #define PAGE_AI_MASK(x) ((x) & 0x7U)
 
@@ -86,6 +87,9 @@
 #define _PAGE_BLOCK_BIT     7
 #define _PAGE_BLOCK         (1U << _PAGE_BLOCK_BIT)
 
+#define _PAGE_CONTIG_BIT    8
+#define _PAGE_CONTIG        (1U << _PAGE_CONTIG_BIT)
+
 /*
  * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not
  * meant to be used outside of this header.
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 3af69b396bd1..fd16c1541ce2 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1237,6 +1238,8 @@ static int xen_pt_update_entry(mfn_t root, unsigned long virt,
         /* Set permission */
         pte.pt.ro = PAGE_RO_MASK(flags);
         pte.pt.xn = PAGE_XN_MASK(flags);
+        /* Set contiguous bit */
+        pte.pt.contig = !!(flags & _PAGE_CONTIG);
     }
 
     write_pte(entry, pte);
@@ -1289,6 +1291,51 @@ static int xen_pt_mapping_level(unsigned long vfn, mfn_t mfn, unsigned long nr,
      return level;
 }
 
+#define XEN_PT_4K_NR_CONTIG 16
+
+/*
+ * Check whether the contiguous bit can be set. Return the number of
+ * contiguous entries allowed. If not allowed, return 1.
+ */
+static unsigned int xen_pt_check_contig(unsigned long vfn, mfn_t mfn,
+                                        unsigned int level, unsigned long left,
+                                        unsigned int flags)
+{
+    unsigned long nr_contig;
+
+    /*
+     * Allow the contiguous bit to be set when the caller requests a
+     * block mapping.
+     */
+    if ( !(flags & _PAGE_BLOCK) )
+        return 1;
+
+    /*
+     * We don't allow removing a mapping with the contiguous bit set,
+     * so shortcut the logic and directly return 1.
+     */
+    if ( mfn_eq(mfn, INVALID_MFN) )
+        return 1;
+
+    /*
+     * The number of contiguous entries varies depending on the page
+     * granularity used. The logic below assumes 4KB.
+     */
+    BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
+
+    /*
+     * In order to enable the contiguous bit, there should be enough
+     * pages left to map and both the virtual and physical addresses
+     * should be aligned to the size covered by 16 translation table
+     * entries.
+     */
+    nr_contig = BIT(XEN_PT_LEVEL_ORDER(level), UL) * XEN_PT_4K_NR_CONTIG;
+
+    if ( (left < nr_contig) || ((mfn_x(mfn) | vfn) & (nr_contig - 1)) )
+        return 1;
+
+    return XEN_PT_4K_NR_CONTIG;
+}
+
 static DEFINE_SPINLOCK(xen_pt_lock);
 
 static int xen_pt_update(unsigned long virt,
@@ -1322,6 +1369,12 @@ static int xen_pt_update(unsigned long virt,
         return -EINVAL;
     }
 
+    if ( flags & _PAGE_CONTIG )
+    {
+        mm_printk("_PAGE_CONTIG is an internal only flag.\n");
+        return -EINVAL;
+    }
+
     if ( !IS_ALIGNED(virt, PAGE_SIZE) )
     {
         mm_printk("The virtual address is not aligned to the page-size.\n");
@@ -1333,21 +1386,34 @@ static int xen_pt_update(unsigned long virt,
     while ( left )
     {
         unsigned int order, level;
+        unsigned int nr_contig;
+        unsigned int new_flags;
 
         level = xen_pt_mapping_level(vfn, mfn, left, flags);
         order = XEN_PT_LEVEL_ORDER(level);
 
         ASSERT(left >= BIT(order, UL));
 
-        rc = xen_pt_update_entry(root, pfn_to_paddr(vfn), mfn, level, flags);
-        if ( rc )
-            break;
+        /*
+         * Check whether we can set the contiguous bit and update the
+         * flags accordingly.
+         */
+        nr_contig = xen_pt_check_contig(vfn, mfn, level, left, flags);
+        new_flags = flags | ((nr_contig > 1) ? _PAGE_CONTIG : 0);
 
-        vfn += 1U << order;
-        if ( !mfn_eq(mfn, INVALID_MFN) )
-            mfn = mfn_add(mfn, 1U << order);
+        for ( ; nr_contig > 0; nr_contig-- )
+        {
+            rc = xen_pt_update_entry(root, pfn_to_paddr(vfn), mfn, level,
+                                     new_flags);
+            if ( rc )
+                break;
 
-        left -= (1U << order);
+            vfn += 1U << order;
+            if ( !mfn_eq(mfn, INVALID_MFN) )
+                mfn = mfn_add(mfn, 1U << order);
+
+            left -= (1U << order);
+        }
+
+        /* Stop as soon as an entry failed to be updated. */
+        if ( rc )
+            break;
     }
 
     /*
-- 
2.32.0