[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v4] xen/arm: Add RESERVE option to reserve non-leaf level table entries



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

On x86, for the pages mapped with the PAGE_HYPERVISOR attribute,
non-leaf page tables are allocated with valid pte entries,
and with the MAP_SMALL_PAGES attribute only non-leaf page tables are
allocated, with invalid (valid bit set to 0) pte entries.
However on arm this is not the case. On arm, for the pages
mapped with PAGE_HYPERVISOR and MAP_SMALL_PAGES, both
non-leaf and leaf level page tables are allocated with the valid bit
set in pte entries.

This behaviour in arm makes common vmap code fail to
allocate memory beyond 128MB as described below.

In vm_init, map_pages_to_xen() is called for mapping
vm_bitmap. Initially one page of vm_bitmap is allocated
and mapped using PAGE_HYPERVISOR attribute.
For the rest of vm_bitmap pages, MAP_SMALL_PAGES attribute
is used to map.

In ARM, for both PAGE_HYPERVISOR and MAP_SMALL_PAGES, the valid bit
is set to 1 in the pte entry for these mappings.

In vm_alloc(), map_pages_to_xen() fails for allocations beyond 128MB
because the mapping for the next vm_bitmap page was already established
in vm_init() with the valid bit set in the pte entry, so
map_pages_to_xen() on ARM returns an error.

With this patch, MAP_SMALL_PAGES is dropped and arch specific
populate_pt_range() api is introduced to populate non-leaf
page table entries for the requested pages. Added RESERVE option
to map non-leaf page table entries.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4:
 - Update commit title
 - Dropped all changes to page.h except dropping MAP_SMALL_PAGES
 - Dropped get_pte_flags() and is_pte_present()
v3:
 - Fix typos in commit message
 - Introduce arch specific api populate_pt_range
v2:
 - Rename PTE_INVALID to PAGE_PRESENT
 - Re-define PAGE_* macros with PAGE_PRESENT
 - Rename parameter ai to flags
 - Introduce macro to check present flag and extract attribute
   index values
---
 xen/arch/arm/mm.c          |   13 ++++++++++++-
 xen/arch/x86/mm.c          |    6 ++++++
 xen/common/vmap.c          |    2 +-
 xen/include/asm-arm/page.h |    1 -
 xen/include/xen/mm.h       |    7 ++++++-
 5 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 7d4ba0c..d103f8f 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -827,7 +827,8 @@ static int create_xen_table(lpae_t *entry)
 
 enum xenmap_operation {
     INSERT,
-    REMOVE
+    REMOVE,
+    RESERVE
 };
 
 static int create_xen_entries(enum xenmap_operation op,
@@ -859,12 +860,15 @@ static int create_xen_entries(enum xenmap_operation op,
 
         switch ( op ) {
             case INSERT:
+            case RESERVE:
                 if ( third[third_table_offset(addr)].pt.valid )
                 {
                     printk("create_xen_entries: trying to replace an existing 
mapping addr=%lx mfn=%lx\n",
                            addr, mfn);
                     return -EINVAL;
                 }
+                if ( op == RESERVE )
+                    break;
                 pte = mfn_to_xen_entry(mfn, ai);
                 pte.pt.table = 1;
                 write_pte(&third[third_table_offset(addr)], pte);
@@ -898,6 +902,13 @@ int map_pages_to_xen(unsigned long virt,
 {
     return create_xen_entries(INSERT, virt, mfn, nr_mfns, flags);
 }
+
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+                      unsigned long nr_mfns)
+{
+    return create_xen_entries(RESERVE, virt, mfn, nr_mfns, 0);
+}
+
 void destroy_xen_mappings(unsigned long v, unsigned long e)
 {
     create_xen_entries(REMOVE, v, 0, (e - v) >> PAGE_SHIFT, 0);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 45acfb5..2387cc0 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5701,6 +5701,12 @@ int map_pages_to_xen(
     return 0;
 }
 
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+                      unsigned long nr_mfns)
+{
+    return map_pages_to_xen(virt, mfn, nr_mfns, MAP_SMALL_PAGES);
+}
+
 void destroy_xen_mappings(unsigned long s, unsigned long e)
 {
     bool_t locking = system_state > SYS_STATE_boot;
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index 783cea3..739d468 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -40,7 +40,7 @@ void __init vm_init(void)
     bitmap_fill(vm_bitmap, vm_low);
 
     /* Populate page tables for the bitmap if necessary. */
-    map_pages_to_xen(va, 0, vm_low - nr, MAP_SMALL_PAGES);
+    populate_pt_range(va, 0, vm_low - nr);
 }
 
 void *vm_alloc(unsigned int nr, unsigned int align)
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 3e7b0ae..8de5e26 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -64,7 +64,6 @@
 #define PAGE_HYPERVISOR         (WRITEALLOC)
 #define PAGE_HYPERVISOR_NOCACHE (DEV_SHARED)
 #define PAGE_HYPERVISOR_WC      (DEV_WC)
-#define MAP_SMALL_PAGES         PAGE_HYPERVISOR
 
 /*
  * Stage 2 Memory Type.
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index fd60c9c..4bc3d8e 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -55,7 +55,12 @@ int map_pages_to_xen(
     unsigned long nr_mfns,
     unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
-
+/*
+ * Create only non-leaf page table entries for the
+ * page range in Xen virtual address space.
+ */
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+                      unsigned long nr_mfns);
 /* Claim handling */
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
 int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.