[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v1] boot allocator: Use arch helper for virt_to_mfn on DIRECTMAP



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

On ARM, virt_to_mfn uses the hardware for address
translation. So if the virtual address is not mapped, a
translation fault is raised.
On ARM with NUMA, while initializing the second memory node, a
panic is triggered from init_node_heap() when virt_to_mfn()
is called for a DIRECTMAP_VIRT region address.

init_node_heap() checks the MFN passed to it to ensure that the
MFN is less than the max MFN. For this, the check is made against
virt_to_mfn of the DIRECTMAP_VIRT_END region. Since the DIRECTMAP_VIRT
region is not mapped to any physical memory on ARM, this check fails.

In this patch, instead of calling virt_to_mfn(), the arch helper
arch_directmap_virt_to_mfn() is introduced. For ARM this arch helper
returns 0, and for x86 this helper performs virt_to_mfn.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
 xen/common/page_alloc.c    | 8 ++++++--
 xen/include/asm-arm/mm.h   | 8 ++++++++
 xen/include/asm-x86/page.h | 9 +++++++++
 3 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 42c20cb..c69a678 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -533,8 +533,12 @@ static unsigned long init_node_heap(int node, unsigned 
long mfn,
         needed = 0;
     }
 #ifdef DIRECTMAP_VIRT_END
+    /*
+     * DIRECTMAP_VIRT is not mapped by all architectures.
+     * Use the arch-specific helper for virt_to_mfn on DIRECTMAP memory.
+     */
     else if ( *use_tail && nr >= needed &&
-              (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) &&
+              (mfn + nr) <= (arch_directmap_virt_to_mfn(eva - 1) + 1) &&
               (!xenheap_bits ||
                !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
@@ -543,7 +547,7 @@ static unsigned long init_node_heap(int node, unsigned long 
mfn,
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) &&
+              (mfn + needed) <= (arch_directmap_virt_to_mfn(eva - 1) + 1) &&
               (!xenheap_bits ||
                !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 60ccbf3..aa34c7a 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -260,6 +260,14 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, 
unsigned int flags)
 #define virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
 #define mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
 
+/*
+ * ARM does not map DIRECTMAP_VIRT to physical memory.
+ * Hence return 0.
+ */
+static inline unsigned long arch_directmap_virt_to_mfn(unsigned long va)
+{
+    return 0;
+}
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 46faffc..10c19c1 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -374,6 +374,15 @@ perms_strictly_increased(uint32_t old_flags, uint32_t 
new_flags)
     return ((of | (of ^ nf)) == nf);
 }
 
+/*
+ * x86 maps DIRECTMAP_VIRT to physical memory. Get the MFN for the
+ * directmap memory region.
+ */
+static inline unsigned long arch_directmap_virt_to_mfn(unsigned long va)
+{
+    return virt_to_mfn(va);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.