[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 07/13] xen/arm: compile and initialize vmap



Rename EARLY_VMAP_VIRT_END and EARLY_VMAP_VIRT_START to
VMAP_VIRT_END and VMAP_VIRT_START.

Defining VMAP_VIRT_START triggers the compilation of common/vmap.c.

Define PAGE_HYPERVISOR and MAP_SMALL_PAGES (unused on ARM).

Implement map_pages_to_xen and destroy_xen_mappings.

Call vm_init from start_xen.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/mm.c            |   92 ++++++++++++++++++++++++++++++++++++++++-
 xen/arch/arm/setup.c         |    3 +
 xen/include/asm-arm/config.h |    4 +-
 xen/include/asm-arm/page.h   |    3 +
 xen/include/asm-x86/page.h   |    8 ----
 xen/include/xen/mm.h         |    7 +++
 6 files changed, 104 insertions(+), 13 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 4d4556b..240904e 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -33,6 +33,7 @@
 #include <xen/err.h>
 #include <asm/page.h>
 #include <asm/current.h>
+#include <asm/flushtlb.h>
 #include <public/memory.h>
 #include <xen/sched.h>
 #include <xsm/xsm.h>
@@ -429,9 +430,9 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
 /* Map the physical memory range start -  start + len into virtual
  * memory and return the virtual address of the mapping.
  * start has to be 2MB aligned.
- * len has to be < EARLY_VMAP_VIRT_END - EARLY_VMAP_VIRT_START.
+ * len has to be < VMAP_VIRT_END - VMAP_VIRT_START.
  */
-static unsigned long early_vmap_start = EARLY_VMAP_VIRT_END;
+static unsigned long early_vmap_start = VMAP_VIRT_END;
 void* __init early_ioremap(paddr_t start, size_t len, unsigned attributes)
 {
     paddr_t end = start + len;
@@ -444,7 +445,7 @@ void* __init early_ioremap(paddr_t start, size_t len, unsigned attributes)
     ASSERT(!(early_vmap_start & (~SECOND_MASK)));
 
     /* The range we need to map is too big */
-    if ( early_vmap_start >= EARLY_VMAP_VIRT_START )
+    if ( early_vmap_start >= VMAP_VIRT_START )
         return NULL;
 
     map_start = early_vmap_start;
@@ -467,6 +468,91 @@ void *__init arch_vmap_virt_end(void)
     return (void *)early_vmap_start;
 }
 
+static int create_xen_table(lpae_t *entry)
+{
+    void *p;
+    lpae_t pte;
+
+    p = alloc_xenheap_pages(0, 0);
+    if ( p == NULL )
+        return -ENOMEM;
+    clear_page(p);
+    pte = mfn_to_xen_entry(virt_to_mfn(p));
+    pte.pt.table = 1;
+    write_pte(entry, pte);
+    return 0;
+}
+
+enum xenmap_operation {
+    INSERT,
+    REMOVE
+};
+
+static int create_xen_entries(enum xenmap_operation op,
+                              unsigned long virt,
+                              unsigned long mfn,
+                              unsigned long nr_mfns)
+{
+    int rc;
+    unsigned long addr = virt, addr_end = addr + nr_mfns * PAGE_SIZE;
+    lpae_t pte;
+    lpae_t *third = NULL;
+
+    for(; addr < addr_end; addr += PAGE_SIZE, mfn++)
+    {
+        if ( !xen_second[second_linear_offset(addr)].pt.valid ||
+             !xen_second[second_linear_offset(addr)].pt.table )
+        {
+            rc = create_xen_table(&xen_second[second_linear_offset(addr)]);
+            if ( rc < 0 ) {
+                printk("create_xen_entries: L2 failed\n");
+                goto out;
+            }
+        }
+
+        BUG_ON(!xen_second[second_linear_offset(addr)].pt.valid);
+
+        third = __va((paddr_t)xen_second[second_linear_offset(addr)].pt.base
+                << PAGE_SHIFT);
+        if ( third[third_table_offset(addr)].pt.valid )
+            flush_tlb_local();
+
+        switch ( op ) {
+            case INSERT:
+                pte = mfn_to_xen_entry(mfn);
+                pte.pt.table = 1;
+                write_pte(&third[third_table_offset(addr)], pte);
+                break;
+            case REMOVE:
+                memset(&pte, 0x00, sizeof(pte));
+                write_pte(&third[third_table_offset(addr)], pte);
+                break;
+            default:
+                printk("create_xen_entries: invalid op\n");
+                break;
+        }
+    }
+    flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+
+    rc = 0;
+
+out:
+    return rc;
+}
+
+int map_pages_to_xen(unsigned long virt,
+                     unsigned long mfn,
+                     unsigned long nr_mfns,
+                     unsigned int flags)
+{
+    ASSERT(flags == PAGE_HYPERVISOR);
+    return create_xen_entries(INSERT, virt, mfn, nr_mfns);
+}
+void destroy_xen_mappings(unsigned long v, unsigned long e)
+{
+    create_xen_entries(REMOVE, v, 0, (e - v) >> PAGE_SHIFT);
+}
+
 enum mg { mg_clear, mg_ro, mg_rw, mg_rx };
 static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
 {
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index cfe3d94..59646d6 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -34,6 +34,7 @@
 #include <xen/keyhandler.h>
 #include <xen/cpu.h>
 #include <xen/pfn.h>
+#include <xen/vmap.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/setup.h>
@@ -480,6 +481,8 @@ void __init start_xen(unsigned long boot_phys_offset,
 
     console_init_postirq();
 
+    vm_init();
+
     do_presmp_initcalls();
 
     for_each_present_cpu ( i )
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 8be8563..1f87acc 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -95,11 +95,11 @@
 #define FIXMAP_ADDR(n)        (mk_unsigned_long(0x00400000) + (n) * PAGE_SIZE)
 #define BOOT_MISC_VIRT_START   mk_unsigned_long(0x00600000)
 #define FRAMETABLE_VIRT_START  mk_unsigned_long(0x02000000)
-#define EARLY_VMAP_VIRT_START  mk_unsigned_long(0x10000000)
+#define VMAP_VIRT_START        mk_unsigned_long(0x10000000)
 #define XENHEAP_VIRT_START     mk_unsigned_long(0x40000000)
 #define DOMHEAP_VIRT_START     mk_unsigned_long(0x80000000)
 
-#define EARLY_VMAP_VIRT_END    XENHEAP_VIRT_START
+#define VMAP_VIRT_END          XENHEAP_VIRT_START
 #define HYPERVISOR_VIRT_START  XEN_VIRT_START
 
 #define DOMHEAP_ENTRIES        1024  /* 1024 2MB mapping slots */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 14e63eb..5287a92 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -58,6 +58,9 @@
 #define DEV_WC        BUFFERABLE
 #define DEV_CACHED    WRITEBACK
 
+#define PAGE_HYPERVISOR         (MATTR_MEM)
+#define MAP_SMALL_PAGES         PAGE_HYPERVISOR
+
 /*
  * Stage 2 Memory Type.
  *
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index b2f3859..e53e1e5 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -338,14 +338,6 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
 
 extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
 
-/* Map machine page range in Xen virtual address space. */
-int map_pages_to_xen(
-    unsigned long virt,
-    unsigned long mfn,
-    unsigned long nr_mfns,
-    unsigned int flags);
-void destroy_xen_mappings(unsigned long v, unsigned long e);
-
 /* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
 static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
 {
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 28512fb..efc45c7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,13 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
+/* Map machine page range in Xen virtual address space. */
+int map_pages_to_xen(
+    unsigned long virt,
+    unsigned long mfn,
+    unsigned long nr_mfns,
+    unsigned int flags);
+void destroy_xen_mappings(unsigned long v, unsigned long e);
 
 /* Claim handling */
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.