[Xen-changelog] [xen-unstable] [MINIOS]Mapping page frames on demand added to the memory management.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [MINIOS]Mapping page frames on demand added to the memory management.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 05 Jul 2006 16:40:17 +0000
Delivery-date: Wed, 05 Jul 2006 09:42:24 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 4db818a7dc3f3bb3b9dace0a7fa8a1f80f2b0d26
# Parent  462d6e4cb29a620685f7c382a2372edcc99e2e4a
[MINIOS]Mapping page frames on demand added to the memory management.
Signed-off-by: Steven Smith <sos22@xxxxxxxxx>
Signed-off-by: Grzegorz Milos <gm281@xxxxxxxxx>
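
The new interface exported by this patch is map_frames(): pass an array of up to 16 machine frame numbers and it returns a virtual address in the demand-map hole where those frames appear back to back, or NULL on failure. A minimal caller-side sketch follows; map_frames(), do_exit(), and PAGE_SIZE are mini-os names, while get_shared_mfn() is a hypothetical stand-in for however the MFNs were obtained (grant tables, a xenstore handshake, ...):

    /* Sketch: map two foreign machine frames into our address space.
     * get_shared_mfn() is hypothetical -- a stand-in for whatever
     * actually produced the MFNs. */
    static void *map_two_shared_frames(void)
    {
        unsigned long mfns[2];
        void *va;

        mfns[0] = get_shared_mfn(0);        /* hypothetical helper */
        mfns[1] = get_shared_mfn(1);        /* hypothetical helper */

        va = map_frames(mfns, 2);           /* new API in this patch */
        if (va == NULL)                     /* no free run, or the   */
            do_exit();                      /* mmu_update failed     */

        /* The frames now appear consecutively at va and va + PAGE_SIZE. */
        return va;
    }
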
---
 extras/mini-os/include/mm.h |    8 ++
 extras/mini-os/mm.c         |  125 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 129 insertions(+), 4 deletions(-)
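
For orientation before the diff: init_demand_mapping_area() below places the demand-map hole just above the domain's last populated pfn, rounded up with (max_pfn + 1024) & ~1023 on x86-32. A standalone worked example of that arithmetic (the 16MB figure is hypothetical), showing why the code adds a full 1024 rather than 1023:

    #include <stdio.h>

    #define L1_PAGETABLE_ENTRIES 1024UL   /* x86-32, 4kB pages */

    int main(void)
    {
        unsigned long max_pfn = 4096;     /* hypothetical: exactly 16MB */

        /* Adding a full table's worth (1024) before masking guarantees
         * the result is strictly above max_pfn even when max_pfn is
         * already 4MB-aligned; adding 1023 would leave an aligned
         * max_pfn exactly where it started. */
        unsigned long hole = (max_pfn + L1_PAGETABLE_ENTRIES) &
                             ~(L1_PAGETABLE_ENTRIES - 1);

        printf("demand map hole starts at pfn %lu\n", hole);  /* 5120 */
        return 0;
    }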

diff -r 462d6e4cb29a -r 4db818a7dc3f extras/mini-os/include/mm.h
--- a/extras/mini-os/include/mm.h       Wed Jul 05 14:27:27 2006 +0100
+++ b/extras/mini-os/include/mm.h       Wed Jul 05 14:29:13 2006 +0100
@@ -196,9 +196,11 @@ static __inline__ paddr_t machine_to_phy
 #define to_virt(x)                 ((void *)((unsigned long)(x)+VIRT_START))
 
 #define virt_to_pfn(_virt)         (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt)         (pfn_to_mfn(virt_to_pfn(_virt)))
 #define mach_to_virt(_mach)        (to_virt(machine_to_phys(_mach)))
+#define virt_to_mach(_virt)        (phys_to_machine(to_phys(_virt)))
 #define mfn_to_virt(_mfn)          (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
-#define pfn_to_virt(_pfn)          (to_virt(_pfn << PAGE_SHIFT))
+#define pfn_to_virt(_pfn)          (to_virt((_pfn) << PAGE_SHIFT))
 
 /* Pagetable walking. */
 #define pte_to_mfn(_pte)           (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
@@ -206,7 +208,7 @@ static __inline__ paddr_t machine_to_phy
 
 void init_mm(void);
 unsigned long alloc_pages(int order);
-#define alloc_page()    alloc_pages(0);
+#define alloc_page()    alloc_pages(0)
 void free_pages(void *pointer, int order);
 
 static __inline__ int get_order(unsigned long size)
@@ -219,4 +221,6 @@ static __inline__ int get_order(unsigned
 }
 
 
+void *map_frames(unsigned long *f, unsigned long n);
+
 #endif /* _MM_H_ */
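
Two of the hunks above are one-character bug fixes worth a word: parenthesising _pfn in pfn_to_virt(), and dropping the stray semicolon from alloc_page(). A standalone illustration of the precedence bug (simplified macros, not the real to_virt()):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define old_pfn_to_addr(_pfn)  (_pfn << PAGE_SHIFT)    /* pre-patch  */
    #define new_pfn_to_addr(_pfn)  ((_pfn) << PAGE_SHIFT)  /* post-patch */

    int main(void)
    {
        unsigned long pfn = 0x1234, mask = 0xff;

        /* '<<' binds tighter than '&', so the old macro expands
         * pfn & mask << PAGE_SHIFT as pfn & (mask << PAGE_SHIFT):
         * the mask gets shifted, not the masked pfn. */
        printf("old: %lx\n", old_pfn_to_addr(pfn & mask));  /* 0x1000  */
        printf("new: %lx\n", new_pfn_to_addr(pfn & mask));  /* 0x34000 */
        return 0;
    }

The alloc_page() fix is similar housekeeping: with the old trailing semicolon, "if (c) p = alloc_page(); else ..." expanded to two statements, so the empty statement after the first ';' detached the else and the compiler rejected it.
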
diff -r 462d6e4cb29a -r 4db818a7dc3f extras/mini-os/mm.c
--- a/extras/mini-os/mm.c       Wed Jul 05 14:27:27 2006 +0100
+++ b/extras/mini-os/mm.c       Wed Jul 05 14:29:13 2006 +0100
@@ -343,7 +343,7 @@ void free_pages(void *pointer, int order
                 break;
             
             /* Merge with successor */
-            freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask);
+            freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask) - 1;
         }
         
         /* We are committed to merging, unlink the chunk */
@@ -612,6 +612,107 @@ void mem_test(unsigned long *start_add, 
 
 }
 
+static pgentry_t *demand_map_pgt;
+static void *demand_map_area_start;
+
+static void init_demand_mapping_area(unsigned long max_pfn)
+{
+    unsigned long mfn;
+    pgentry_t *tab;
+    unsigned long start_addr;
+    unsigned long pt_pfn;
+    unsigned offset;
+
+    /* Round up to four megs.  + 1024 rather than + 1023 since we want
+       to be sure we don't end up in the same place we started. */
+    max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
+    if (max_pfn == 0 ||
+            (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
+            HYPERVISOR_VIRT_START) {
+        printk("Too much memory; no room for demand map hole.\n");
+        do_exit();
+    }
+
+    demand_map_area_start = pfn_to_virt(max_pfn);
+    printk("Demand map pfns start at %lx (%p).\n", max_pfn,
+            demand_map_area_start);
+    start_addr = (unsigned long)demand_map_area_start;
+
+    tab = (pgentry_t *)start_info.pt_base;
+    mfn = virt_to_mfn(start_info.pt_base);
+    pt_pfn = virt_to_pfn(alloc_page());
+
+#if defined(__x86_64__)
+    offset = l4_table_offset(start_addr);
+    if (!(tab[offset] & _PAGE_PRESENT)) {
+        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+        pt_pfn = virt_to_pfn(alloc_page());
+    }
+    ASSERT(tab[offset] & _PAGE_PRESENT);
+    mfn = pte_to_mfn(tab[offset]);
+    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+    offset = l3_table_offset(start_addr);
+    if (!(tab[offset] & _PAGE_PRESENT)) {
+        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+        pt_pfn = virt_to_pfn(alloc_page());
+    }
+    ASSERT(tab[offset] & _PAGE_PRESENT);
+    mfn = pte_to_mfn(tab[offset]);
+    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+    offset = l2_table_offset(start_addr);
+    if (tab[offset] & _PAGE_PRESENT) {
+        printk("Demand map area already has a page table covering it?\n");
+        BUG();
+    }
+    demand_map_pgt = pfn_to_virt(pt_pfn);
+    new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+    ASSERT(tab[offset] & _PAGE_PRESENT);
+}
+
+void *map_frames(unsigned long *f, unsigned long n)
+{
+    unsigned long x;
+    unsigned long y = 0;
+    mmu_update_t mmu_updates[16];
+    int rc;
+
+    if (n > 16) {
+        printk("Tried to map too many (%ld) frames at once.\n", n);
+        return NULL;
+    }
+
+    /* Find a run of n contiguous free entries in the demand map */
+    for (x = 0; x <= L1_PAGETABLE_ENTRIES - n; x += y + 1) {
+        for (y = 0; y < n; y++)
+            if (demand_map_pgt[x + y] & _PAGE_PRESENT)
+                break;
+        if (y == n)
+            break;
+    }
+    if (y != n) {
+        printk("Failed to map %ld frames!\n", n);
+        return NULL;
+    }
+
+    /* Found it at x.  Map it in. */
+    for (y = 0; y < n; y++) {
+        mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
+        mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
+    }
+
+    rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
+    if (rc < 0) {
+        printk("Map %ld failed: %d.\n", n, rc);
+        return NULL;
+    } else {
+        return (void *)((unsigned long)demand_map_area_start +
+                x * PAGE_SIZE);
+    }
+}
+
 void init_mm(void)
 {
 
@@ -643,4 +744,24 @@ void init_mm(void)
            (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn));
     init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
     printk("MM: done\n");
-}
+
+    init_demand_mapping_area(max_pfn);
+    printk("Initialised demand area.\n");
+}
+
+void sanity_check(void)
+{
+    int x;
+    chunk_head_t *head;
+
+    for (x = 0; x < FREELIST_SIZE; x++) {
+        for (head = free_head[x]; !FREELIST_EMPTY(head); head = head->next) {
+            ASSERT(!allocated_in_map(virt_to_pfn(head)));
+            if (head->next)
+                ASSERT(head->next->pprev == &head->next);
+        }
+        if (free_head[x]) {
+            ASSERT(free_head[x]->pprev == &free_head[x]);
+        }
+    }
+}
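
A note on the allocation strategy in map_frames() above: it is a first-fit scan for n consecutive unused slots in the demand-map page table, where the outer loop's "x += y + 1" stride resumes just past the occupied slot that ended the inner probe. A self-contained sketch of the same scan, with a plain int array standing in for the PTE present bits (TABLE_ENTRIES and the sample data are hypothetical):

    #include <stdio.h>

    #define TABLE_ENTRIES 16   /* stands in for L1_PAGETABLE_ENTRIES */

    /* First-fit scan for n consecutive zero slots in used[], as in
     * map_frames(); returns the start index, or -1 if no run exists. */
    static long find_run(const int *used, unsigned long n)
    {
        unsigned long x, y = 0;

        for (x = 0; x + n <= TABLE_ENTRIES; x += y + 1) {
            for (y = 0; y < n; y++)
                if (used[x + y])
                    break;
            if (y == n)
                return (long)x;
        }
        return -1;
    }

    int main(void)
    {
        int used[TABLE_ENTRIES] = { 1, 0, 0, 1, 0, 0, 0, 0 };
        printf("run of 3 starts at %ld\n", find_run(used, 3)); /* 4 */
        return 0;
    }

Because the stride skips everything the inner loop already probed, each slot is examined at most once and the scan is linear in the table size.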

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
