# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID a3c6479c87ef06c7bc8a743ef8360211dcda2532
# Parent 60b60f75a2219c843277311c77f48285495affa7
[MINIOS] Refactored mm.c and sched.c. x86 arch-specific code was moved to
arch/x86/mm.c and arch/x86/sched.c. Header files were also refactored:
arch-specific code was moved to include/x86/arch_mm.h and
include/x86/arch_sched.h.
Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Grzegorz Milos <gm281@xxxxxxxxx>
---
extras/mini-os/Makefile | 7
extras/mini-os/arch/x86/mm.c | 428 ++++++++++++++++++++++++++++++++
extras/mini-os/arch/x86/sched.c | 150 +++++++++++
extras/mini-os/include/mm.h | 175 -------------
extras/mini-os/include/sched.h | 26 +
extras/mini-os/include/x86/arch_mm.h | 209 +++++++++++++++
extras/mini-os/include/x86/arch_sched.h | 58 ++++
extras/mini-os/mm.c | 376 ----------------------------
extras/mini-os/sched.c | 137 ----------
9 files changed, 875 insertions(+), 691 deletions(-)
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/Makefile
--- a/extras/mini-os/Makefile Wed Nov 22 10:10:29 2006 +0000
+++ b/extras/mini-os/Makefile Wed Nov 22 10:11:36 2006 +0000
@@ -55,9 +55,10 @@ endif
endif
ifeq ($(TARGET_ARCH),ia64)
-CFLAGS += -mfixed-range=f12-f15,f32-f127
-ASFLAGS += -x assembler-with-cpp -ansi -Wall
-ASFLAGS += -mfixed-range=f12-f15,f32-f127
+CFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp
+ASFLAGS += -x assembler-with-cpp -Wall
+ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer
+ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp
ARCH_LINKS = IA64_LINKS # Special link on ia64 needed
define arch_links
[ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/include/mm.h
--- a/extras/mini-os/include/mm.h Wed Nov 22 10:10:29 2006 +0000
+++ b/extras/mini-os/include/mm.h Wed Nov 22 10:11:36 2006 +0000
@@ -29,182 +29,15 @@
#include <xen/arch-x86_32.h>
#elif defined(__x86_64__)
#include <xen/arch-x86_64.h>
+#elif defined(__ia64__)
+#include <xen/arch-ia64.h>
#else
#error "Unsupported architecture"
#endif
#include <lib.h>
+#include <arch_mm.h>
-#define L1_FRAME 1
-#define L2_FRAME 2
-#define L3_FRAME 3
-
-#define L1_PAGETABLE_SHIFT 12
-
-#if defined(__i386__)
-
-#if !defined(CONFIG_X86_PAE)
-
-#define L2_PAGETABLE_SHIFT 22
-
-#define L1_PAGETABLE_ENTRIES 1024
-#define L2_PAGETABLE_ENTRIES 1024
-
-#define PADDR_BITS 32
-#define PADDR_MASK (~0UL)
-
-#define NOT_L1_FRAMES 1
-#define PRIpte "08lx"
-typedef unsigned long pgentry_t;
-
-#else /* defined(CONFIG_X86_PAE) */
-
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-
-#define L1_PAGETABLE_ENTRIES 512
-#define L2_PAGETABLE_ENTRIES 512
-#define L3_PAGETABLE_ENTRIES 4
-
-#define PADDR_BITS 44
-#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
-
-#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
-
-/*
- * If starting from virtual address greater than 0xc0000000,
- * this value will be 2 to account for final mid-level page
- * directory which is always mapped in at this location.
- */
-#define NOT_L1_FRAMES 3
-#define PRIpte "016llx"
-typedef uint64_t pgentry_t;
-
-#endif /* !defined(CONFIG_X86_PAE) */
-
-#elif defined(__x86_64__)
-
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-#define L4_PAGETABLE_SHIFT 39
-
-#define L1_PAGETABLE_ENTRIES 512
-#define L2_PAGETABLE_ENTRIES 512
-#define L3_PAGETABLE_ENTRIES 512
-#define L4_PAGETABLE_ENTRIES 512
-
-/* These are page-table limitations. Current CPUs support only 40-bit phys. */
-#define PADDR_BITS 52
-#define VADDR_BITS 48
-#define PADDR_MASK ((1UL << PADDR_BITS)-1)
-#define VADDR_MASK ((1UL << VADDR_BITS)-1)
-
-#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
-#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
-
-#define NOT_L1_FRAMES 3
-#define PRIpte "016lx"
-typedef unsigned long pgentry_t;
-
-#endif
-
-#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
-
-/* Given a virtual address, get an entry offset into a page table. */
-#define l1_table_offset(_a) \
- (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
-#define l2_table_offset(_a) \
- (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
-#define l3_table_offset(_a) \
- (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
-#endif
-#if defined(__x86_64__)
-#define l4_table_offset(_a) \
- (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
-#endif
-
-#define _PAGE_PRESENT 0x001UL
-#define _PAGE_RW 0x002UL
-#define _PAGE_USER 0x004UL
-#define _PAGE_PWT 0x008UL
-#define _PAGE_PCD 0x010UL
-#define _PAGE_ACCESSED 0x020UL
-#define _PAGE_DIRTY 0x040UL
-#define _PAGE_PAT 0x080UL
-#define _PAGE_PSE 0x080UL
-#define _PAGE_GLOBAL 0x100UL
-
-#if defined(__i386__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
-#if defined(CONFIG_X86_PAE)
-#define L3_PROT (_PAGE_PRESENT)
-#endif /* CONFIG_X86_PAE */
-#elif defined(__x86_64__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#endif /* __i386__ || __x86_64__ */
-
-#ifndef CONFIG_X86_PAE
-#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
-#else
-#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT)
-#endif
-#define PAGE_SHIFT L1_PAGETABLE_SHIFT
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
-#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
-#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/* Definitions for machine and pseudophysical addresses. */
-#ifdef CONFIG_X86_PAE
-typedef unsigned long long paddr_t;
-typedef unsigned long long maddr_t;
-#else
-typedef unsigned long paddr_t;
-typedef unsigned long maddr_t;
-#endif
-
-extern unsigned long *phys_to_machine_mapping;
-extern char _text, _etext, _edata, _end;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-static __inline__ maddr_t phys_to_machine(paddr_t phys)
-{
- maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
- return machine;
-}
-
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static __inline__ paddr_t machine_to_phys(maddr_t machine)
-{
- paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
- return phys;
-}
-
-#define VIRT_START ((unsigned long)&_text)
-
-#define to_phys(x) ((unsigned long)(x)-VIRT_START)
-#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
-
-#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
-#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
-#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
-#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
-#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
-#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
-
-/* Pagetable walking. */
-#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
-#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
void init_mm(void);
unsigned long alloc_pages(int order);
@@ -220,6 +53,8 @@ static __inline__ int get_order(unsigned
return order;
}
+void arch_init_demand_mapping_area(unsigned long max_pfn);
+void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p);
void *map_frames(unsigned long *f, unsigned long n);
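
For illustration (not part of the changeset): the hunk above keeps the generic
get_order() helper in mm.h. A minimal userspace sketch of that computation,
assuming the stock Mini-OS body (only "return order;" is visible in the
context lines); the demo main() is hypothetical:

#include <stdio.h>

#define PAGE_SHIFT 12   /* mirrors L1_PAGETABLE_SHIFT */

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

int main(void)
{
    printf("get_order(4096)  = %d\n", get_order(4096));   /* 0: one page  */
    printf("get_order(4097)  = %d\n", get_order(4097));   /* 1: two pages */
    printf("get_order(65536) = %d\n", get_order(65536));  /* 4: 16 pages  */
    return 0;
}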
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/include/sched.h
--- a/extras/mini-os/include/sched.h Wed Nov 22 10:10:29 2006 +0000
+++ b/extras/mini-os/include/sched.h Wed Nov 22 10:11:36 2006 +0000
@@ -3,36 +3,40 @@
#include <list.h>
#include <time.h>
+#include <arch_sched.h>
struct thread
{
char *name;
char *stack;
+#if !defined(__ia64__)
unsigned long sp; /* Stack pointer */
unsigned long ip; /* Instruction pointer */
+#else /* !defined(__ia64__) */
+ thread_regs_t regs;
+#endif /* !defined(__ia64__) */
struct list_head thread_list;
u32 flags;
s_time_t wakeup_time;
};
+extern struct thread *idle_thread;
+void idle_thread_fn(void *unused);
+#define RUNNABLE_FLAG 0x00000001
+
+#define is_runnable(_thread) (_thread->flags & RUNNABLE_FLAG)
+#define set_runnable(_thread) (_thread->flags |= RUNNABLE_FLAG)
+#define clear_runnable(_thread) (_thread->flags &= ~RUNNABLE_FLAG)
+
+#define switch_threads(prev, next) arch_switch_threads(prev, next)
+
void init_sched(void);
void run_idle_thread(void);
struct thread* create_thread(char *name, void (*function)(void *), void *data);
void schedule(void);
-static inline struct thread* get_current(void)
-{
- struct thread **current;
-#ifdef __i386__
- __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
-#else
- __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL));
-#endif
- return *current;
-}
-
#define current get_current()
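
For illustration (not part of the changeset): the RUNNABLE_FLAG macros that
sched.h now exports are plain bit operations. A standalone sketch, with a toy
struct standing in for struct thread:

#include <stdio.h>

#define RUNNABLE_FLAG 0x00000001

struct toy_thread { unsigned int flags; };

#define is_runnable(t)    ((t)->flags & RUNNABLE_FLAG)
#define set_runnable(t)   ((t)->flags |= RUNNABLE_FLAG)
#define clear_runnable(t) ((t)->flags &= ~RUNNABLE_FLAG)

int main(void)
{
    struct toy_thread th = { 0 };
    set_runnable(&th);
    printf("after set:   %d\n", is_runnable(&th) != 0);   /* prints 1 */
    clear_runnable(&th);
    printf("after clear: %d\n", is_runnable(&th) != 0);   /* prints 0 */
    return 0;
}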
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/mm.c
--- a/extras/mini-os/mm.c Wed Nov 22 10:10:29 2006 +0000
+++ b/extras/mini-os/mm.c Wed Nov 22 10:11:36 2006 +0000
@@ -48,10 +48,6 @@
#define DEBUG(_f, _a...) ((void)0)
#endif
-unsigned long *phys_to_machine_mapping;
-extern char *stack;
-extern void page_walk(unsigned long virt_addr);
-
/*********************
* ALLOCATION BITMAP
* One bit per page of memory. Bit set => page is allocated.
@@ -226,11 +222,11 @@ static void init_page_allocator(unsigned
/* All allocated by default. */
memset(alloc_bitmap, ~0, bitmap_size);
/* Free up the memory we've been given to play with. */
- map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
+ map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
/* The buddy lists are addressed in high memory. */
- min += VIRT_START;
- max += VIRT_START;
+ min = (unsigned long) to_virt(min);
+ max = (unsigned long) to_virt(max);
while ( range != 0 )
{
@@ -297,7 +293,7 @@ unsigned long alloc_pages(int order)
free_head[i] = spare_ch;
}
- map_alloc(to_phys(alloc_ch)>>PAGE_SHIFT, 1<<order);
+ map_alloc(PHYS_PFN(to_phys(alloc_ch)), 1<<order);
return((unsigned long)alloc_ch);
@@ -365,350 +361,6 @@ void free_pages(void *pointer, int order
}
-void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
- unsigned long offset, unsigned long level)
-{
- pgentry_t *tab = (pgentry_t *)start_info.pt_base;
- unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
- unsigned long prot_e, prot_t, pincmd;
- mmu_update_t mmu_updates[1];
- struct mmuext_op pin_request;
-
- DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
- "prev_l_mfn=%lx, offset=%lx",
- level, *pt_pfn, prev_l_mfn, offset);
-
- /* We need to clear the page, otherwise we might fail to map it
- as a page table page */
- memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);
-
- switch ( level )
- {
- case L1_FRAME:
- prot_e = L1_PROT;
- prot_t = L2_PROT;
- pincmd = MMUEXT_PIN_L1_TABLE;
- break;
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- case L2_FRAME:
- prot_e = L2_PROT;
- prot_t = L3_PROT;
- pincmd = MMUEXT_PIN_L2_TABLE;
- break;
-#endif
-#if defined(__x86_64__)
- case L3_FRAME:
- prot_e = L3_PROT;
- prot_t = L4_PROT;
- pincmd = MMUEXT_PIN_L3_TABLE;
- break;
-#endif
- default:
- printk("new_pt_frame() called with invalid level number %d\n", level);
- do_exit();
- break;
- }
- /* Update the entry */
-#if defined(__x86_64__)
- tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
- tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
-#endif
-#if defined(CONFIG_X86_PAE)
- tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
-#endif
-
- mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
- sizeof(pgentry_t) * l1_table_offset(pt_page);
- mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
- (prot_e & ~_PAGE_RW);
- if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
- {
- printk("PTE for new page table page could not be updated\n");
- do_exit();
- }
-
- /* Pin the page to provide correct protection */
- pin_request.cmd = pincmd;
- pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
- if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: pinning failed\n");
- do_exit();
- }
-
- /* Now fill the new page table page with entries.
- Update the page directory as well. */
- mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
- if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: mmu_update failed\n");
- do_exit();
- }
- *pt_pfn += 1;
-}
-
-/* Checks if a pagetable frame is needed (if weren't allocated by Xen) */
-static int need_pt_frame(unsigned long virt_address, int level)
-{
- unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
-#if defined(__x86_64__)
- unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
-#else
- unsigned long hyp_virt_end = 0xffffffff;
-#endif
-
- /* In general frames will _not_ be needed if they were already
- allocated to map the hypervisor into our VA space */
-#if defined(__x86_64__)
- if(level == L3_FRAME)
- {
- if(l4_table_offset(virt_address) >=
- l4_table_offset(hyp_virt_start) &&
- l4_table_offset(virt_address) <=
- l4_table_offset(hyp_virt_end))
- return 0;
- return 1;
- } else
-#endif
-
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- if(level == L2_FRAME)
- {
-#if defined(__x86_64__)
- if(l4_table_offset(virt_address) >=
- l4_table_offset(hyp_virt_start) &&
- l4_table_offset(virt_address) <=
- l4_table_offset(hyp_virt_end))
-#endif
- if(l3_table_offset(virt_address) >=
- l3_table_offset(hyp_virt_start) &&
- l3_table_offset(virt_address) <=
- l3_table_offset(hyp_virt_end))
- return 0;
-
- return 1;
- } else
-#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
-
- /* Always need l1 frames */
- if(level == L1_FRAME)
- return 1;
-
- printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
- level, hyp_virt_start, hyp_virt_end);
- return -1;
-}
-
-void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
-{
- unsigned long start_address, end_address;
- unsigned long pfn_to_map, pt_pfn = *start_pfn;
- static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
- pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
- unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
- unsigned long offset;
- int count = 0;
-
- pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
-
- if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
- {
- printk("WARNING: Mini-OS trying to use Xen virtual space. "
- "Truncating memory from %dMB to ",
- ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
- *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
- printk("%dMB\n",
- ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
- }
-
- start_address = (unsigned long)pfn_to_virt(pfn_to_map);
- end_address = (unsigned long)pfn_to_virt(*max_pfn);
-
- /* We worked out the virtual memory range to map, now mapping loop */
- printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
-
- while(start_address < end_address)
- {
- tab = (pgentry_t *)start_info.pt_base;
- mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
-
-#if defined(__x86_64__)
- offset = l4_table_offset(start_address);
- /* Need new L3 pt frame */
- if(!(start_address & L3_MASK))
- if(need_pt_frame(start_address, L3_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- offset = l3_table_offset(start_address);
- /* Need new L2 pt frame */
- if(!(start_address & L2_MASK))
- if(need_pt_frame(start_address, L2_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
- offset = l2_table_offset(start_address);
- /* Need new L1 pt frame */
- if(!(start_address & L1_MASK))
- if(need_pt_frame(start_address, L1_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- offset = l1_table_offset(start_address);
-
- mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
- count++;
- if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
- {
- if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
- {
- printk("PTE could not be updated\n");
- do_exit();
- }
- count = 0;
- }
- start_address += PAGE_SIZE;
- }
- *start_pfn = pt_pfn;
-}
-
-
-void mem_test(unsigned long *start_add, unsigned long *end_add)
-{
- unsigned long mask = 0x10000;
- unsigned long *pointer;
-
- for(pointer = start_add; pointer < end_add; pointer++)
- {
- if(!(((unsigned long)pointer) & 0xfffff))
- {
- printk("Writing to %lx\n", pointer);
- page_walk((unsigned long)pointer);
- }
- *pointer = (unsigned long)pointer & ~mask;
- }
-
- for(pointer = start_add; pointer < end_add; pointer++)
- {
- if(((unsigned long)pointer & ~mask) != *pointer)
- printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
- (unsigned long)pointer,
- *pointer,
- ((unsigned long)pointer & ~mask));
- }
-
-}
-
-static pgentry_t *demand_map_pgt;
-static void *demand_map_area_start;
-
-static void init_demand_mapping_area(unsigned long max_pfn)
-{
- unsigned long mfn;
- pgentry_t *tab;
- unsigned long start_addr;
- unsigned long pt_pfn;
- unsigned offset;
-
- /* Round up to four megs. + 1024 rather than + 1023 since we want
- to be sure we don't end up in the same place we started. */
- max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
- if (max_pfn == 0 ||
- (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
- HYPERVISOR_VIRT_START) {
- printk("Too much memory; no room for demand map hole.\n");
- do_exit();
- }
-
- demand_map_area_start = pfn_to_virt(max_pfn);
- printk("Demand map pfns start at %lx (%p).\n", max_pfn,
- demand_map_area_start);
- start_addr = (unsigned long)demand_map_area_start;
-
- tab = (pgentry_t *)start_info.pt_base;
- mfn = virt_to_mfn(start_info.pt_base);
- pt_pfn = virt_to_pfn(alloc_page());
-
-#if defined(__x86_64__)
- offset = l4_table_offset(start_addr);
- if (!(tab[offset] & _PAGE_PRESENT)) {
- new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
- pt_pfn = virt_to_pfn(alloc_page());
- }
- ASSERT(tab[offset] & _PAGE_PRESENT);
- mfn = pte_to_mfn(tab[offset]);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- offset = l3_table_offset(start_addr);
- if (!(tab[offset] & _PAGE_PRESENT)) {
- new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
- pt_pfn = virt_to_pfn(alloc_page());
- }
- ASSERT(tab[offset] & _PAGE_PRESENT);
- mfn = pte_to_mfn(tab[offset]);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
- offset = l2_table_offset(start_addr);
- if (tab[offset] & _PAGE_PRESENT) {
- printk("Demand map area already has a page table covering it?\n");
- BUG();
- }
- demand_map_pgt = pfn_to_virt(pt_pfn);
- new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
- ASSERT(tab[offset] & _PAGE_PRESENT);
-}
-
-void *map_frames(unsigned long *f, unsigned long n)
-{
- unsigned long x;
- unsigned long y = 0;
- mmu_update_t mmu_updates[16];
- int rc;
-
- if (n > 16) {
- printk("Tried to map too many (%ld) frames at once.\n", n);
- return NULL;
- }
-
- /* Find a run of n contiguous frames */
- for (x = 0; x <= 1024 - n; x += y + 1) {
- for (y = 0; y < n; y++)
- if (demand_map_pgt[x+y] & _PAGE_PRESENT)
- break;
- if (y == n)
- break;
- }
- if (y != n) {
- printk("Failed to map %ld frames!\n", n);
- return NULL;
- }
-
- /* Found it at x. Map it in. */
- for (y = 0; y < n; y++) {
- mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
- mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
- }
-
- rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
- if (rc < 0) {
- printk("Map %ld failed: %d.\n", n, rc);
- return NULL;
- } else {
- return (void *)(unsigned long)((unsigned long)demand_map_area_start +
- x * PAGE_SIZE);
- }
-}
void init_mm(void)
{
@@ -717,22 +369,7 @@ void init_mm(void)
printk("MM: Init\n");
- printk(" _text: %p\n", &_text);
- printk(" _etext: %p\n", &_etext);
- printk(" _edata: %p\n", &_edata);
- printk(" stack start: %p\n", &stack);
- printk(" _end: %p\n", &_end);
-
- /* First page follows page table pages and 3 more pages (store page etc) */
- start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
- start_info.nr_pt_frames + 3;
- max_pfn = start_info.nr_pages;
-
- printk(" start_pfn: %lx\n", start_pfn);
- printk(" max_pfn: %lx\n", max_pfn);
-
- build_pagetable(&start_pfn, &max_pfn);
-
+ arch_init_mm(&start_pfn, &max_pfn);
/*
* now we can initialise the page allocator
*/
@@ -742,8 +379,7 @@ void init_mm(void)
init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
printk("MM: done\n");
- init_demand_mapping_area(max_pfn);
- printk("Initialised demand area.\n");
+ arch_init_demand_mapping_area(max_pfn);
}
void sanity_check(void)
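
For illustration (not part of the changeset): after this hunk the generic
init_mm() reduces to a fixed three-step sequence, with all x86 details behind
the two arch_* entry points. A sketch of that control flow, using stub
implementations and made-up pfn values in place of the real arch/x86/mm.c:

#include <stdio.h>

#define PFN_PHYS(x) ((x) << 12)

/* Stubs standing in for arch/x86/mm.c; the pfn values are invented. */
static void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p)
{
    *start_pfn_p = 0x400;
    *max_pfn_p   = 0x8000;
}
static void init_page_allocator(unsigned long min, unsigned long max)
{
    printf("page allocator manages [0x%lx, 0x%lx)\n", min, max);
}
static void arch_init_demand_mapping_area(unsigned long max_pfn)
{
    printf("demand map hole placed after pfn 0x%lx\n", max_pfn);
}

int main(void)   /* mirrors the order of calls in init_mm() */
{
    unsigned long start_pfn, max_pfn;
    arch_init_mm(&start_pfn, &max_pfn);
    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
    arch_init_demand_mapping_area(max_pfn);
    return 0;
}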
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/sched.c
--- a/extras/mini-os/sched.c Wed Nov 22 10:10:29 2006 +0000
+++ b/extras/mini-os/sched.c Wed Nov 22 10:11:36 2006 +0000
@@ -54,81 +54,8 @@
#define DEBUG(_f, _a...) ((void)0)
#endif
-
-#define RUNNABLE_FLAG 0x00000001
-
-#define is_runnable(_thread) (_thread->flags & RUNNABLE_FLAG)
-#define set_runnable(_thread) (_thread->flags |= RUNNABLE_FLAG)
-#define clear_runnable(_thread) (_thread->flags &= ~RUNNABLE_FLAG)
-
-
struct thread *idle_thread = NULL;
LIST_HEAD(exited_threads);
-
-void idle_thread_fn(void *unused);
-
-void dump_stack(struct thread *thread)
-{
- unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024);
- unsigned long *pointer = (unsigned long *)thread->sp;
- int count;
- if(thread == current)
- {
-#ifdef __i386__
- asm("movl %%esp,%0"
- : "=r"(pointer));
-#else
- asm("movq %%rsp,%0"
- : "=r"(pointer));
-#endif
- }
- printk("The stack for \"%s\"\n", thread->name);
- for(count = 0; count < 25 && pointer < bottom; count ++)
- {
- printk("[0x%lx] 0x%lx\n", pointer, *pointer);
- pointer++;
- }
-
- if(pointer < bottom) printk(" ... continues.\n");
-}
-
-#ifdef __i386__
-#define switch_threads(prev, next) do { \
- unsigned long esi,edi; \
- __asm__ __volatile__("pushfl\n\t" \
- "pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %4,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %5\n\t" /* restore EIP */ \
- "ret\n\t" \
- "1:\t" \
- "popl %%ebp\n\t" \
- "popfl" \
- :"=m" (prev->sp),"=m" (prev->ip), \
- "=S" (esi),"=D" (edi) \
- :"m" (next->sp),"m" (next->ip), \
- "2" (prev), "d" (next)); \
-} while (0)
-#elif __x86_64__
-#define switch_threads(prev, next) do { \
- unsigned long rsi,rdi; \
- __asm__ __volatile__("pushfq\n\t" \
- "pushq %%rbp\n\t" \
- "movq %%rsp,%0\n\t" /* save RSP */ \
- "movq %4,%%rsp\n\t" /* restore RSP */ \
- "movq $1f,%1\n\t" /* save RIP */ \
- "pushq %5\n\t" /* restore RIP */ \
- "ret\n\t" \
- "1:\t" \
- "popq %%rbp\n\t" \
- "popfq" \
- :"=m" (prev->sp),"=m" (prev->ip), \
- "=S" (rsi),"=D" (rdi) \
- :"m" (next->sp),"m" (next->ip), \
- "2" (prev), "d" (next)); \
-} while (0)
-#endif
void inline print_runqueue(void)
{
@@ -250,50 +177,6 @@ void exit_thread(void)
schedule();
}
-/* Pushes the specified value onto the stack of the specified thread */
-static void stack_push(struct thread *thread, unsigned long value)
-{
- thread->sp -= sizeof(unsigned long);
- *((unsigned long *)thread->sp) = value;
-}
-
-struct thread* create_thread(char *name, void (*function)(void *), void *data)
-{
- struct thread *thread;
- unsigned long flags;
-
- thread = xmalloc(struct thread);
- /* Allocate 2 pages for stack, stack will be 2pages aligned */
- thread->stack = (char *)alloc_pages(1);
- thread->name = name;
- printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread,
- thread->stack);
-
- thread->sp = (unsigned long)thread->stack + 4096 * 2;
- /* Save pointer to the thread on the stack, used by current macro */
- *((unsigned long *)thread->stack) = (unsigned long)thread;
-
- stack_push(thread, (unsigned long) function);
- stack_push(thread, (unsigned long) data);
- thread->ip = (unsigned long) thread_starter;
-
- /* Not runable, not exited, not sleeping */
- thread->flags = 0;
- thread->wakeup_time = 0LL;
- set_runnable(thread);
- local_irq_save(flags);
- if(idle_thread != NULL) {
- list_add_tail(&thread->thread_list, &idle_thread->thread_list);
- } else if(function != idle_thread_fn)
- {
- printk("BUG: Not allowed to create thread before initialising
scheduler.\n");
- BUG();
- }
- local_irq_restore(flags);
- return thread;
-}
-
-
void block(struct thread *thread)
{
thread->wakeup_time = 0LL;
@@ -327,26 +210,6 @@ void idle_thread_fn(void *unused)
}
}
-void run_idle_thread(void)
-{
- /* Switch stacks and run the thread */
-#if defined(__i386__)
- __asm__ __volatile__("mov %0,%%esp\n\t"
- "push %1\n\t"
- "ret"
- :"=m" (idle_thread->sp)
- :"m" (idle_thread->ip));
-#elif defined(__x86_64__)
- __asm__ __volatile__("mov %0,%%rsp\n\t"
- "push %1\n\t"
- "ret"
- :"=m" (idle_thread->sp)
- :"m" (idle_thread->ip));
-#endif
-}
-
-
-
DECLARE_MUTEX(mutex);
void th_f1(void *data)
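
For illustration (not part of the changeset): the deleted switch_threads asm
(reintroduced below as arch_switch_threads in include/x86/arch_sched.h) saves
the outgoing thread's SP/IP and resumes the incoming one. The same cooperative
hand-off can be sketched portably with the POSIX ucontext API; Mini-OS itself
does not use ucontext, this is only an analogy:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, thr_ctx;
static char thr_stack[16384];

static void thread_fn(void)
{
    printf("in thread\n");
    swapcontext(&thr_ctx, &main_ctx);   /* cooperative yield back to main */
    printf("thread resumed\n");
}

int main(void)
{
    getcontext(&thr_ctx);
    thr_ctx.uc_stack.ss_sp   = thr_stack;
    thr_ctx.uc_stack.ss_size = sizeof(thr_stack);
    thr_ctx.uc_link          = &main_ctx;
    makecontext(&thr_ctx, thread_fn, 0);

    swapcontext(&main_ctx, &thr_ctx);   /* like switch_threads(main, thr) */
    printf("back in main\n");
    swapcontext(&main_ctx, &thr_ctx);   /* let the thread finish */
    printf("done\n");
    return 0;
}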
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/arch/x86/mm.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/mm.c Wed Nov 22 10:11:36 2006 +0000
@@ -0,0 +1,428 @@
+/*
+ ****************************************************************************
+ * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
+ * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
+ ****************************************************************************
+ *
+ * File: mm.c
+ * Author: Rolf Neugebauer (neugebar@xxxxxxxxxxxxx)
+ * Changes: Grzegorz Milos
+ *
+ * Date: Aug 2003, changes Aug 2005
+ *
+ * Environment: Xen Minimal OS
+ * Description: memory management related functions
+ * contains buddy page allocator from Xen.
+ *
+ ****************************************************************************
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <os.h>
+#include <hypervisor.h>
+#include <mm.h>
+#include <types.h>
+#include <lib.h>
+#include <xmalloc.h>
+
+#ifdef MM_DEBUG
+#define DEBUG(_f, _a...) \
+ printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
+#else
+#define DEBUG(_f, _a...) ((void)0)
+#endif
+
+unsigned long *phys_to_machine_mapping;
+extern char *stack;
+extern void page_walk(unsigned long virt_addr);
+
+void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
+ unsigned long offset, unsigned long level)
+{
+ pgentry_t *tab = (pgentry_t *)start_info.pt_base;
+ unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
+ unsigned long prot_e, prot_t, pincmd;
+ mmu_update_t mmu_updates[1];
+ struct mmuext_op pin_request;
+
+ DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
+ "prev_l_mfn=%lx, offset=%lx",
+ level, *pt_pfn, prev_l_mfn, offset);
+
+ /* We need to clear the page, otherwise we might fail to map it
+ as a page table page */
+ memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);
+
+ switch ( level )
+ {
+ case L1_FRAME:
+ prot_e = L1_PROT;
+ prot_t = L2_PROT;
+ pincmd = MMUEXT_PIN_L1_TABLE;
+ break;
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+ case L2_FRAME:
+ prot_e = L2_PROT;
+ prot_t = L3_PROT;
+ pincmd = MMUEXT_PIN_L2_TABLE;
+ break;
+#endif
+#if defined(__x86_64__)
+ case L3_FRAME:
+ prot_e = L3_PROT;
+ prot_t = L4_PROT;
+ pincmd = MMUEXT_PIN_L3_TABLE;
+ break;
+#endif
+ default:
+ printk("new_pt_frame() called with invalid level number %d\n", level);
+ do_exit();
+ break;
+ }
+
+ /* Update the entry */
+#if defined(__x86_64__)
+ tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
+ tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
+#endif
+#if defined(CONFIG_X86_PAE)
+ tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
+#endif
+
+ mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
+ sizeof(pgentry_t) * l1_table_offset(pt_page);
+ mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
+ (prot_e & ~_PAGE_RW);
+ if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
+ {
+ printk("PTE for new page table page could not be updated\n");
+ do_exit();
+ }
+
+ /* Pin the page to provide correct protection */
+ pin_request.cmd = pincmd;
+ pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
+ if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
+ {
+ printk("ERROR: pinning failed\n");
+ do_exit();
+ }
+
+ /* Now fill the new page table page with entries.
+ Update the page directory as well. */
+ mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+ mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
+ if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
+ {
+ printk("ERROR: mmu_update failed\n");
+ do_exit();
+ }
+
+ *pt_pfn += 1;
+}
+
+/* Checks if a pagetable frame is needed (if it wasn't already allocated by Xen) */
+static int need_pt_frame(unsigned long virt_address, int level)
+{
+ unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
+#if defined(__x86_64__)
+ unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
+#else
+ unsigned long hyp_virt_end = 0xffffffff;
+#endif
+
+ /* In general frames will _not_ be needed if they were already
+ allocated to map the hypervisor into our VA space */
+#if defined(__x86_64__)
+ if(level == L3_FRAME)
+ {
+ if(l4_table_offset(virt_address) >=
+ l4_table_offset(hyp_virt_start) &&
+ l4_table_offset(virt_address) <=
+ l4_table_offset(hyp_virt_end))
+ return 0;
+ return 1;
+ } else
+#endif
+
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+ if(level == L2_FRAME)
+ {
+#if defined(__x86_64__)
+ if(l4_table_offset(virt_address) >=
+ l4_table_offset(hyp_virt_start) &&
+ l4_table_offset(virt_address) <=
+ l4_table_offset(hyp_virt_end))
+#endif
+ if(l3_table_offset(virt_address) >=
+ l3_table_offset(hyp_virt_start) &&
+ l3_table_offset(virt_address) <=
+ l3_table_offset(hyp_virt_end))
+ return 0;
+
+ return 1;
+ } else
+#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
+
+ /* Always need l1 frames */
+ if(level == L1_FRAME)
+ return 1;
+
+ printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
+ level, hyp_virt_start, hyp_virt_end);
+ return -1;
+}
+
+void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
+{
+ unsigned long start_address, end_address;
+ unsigned long pfn_to_map, pt_pfn = *start_pfn;
+ static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
+ pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
+ unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+ unsigned long offset;
+ int count = 0;
+
+ pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
+
+ if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
+ {
+ printk("WARNING: Mini-OS trying to use Xen virtual space. "
+ "Truncating memory from %dMB to ",
+ ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+ *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
+ printk("%dMB\n",
+ ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+ }
+
+ start_address = (unsigned long)pfn_to_virt(pfn_to_map);
+ end_address = (unsigned long)pfn_to_virt(*max_pfn);
+
+ /* We worked out the virtual memory range to map, now mapping loop */
+ printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
+
+ while(start_address < end_address)
+ {
+ tab = (pgentry_t *)start_info.pt_base;
+ mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+
+#if defined(__x86_64__)
+ offset = l4_table_offset(start_address);
+ /* Need new L3 pt frame */
+ if(!(start_address & L3_MASK))
+ if(need_pt_frame(start_address, L3_FRAME))
+ new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+ offset = l3_table_offset(start_address);
+ /* Need new L2 pt frame */
+ if(!(start_address & L2_MASK))
+ if(need_pt_frame(start_address, L2_FRAME))
+ new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+ offset = l2_table_offset(start_address);
+ /* Need new L1 pt frame */
+ if(!(start_address & L1_MASK))
+ if(need_pt_frame(start_address, L1_FRAME))
+ new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ offset = l1_table_offset(start_address);
+
+ mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+ mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
+ count++;
+ if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
+ {
+ if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
+ {
+ printk("PTE could not be updated\n");
+ do_exit();
+ }
+ count = 0;
+ }
+ start_address += PAGE_SIZE;
+ }
+
+ *start_pfn = pt_pfn;
+}
+
+
+void mem_test(unsigned long *start_add, unsigned long *end_add)
+{
+ unsigned long mask = 0x10000;
+ unsigned long *pointer;
+
+ for(pointer = start_add; pointer < end_add; pointer++)
+ {
+ if(!(((unsigned long)pointer) & 0xfffff))
+ {
+ printk("Writing to %lx\n", pointer);
+ page_walk((unsigned long)pointer);
+ }
+ *pointer = (unsigned long)pointer & ~mask;
+ }
+
+ for(pointer = start_add; pointer < end_add; pointer++)
+ {
+ if(((unsigned long)pointer & ~mask) != *pointer)
+ printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
+ (unsigned long)pointer,
+ *pointer,
+ ((unsigned long)pointer & ~mask));
+ }
+
+}
+
+static pgentry_t *demand_map_pgt;
+static void *demand_map_area_start;
+
+void arch_init_demand_mapping_area(unsigned long max_pfn)
+{
+ unsigned long mfn;
+ pgentry_t *tab;
+ unsigned long start_addr;
+ unsigned long pt_pfn;
+ unsigned offset;
+
+ /* Round up to four megs. + 1024 rather than + 1023 since we want
+ to be sure we don't end up in the same place we started. */
+ max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
+ if (max_pfn == 0 ||
+ (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
+ HYPERVISOR_VIRT_START) {
+ printk("Too much memory; no room for demand map hole.\n");
+ do_exit();
+ }
+
+ demand_map_area_start = pfn_to_virt(max_pfn);
+ printk("Demand map pfns start at %lx (%p).\n", max_pfn,
+ demand_map_area_start);
+ start_addr = (unsigned long)demand_map_area_start;
+
+ tab = (pgentry_t *)start_info.pt_base;
+ mfn = virt_to_mfn(start_info.pt_base);
+ pt_pfn = virt_to_pfn(alloc_page());
+
+#if defined(__x86_64__)
+ offset = l4_table_offset(start_addr);
+ if (!(tab[offset] & _PAGE_PRESENT)) {
+ new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+ pt_pfn = virt_to_pfn(alloc_page());
+ }
+ ASSERT(tab[offset] & _PAGE_PRESENT);
+ mfn = pte_to_mfn(tab[offset]);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+ offset = l3_table_offset(start_addr);
+ if (!(tab[offset] & _PAGE_PRESENT)) {
+ new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+ pt_pfn = virt_to_pfn(alloc_page());
+ }
+ ASSERT(tab[offset] & _PAGE_PRESENT);
+ mfn = pte_to_mfn(tab[offset]);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+ offset = l2_table_offset(start_addr);
+ if (tab[offset] & _PAGE_PRESENT) {
+ printk("Demand map area already has a page table covering it?\n");
+ BUG();
+ }
+ demand_map_pgt = pfn_to_virt(pt_pfn);
+ new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+ ASSERT(tab[offset] & _PAGE_PRESENT);
+ printk("Initialised demand area.\n");
+}
+
+void *map_frames(unsigned long *f, unsigned long n)
+{
+ unsigned long x;
+ unsigned long y = 0;
+ mmu_update_t mmu_updates[16];
+ int rc;
+
+ if (n > 16) {
+ printk("Tried to map too many (%ld) frames at once.\n", n);
+ return NULL;
+ }
+
+ /* Find a run of n contiguous frames */
+ for (x = 0; x <= 1024 - n; x += y + 1) {
+ for (y = 0; y < n; y++)
+ if (demand_map_pgt[x+y] & _PAGE_PRESENT)
+ break;
+ if (y == n)
+ break;
+ }
+ if (y != n) {
+ printk("Failed to map %ld frames!\n", n);
+ return NULL;
+ }
+
+ /* Found it at x. Map it in. */
+ for (y = 0; y < n; y++) {
+ mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
+ mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
+ }
+
+ rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
+ if (rc < 0) {
+ printk("Map %ld failed: %d.\n", n, rc);
+ return NULL;
+ } else {
+ return (void *)(unsigned long)((unsigned long)demand_map_area_start +
+ x * PAGE_SIZE);
+ }
+}
+
+void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
+{
+
+ unsigned long start_pfn, max_pfn;
+
+ printk(" _text: %p\n", &_text);
+ printk(" _etext: %p\n", &_etext);
+ printk(" _edata: %p\n", &_edata);
+ printk(" stack start: %p\n", &stack);
+ printk(" _end: %p\n", &_end);
+
+ /* First page follows page table pages and 3 more pages (store page etc) */
+ start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
+ start_info.nr_pt_frames + 3;
+ max_pfn = start_info.nr_pages;
+
+ printk(" start_pfn: %lx\n", start_pfn);
+ printk(" max_pfn: %lx\n", max_pfn);
+
+ build_pagetable(&start_pfn, &max_pfn);
+
+ *start_pfn_p = start_pfn;
+ *max_pfn_p = max_pfn;
+}
+
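For illustration (not part of the changeset): the inner loops of map_frames()
above implement a first-fit search for n consecutive free slots. The same
search isolated as a standalone function over a toy PTE array (slot count and
sample entries are invented):

#include <stdio.h>

#define _PAGE_PRESENT 0x001UL
#define SLOTS 16

/* First index of n consecutive non-present slots, or -1 if none. */
static long find_free_run(const unsigned long *pgt, unsigned long slots,
                          unsigned long n)
{
    unsigned long x, y = 0;
    for (x = 0; x + n <= slots; x += y + 1) {
        for (y = 0; y < n; y++)
            if (pgt[x + y] & _PAGE_PRESENT)
                break;
        if (y == n)
            return (long)x;
    }
    return -1;
}

int main(void)
{
    unsigned long pgt[SLOTS] = { 0 };
    pgt[0] = pgt[1] = pgt[4] = _PAGE_PRESENT;   /* slots 2-3, 5-15 free */
    printf("run of 2 starts at %ld\n", find_free_run(pgt, SLOTS, 2)); /* 2 */
    printf("run of 4 starts at %ld\n", find_free_run(pgt, SLOTS, 4)); /* 5 */
    return 0;
}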
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/arch/x86/sched.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/sched.c Wed Nov 22 10:11:36 2006 +0000
@@ -0,0 +1,150 @@
+/*
+ ****************************************************************************
+ * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
+ ****************************************************************************
+ *
+ * File: sched.c
+ * Author: Grzegorz Milos
+ * Changes: Robert Kaiser
+ *
+ * Date: Aug 2005
+ *
+ * Environment: Xen Minimal OS
+ * Description: simple scheduler for Mini-OS
+ *
+ * The scheduler is non-preemptive (cooperative), and schedules according
+ * to a round-robin algorithm.
+ *
+ ****************************************************************************
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <os.h>
+#include <hypervisor.h>
+#include <time.h>
+#include <mm.h>
+#include <types.h>
+#include <lib.h>
+#include <xmalloc.h>
+#include <list.h>
+#include <sched.h>
+#include <semaphore.h>
+
+
+#ifdef SCHED_DEBUG
+#define DEBUG(_f, _a...) \
+ printk("MINI_OS(file=sched.c, line=%d) " _f "\n", __LINE__, ## _a)
+#else
+#define DEBUG(_f, _a...) ((void)0)
+#endif
+
+
+void dump_stack(struct thread *thread)
+{
+ unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024);
+ unsigned long *pointer = (unsigned long *)thread->sp;
+ int count;
+ if(thread == current)
+ {
+#ifdef __i386__
+ asm("movl %%esp,%0"
+ : "=r"(pointer));
+#else
+ asm("movq %%rsp,%0"
+ : "=r"(pointer));
+#endif
+ }
+ printk("The stack for \"%s\"\n", thread->name);
+ for(count = 0; count < 25 && pointer < bottom; count ++)
+ {
+ printk("[0x%lx] 0x%lx\n", pointer, *pointer);
+ pointer++;
+ }
+
+ if(pointer < bottom) printk(" ... continues.\n");
+}
+
+/* Gets run when a new thread is scheduled the first time ever,
+ defined in x86_[32/64].S */
+extern void thread_starter(void);
+
+/* Pushes the specified value onto the stack of the specified thread */
+static void stack_push(struct thread *thread, unsigned long value)
+{
+ thread->sp -= sizeof(unsigned long);
+ *((unsigned long *)thread->sp) = value;
+}
+
+struct thread* create_thread(char *name, void (*function)(void *), void *data)
+{
+ struct thread *thread;
+ unsigned long flags;
+
+ thread = xmalloc(struct thread);
+ /* Allocate 2 pages for the stack; it will be 2-page aligned */
+ thread->stack = (char *)alloc_pages(1);
+ thread->name = name;
+ printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread,
+ thread->stack);
+
+ thread->sp = (unsigned long)thread->stack + 4096 * 2;
+ /* Save pointer to the thread on the stack, used by current macro */
+ *((unsigned long *)thread->stack) = (unsigned long)thread;
+
+ stack_push(thread, (unsigned long) function);
+ stack_push(thread, (unsigned long) data);
+ thread->ip = (unsigned long) thread_starter;
+
+ /* Not runnable, not exited, not sleeping */
+ thread->flags = 0;
+ thread->wakeup_time = 0LL;
+ set_runnable(thread);
+ local_irq_save(flags);
+ if(idle_thread != NULL) {
+ list_add_tail(&thread->thread_list, &idle_thread->thread_list);
+ } else if(function != idle_thread_fn)
+ {
+ printk("BUG: Not allowed to create thread before initialising
scheduler.\n");
+ BUG();
+ }
+ local_irq_restore(flags);
+ return thread;
+}
+
+
+void run_idle_thread(void)
+{
+ /* Switch stacks and run the thread */
+#if defined(__i386__)
+ __asm__ __volatile__("mov %0,%%esp\n\t"
+ "push %1\n\t"
+ "ret"
+ :"=m" (idle_thread->sp)
+ :"m" (idle_thread->ip));
+#elif defined(__x86_64__)
+ __asm__ __volatile__("mov %0,%%rsp\n\t"
+ "push %1\n\t"
+ "ret"
+ :"=m" (idle_thread->sp)
+ :"m" (idle_thread->ip));
+#endif
+}
+
+
+
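For illustration (not part of the changeset): create_thread() above seeds a
new stack so that thread_starter finds the function and its argument at the
top. A userspace sketch of that layout, with malloc standing in for
alloc_pages(1) and a made-up argument value:

#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE (4096 * 2)

struct toy_thread {
    char *stack;
    unsigned long sp;
};

/* Pushes the specified value onto the stack of the specified thread. */
static void stack_push(struct toy_thread *thread, unsigned long value)
{
    thread->sp -= sizeof(unsigned long);
    *((unsigned long *)thread->sp) = value;
}

static void demo_fn(void *data) { printf("thread arg: %p\n", data); }

int main(void)
{
    struct toy_thread th;
    th.stack = malloc(STACK_SIZE);
    th.sp = (unsigned long)th.stack + STACK_SIZE;

    /* Same order as create_thread(): function first, argument on top. */
    stack_push(&th, (unsigned long)demo_fn);
    stack_push(&th, (unsigned long)(void *)0x1234);

    printf("top of stack: data=0x%lx, fn=%p\n",
           *(unsigned long *)th.sp,
           (void *)*((unsigned long *)th.sp + 1));
    free(th.stack);
    return 0;
}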
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/include/x86/arch_mm.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/include/x86/arch_mm.h Wed Nov 22 10:11:36 2006 +0000
@@ -0,0 +1,209 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
+ *
+ * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
+ * Copyright (c) 2005, Keir A Fraser
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ARCH_MM_H_
+#define _ARCH_MM_H_
+
+#if defined(__i386__)
+#include <xen/arch-x86_32.h>
+#elif defined(__x86_64__)
+#include <xen/arch-x86_64.h>
+#else
+#error "Unsupported architecture"
+#endif
+
+#define L1_FRAME 1
+#define L2_FRAME 2
+#define L3_FRAME 3
+
+#define L1_PAGETABLE_SHIFT 12
+
+#if defined(__i386__)
+
+#if !defined(CONFIG_X86_PAE)
+
+#define L2_PAGETABLE_SHIFT 22
+
+#define L1_PAGETABLE_ENTRIES 1024
+#define L2_PAGETABLE_ENTRIES 1024
+
+#define PADDR_BITS 32
+#define PADDR_MASK (~0UL)
+
+#define NOT_L1_FRAMES 1
+#define PRIpte "08lx"
+typedef unsigned long pgentry_t;
+
+#else /* defined(CONFIG_X86_PAE) */
+
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+
+#define L1_PAGETABLE_ENTRIES 512
+#define L2_PAGETABLE_ENTRIES 512
+#define L3_PAGETABLE_ENTRIES 4
+
+#define PADDR_BITS 44
+#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
+
+#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
+
+/*
+ * If starting from virtual address greater than 0xc0000000,
+ * this value will be 2 to account for final mid-level page
+ * directory which is always mapped in at this location.
+ */
+#define NOT_L1_FRAMES 3
+#define PRIpte "016llx"
+typedef uint64_t pgentry_t;
+
+#endif /* !defined(CONFIG_X86_PAE) */
+
+#elif defined(__x86_64__)
+
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+#define L4_PAGETABLE_SHIFT 39
+
+#define L1_PAGETABLE_ENTRIES 512
+#define L2_PAGETABLE_ENTRIES 512
+#define L3_PAGETABLE_ENTRIES 512
+#define L4_PAGETABLE_ENTRIES 512
+
+/* These are page-table limitations. Current CPUs support only 40-bit phys. */
+#define PADDR_BITS 52
+#define VADDR_BITS 48
+#define PADDR_MASK ((1UL << PADDR_BITS)-1)
+#define VADDR_MASK ((1UL << VADDR_BITS)-1)
+
+#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
+#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
+
+#define NOT_L1_FRAMES 3
+#define PRIpte "016lx"
+typedef unsigned long pgentry_t;
+
+#endif
+
+#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
+
+/* Given a virtual address, get an entry offset into a page table. */
+#define l1_table_offset(_a) \
+ (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
+#define l2_table_offset(_a) \
+ (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+#define l3_table_offset(_a) \
+ (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
+#endif
+#if defined(__x86_64__)
+#define l4_table_offset(_a) \
+ (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
+#endif
+
+#define _PAGE_PRESENT 0x001UL
+#define _PAGE_RW 0x002UL
+#define _PAGE_USER 0x004UL
+#define _PAGE_PWT 0x008UL
+#define _PAGE_PCD 0x010UL
+#define _PAGE_ACCESSED 0x020UL
+#define _PAGE_DIRTY 0x040UL
+#define _PAGE_PAT 0x080UL
+#define _PAGE_PSE 0x080UL
+#define _PAGE_GLOBAL 0x100UL
+
+#if defined(__i386__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
+#if defined(CONFIG_X86_PAE)
+#define L3_PROT (_PAGE_PRESENT)
+#endif /* CONFIG_X86_PAE */
+#elif defined(__x86_64__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif /* __i386__ || __x86_64__ */
+
+#ifndef CONFIG_X86_PAE
+#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
+#else
+#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT)
+#endif
+#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
+#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
+#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT)
+#define PHYS_PFN(x) ((x) >> L1_PAGETABLE_SHIFT)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* Definitions for machine and pseudophysical addresses. */
+#ifdef CONFIG_X86_PAE
+typedef unsigned long long paddr_t;
+typedef unsigned long long maddr_t;
+#else
+typedef unsigned long paddr_t;
+typedef unsigned long maddr_t;
+#endif
+
+extern unsigned long *phys_to_machine_mapping;
+extern char _text, _etext, _edata, _end;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+static __inline__ maddr_t phys_to_machine(paddr_t phys)
+{
+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static __inline__ paddr_t machine_to_phys(maddr_t machine)
+{
+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
+
+#define VIRT_START ((unsigned long)&_text)
+
+#define to_phys(x) ((unsigned long)(x)-VIRT_START)
+#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
+
+#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
+#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
+#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
+#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
+#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
+
+/* Pagetable walking. */
+#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
+#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
+
+
+#endif /* _ARCH_MM_H_ */
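
For illustration (not part of the changeset): the l?_table_offset() macros
above slice an x86_64 virtual address into four 9-bit page-table indices. A
standalone sketch with the shift constants copied from arch_mm.h and an
arbitrary sample address:

#include <stdio.h>

#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39
#define PT_ENTRIES         512

#define table_offset(a, shift) (((a) >> (shift)) & (PT_ENTRIES - 1))

int main(void)
{
    unsigned long va = 0x0000400000201000UL;   /* arbitrary sample */
    printf("l4=%lu l3=%lu l2=%lu l1=%lu\n",
           table_offset(va, L4_PAGETABLE_SHIFT),
           table_offset(va, L3_PAGETABLE_SHIFT),
           table_offset(va, L2_PAGETABLE_SHIFT),
           table_offset(va, L1_PAGETABLE_SHIFT));
    /* prints: l4=128 l3=0 l2=1 l1=1 */
    return 0;
}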
diff -r 60b60f75a221 -r a3c6479c87ef extras/mini-os/include/x86/arch_sched.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/include/x86/arch_sched.h Wed Nov 22 10:11:36 2006 +0000
@@ -0,0 +1,58 @@
+
+#ifndef __ARCH_SCHED_H__
+#define __ARCH_SCHED_H__
+
+
+static inline struct thread* get_current(void)
+{
+ struct thread **current;
+#ifdef __i386__
+ __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
+#else
+ __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL));
+#endif
+ return *current;
+}
+
+#ifdef __i386__
+#define arch_switch_threads(prev, next) do { \
+ unsigned long esi,edi; \
+ __asm__ __volatile__("pushfl\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %4,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %5\n\t" /* restore EIP */ \
+ "ret\n\t" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popfl" \
+ :"=m" (prev->sp),"=m" (prev->ip), \
+ "=S" (esi),"=D" (edi) \
+ :"m" (next->sp),"m" (next->ip), \
+ "2" (prev), "d" (next)); \
+} while (0)
+#elif __x86_64__
+#define arch_switch_threads(prev, next) do { \
+ unsigned long rsi,rdi; \
+ __asm__ __volatile__("pushfq\n\t" \
+ "pushq %%rbp\n\t" \
+ "movq %%rsp,%0\n\t" /* save RSP */ \
+ "movq %4,%%rsp\n\t" /* restore RSP */ \
+ "movq $1f,%1\n\t" /* save RIP */ \
+ "pushq %5\n\t" /* restore RIP */ \
+ "ret\n\t" \
+ "1:\t" \
+ "popq %%rbp\n\t" \
+ "popfq" \
+ :"=m" (prev->sp),"=m" (prev->ip), \
+ "=S" (rsi),"=D" (rdi) \
+ :"m" (next->sp),"m" (next->ip), \
+ "2" (prev), "d" (next)); \
+} while (0)
+#endif
+
+
+
+
+#endif /* __ARCH_SCHED_H__ */
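
For illustration (not part of the changeset): get_current() above works only
because create_thread() stores the struct thread pointer at the base of an
8 KB-aligned stack, so masking any in-stack address with ~8191 recovers it. A
userspace sketch of the same arithmetic, with posix_memalign standing in for
the naturally aligned alloc_pages(1):

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE 8192   /* two 4 KB pages, as in create_thread() */

struct toy_thread { const char *name; };

int main(void)
{
    void *stack;
    if (posix_memalign(&stack, STACK_SIZE, STACK_SIZE) != 0)
        return 1;

    /* create_thread() stores the thread pointer at the stack base. */
    struct toy_thread th = { "demo" };
    *(struct toy_thread **)stack = &th;

    /* Any address inside the stack, masked down, yields the base. */
    unsigned long sp = (unsigned long)stack + 5000;
    struct toy_thread *cur = *(struct toy_thread **)(sp & ~8191UL);

    printf("current thread: %s\n", cur->name);   /* prints: demo */
    free(stack);
    return 0;
}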