# HG changeset patch
# User hahn@amur
# Date 1154415804 -7200
# Node ID 287178fbc3209bccd1fa6668415f3118d2926e84
# Parent  d2bf1a7cc1319d2e0379c3394a6c09ec2f0c51e1
First small re-architecture for porting mini-os to ia64.

Signed-off-by: Dietmar Hahn

diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/Makefile
--- a/extras/mini-os/Makefile	Sat Jul 29 14:05:59 2006 +0100
+++ b/extras/mini-os/Makefile	Tue Aug 01 09:03:24 2006 +0200
@@ -11,8 +11,12 @@ CFLAGS := -fno-builtin -Wall -Werror -Wr
 CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
 CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
-override CPPFLAGS := -Iinclude $(CPPFLAGS)
 ASFLAGS = -D__ASSEMBLY__
+
+# Optional extra source directories (set per architecture below).
+EXTRA_DIRS =
+# Optional extra header directories (set per architecture below).
+EXTRA_INC =
 
 LDLIBS = -L. -lminios
 LDFLAGS := -N -T minios-$(TARGET_ARCH).lds
@@ -20,17 +24,23 @@ ifeq ($(TARGET_ARCH),x86_32)
 ifeq ($(TARGET_ARCH),x86_32)
 CFLAGS += -m32 -march=i686
 LDFLAGS += -m elf_i386
+EXTRA_INC = x86
+EXTRA_DIRS = x86
 endif
 
 ifeq ($(TARGET_ARCH)$(pae),x86_32y)
 CFLAGS  += -DCONFIG_X86_PAE=1
 ASFLAGS += -DCONFIG_X86_PAE=1
+EXTRA_INC = x86
+EXTRA_DIRS = x86
 endif
 
 ifeq ($(TARGET_ARCH),x86_64)
 CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
 CFLAGS += -fno-asynchronous-unwind-tables
 LDFLAGS += -m elf_x86_64
+EXTRA_INC = x86
+EXTRA_DIRS = x86
 endif
@@ -39,6 +49,8 @@ CFLAGS += -O3
 CFLAGS += -O3
 endif
 
+override CPPFLAGS := -Iinclude $(CPPFLAGS) -Iinclude/$(TARGET_ARCH) -Iinclude/$(EXTRA_INC)
+
 TARGET := mini-os
 
 HEAD := $(TARGET_ARCH).o
@@ -46,9 +58,20 @@ OBJS += $(patsubst %.c,%.o,$(wildcard li
 OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
+OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_ARCH)/*.S))
+OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_ARCH)/*.c))
+# Collect sources from the extra directories.
+extra_objs := $(foreach dir,$(EXTRA_DIRS),$(patsubst %.c,%.o,$(wildcard $(dir)/*.c)))
+OBJS += $(extra_objs)
+extra_objs := $(foreach dir,$(EXTRA_DIRS),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
+OBJS += $(extra_objs)
 
 HDRS := $(wildcard include/*.h)
 HDRS += $(wildcard include/xen/*.h)
+HDRS += $(wildcard include/$(TARGET_ARCH)/*.h)
+# Collect headers from the extra directories.
+extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard include/$(dir)/*.h))
+HDRS += $(extra_heads)
 
 .PHONY: default
 default: $(TARGET)
@@ -57,8 +80,8 @@ links:
 links:
	[ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
 
-libminios.a: links $(OBJS) $(HEAD)
-	$(AR) r libminios.a $(HEAD) $(OBJS)
+libminios.a: links $(OBJS)
+	$(AR) r libminios.a $(OBJS)
 
 $(TARGET): libminios.a $(HEAD)
	$(LD) $(LDFLAGS) $(HEAD) $(LDLIBS) -o $@.elf
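The EXTRA_DIRS/EXTRA_INC hooks introduced above are the seam a non-x86 port is expected to plug into. As an illustration only (this changeset adds no ia64 branch yet, so the values below are hypothetical), an ia64 port might hook in like this:

    # Hypothetical sketch, not part of this changeset: how an ia64 port
    # could use the new hooks once ia64/ and include/ia64/ exist.
    ifeq ($(TARGET_ARCH),ia64)
    EXTRA_INC  = ia64        # headers picked up via -Iinclude/$(EXTRA_INC)
    EXTRA_DIRS = ia64        # ia64/*.c and ia64/*.S are compiled into OBJS
    endif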
diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/x86/os.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/include/x86/os.h	Tue Aug 01 09:03:24 2006 +0200
@@ -0,0 +1,561 @@
+/******************************************************************************
+ * os.h
+ *
+ * random collection of macros and definitions
+ */
+
+#ifndef _OS_H_
+#define _OS_H_
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define unlikely(x) __builtin_expect((x),0)
+
+#define smp_processor_id() 0
+
+
+#ifndef __ASSEMBLY__
+#include <types.h>
+#include <hypervisor.h>
+
+extern void do_exit(void);
+#define BUG do_exit
+
+#endif
+#include <xen/xen.h>
+
+
+#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))
+
+#define __KERNEL_CS  FLAT_KERNEL_CS
+#define __KERNEL_DS  FLAT_KERNEL_DS
+#define __KERNEL_SS  FLAT_KERNEL_SS
+
+#define TRAP_divide_error      0
+#define TRAP_debug             1
+#define TRAP_nmi               2
+#define TRAP_int3              3
+#define TRAP_overflow          4
+#define TRAP_bounds            5
+#define TRAP_invalid_op        6
+#define TRAP_no_device         7
+#define TRAP_double_fault      8
+#define TRAP_copro_seg         9
+#define TRAP_invalid_tss      10
+#define TRAP_no_segment       11
+#define TRAP_stack_error      12
+#define TRAP_gp_fault         13
+#define TRAP_page_fault       14
+#define TRAP_spurious_int     15
+#define TRAP_copro_error      16
+#define TRAP_alignment_check  17
+#define TRAP_machine_check    18
+#define TRAP_simd_error       19
+#define TRAP_deferred_nmi     31
+
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
+extern shared_info_t *HYPERVISOR_shared_info;
+
+void trap_init(void);
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli() \
+do { \
+	vcpu_info_t *_vcpu; \
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+	_vcpu->evtchn_upcall_mask = 1; \
+	barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+	vcpu_info_t *_vcpu; \
+	barrier(); \
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+	_vcpu->evtchn_upcall_mask = 0; \
+	barrier(); /* unmask then check (avoid races) */ \
+	if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+		force_evtchn_callback(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+	vcpu_info_t *_vcpu; \
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+	(x) = _vcpu->evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+	vcpu_info_t *_vcpu; \
+	barrier(); \
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+		barrier(); /* unmask then check (avoid races) */ \
+		if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+			force_evtchn_callback(); \
+	} \
+} while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) \
+do { \
+	vcpu_info_t *_vcpu; \
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+	(x) = _vcpu->evtchn_upcall_mask; \
+	_vcpu->evtchn_upcall_mask = 1; \
+	barrier(); \
+} while (0)
+
+#define local_irq_save(x)	__save_and_cli(x)
+#define local_irq_restore(x)	__restore_flags(x)
+#define local_save_flags(x)	__save_flags(x)
+#define local_irq_disable()	__cli()
+#define local_irq_enable()	__sti()
+
+#define irqs_disabled() \
+	HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
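With smp_processor_id() hard-wired to 0, "interrupt" masking in mini-os is nothing more than flipping the VCPU's evtchn_upcall_mask in the shared info page. A minimal consumer sketch (hypothetical code, not part of this patch, assuming os.h is included):

    /* Sketch: guard a short critical section against event upcalls. */
    static unsigned long pending_work;      /* hypothetical shared state */

    void queue_work(unsigned long mask)
    {
        unsigned long flags;

        local_irq_save(flags);      /* mask upcalls, remember old mask */
        pending_work |= mask;       /* state an event callback also touches */
        local_irq_restore(flags);   /* unmask; forces the callback if events
                                       became pending while we were masked */
    }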
+
+/* This is a barrier for the compiler only, NOT the processor! */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#if defined(__i386__)
+#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#elif defined(__x86_64__)
+#define mb()  __asm__ __volatile__ ("mfence":::"memory")
+#define rmb() __asm__ __volatile__ ("lfence":::"memory")
+#define wmb() __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
+#endif
+
+
+#define LOCK_PREFIX ""
+#define LOCK ""
+#define ADDR (*(volatile long *) addr)
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/************************** i386 *******************************/
+#if defined (__i386__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("xchgb %b0,%1"
+			:"=q" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	case 2:
+		__asm__ __volatile__("xchgw %w0,%1"
+			:"=r" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	case 4:
+		__asm__ __volatile__("xchgl %0,%1"
+			:"=r" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	}
+	return x;
+}
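xchg() is an unconditional atomic swap, which is already enough to build a crude test-and-set lock. A sketch under that assumption (hypothetical helpers, not part of this patch):

    /* Sketch: test-and-set flag on top of xchg(); 0 = free, 1 = taken. */
    static volatile unsigned long busy;     /* hypothetical flag word */

    static inline int try_acquire(void)
    {
        /* Atomically store 1 and receive the previous value. */
        return xchg(&busy, 1UL) == 0;
    }

    static inline void release(void)
    {
        wmb();          /* order prior stores before publishing 'free' */
        busy = 0;
    }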
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered on x86.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__( LOCK
+		"btrl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
+}
+
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		"btl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit)
+		:"m" (ADDR),"Ir" (nr));
+	return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__( LOCK
+		"btsl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__( LOCK
+		"btrl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+	__asm__("bsfl %1,%0"
+		:"=r" (word)
+		:"rm" (word));
+	return word;
+}
+
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) \
+	__asm__ __volatile__("rdtsc" : "=A" (val))
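set_bit() and test_and_clear_bit() together give a race-free way to pass single-bit events from a callback to a main loop, which is how mini-os style event demultiplexing typically works. A minimal sketch (hypothetical names, assuming only the primitives above):

    static unsigned long event_mask[1];     /* hypothetical pending-event word */

    /* Producer (e.g. an upcall handler): mark event 'ev' pending. */
    void mark_pending(int ev)
    {
        set_bit(ev, event_mask);            /* atomic read-modify-write */
    }

    /* Consumer: claim one pending event, or -1 if none. */
    int fetch_pending(void)
    {
        int ev;

        for (ev = 0; ev < (int)(sizeof(event_mask[0]) * 8); ev++)
            if (test_and_clear_bit(ev, event_mask))  /* atomically claim */
                return ev;
        return -1;
    }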
+
+
+#elif defined(__x86_64__) /* ifdef __i386__ */
+/************************** x86_64 *******************************/
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define __xg(x) ((volatile long *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("xchgb %b0,%1"
+			:"=q" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	case 2:
+		__asm__ __volatile__("xchgw %w0,%1"
+			:"=r" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	case 4:
+		__asm__ __volatile__("xchgl %k0,%1"
+			:"=r" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	case 8:
+		__asm__ __volatile__("xchgq %0,%1"
+			:"=r" (x)
+			:"m" (*__xg(ptr)), "0" (x)
+			:"memory");
+		break;
+	}
+	return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__( LOCK_PREFIX
+		"btrl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"dIr" (nr) : "memory");
+	return oldbit;
+}
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		"btl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit)
+		:"m" (ADDR),"dIr" (nr));
+	return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+	__asm__ __volatile__( LOCK_PREFIX
+		"btsl %1,%0"
+		:"=m" (ADDR)
+		:"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+	__asm__ __volatile__( LOCK_PREFIX
+		"btrl %1,%0"
+		:"=m" (ADDR)
+		:"dIr" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+	__asm__("bsfq %1,%0"
+		:"=r" (word)
+		:"rm" (word));
+	return word;
+}
+
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) do { \
+	unsigned int __a,__d; \
+	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+	__asm__ __volatile__("wrmsr" \
+		: /* no outputs */ \
+		: "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
+
+
+#else /* ifdef __x86_64__ */
+#error "Unsupported architecture"
+#endif
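The synch_* helpers that follow are the variants whose lock prefix must survive even in a uniprocessor build, because the memory they operate on is shared with the hypervisor or other domains. As an illustration, a retry loop of the kind synch_cmpxchg() (defined just below) enables; a sketch with a hypothetical shared counter:

    /* Sketch: lock-free add on memory shared with another party,
     * built on synch_cmpxchg() from the section below. */
    static inline unsigned long shared_add(volatile unsigned long *ctr,
                                           unsigned long delta)
    {
        unsigned long old, seen;

        do {
            old  = *ctr;                                /* snapshot */
            seen = synch_cmpxchg(ctr, old, old + delta);
        } while (seen != old);                          /* raced: retry */
        return old + delta;
    }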
+
+
+/********************* common i386 and x86_64  ****************************/
+struct __synch_xchg_dummy { unsigned long a[100]; };
+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
+
+#define synch_cmpxchg(ptr, old, new) \
+((__typeof__(*(ptr)))__synch_cmpxchg((ptr), \
+	(unsigned long)(old), \
+	(unsigned long)(new), \
+	sizeof(*(ptr))))
+
+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+	unsigned long old,
+	unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+			: "=a"(prev)
+			: "q"(new), "m"(*__synch_xg(ptr)),
+			  "0"(old)
+			: "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__synch_xg(ptr)),
+			  "0"(old)
+			: "memory");
+		return prev;
+#ifdef __x86_64__
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__synch_xg(ptr)),
+			  "0"(old)
+			: "memory");
+		return prev;
+	case 8:
+		__asm__ __volatile__("lock; cmpxchgq %1,%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__synch_xg(ptr)),
+			  "0"(old)
+			: "memory");
+		return prev;
+#else
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__synch_xg(ptr)),
+			  "0"(old)
+			: "memory");
+		return prev;
+#endif
+	}
+	return old;
+}
+
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+	__asm__ __volatile__ (
+		"lock btsl %1,%0"
+		: "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+	__asm__ __volatile__ (
+		"lock btrl %1,%0"
+		: "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+	int oldbit;
+	__asm__ __volatile__ (
+		"lock btsl %2,%1\n\tsbbl %0,%0"
+		: "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+	int oldbit;
+	__asm__ __volatile__ (
+		"lock btrl %2,%1\n\tsbbl %0,%0"
+		: "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+	int oldbit;
+	__asm__ __volatile__ (
+		"btl %2,%1\n\tsbbl %0,%0"
+		: "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+	return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ?
\ + synch_const_test_bit((nr),(addr)) : \ + synch_var_test_bit((nr),(addr))) + + + +#endif /* not assembly */ +#endif /* _OS_H_ */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/x86/spinlock.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extras/mini-os/include/x86/spinlock.h Tue Aug 01 09:03:24 2006 +0200 @@ -0,0 +1,121 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#include + +/* + * Your basic SMP spinlocks, allowing only a single CPU anywhere + */ + +typedef struct { + volatile unsigned int slock; +} spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } + +#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) + +/* + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * We make no fairness assumptions. They have a cost. + */ + +#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) +#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) + +#define spin_lock_string \ + "1:\n" \ + LOCK \ + "decb %0\n\t" \ + "jns 3f\n" \ + "2:\t" \ + "rep;nop\n\t" \ + "cmpb $0,%0\n\t" \ + "jle 2b\n\t" \ + "jmp 1b\n" \ + "3:\n\t" + +#define spin_lock_string_flags \ + "1:\n" \ + LOCK \ + "decb %0\n\t" \ + "jns 4f\n\t" \ + "2:\t" \ + "testl $0x200, %1\n\t" \ + "jz 3f\n\t" \ + "#sti\n\t" \ + "3:\t" \ + "rep;nop\n\t" \ + "cmpb $0, %0\n\t" \ + "jle 3b\n\t" \ + "#cli\n\t" \ + "jmp 1b\n" \ + "4:\n\t" + +/* + * This works. Despite all the confusion. + * (except on PPro SMP or if we are using OOSTORE) + * (PPro errata 66, 92) + */ + +#define spin_unlock_string \ + "xchgb %b0, %1" \ + :"=q" (oldval), "=m" (lock->slock) \ + :"0" (oldval) : "memory" + +static inline void _raw_spin_unlock(spinlock_t *lock) +{ + char oldval = 1; + __asm__ __volatile__( + spin_unlock_string + ); +} + +static inline int _raw_spin_trylock(spinlock_t *lock) +{ + char oldval; + __asm__ __volatile__( + "xchgb %b0,%1\n" + :"=q" (oldval), "=m" (lock->slock) + :"0" (0) : "memory"); + return oldval > 0; +} + +static inline void _raw_spin_lock(spinlock_t *lock) +{ + __asm__ __volatile__( + spin_lock_string + :"=m" (lock->slock) : : "memory"); +} + +static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) +{ + __asm__ __volatile__( + spin_lock_string_flags + :"=m" (lock->slock) : "r" (flags) : "memory"); +} + +#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \ + 1 : ({ 0;});}) + +#define _spin_lock(lock) \ +do { \ + _raw_spin_lock(lock); \ +} while(0) + +#define _spin_unlock(lock) \ +do { \ + _raw_spin_unlock(lock); \ +} while (0) + + +#define spin_lock(lock) _spin_lock(lock) +#define spin_unlock(lock) _spin_unlock(lock) + +#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED + +#endif diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/x86_32/hypercall-x86_32.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extras/mini-os/include/x86_32/hypercall-x86_32.h Tue Aug 01 09:03:24 2006 +0200 @@ -0,0 +1,326 @@ +/****************************************************************************** + * hypercall-x86_32.h + * + * Copied from XenLinux. 
+ * + * Copyright (c) 2002-2004, K A Fraser + * + * This file may be distributed separately from the Linux kernel, or + * incorporated into other software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __HYPERCALL_X86_32_H__ +#define __HYPERCALL_X86_32_H__ + +#include +#include +#include +#include + +#define __STR(x) #x +#define STR(x) __STR(x) + +extern char hypercall_page[PAGE_SIZE]; + +#define _hypercall0(type, name) \ +({ \ + long __res; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res) \ + : \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall1(type, name, a1) \ +({ \ + long __res, __ign1; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=b" (__ign1) \ + : "1" ((long)(a1)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall2(type, name, a1, a2) \ +({ \ + long __res, __ign1, __ign2; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ + : "1" ((long)(a1)), "2" ((long)(a2)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall3(type, name, a1, a2, a3) \ +({ \ + long __res, __ign1, __ign2, __ign3; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ + "=d" (__ign3) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall4(type, name, a1, a2, a3, a4) \ +({ \ + long __res, __ign1, __ign2, __ign3, __ign4; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ + "=d" (__ign3), "=S" (__ign4) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)), "4" ((long)(a4)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ +({ \ + long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ + "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)), "4" ((long)(a4)), \ + "5" ((long)(a5)) \ + : "memory" ); \ + (type)__res; \ +}) + +static inline int +HYPERVISOR_set_trap_table( + trap_info_t *table) +{ + return _hypercall1(int, set_trap_table, table); +} + +static inline int +HYPERVISOR_mmu_update( + 
mmu_update_t *req, int count, int *success_count, domid_t domid) +{ + return _hypercall4(int, mmu_update, req, count, success_count, domid); +} + +static inline int +HYPERVISOR_mmuext_op( + struct mmuext_op *op, int count, int *success_count, domid_t domid) +{ + return _hypercall4(int, mmuext_op, op, count, success_count, domid); +} + +static inline int +HYPERVISOR_set_gdt( + unsigned long *frame_list, int entries) +{ + return _hypercall2(int, set_gdt, frame_list, entries); +} + +static inline int +HYPERVISOR_stack_switch( + unsigned long ss, unsigned long esp) +{ + return _hypercall2(int, stack_switch, ss, esp); +} + +static inline int +HYPERVISOR_set_callbacks( + unsigned long event_selector, unsigned long event_address, + unsigned long failsafe_selector, unsigned long failsafe_address) +{ + return _hypercall4(int, set_callbacks, + event_selector, event_address, + failsafe_selector, failsafe_address); +} + +static inline int +HYPERVISOR_fpu_taskswitch( + int set) +{ + return _hypercall1(int, fpu_taskswitch, set); +} + +static inline int +HYPERVISOR_sched_op( + int cmd, unsigned long arg) +{ + return _hypercall2(int, sched_op, cmd, arg); +} + +static inline long +HYPERVISOR_set_timer_op( + u64 timeout) +{ + unsigned long timeout_hi = (unsigned long)(timeout>>32); + unsigned long timeout_lo = (unsigned long)timeout; + return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); +} + +static inline int +HYPERVISOR_dom0_op( + dom0_op_t *dom0_op) +{ + dom0_op->interface_version = DOM0_INTERFACE_VERSION; + return _hypercall1(int, dom0_op, dom0_op); +} + +static inline int +HYPERVISOR_set_debugreg( + int reg, unsigned long value) +{ + return _hypercall2(int, set_debugreg, reg, value); +} + +static inline unsigned long +HYPERVISOR_get_debugreg( + int reg) +{ + return _hypercall1(unsigned long, get_debugreg, reg); +} + +static inline int +HYPERVISOR_update_descriptor( + u64 ma, u64 desc) +{ + return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); +} + +static inline int +HYPERVISOR_memory_op( + unsigned int cmd, void *arg) +{ + return _hypercall2(int, memory_op, cmd, arg); +} + +static inline int +HYPERVISOR_multicall( + void *call_list, int nr_calls) +{ + return _hypercall2(int, multicall, call_list, nr_calls); +} + +static inline int +HYPERVISOR_update_va_mapping( + unsigned long va, pte_t new_val, unsigned long flags) +{ + unsigned long pte_hi = 0; +#ifdef CONFIG_X86_PAE + pte_hi = new_val.pte_high; +#endif + return _hypercall4(int, update_va_mapping, va, + new_val.pte_low, pte_hi, flags); +} + +static inline int +HYPERVISOR_event_channel_op( + void *op) +{ + return _hypercall1(int, event_channel_op, op); +} + +static inline int +HYPERVISOR_xen_version( + int cmd, void *arg) +{ + return _hypercall2(int, xen_version, cmd, arg); +} + +static inline int +HYPERVISOR_console_io( + int cmd, int count, char *str) +{ + return _hypercall3(int, console_io, cmd, count, str); +} + +static inline int +HYPERVISOR_physdev_op( + void *physdev_op) +{ + return _hypercall1(int, physdev_op, physdev_op); +} + +static inline int +HYPERVISOR_grant_table_op( + unsigned int cmd, void *uop, unsigned int count) +{ + return _hypercall3(int, grant_table_op, cmd, uop, count); +} + +static inline int +HYPERVISOR_update_va_mapping_otherdomain( + unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) +{ + unsigned long pte_hi = 0; +#ifdef CONFIG_X86_PAE + pte_hi = new_val.pte_high; +#endif + return _hypercall5(int, update_va_mapping_otherdomain, va, + new_val.pte_low, pte_hi, flags, domid); 
+} + +static inline int +HYPERVISOR_vm_assist( + unsigned int cmd, unsigned int type) +{ + return _hypercall2(int, vm_assist, cmd, type); +} + +static inline int +HYPERVISOR_vcpu_op( + int cmd, int vcpuid, void *extra_args) +{ + return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); +} + +static inline int +HYPERVISOR_suspend( + unsigned long srec) +{ + return _hypercall3(int, sched_op, SCHEDOP_shutdown, + SHUTDOWN_suspend, srec); +} + +static inline int +HYPERVISOR_nmi_op( + unsigned long op, + unsigned long arg) +{ + return _hypercall2(int, nmi_op, op, arg); +} + +#endif /* __HYPERCALL_X86_32_H__ */ + +/* + * Local variables: + * c-file-style: "linux" + * indent-tabs-mode: t + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/x86_64/hypercall-x86_64.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extras/mini-os/include/x86_64/hypercall-x86_64.h Tue Aug 01 09:03:24 2006 +0200 @@ -0,0 +1,326 @@ +/****************************************************************************** + * hypercall-x86_64.h + * + * Copied from XenLinux. + * + * Copyright (c) 2002-2004, K A Fraser + * + * 64-bit updates: + * Benjamin Liu + * Jun Nakajima + * + * This file may be distributed separately from the Linux kernel, or + * incorporated into other software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef __HYPERCALL_X86_64_H__ +#define __HYPERCALL_X86_64_H__ + +#include +#include +#include + +#define __STR(x) #x +#define STR(x) __STR(x) + +extern char hypercall_page[PAGE_SIZE]; + +#define _hypercall0(type, name) \ +({ \ + long __res; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res) \ + : \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall1(type, name, a1) \ +({ \ + long __res, __ign1; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=D" (__ign1) \ + : "1" ((long)(a1)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall2(type, name, a1, a2) \ +({ \ + long __res, __ign1, __ign2; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ + : "1" ((long)(a1)), "2" ((long)(a2)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall3(type, name, a1, a2, a3) \ +({ \ + long __res, __ign1, __ign2, __ign3; \ + asm volatile ( \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ + "=d" (__ign3) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)) \ + : "memory" ); \ + (type)__res; \ +}) + +#define _hypercall4(type, name, a1, a2, a3, a4) \ +({ \ + long __res, __ign1, __ign2, __ign3; \ + asm volatile ( \ + "movq %7,%%r10; " \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ + "=d" (__ign3) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)), "g" ((long)(a4)) \ + : "memory", "r10" ); \ + (type)__res; \ +}) + +#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ +({ \ + long __res, __ign1, __ign2, __ign3; \ + asm volatile ( \ + "movq %7,%%r10; movq %8,%%r8; " \ + "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ + : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ + "=d" (__ign3) \ + : "1" ((long)(a1)), "2" ((long)(a2)), \ + "3" ((long)(a3)), "g" ((long)(a4)), \ + "g" ((long)(a5)) \ + : "memory", "r10", "r8" ); \ + (type)__res; \ +}) + +static inline int +HYPERVISOR_set_trap_table( + trap_info_t *table) +{ + return _hypercall1(int, set_trap_table, table); +} + +static inline int +HYPERVISOR_mmu_update( + mmu_update_t *req, int count, int *success_count, domid_t domid) +{ + return _hypercall4(int, mmu_update, req, count, success_count, domid); +} + +static inline int +HYPERVISOR_mmuext_op( + struct mmuext_op *op, int count, int *success_count, domid_t domid) +{ + return _hypercall4(int, mmuext_op, op, count, success_count, domid); +} + +static inline int +HYPERVISOR_set_gdt( + unsigned long *frame_list, int entries) +{ + return _hypercall2(int, set_gdt, frame_list, entries); +} + +static inline int +HYPERVISOR_stack_switch( + unsigned long ss, unsigned long esp) +{ + return _hypercall2(int, stack_switch, ss, esp); +} + +static inline int +HYPERVISOR_set_callbacks( + unsigned long event_address, unsigned long failsafe_address, + unsigned long syscall_address) +{ + return _hypercall3(int, set_callbacks, + event_address, failsafe_address, syscall_address); +} + +static inline int +HYPERVISOR_fpu_taskswitch( + int set) +{ + return _hypercall1(int, fpu_taskswitch, set); +} + +static inline int +HYPERVISOR_sched_op( + int cmd, unsigned long arg) +{ + return _hypercall2(int, sched_op, cmd, arg); +} + +static inline long +HYPERVISOR_set_timer_op( + u64 timeout) +{ + return _hypercall1(long, set_timer_op, timeout); +} + +static inline int 
+HYPERVISOR_dom0_op( + dom0_op_t *dom0_op) +{ + dom0_op->interface_version = DOM0_INTERFACE_VERSION; + return _hypercall1(int, dom0_op, dom0_op); +} + +static inline int +HYPERVISOR_set_debugreg( + int reg, unsigned long value) +{ + return _hypercall2(int, set_debugreg, reg, value); +} + +static inline unsigned long +HYPERVISOR_get_debugreg( + int reg) +{ + return _hypercall1(unsigned long, get_debugreg, reg); +} + +static inline int +HYPERVISOR_update_descriptor( + unsigned long ma, unsigned long word) +{ + return _hypercall2(int, update_descriptor, ma, word); +} + +static inline int +HYPERVISOR_memory_op( + unsigned int cmd, void *arg) +{ + return _hypercall2(int, memory_op, cmd, arg); +} + +static inline int +HYPERVISOR_multicall( + void *call_list, int nr_calls) +{ + return _hypercall2(int, multicall, call_list, nr_calls); +} + +static inline int +HYPERVISOR_update_va_mapping( + unsigned long va, pte_t new_val, unsigned long flags) +{ + return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); +} + +static inline int +HYPERVISOR_event_channel_op( + void *op) +{ + return _hypercall1(int, event_channel_op, op); +} + +static inline int +HYPERVISOR_xen_version( + int cmd, void *arg) +{ + return _hypercall2(int, xen_version, cmd, arg); +} + +static inline int +HYPERVISOR_console_io( + int cmd, int count, char *str) +{ + return _hypercall3(int, console_io, cmd, count, str); +} + +static inline int +HYPERVISOR_physdev_op( + void *physdev_op) +{ + return _hypercall1(int, physdev_op, physdev_op); +} + +static inline int +HYPERVISOR_grant_table_op( + unsigned int cmd, void *uop, unsigned int count) +{ + return _hypercall3(int, grant_table_op, cmd, uop, count); +} + +static inline int +HYPERVISOR_update_va_mapping_otherdomain( + unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) +{ + return _hypercall4(int, update_va_mapping_otherdomain, va, + new_val.pte, flags, domid); +} + +static inline int +HYPERVISOR_vm_assist( + unsigned int cmd, unsigned int type) +{ + return _hypercall2(int, vm_assist, cmd, type); +} + +static inline int +HYPERVISOR_vcpu_op( + int cmd, int vcpuid, void *extra_args) +{ + return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); +} + +static inline int +HYPERVISOR_set_segment_base( + int reg, unsigned long value) +{ + return _hypercall2(int, set_segment_base, reg, value); +} + +static inline int +HYPERVISOR_suspend( + unsigned long srec) +{ + return _hypercall3(int, sched_op, SCHEDOP_shutdown, + SHUTDOWN_suspend, srec); +} + +static inline int +HYPERVISOR_nmi_op( + unsigned long op, + unsigned long arg) +{ + return _hypercall2(int, nmi_op, op, arg); +} + +#endif /* __HYPERCALL_X86_64_H__ */ + +/* + * Local variables: + * c-file-style: "linux" + * indent-tabs-mode: t + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/x86/traps.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extras/mini-os/x86/traps.c Tue Aug 01 09:03:24 2006 +0200 @@ -0,0 +1,229 @@ + +#include +#include +#include +#include +#include +#include + +/* + * These are assembler stubs in entry.S. + * They are the actual entry points for virtual exceptions. 
+ */ +void divide_error(void); +void debug(void); +void int3(void); +void overflow(void); +void bounds(void); +void invalid_op(void); +void device_not_available(void); +void coprocessor_segment_overrun(void); +void invalid_TSS(void); +void segment_not_present(void); +void stack_segment(void); +void general_protection(void); +void page_fault(void); +void coprocessor_error(void); +void simd_coprocessor_error(void); +void alignment_check(void); +void spurious_interrupt_bug(void); +void machine_check(void); + + +void dump_regs(struct pt_regs *regs) +{ + printk("Thread: %s\n", current->name); +#ifdef __i386__ + printk("EIP: %x, EFLAGS %x.\n", regs->eip, regs->eflags); + printk("EBX: %08x ECX: %08x EDX: %08x\n", + regs->ebx, regs->ecx, regs->edx); + printk("ESI: %08x EDI: %08x EBP: %08x EAX: %08x\n", + regs->esi, regs->edi, regs->ebp, regs->eax); + printk("DS: %04x ES: %04x orig_eax: %08x, eip: %08x\n", + regs->xds, regs->xes, regs->orig_eax, regs->eip); + printk("CS: %04x EFLAGS: %08x esp: %08x ss: %04x\n", + regs->xcs, regs->eflags, regs->esp, regs->xss); +#else + printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); + printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", + regs->ss, regs->rsp, regs->eflags); + printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", + regs->rax, regs->rbx, regs->rcx); + printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", + regs->rdx, regs->rsi, regs->rdi); + printk("RBP: %016lx R08: %016lx R09: %016lx\n", + regs->rbp, regs->r8, regs->r9); + printk("R10: %016lx R11: %016lx R12: %016lx\n", + regs->r10, regs->r11, regs->r12); + printk("R13: %016lx R14: %016lx R15: %016lx\n", + regs->r13, regs->r14, regs->r15); +#endif +} + +static void do_trap(int trapnr, char *str, struct pt_regs * regs, unsigned long error_code) +{ + printk("FATAL: Unhandled Trap %d (%s), error code=0x%lx\n", trapnr, str, error_code); + printk("Regs address %p\n", regs); + dump_regs(regs); + do_exit(); +} + +#define DO_ERROR(trapnr, str, name) \ +void do_##name(struct pt_regs * regs, unsigned long error_code) \ +{ \ + do_trap(trapnr, str, regs, error_code); \ +} + +#define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \ +void do_##name(struct pt_regs * regs, unsigned long error_code) \ +{ \ + do_trap(trapnr, str, regs, error_code); \ +} + +DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip) +DO_ERROR( 3, "int3", int3) +DO_ERROR( 4, "overflow", overflow) +DO_ERROR( 5, "bounds", bounds) +DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip) +DO_ERROR( 7, "device not available", device_not_available) +DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun) +DO_ERROR(10, "invalid TSS", invalid_TSS) +DO_ERROR(11, "segment not present", segment_not_present) +DO_ERROR(12, "stack segment", stack_segment) +DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0) +DO_ERROR(18, "machine check", machine_check) + +void page_walk(unsigned long virt_address) +{ + pgentry_t *tab = (pgentry_t *)start_info.pt_base, page; + unsigned long addr = virt_address; + printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base); + +#if defined(__x86_64__) + page = tab[l4_table_offset(addr)]; + tab = pte_to_virt(page); + printk(" L4 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l4_table_offset(addr)); +#endif +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) + page = tab[l3_table_offset(addr)]; + tab = pte_to_virt(page); + printk(" L3 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l3_table_offset(addr)); +#endif + page = 
tab[l2_table_offset(addr)];
+    tab = pte_to_virt(page);
+    printk(" L2 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l2_table_offset(addr));
+
+    page = tab[l1_table_offset(addr)];
+    printk(" L1 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l1_table_offset(addr));
+
+}
+
+#define read_cr2() \
+        (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
+
+static int handling_pg_fault = 0;
+
+void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+    unsigned long addr = read_cr2();
+    /* If we are already handling a page fault, and got another one
+       that means we faulted in pagetable walk. Continuing here would cause
+       a recursive fault */
+    if (handling_pg_fault)
+    {
+        printk("Page fault in pagetable walk (access to invalid memory?).\n");
+        do_exit();
+    }
+    handling_pg_fault = 1;
+
+#if defined(__x86_64__)
+    printk("Page fault at linear address %p, rip %p, code %lx\n",
+           addr, regs->rip, error_code);
+#else
+    printk("Page fault at linear address %p, eip %p, code %lx\n",
+           addr, regs->eip, error_code);
+#endif
+
+    dump_regs(regs);
+    page_walk(addr);
+    do_exit();
+    /* We should never get here ... but still */
+    handling_pg_fault = 0;
+}
+
+void do_general_protection(struct pt_regs *regs, long error_code)
+{
+#ifdef __i386__
+    printk("GPF eip: %p, error_code=%lx\n", regs->eip, error_code);
+#else
+    printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
+#endif
+    dump_regs(regs);
+    do_exit();
+}
+
+
+void do_debug(struct pt_regs * regs)
+{
+    printk("Debug exception\n");
+#define TF_MASK 0x100
+    regs->eflags &= ~TF_MASK;
+    dump_regs(regs);
+    do_exit();
+}
+
+void do_coprocessor_error(struct pt_regs * regs)
+{
+    printk("Copro error\n");
+    dump_regs(regs);
+    do_exit();
+}
+
+void simd_math_error(void *eip)
+{
+    printk("SIMD error\n");
+}
+
+void do_simd_coprocessor_error(struct pt_regs * regs)
+{
+    printk("SIMD copro error\n");
+}
+
+void do_spurious_interrupt_bug(struct pt_regs * regs)
+{
+}
+
+/*
+ * Submit a virtual IDT to the hypervisor. This consists of tuples
+ * (interrupt vector, privilege ring, CS:EIP of handler).
+ * The 'privilege ring' field specifies the least-privileged ring that
+ * can trap to that vector using a software-interrupt instruction (INT).
+ */ +static trap_info_t trap_table[] = { + { 0, 0, __KERNEL_CS, (unsigned long)divide_error }, + { 1, 0, __KERNEL_CS, (unsigned long)debug }, + { 3, 3, __KERNEL_CS, (unsigned long)int3 }, + { 4, 3, __KERNEL_CS, (unsigned long)overflow }, + { 5, 3, __KERNEL_CS, (unsigned long)bounds }, + { 6, 0, __KERNEL_CS, (unsigned long)invalid_op }, + { 7, 0, __KERNEL_CS, (unsigned long)device_not_available }, + { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun }, + { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS }, + { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present }, + { 12, 0, __KERNEL_CS, (unsigned long)stack_segment }, + { 13, 0, __KERNEL_CS, (unsigned long)general_protection }, + { 14, 0, __KERNEL_CS, (unsigned long)page_fault }, + { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug }, + { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error }, + { 17, 0, __KERNEL_CS, (unsigned long)alignment_check }, + { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error }, + { 0, 0, 0, 0 } +}; + + + +void trap_init(void) +{ + HYPERVISOR_set_trap_table(trap_table); +} + diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/hypercall-x86_32.h --- a/extras/mini-os/include/hypercall-x86_32.h Sat Jul 29 14:05:59 2006 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,326 +0,0 @@ -/****************************************************************************** - * hypercall-x86_32.h - * - * Copied from XenLinux. - * - * Copyright (c) 2002-2004, K A Fraser - * - * This file may be distributed separately from the Linux kernel, or - * incorporated into other software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- */ - -#ifndef __HYPERCALL_X86_32_H__ -#define __HYPERCALL_X86_32_H__ - -#include -#include -#include -#include - -#define __STR(x) #x -#define STR(x) __STR(x) - -extern char hypercall_page[PAGE_SIZE]; - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res) \ - : \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res, __ign1; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=b" (__ign1) \ - : "1" ((long)(a1)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res, __ign1, __ign2; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ - : "1" ((long)(a1)), "2" ((long)(a2)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res, __ign1, __ign2, __ign3; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res, __ign1, __ign2, __ign3, __ign4; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3), "=S" (__ign4) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "4" ((long)(a4)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "4" ((long)(a4)), \ - "5" ((long)(a5)) \ - : "memory" ); \ - (type)__res; \ -}) - -static inline int -HYPERVISOR_set_trap_table( - trap_info_t *table) -{ - return _hypercall1(int, set_trap_table, table); -} - -static inline int -HYPERVISOR_mmu_update( - mmu_update_t *req, int count, int *success_count, domid_t domid) -{ - return _hypercall4(int, mmu_update, req, count, success_count, domid); -} - -static inline int -HYPERVISOR_mmuext_op( - struct mmuext_op *op, int count, int *success_count, domid_t domid) -{ - return _hypercall4(int, mmuext_op, op, count, success_count, domid); -} - -static inline int -HYPERVISOR_set_gdt( - unsigned long *frame_list, int entries) -{ - return _hypercall2(int, set_gdt, frame_list, entries); -} - -static inline int -HYPERVISOR_stack_switch( - unsigned long ss, unsigned long esp) -{ - return _hypercall2(int, stack_switch, ss, esp); -} - -static inline int -HYPERVISOR_set_callbacks( - unsigned long event_selector, unsigned long event_address, - unsigned long failsafe_selector, unsigned long failsafe_address) -{ - return _hypercall4(int, set_callbacks, - event_selector, event_address, - failsafe_selector, failsafe_address); -} - -static inline int -HYPERVISOR_fpu_taskswitch( - int set) -{ - return _hypercall1(int, fpu_taskswitch, set); -} - -static inline int -HYPERVISOR_sched_op( - int cmd, unsigned long arg) -{ - return _hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op( - u64 timeout) -{ - unsigned long timeout_hi = 
(unsigned long)(timeout>>32); - unsigned long timeout_lo = (unsigned long)timeout; - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); -} - -static inline int -HYPERVISOR_dom0_op( - dom0_op_t *dom0_op) -{ - dom0_op->interface_version = DOM0_INTERFACE_VERSION; - return _hypercall1(int, dom0_op, dom0_op); -} - -static inline int -HYPERVISOR_set_debugreg( - int reg, unsigned long value) -{ - return _hypercall2(int, set_debugreg, reg, value); -} - -static inline unsigned long -HYPERVISOR_get_debugreg( - int reg) -{ - return _hypercall1(unsigned long, get_debugreg, reg); -} - -static inline int -HYPERVISOR_update_descriptor( - u64 ma, u64 desc) -{ - return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); -} - -static inline int -HYPERVISOR_memory_op( - unsigned int cmd, void *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -HYPERVISOR_multicall( - void *call_list, int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - -static inline int -HYPERVISOR_update_va_mapping( - unsigned long va, pte_t new_val, unsigned long flags) -{ - unsigned long pte_hi = 0; -#ifdef CONFIG_X86_PAE - pte_hi = new_val.pte_high; -#endif - return _hypercall4(int, update_va_mapping, va, - new_val.pte_low, pte_hi, flags); -} - -static inline int -HYPERVISOR_event_channel_op( - void *op) -{ - return _hypercall1(int, event_channel_op, op); -} - -static inline int -HYPERVISOR_xen_version( - int cmd, void *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -HYPERVISOR_console_io( - int cmd, int count, char *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -HYPERVISOR_physdev_op( - void *physdev_op) -{ - return _hypercall1(int, physdev_op, physdev_op); -} - -static inline int -HYPERVISOR_grant_table_op( - unsigned int cmd, void *uop, unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -static inline int -HYPERVISOR_update_va_mapping_otherdomain( - unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) -{ - unsigned long pte_hi = 0; -#ifdef CONFIG_X86_PAE - pte_hi = new_val.pte_high; -#endif - return _hypercall5(int, update_va_mapping_otherdomain, va, - new_val.pte_low, pte_hi, flags, domid); -} - -static inline int -HYPERVISOR_vm_assist( - unsigned int cmd, unsigned int type) -{ - return _hypercall2(int, vm_assist, cmd, type); -} - -static inline int -HYPERVISOR_vcpu_op( - int cmd, int vcpuid, void *extra_args) -{ - return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); -} - -static inline int -HYPERVISOR_suspend( - unsigned long srec) -{ - return _hypercall3(int, sched_op, SCHEDOP_shutdown, - SHUTDOWN_suspend, srec); -} - -static inline int -HYPERVISOR_nmi_op( - unsigned long op, - unsigned long arg) -{ - return _hypercall2(int, nmi_op, op, arg); -} - -#endif /* __HYPERCALL_X86_32_H__ */ - -/* - * Local variables: - * c-file-style: "linux" - * indent-tabs-mode: t - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/hypercall-x86_64.h --- a/extras/mini-os/include/hypercall-x86_64.h Sat Jul 29 14:05:59 2006 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,326 +0,0 @@ -/****************************************************************************** - * hypercall-x86_64.h - * - * Copied from XenLinux. 
- * - * Copyright (c) 2002-2004, K A Fraser - * - * 64-bit updates: - * Benjamin Liu - * Jun Nakajima - * - * This file may be distributed separately from the Linux kernel, or - * incorporated into other software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef __HYPERCALL_X86_64_H__ -#define __HYPERCALL_X86_64_H__ - -#include -#include -#include - -#define __STR(x) #x -#define STR(x) __STR(x) - -extern char hypercall_page[PAGE_SIZE]; - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res) \ - : \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res, __ign1; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=D" (__ign1) \ - : "1" ((long)(a1)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res, __ign1, __ign2; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ - : "1" ((long)(a1)), "2" ((long)(a2)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res, __ign1, __ign2, __ign3; \ - asm volatile ( \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ - "=d" (__ign3) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res, __ign1, __ign2, __ign3; \ - asm volatile ( \ - "movq %7,%%r10; " \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ - "=d" (__ign3) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "g" ((long)(a4)) \ - : "memory", "r10" ); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res, __ign1, __ign2, __ign3; \ - asm volatile ( \ - "movq %7,%%r10; movq %8,%%r8; " \ - "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\ - : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ - "=d" (__ign3) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "g" ((long)(a4)), \ - "g" ((long)(a5)) \ - : "memory", "r10", "r8" ); \ - (type)__res; \ -}) - -static inline int -HYPERVISOR_set_trap_table( - trap_info_t *table) -{ - return _hypercall1(int, set_trap_table, 
table); -} - -static inline int -HYPERVISOR_mmu_update( - mmu_update_t *req, int count, int *success_count, domid_t domid) -{ - return _hypercall4(int, mmu_update, req, count, success_count, domid); -} - -static inline int -HYPERVISOR_mmuext_op( - struct mmuext_op *op, int count, int *success_count, domid_t domid) -{ - return _hypercall4(int, mmuext_op, op, count, success_count, domid); -} - -static inline int -HYPERVISOR_set_gdt( - unsigned long *frame_list, int entries) -{ - return _hypercall2(int, set_gdt, frame_list, entries); -} - -static inline int -HYPERVISOR_stack_switch( - unsigned long ss, unsigned long esp) -{ - return _hypercall2(int, stack_switch, ss, esp); -} - -static inline int -HYPERVISOR_set_callbacks( - unsigned long event_address, unsigned long failsafe_address, - unsigned long syscall_address) -{ - return _hypercall3(int, set_callbacks, - event_address, failsafe_address, syscall_address); -} - -static inline int -HYPERVISOR_fpu_taskswitch( - int set) -{ - return _hypercall1(int, fpu_taskswitch, set); -} - -static inline int -HYPERVISOR_sched_op( - int cmd, unsigned long arg) -{ - return _hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op( - u64 timeout) -{ - return _hypercall1(long, set_timer_op, timeout); -} - -static inline int -HYPERVISOR_dom0_op( - dom0_op_t *dom0_op) -{ - dom0_op->interface_version = DOM0_INTERFACE_VERSION; - return _hypercall1(int, dom0_op, dom0_op); -} - -static inline int -HYPERVISOR_set_debugreg( - int reg, unsigned long value) -{ - return _hypercall2(int, set_debugreg, reg, value); -} - -static inline unsigned long -HYPERVISOR_get_debugreg( - int reg) -{ - return _hypercall1(unsigned long, get_debugreg, reg); -} - -static inline int -HYPERVISOR_update_descriptor( - unsigned long ma, unsigned long word) -{ - return _hypercall2(int, update_descriptor, ma, word); -} - -static inline int -HYPERVISOR_memory_op( - unsigned int cmd, void *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -HYPERVISOR_multicall( - void *call_list, int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - -static inline int -HYPERVISOR_update_va_mapping( - unsigned long va, pte_t new_val, unsigned long flags) -{ - return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); -} - -static inline int -HYPERVISOR_event_channel_op( - void *op) -{ - return _hypercall1(int, event_channel_op, op); -} - -static inline int -HYPERVISOR_xen_version( - int cmd, void *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -HYPERVISOR_console_io( - int cmd, int count, char *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -HYPERVISOR_physdev_op( - void *physdev_op) -{ - return _hypercall1(int, physdev_op, physdev_op); -} - -static inline int -HYPERVISOR_grant_table_op( - unsigned int cmd, void *uop, unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -static inline int -HYPERVISOR_update_va_mapping_otherdomain( - unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) -{ - return _hypercall4(int, update_va_mapping_otherdomain, va, - new_val.pte, flags, domid); -} - -static inline int -HYPERVISOR_vm_assist( - unsigned int cmd, unsigned int type) -{ - return _hypercall2(int, vm_assist, cmd, type); -} - -static inline int -HYPERVISOR_vcpu_op( - int cmd, int vcpuid, void *extra_args) -{ - return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); -} - -static inline int 
-HYPERVISOR_set_segment_base( - int reg, unsigned long value) -{ - return _hypercall2(int, set_segment_base, reg, value); -} - -static inline int -HYPERVISOR_suspend( - unsigned long srec) -{ - return _hypercall3(int, sched_op, SCHEDOP_shutdown, - SHUTDOWN_suspend, srec); -} - -static inline int -HYPERVISOR_nmi_op( - unsigned long op, - unsigned long arg) -{ - return _hypercall2(int, nmi_op, op, arg); -} - -#endif /* __HYPERCALL_X86_64_H__ */ - -/* - * Local variables: - * c-file-style: "linux" - * indent-tabs-mode: t - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/os.h --- a/extras/mini-os/include/os.h Sat Jul 29 14:05:59 2006 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,561 +0,0 @@ -/****************************************************************************** - * os.h - * - * random collection of macros and definition - */ - -#ifndef _OS_H_ -#define _OS_H_ - -#if __GNUC__ == 2 && __GNUC_MINOR__ < 96 -#define __builtin_expect(x, expected_value) (x) -#endif -#define unlikely(x) __builtin_expect((x),0) - -#define smp_processor_id() 0 - - -#ifndef __ASSEMBLY__ -#include -#include - -extern void do_exit(void); -#define BUG do_exit - -#endif -#include - - -#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0)) - -#define __KERNEL_CS FLAT_KERNEL_CS -#define __KERNEL_DS FLAT_KERNEL_DS -#define __KERNEL_SS FLAT_KERNEL_SS - -#define TRAP_divide_error 0 -#define TRAP_debug 1 -#define TRAP_nmi 2 -#define TRAP_int3 3 -#define TRAP_overflow 4 -#define TRAP_bounds 5 -#define TRAP_invalid_op 6 -#define TRAP_no_device 7 -#define TRAP_double_fault 8 -#define TRAP_copro_seg 9 -#define TRAP_invalid_tss 10 -#define TRAP_no_segment 11 -#define TRAP_stack_error 12 -#define TRAP_gp_fault 13 -#define TRAP_page_fault 14 -#define TRAP_spurious_int 15 -#define TRAP_copro_error 16 -#define TRAP_alignment_check 17 -#define TRAP_machine_check 18 -#define TRAP_simd_error 19 -#define TRAP_deferred_nmi 31 - -/* Everything below this point is not included by assembler (.S) files. */ -#ifndef __ASSEMBLY__ - -extern shared_info_t *HYPERVISOR_shared_info; - -void trap_init(void); - - - -/* - * The use of 'barrier' in the following reflects their use as local-lock - * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following - * critical operations are executed. All critical operations must complete - * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also - * includes these barriers, for example. 
- */
-
-#define __cli() \
-do { \
- vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
- _vcpu->evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
- vcpu_info_t *_vcpu; \
- barrier(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
- _vcpu->evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
- vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
- (x) = _vcpu->evtchn_upcall_mask; \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
- vcpu_info_t *_vcpu; \
- barrier(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
- if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
- }\
-} while (0)
-
-#define safe_halt() ((void)0)
-
-#define __save_and_cli(x) \
-do { \
- vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
- (x) = _vcpu->evtchn_upcall_mask; \
- _vcpu->evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_save_flags(x) __save_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
-#define irqs_disabled() \
- HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
-
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#if defined(__i386__)
-#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-#elif defined(__x86_64__)
-#define mb() __asm__ __volatile__ ("mfence":::"memory")
-#define rmb() __asm__ __volatile__ ("lfence":::"memory")
-#define wmb() __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
-#endif
-
-
-#define LOCK_PREFIX ""
-#define LOCK ""
-#define ADDR (*(volatile long *) addr)
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-
-/************************** i386 *******************************/
-#if defined (__i386__)
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
-{
- return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"Ir" (nr));
- return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long * addr)
-{
- __asm__ __volatile__( LOCK
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long * addr)
-{
- __asm__ __volatile__( LOCK
- "btrl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-#define ADDR (*(volatile long *) addr)
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-
-
-#elif defined(__x86_64__)/* ifdef __i386__ */
-/************************** x86_64 *******************************/
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-#define __xg(x) ((volatile long *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"dIr" (nr));
- return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr) : "memory");
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr));
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */ -static __inline__ unsigned long __ffs(unsigned long word) -{ - __asm__("bsfq %1,%0" - :"=r" (word) - :"rm" (word)); - return word; -} - -#define ADDR (*(volatile long *) addr) - -#define rdtscll(val) do { \ - unsigned int __a,__d; \ - asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ - (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ -} while(0) - -#define wrmsr(msr,val1,val2) \ - __asm__ __volatile__("wrmsr" \ - : /* no outputs */ \ - : "c" (msr), "a" (val1), "d" (val2)) - -#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32) - - -#else /* ifdef __x86_64__ */ -#error "Unsupported architecture" -#endif - - -/********************* common i386 and x86_64 ****************************/ -struct __synch_xchg_dummy { unsigned long a[100]; }; -#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) - -#define synch_cmpxchg(ptr, old, new) \ -((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ - (unsigned long)(old), \ - (unsigned long)(new), \ - sizeof(*(ptr)))) - -static inline unsigned long __synch_cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - unsigned long prev; - switch (size) { - case 1: - __asm__ __volatile__("lock; cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__synch_xg(ptr)), - "0"(old) - : "memory"); - return prev; - case 2: - __asm__ __volatile__("lock; cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__synch_xg(ptr)), - "0"(old) - : "memory"); - return prev; -#ifdef __x86_64__ - case 4: - __asm__ __volatile__("lock; cmpxchgl %k1,%2" - : "=a"(prev) - : "r"(new), "m"(*__synch_xg(ptr)), - "0"(old) - : "memory"); - return prev; - case 8: - __asm__ __volatile__("lock; cmpxchgq %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__synch_xg(ptr)), - "0"(old) - : "memory"); - return prev; -#else - case 4: - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__synch_xg(ptr)), - "0"(old) - : "memory"); - return prev; -#endif - } - return old; -} - - -static __inline__ void synch_set_bit(int nr, volatile void * addr) -{ - __asm__ __volatile__ ( - "lock btsl %1,%0" - : "=m" (ADDR) : "Ir" (nr) : "memory" ); -} - -static __inline__ void synch_clear_bit(int nr, volatile void * addr) -{ - __asm__ __volatile__ ( - "lock btrl %1,%0" - : "=m" (ADDR) : "Ir" (nr) : "memory" ); -} - -static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) -{ - int oldbit; - __asm__ __volatile__ ( - "lock btsl %2,%1\n\tsbbl %0,%0" - : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory"); - return oldbit; -} - -static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) -{ - int oldbit; - __asm__ __volatile__ ( - "lock btrl %2,%1\n\tsbbl %0,%0" - : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory"); - return oldbit; -} - -static __inline__ int synch_const_test_bit(int nr, const volatile void * addr) -{ - return ((1UL << (nr & 31)) & - (((const volatile unsigned int *) addr)[nr >> 5])) != 0; -} - -static __inline__ int synch_var_test_bit(int nr, volatile void * addr) -{ - int oldbit; - __asm__ __volatile__ ( - "btl %2,%1\n\tsbbl %0,%0" - : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) ); - return oldbit; -} - -#define synch_test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? 
\ - synch_const_test_bit((nr),(addr)) : \ - synch_var_test_bit((nr),(addr))) - - - -#endif /* not assembly */ -#endif /* _OS_H_ */ diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/include/spinlock.h --- a/extras/mini-os/include/spinlock.h Sat Jul 29 14:05:59 2006 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,121 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#include - -/* - * Your basic SMP spinlocks, allowing only a single CPU anywhere - */ - -typedef struct { - volatile unsigned int slock; -} spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } - -#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) - -/* - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. - */ - -#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) -#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) - -#define spin_lock_string \ - "1:\n" \ - LOCK \ - "decb %0\n\t" \ - "jns 3f\n" \ - "2:\t" \ - "rep;nop\n\t" \ - "cmpb $0,%0\n\t" \ - "jle 2b\n\t" \ - "jmp 1b\n" \ - "3:\n\t" - -#define spin_lock_string_flags \ - "1:\n" \ - LOCK \ - "decb %0\n\t" \ - "jns 4f\n\t" \ - "2:\t" \ - "testl $0x200, %1\n\t" \ - "jz 3f\n\t" \ - "#sti\n\t" \ - "3:\t" \ - "rep;nop\n\t" \ - "cmpb $0, %0\n\t" \ - "jle 3b\n\t" \ - "#cli\n\t" \ - "jmp 1b\n" \ - "4:\n\t" - -/* - * This works. Despite all the confusion. - * (except on PPro SMP or if we are using OOSTORE) - * (PPro errata 66, 92) - */ - -#define spin_unlock_string \ - "xchgb %b0, %1" \ - :"=q" (oldval), "=m" (lock->slock) \ - :"0" (oldval) : "memory" - -static inline void _raw_spin_unlock(spinlock_t *lock) -{ - char oldval = 1; - __asm__ __volatile__( - spin_unlock_string - ); -} - -static inline int _raw_spin_trylock(spinlock_t *lock) -{ - char oldval; - __asm__ __volatile__( - "xchgb %b0,%1\n" - :"=q" (oldval), "=m" (lock->slock) - :"0" (0) : "memory"); - return oldval > 0; -} - -static inline void _raw_spin_lock(spinlock_t *lock) -{ - __asm__ __volatile__( - spin_lock_string - :"=m" (lock->slock) : : "memory"); -} - -static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) -{ - __asm__ __volatile__( - spin_lock_string_flags - :"=m" (lock->slock) : "r" (flags) : "memory"); -} - -#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \ - 1 : ({ 0;});}) - -#define _spin_lock(lock) \ -do { \ - _raw_spin_lock(lock); \ -} while(0) - -#define _spin_unlock(lock) \ -do { \ - _raw_spin_unlock(lock); \ -} while (0) - - -#define spin_lock(lock) _spin_lock(lock) -#define spin_unlock(lock) _spin_unlock(lock) - -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED - -#endif diff -r d2bf1a7cc131 -r 287178fbc320 extras/mini-os/traps.c --- a/extras/mini-os/traps.c Sat Jul 29 14:05:59 2006 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,229 +0,0 @@ - -#include -#include -#include -#include -#include -#include - -/* - * These are assembler stubs in entry.S. - * They are the actual entry points for virtual exceptions. 
- */ -void divide_error(void); -void debug(void); -void int3(void); -void overflow(void); -void bounds(void); -void invalid_op(void); -void device_not_available(void); -void coprocessor_segment_overrun(void); -void invalid_TSS(void); -void segment_not_present(void); -void stack_segment(void); -void general_protection(void); -void page_fault(void); -void coprocessor_error(void); -void simd_coprocessor_error(void); -void alignment_check(void); -void spurious_interrupt_bug(void); -void machine_check(void); - - -void dump_regs(struct pt_regs *regs) -{ - printk("Thread: %s\n", current->name); -#ifdef __i386__ - printk("EIP: %x, EFLAGS %x.\n", regs->eip, regs->eflags); - printk("EBX: %08x ECX: %08x EDX: %08x\n", - regs->ebx, regs->ecx, regs->edx); - printk("ESI: %08x EDI: %08x EBP: %08x EAX: %08x\n", - regs->esi, regs->edi, regs->ebp, regs->eax); - printk("DS: %04x ES: %04x orig_eax: %08x, eip: %08x\n", - regs->xds, regs->xes, regs->orig_eax, regs->eip); - printk("CS: %04x EFLAGS: %08x esp: %08x ss: %04x\n", - regs->xcs, regs->eflags, regs->esp, regs->xss); -#else - printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); - printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", - regs->ss, regs->rsp, regs->eflags); - printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", - regs->rax, regs->rbx, regs->rcx); - printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", - regs->rdx, regs->rsi, regs->rdi); - printk("RBP: %016lx R08: %016lx R09: %016lx\n", - regs->rbp, regs->r8, regs->r9); - printk("R10: %016lx R11: %016lx R12: %016lx\n", - regs->r10, regs->r11, regs->r12); - printk("R13: %016lx R14: %016lx R15: %016lx\n", - regs->r13, regs->r14, regs->r15); -#endif -} - -static void do_trap(int trapnr, char *str, struct pt_regs * regs, unsigned long error_code) -{ - printk("FATAL: Unhandled Trap %d (%s), error code=0x%lx\n", trapnr, str, error_code); - printk("Regs address %p\n", regs); - dump_regs(regs); - do_exit(); -} - -#define DO_ERROR(trapnr, str, name) \ -void do_##name(struct pt_regs * regs, unsigned long error_code) \ -{ \ - do_trap(trapnr, str, regs, error_code); \ -} - -#define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \ -void do_##name(struct pt_regs * regs, unsigned long error_code) \ -{ \ - do_trap(trapnr, str, regs, error_code); \ -} - -DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip) -DO_ERROR( 3, "int3", int3) -DO_ERROR( 4, "overflow", overflow) -DO_ERROR( 5, "bounds", bounds) -DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip) -DO_ERROR( 7, "device not available", device_not_available) -DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun) -DO_ERROR(10, "invalid TSS", invalid_TSS) -DO_ERROR(11, "segment not present", segment_not_present) -DO_ERROR(12, "stack segment", stack_segment) -DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0) -DO_ERROR(18, "machine check", machine_check) - -void page_walk(unsigned long virt_address) -{ - pgentry_t *tab = (pgentry_t *)start_info.pt_base, page; - unsigned long addr = virt_address; - printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base); - -#if defined(__x86_64__) - page = tab[l4_table_offset(addr)]; - tab = pte_to_virt(page); - printk(" L4 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l4_table_offset(addr)); -#endif -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) - page = tab[l3_table_offset(addr)]; - tab = pte_to_virt(page); - printk(" L3 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l3_table_offset(addr)); -#endif - page = 
tab[l2_table_offset(addr)];
- tab = pte_to_virt(page);
- printk(" L2 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l2_table_offset(addr));
-
- page = tab[l1_table_offset(addr)];
- printk(" L1 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l1_table_offset(addr));
-
-}
-
-#define read_cr2() \
- (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
-
-static int handling_pg_fault = 0;
-
-void do_page_fault(struct pt_regs *regs, unsigned long error_code)
-{
- unsigned long addr = read_cr2();
- /* If we are already handling a page fault, and got another one
- that means we faulted in the pagetable walk. Continuing here would cause
- a recursive fault */
- if(handling_pg_fault)
- {
- printk("Page fault in pagetable walk (access to invalid memory?).\n");
- do_exit();
- }
- handling_pg_fault = 1;
-
-#if defined(__x86_64__)
- printk("Page fault at linear address %p, rip %p, code %lx\n",
- addr, regs->rip, error_code);
-#else
- printk("Page fault at linear address %p, eip %p, code %lx\n",
- addr, regs->eip, error_code);
-#endif
-
- dump_regs(regs);
- page_walk(addr);
- do_exit();
- /* We should never get here ... but still */
- handling_pg_fault = 0;
-}
-
-void do_general_protection(struct pt_regs *regs, long error_code)
-{
-#ifdef __i386__
- printk("GPF eip: %p, error_code=%lx\n", regs->eip, error_code);
-#else
- printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
-#endif
- dump_regs(regs);
- do_exit();
-}
-
-
-void do_debug(struct pt_regs * regs)
-{
- printk("Debug exception\n");
-#define TF_MASK 0x100
- regs->eflags &= ~TF_MASK;
- dump_regs(regs);
- do_exit();
-}
-
-void do_coprocessor_error(struct pt_regs * regs)
-{
- printk("Copro error\n");
- dump_regs(regs);
- do_exit();
-}
-
-void simd_math_error(void *eip)
-{
- printk("SIMD error\n");
-}
-
-void do_simd_coprocessor_error(struct pt_regs * regs)
-{
- printk("SIMD copro error\n");
-}
-
-void do_spurious_interrupt_bug(struct pt_regs * regs)
-{
-}
-
-/*
- * Submit a virtual IDT to the hypervisor. This consists of tuples
- * (interrupt vector, privilege ring, CS:EIP of handler).
- * The 'privilege ring' field specifies the least-privileged ring that
- * can trap to that vector using a software-interrupt instruction (INT).
- */
-static trap_info_t trap_table[] = {
- { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
- { 1, 0, __KERNEL_CS, (unsigned long)debug },
- { 3, 3, __KERNEL_CS, (unsigned long)int3 },
- { 4, 3, __KERNEL_CS, (unsigned long)overflow },
- { 5, 3, __KERNEL_CS, (unsigned long)bounds },
- { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
- { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
- { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
- { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
- { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
- { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
- { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
- { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
- { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
- { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
- { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
- { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
- { 0, 0, 0, 0 }
-};
-
-
-
-void trap_init(void)
-{
- HYPERVISOR_set_trap_table(trap_table);
-}
-
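Taken together, trap_init() in traps.c and the wrappers in the hypercall header show how a guest routes exceptions and events through Xen. A minimal sketch of how these pieces compose, assuming the headers from this patch are in scope; example_early_init() is illustrative only and is not part of this changeset:

/* Sketch only: wires together interfaces defined in this patch. */
void example_early_init(void)
{
    /* Hand the virtual IDT to Xen. trap_init() wraps
     * HYPERVISOR_set_trap_table(), i.e. the _hypercall1() macro. */
    trap_init();

    /* Unmask event delivery. __sti() clears evtchn_upcall_mask and,
     * after a compiler barrier, re-checks evtchn_upcall_pending so an
     * event that arrived while masked still triggers an upcall via
     * force_evtchn_callback(). */
    __sti();
}

The unmask-then-check order inside __sti() is what closes the race window noted in its comment: an event raised between clearing the mask and the check would otherwise be delivered only at the next hypercall.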