diff --git a/Makefile b/Makefile index 8b0b42f..d8d1dbb 100644 --- a/Makefile +++ b/Makefile @@ -303,7 +303,8 @@ LINUXINCLUDE := -Iinclude \ $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \ -include include/linux/autoconf.h -CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) +# CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) +CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) -D _XEN_SPIN_LOCK CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -Wstrict-prototypes -Wundef -Werror-implicit-function-declaration -fno-delete-null-pointer-checks diff --git a/include/asm-i386/mach-xen/asm/hypervisor.h b/include/asm-i386/mach-xen/asm/hypervisor.h index 89cde62..a3806f1 100644 --- a/include/asm-i386/mach-xen/asm/hypervisor.h +++ b/include/asm-i386/mach-xen/asm/hypervisor.h @@ -143,6 +143,15 @@ HYPERVISOR_yield( } static inline int +HYPERVISOR_yield_to(uint vcpu) +{ + struct sched_yield_to yield_to = { .version = 1, .vcpu_id = vcpu }; + int rc = HYPERVISOR_sched_op(SCHEDOP_yield_to, &yield_to); + /* TBD: compat */ + return rc; +} + +static inline int HYPERVISOR_block( void) { diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index a8e3d89..c76e20f 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -16,6 +16,81 @@ * (the type definitions are in asm/spinlock_types.h) */ +#ifdef _XEN_SPIN_LOCK +#include <asm/hypervisor.h> + +#define __raw_spin_is_locked(x) \ + (*(volatile signed int *)(&(x)->slock) >= 0) + +static inline int _attempt_raw_spin_lock(raw_spinlock_t *lock) +{ + const int COUNTMAX = 10000, myid=read_pda(cpunumber); + int oldval; + + asm volatile + ("1: movsxl %1, %%rax \n" + " cmpq $0, %%rax \n" + " jge 4f \n" + "2: \n" + LOCK_PREFIX " cmpxchgl %k2, %1 \n" + " jnz 4f \n" + "3: /* exit */ \n" + LOCK_SECTION_START("") + "4: xor %%rdx, %%rdx \n" + "6: inc %%rdx \n" + " cmpl %k3, %%edx \n" + " jge 3b \n" + " pause \n" + " movsxl %1, %%rax \n" + " cmpq $0, %%rax \n" + " jge 6b \n" + " jmp 2b \n" + LOCK_SECTION_END 
+ + : "=&a" (oldval) + : "m" (lock->slock), "c" (myid), "g" (COUNTMAX) + : "rdx", "memory", "cc" + ); + return oldval; +} + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + int rc, old_lock_holder; + + do { + old_lock_holder = _attempt_raw_spin_lock(lock); + + if (old_lock_holder >= 0) + if ((rc=HYPERVISOR_yield_to(old_lock_holder)) != 0) + printk("XEN: Yield failed. rc:%d\n", rc); + } while (old_lock_holder != -1); +} + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + int oldval, myid = read_pda(cpunumber); + + __asm__ __volatile__ ( + "movl $-1, %%eax \n" + LOCK_PREFIX " cmpxchgl %k2, %1 \n" + : "=&a" (oldval) + : "m" (lock->slock), "c" (myid) + : "memory", "cc" + ); + + return (oldval == -1); +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + __asm__ __volatile__ ("movl $-1, %0" : "=m"(lock->slock) : : "memory"); +} + +#else + #define __raw_spin_is_locked(x) \ (*(volatile signed int *)(&(x)->slock) <= 0) @@ -64,6 +139,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } +#endif + #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) @@ -124,4 +201,5 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) : "=m" (rw->lock) : : "memory"); } + #endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h index 59efe84..6fb8da0 100644 --- a/include/asm-x86_64/spinlock_types.h +++ b/include/asm-x86_64/spinlock_types.h @@ -9,7 +9,11 @@ typedef struct { volatile unsigned int slock; } raw_spinlock_t; +#ifdef _XEN_SPIN_LOCK +#define __RAW_SPIN_LOCK_UNLOCKED { -1 } +#else #define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#endif typedef struct { volatile unsigned int lock; diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index abf11cc..dc60001 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h 
@@ -90,6 +90,17 @@ DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ + +/* + * Voluntarily yield the CPU to another given vcpu + * @arg == vcpu info. + */ +#define SCHEDOP_yield_to 5 +struct sched_yield_to { + unsigned int version; + unsigned int vcpu_id; +}; + #endif /* __XEN_PUBLIC_SCHED_H__ */ /*