[Xen-changelog] [xen-unstable] Simplify spinlock code and re-enable IRQs where possible when spinning.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Simplify spinlock code and re-enable IRQs where possible when spinning.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 07 Apr 2009 23:02:27 -0700
Delivery-date: Tue, 07 Apr 2009 23:05:37 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1238508239 -3600
# Node ID d669f5d1f87690c9acd483ac1c2bd7395e81c9e3
# Parent  909bb1245930f493372e080574349d95879f4c52
Simplify spinlock code and re-enable IRQs where possible when spinning.

Based on a patch by Juergen Gross.
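
For readers skimming the diff below: the generic _spin_lock*() paths are
rewritten as a test-and-test-and-set loop over _raw_spin_trylock() and
_raw_spin_is_locked(), which lets the IRQ-disabling variants re-enable
interrupts while they spin. A minimal sketch of the resulting pattern
(the wrapper name here is illustrative only; the raw helpers and
cpu_relax() are the existing per-architecture primitives):

  static void sketch_spin_lock_irq(spinlock_t *lock)
  {
      local_irq_disable();
      while ( !_raw_spin_trylock(&lock->raw) )
      {
          /* Failed to take the lock: allow pending interrupts to be
           * serviced while we wait for it to look free again. */
          local_irq_enable();
          while ( _raw_spin_is_locked(&lock->raw) )
              cpu_relax();   /* read-only spin; easy on the bus */
          local_irq_disable();
      }
  }

_spin_lock_irqsave() follows the same shape, using local_irq_save()/
local_irq_restore(flags) around the inner wait.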

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/spinlock.c                         |   21 ++++-
 xen/include/asm-ia64/linux-xen/asm/spinlock.h |  105 --------------------------
 xen/include/asm-x86/spinlock.h                |   13 ---
 3 files changed, 18 insertions(+), 121 deletions(-)

diff -r 909bb1245930 -r d669f5d1f876 xen/common/spinlock.c
--- a/xen/common/spinlock.c     Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/common/spinlock.c     Tue Mar 31 15:03:59 2009 +0100
@@ -2,6 +2,7 @@
 #include <xen/irq.h>
 #include <xen/smp.h>
 #include <xen/spinlock.h>
+#include <asm/processor.h>
 
 #ifndef NDEBUG
 
@@ -43,7 +44,9 @@ void _spin_lock(spinlock_t *lock)
 void _spin_lock(spinlock_t *lock)
 {
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
 }
 
 void _spin_lock_irq(spinlock_t *lock)
@@ -51,7 +54,13 @@ void _spin_lock_irq(spinlock_t *lock)
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        local_irq_enable();
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
 }
 
 unsigned long _spin_lock_irqsave(spinlock_t *lock)
@@ -59,7 +68,13 @@ unsigned long _spin_lock_irqsave(spinloc
     unsigned long flags;
     local_irq_save(flags);
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        local_irq_restore(flags);
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_save(flags);
+    }
     return flags;
 }
 
diff -r 909bb1245930 -r d669f5d1f876 xen/include/asm-ia64/linux-xen/asm/spinlock.h
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Tue Mar 31 15:03:59 2009 +0100
@@ -21,111 +21,9 @@
 
 typedef struct {
        volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-#ifdef DEBUG_SPINLOCK
-       void *locker;
-#endif
 } raw_spinlock_t;
 
-#ifdef XEN
-#ifdef DEBUG_SPINLOCK
-#define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0, NULL }
-#else
 #define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0 }
-#endif
-#else
-#define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0 }
-#endif
-
-#ifdef ASM_SUPPORTED
-/*
- * Try to get the lock.  If we fail to get the lock, make a non-standard call to
- * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
- */
-
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
-
-static inline void
-_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
-{
-       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
-       /* don't use brl on Itanium... */
-       asm volatile ("{\n\t"
-                     "  mov ar.ccv = r0\n\t"
-                     "  mov r28 = ip\n\t"
-                     "  mov r30 = 1;;\n\t"
-                     "}\n\t"
-                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
-                     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov b6 = r29;;\n\t"
-                     "mov r27=%2\n\t"
-                     "(p14) br.cond.spnt.many b6"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-       asm volatile ("{\n\t"
-                     "  mov ar.ccv = r0\n\t"
-                     "  mov r28 = ip\n\t"
-                     "  mov r30 = 1;;\n\t"
-                     "}\n\t"
-                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov r27=%2\n\t"
-                     "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
-       /* don't use brl on Itanium... */
-       /* mis-declare, so we get the entry-point, not it's function descriptor: */
-       asm volatile ("mov r30 = 1\n\t"
-                     "mov r27=%2\n\t"
-                     "mov ar.ccv = r0;;\n\t"
-                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
-                     "movl r29 = ia64_spinlock_contention;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov b6 = r29;;\n\t"
-                     "(p14) br.call.spnt.many b6 = b6"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-       asm volatile ("mov r30 = 1\n\t"
-                     "mov r27=%2\n\t"
-                     "mov ar.ccv = r0;;\n\t"
-                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
-
-#ifdef DEBUG_SPINLOCK
-       asm volatile ("mov %0=ip" : "=r" (lock->locker));
-#endif
-}
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
-#else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)                                              \
-do {                                                                    \
-       __u32 *ia64_spinlock_ptr = (__u32 *) (x);                        \
-       __u64 ia64_spinlock_val;                                         \
-       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);  \
-       if (unlikely(ia64_spinlock_val)) {                               \
-               do {                                                     \
-                       while (*ia64_spinlock_ptr)                       \
-                               ia64_barrier();                          \
-                       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
-               } while (ia64_spinlock_val);                             \
-       }                                                                \
-} while (0)
-#endif /* !ASM_SUPPORTED */
 
 #define _raw_spin_is_locked(x) ((x)->lock != 0)
 #define _raw_spin_unlock(x)    do { barrier(); (x)->lock = 0; } while (0)
@@ -134,9 +32,6 @@ typedef struct {
 typedef struct {
        volatile unsigned int read_counter      : 31;
        volatile unsigned int write_lock        :  1;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
 } raw_rwlock_t;
 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
 
diff -r 909bb1245930 -r d669f5d1f876 xen/include/asm-x86/spinlock.h
--- a/xen/include/asm-x86/spinlock.h    Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/include/asm-x86/spinlock.h    Tue Mar 31 15:03:59 2009 +0100
@@ -12,19 +12,6 @@ typedef struct {
 #define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
 
 #define _raw_spin_is_locked(x) ((x)->lock <= 0)
-
-static always_inline void _raw_spin_lock(raw_spinlock_t *lock)
-{
-    asm volatile (
-        "1:  lock; decw %0         \n"
-        "    jns 3f                \n"
-        "2:  rep; nop              \n"
-        "    cmpw $0,%0            \n"
-        "    jle 2b                \n"
-        "    jmp 1b                \n"
-        "3:"
-        : "=m" (lock->lock) : : "memory" );
-}
 
 static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
