[PATCH v2 2/2] xen/rwlock: add check_lock() handling to rwlocks
Checking whether a lock is used consistently with respect to interrupts
being on or off is beneficial for rwlocks, too.

So add check_lock() calls to the rwlock functions. For this purpose make
check_lock() globally accessible.
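For illustration, below is a minimal standalone sketch of the kind of
consistency check this is about (simplified and hypothetical; the names
irqs_enabled and struct lock_dbg are stand-ins, and the real check_lock()
in xen/common/spinlock.c additionally handles the lock-debug enable state
and reports via printk()/BUG()). The invariant is: a lock that is ever
acquired with interrupts disabled must never be acquired with them
enabled, since an interrupt taken while the lock is held could otherwise
deadlock trying to acquire it again.

#include <assert.h>
#include <stdbool.h>

/* Per-lock debug state: remembers how the lock was first acquired. */
struct lock_dbg {
    bool seen;      /* has the lock been acquired before? */
    bool irq_safe;  /* was it acquired with interrupts disabled? */
};

/* Stand-in for Xen's local_irq_is_enabled(). */
static bool irqs_enabled = true;

static void check_lock(struct lock_dbg *dbg, bool try)
{
    bool irq_safe = !irqs_enabled;

    /*
     * A trylock attempted with interrupts disabled cannot deadlock,
     * as a trylock never spins waiting for the lock.
     */
    if ( try && irq_safe )
        return;

    if ( !dbg->seen )
    {
        /* First acquisition: record the IRQ context for later checks. */
        dbg->seen = true;
        dbg->irq_safe = irq_safe;
    }
    else
        /* Mixing IRQ-safe and IRQ-unsafe acquisitions risks deadlock. */
        assert(dbg->irq_safe == irq_safe);
}

In this model, taking the same lock once with interrupts enabled and
later with them disabled trips the assertion; that is the class of
inconsistency the patch makes rwlocks report as well.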
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- call check_lock() unconditionally in try_lock variants (Jan Beulich)
---
xen/common/spinlock.c | 3 +--
xen/include/xen/rwlock.h | 11 +++++++++++
xen/include/xen/spinlock.h | 2 ++
3 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index b4aaf6bce6..405322c6b8 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,7 +13,7 @@
static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
-static void check_lock(union lock_debug *debug, bool try)
+void check_lock(union lock_debug *debug, bool try)
{
bool irq_safe = !local_irq_is_enabled();
@@ -108,7 +108,6 @@ void spin_debug_disable(void)
#else /* CONFIG_DEBUG_LOCKS */
-#define check_lock(l, t) ((void)0)
#define check_barrier(l) ((void)0)
#define got_lock(l) ((void)0)
#define rel_lock(l) ((void)0)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 427664037a..94496a0f53 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -56,6 +56,7 @@ static inline int _read_trylock(rwlock_t *lock)
u32 cnts;
preempt_disable();
+ check_lock(&lock->lock.debug, true);
cnts = atomic_read(&lock->cnts);
if ( likely(_can_read_lock(cnts)) )
{
@@ -66,6 +67,7 @@ static inline int _read_trylock(rwlock_t *lock)
*/
if ( likely(_can_read_lock(cnts)) )
return 1;
+
atomic_sub(_QR_BIAS, &lock->cnts);
}
preempt_enable();
@@ -87,7 +89,10 @@ static inline void _read_lock(rwlock_t *lock)
* arch_lock_acquire_barrier().
*/
if ( likely(_can_read_lock(cnts)) )
+ {
+ check_lock(&lock->lock.debug, false);
return;
+ }
/* The slowpath will decrement the reader count, if necessary. */
queue_read_lock_slowpath(lock);
@@ -162,7 +167,10 @@ static inline void _write_lock(rwlock_t *lock)
* arch_lock_acquire_barrier().
*/
if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
+ {
+ check_lock(&lock->lock.debug, false);
return;
+ }
queue_write_lock_slowpath(lock);
/*
@@ -197,6 +205,7 @@ static inline int _write_trylock(rwlock_t *lock)
u32 cnts;
preempt_disable();
+ check_lock(&lock->lock.debug, true);
cnts = atomic_read(&lock->cnts);
if ( unlikely(cnts) ||
unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
@@ -328,6 +337,8 @@ static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
/* Drop the read lock because we don't need it anymore. */
read_unlock(&percpu_rwlock->rwlock);
}
+ else
+ check_lock(&percpu_rwlock->rwlock.lock.debug, false);
}
static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ca13b600a0..9fa4e600c1 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -21,11 +21,13 @@ union lock_debug {
};
};
#define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+void check_lock(union lock_debug *debug, bool try);
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
union lock_debug { };
#define _LOCK_DEBUG { }
+#define check_lock(l, t) ((void)0)
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif
--
2.26.2