[Xen-devel] [PATCH 01 of 11] x86/mm: make p2m lock into an rwlock
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1336661656 -3600
# Node ID 4a99c5456e9d8aa707bbd57bb4f4af88e1d456ca
# Parent  8a86d841e6d42fbffc9e20d3028875dd4990882d
x86/mm: make p2m lock into an rwlock

Because the p2m lock was already recursive, we need to add a new
mm-lock class of recursive rwlocks.

Signed-off-by: Tim Deegan <tim@xxxxxxx>

diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Tue May 08 13:36:24 2012 +0200
+++ b/xen/arch/x86/mm/mm-locks.h        Thu May 10 15:54:16 2012 +0100
@@ -97,13 +97,71 @@ static inline void _mm_enforce_order_loc
     __set_lock_level(level);
 }
 
+static inline void mm_rwlock_init(mm_rwlock_t *l)
+{
+    rwlock_init(&l->lock);
+    l->locker = -1;
+    l->locker_function = "nobody";
+    l->unlock_level = 0;
+}
+
+static inline int mm_write_locked_by_me(mm_rwlock_t *l)
+{
+    return (l->locker == get_processor_id());
+}
+
+static inline void _mm_write_lock(mm_rwlock_t *l, const char *func, int level)
+{
+    if ( !mm_write_locked_by_me(l) )
+    {
+        __check_lock_level(level);
+        write_lock(&l->lock);
+        l->locker = get_processor_id();
+        l->locker_function = func;
+        l->unlock_level = __get_lock_level();
+        __set_lock_level(level);
+    }
+    l->recurse_count++;
+}
+
+static inline void mm_write_unlock(mm_rwlock_t *l)
+{
+    if ( --(l->recurse_count) != 0 )
+        return;
+    l->locker = -1;
+    l->locker_function = "nobody";
+    __set_lock_level(l->unlock_level);
+    write_unlock(&l->lock);
+}
+
+static inline void _mm_read_lock(mm_rwlock_t *l, int level)
+{
+    __check_lock_level(level);
+    read_lock(&l->lock);
+    /* There's nowhere to store the per-CPU unlock level so we can't
+     * set the lock level. */
+}
+
+static inline void mm_read_unlock(mm_rwlock_t *l)
+{
+    read_unlock(&l->lock);
+}
+
 /* This wrapper uses the line number to express the locking order below */
 #define declare_mm_lock(name)                                                 \
     static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
     { _mm_lock(l, func, __LINE__, rec); }
+#define declare_mm_rwlock(name)                                               \
+    static inline void mm_write_lock_##name(mm_rwlock_t *l, const char *func) \
+    { _mm_write_lock(l, func, __LINE__); }                                    \
+    static inline void mm_read_lock_##name(mm_rwlock_t *l)                    \
+    { _mm_read_lock(l, __LINE__); }
 /* These capture the name of the calling function */
 #define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
 #define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
+#define mm_write_lock(name, l) mm_write_lock_##name(l, __func__)
+#define mm_read_lock(name, l) mm_read_lock_##name(l)
 
 /* This wrapper is intended for "external" locks which do not use
  * the mm_lock_t types. Such locks inside the mm code are also subject
@@ -152,27 +210,24 @@ declare_mm_lock(nestedp2m)
 #define nestedp2m_unlock(d)   mm_unlock(&(d)->arch.nested_p2m_lock)
 
 /* P2M lock (per-p2m-table)
- *
- * This protects all queries and updates to the p2m table.
  *
- * A note about ordering:
- *   The order established here is enforced on all mutations of a p2m.
- *   For lookups, the order established here is enforced only for hap
- *   domains (1. shadow domains present a few nasty inversions;
- *            2. shadow domains do not support paging and sharing,
- *               the main sources of dynamic p2m mutations)
- *
- * The lock is recursive as it is common for a code path to look up a gfn
- * and later mutate it.
+ * This protects all queries and updates to the p2m table.
+ * Queries may be made under the read lock but all modifications
+ * need the main (write) lock.
+ *
+ * The write lock is recursive as it is common for a code path to look
+ * up a gfn and later mutate it.
  */
 
-declare_mm_lock(p2m)
-#define p2m_lock(p)           mm_lock_recursive(p2m, &(p)->lock)
-#define gfn_lock(p,g,o)       mm_lock_recursive(p2m, &(p)->lock)
-#define p2m_unlock(p)         mm_unlock(&(p)->lock)
-#define gfn_unlock(p,g,o)     mm_unlock(&(p)->lock)
-#define p2m_locked_by_me(p)   mm_locked_by_me(&(p)->lock)
-#define gfn_locked_by_me(p,g) mm_locked_by_me(&(p)->lock)
+declare_mm_rwlock(p2m);
+#define p2m_lock(p)           mm_write_lock(p2m, &(p)->lock);
+#define p2m_unlock(p)         mm_write_unlock(&(p)->lock);
+#define gfn_lock(p,g,o)       p2m_lock(p)
+#define gfn_unlock(p,g,o)     p2m_unlock(p)
+#define p2m_read_lock(p)      mm_read_lock(p2m, &(p)->lock)
+#define p2m_read_unlock(p)    mm_read_unlock(&(p)->lock)
+#define p2m_locked_by_me(p)   mm_write_locked_by_me(&(p)->lock)
+#define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
 
 /* Sharing per page lock
  *
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue May 08 13:36:24 2012 +0200
+++ b/xen/arch/x86/mm/p2m.c     Thu May 10 15:54:16 2012 +0100
@@ -71,7 +71,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
 /* Init the datastructures for later use by the p2m code */
 static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
-    mm_lock_init(&p2m->lock);
+    mm_rwlock_init(&p2m->lock);
     mm_lock_init(&p2m->pod.lock);
     INIT_LIST_HEAD(&p2m->np2m_list);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Tue May 08 13:36:24 2012 +0200
+++ b/xen/include/asm-x86/mm.h  Thu May 10 15:54:16 2012 +0100
@@ -649,4 +649,12 @@ typedef struct mm_lock {
     const char      *locker_function; /* func that took it */
 } mm_lock_t;
 
+typedef struct mm_rwlock {
+    rwlock_t         lock;
+    int              unlock_level;
+    int              recurse_count;
+    int              locker;          /* CPU that holds the write lock */
+    const char      *locker_function; /* func that took it */
+} mm_rwlock_t;
+
 #endif /* __ASM_X86_MM_H__ */
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue May 08 13:36:24 2012 +0200
+++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -192,7 +192,7 @@ typedef unsigned int p2m_query_t;
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
-    mm_lock_t          lock;
+    mm_rwlock_t        lock;
 
     /* Shadow translated domain: p2m mapping */
     pagetable_t        phys_table;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
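[Editor's note: for readers who want to see the locking pattern in isolation, here is a minimal, self-contained sketch of the recursive write lock the patch introduces, rebuilt on POSIX rwlocks so it compiles and runs outside Xen. All names here (rec_rwlock_t, rec_write_lock, and so on) are invented for illustration, and two simplifications are assumed: ownership is tracked per thread rather than per CPU, and the mm-lock ordering checks (__check_lock_level and friends) are dropped. It is a sketch of the technique, not Xen's implementation.]

    #include <pthread.h>
    #include <stdio.h>

    /* Recursive write lock: a plain rwlock plus an owner field and a
     * nesting count, mirroring the mm_rwlock_t struct in the patch. */
    typedef struct rec_rwlock {
        pthread_rwlock_t lock;
        pthread_t locker;    /* thread holding the write lock */
        int locked;          /* whether 'locker' is valid */
        int recurse_count;   /* write-side nesting depth */
    } rec_rwlock_t;

    static void rec_rwlock_init(rec_rwlock_t *l)
    {
        pthread_rwlock_init(&l->lock, NULL);
        l->locked = 0;
        l->recurse_count = 0;
    }

    static int rec_write_locked_by_me(rec_rwlock_t *l)
    {
        /* Only the write-lock holder ever sets 'locker', so this can
         * only match if we took the lock ourselves. */
        return l->locked && pthread_equal(l->locker, pthread_self());
    }

    static void rec_write_lock(rec_rwlock_t *l)
    {
        if ( !rec_write_locked_by_me(l) )
        {
            pthread_rwlock_wrlock(&l->lock);
            l->locker = pthread_self();
            l->locked = 1;
        }
        l->recurse_count++;    /* re-entry on the same thread just nests */
    }

    static void rec_write_unlock(rec_rwlock_t *l)
    {
        if ( --(l->recurse_count) != 0 )
            return;            /* an outer caller still holds it */
        l->locked = 0;
        pthread_rwlock_unlock(&l->lock);
    }

    /* Readers take the underlying rwlock directly; the read side is not
     * recursive, matching the caveat in _mm_read_lock(). */
    static void rec_read_lock(rec_rwlock_t *l)
    {
        pthread_rwlock_rdlock(&l->lock);
    }

    static void rec_read_unlock(rec_rwlock_t *l)
    {
        pthread_rwlock_unlock(&l->lock);
    }

    int main(void)
    {
        rec_rwlock_t l;
        rec_rwlock_init(&l);

        rec_write_lock(&l);    /* outer take, like p2m_lock()          */
        rec_write_lock(&l);    /* nested take from the same code path  */
        printf("nesting depth = %d\n", l.recurse_count);  /* prints 2 */
        rec_write_unlock(&l);
        rec_write_unlock(&l);  /* lock actually released here          */

        rec_read_lock(&l);     /* pure query, like p2m_read_lock()     */
        rec_read_unlock(&l);
        return 0;
    }

The design point this illustrates: p2m code paths commonly look up a gfn under the lock and then call back into code that takes the lock again, so a plain rwlock would self-deadlock on the write side. Making the write side re-entrant for the owning CPU (owning thread in this sketch) keeps those paths working, at the cost that the read side stays non-recursive and queries can now only run concurrently with each other, not with modifications.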