To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 13 of 17] x86/mm: dedup the various copies of the shadow lock functions
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Thu, 2 Jun 2011 13:20:23 +0100
Delivery-date: Thu, 02 Jun 2011 05:43:29 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.8.3
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID d6518e8670ab15d5a9ec49b500ecf6e67442d3a8
# Parent  51f28a7cbb3d1e42e3592e5dc45041742a100fea
x86/mm: dedup the various copies of the shadow lock functions

Define the lock and unlock functions once, and list all the locks in one
place so (a) it's obvious what the locking discipline is and (b) none of
the locks are visible to non-mm code.  Automatically enforce that these
locks never get taken in the wrong order.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>

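[Editor's note, not part of the patch: the following is a minimal, self-contained
 sketch of the ordering check this series introduces, for readers skimming the
 diff.  It mirrors the idea of declare_mm_lock()/_mm_lock() below, where each
 lock's level is taken from __LINE__ at its declaration site and taking a lock
 at a level <= the current one is an ordering violation.  The names
 (DECLARE_DEMO_LOCK, demo_lock, cur_level) are illustrative only; the per-CPU
 mm_lock_level is modelled as a plain global and panic() as assert().]

#include <assert.h>
#include <stdio.h>

static int cur_level;                     /* stand-in for this_cpu(mm_lock_level) */

typedef struct {
    int unlock_level;                     /* level to restore on release */
} demo_lock_t;

static void demo_lock(demo_lock_t *l, int level)
{
    /* The real _mm_lock() panics here; the numbers it prints are the
     * __LINE__ values of the offending lock declarations. */
    assert(cur_level < level);
    l->unlock_level = cur_level;
    cur_level = level;
    /* spin_lock(&l->lock) would go here in the hypervisor. */
}

static void demo_unlock(demo_lock_t *l)
{
    /* spin_unlock(&l->lock) would go here. */
    cur_level = l->unlock_level;
}

/* Like declare_mm_lock(): the declaration's line number fixes the order. */
#define DECLARE_DEMO_LOCK(name)                       \
    static demo_lock_t name;                          \
    static const int name##_level = __LINE__;

DECLARE_DEMO_LOCK(p2m)      /* declared first  => must be taken first  */
DECLARE_DEMO_LOCK(shadow)   /* declared second => may be taken under p2m */

int main(void)
{
    demo_lock(&p2m, p2m_level);
    demo_lock(&shadow, shadow_level);     /* fine: shadow declared after p2m */
    demo_unlock(&shadow);
    demo_unlock(&p2m);

    /* Reversing the order would trip the assert:
     *   demo_lock(&shadow, shadow_level);
     *   demo_lock(&p2m, p2m_level);
     */
    printf("lock ordering respected\n");
    return 0;
}
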
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 02 13:16:52 2011 +0100
@@ -1215,7 +1215,6 @@ void ept_sync_domain(struct domain *d)
         return;
 
     ASSERT(local_irq_is_enabled());
-    ASSERT(p2m_locked_by_me(p2m_get_hostp2m(d)));
 
     /*
      * Flush active cpus synchronously. Flush others the next time this domain
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
@@ -572,7 +572,7 @@ static void hap_destroy_monitor_table(st
 /************************************************/
 void hap_domain_init(struct domain *d)
 {
-    hap_lock_init(d);
+    mm_lock_init(&d->arch.paging.hap.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 }
 
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/hap/private.h
--- a/xen/arch/x86/mm/hap/private.h     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/private.h     Thu Jun 02 13:16:52 2011 +0100
@@ -20,6 +20,8 @@
 #ifndef __HAP_PRIVATE_H__
 #define __HAP_PRIVATE_H__
 
+#include "../mm-locks.h"
+
 /********************************************/
 /*          GUEST TRANSLATION FUNCS         */
 /********************************************/
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
@@ -32,6 +32,8 @@
 #include <asm/mem_event.h>
 #include <asm/atomic.h>
 
+#include "mm-locks.h"
+
 /* Auditing of memory sharing code? */
 #define MEM_SHARING_AUDIT  0
 
@@ -74,13 +76,7 @@ typedef struct gfn_info
     struct list_head list;
 } gfn_info_t;
 
-typedef struct shr_lock
-{
-    spinlock_t  lock;            /* mem sharing lock */
-    int         locker;          /* processor which holds the lock */
-    const char *locker_function; /* func that took it */
-} shr_lock_t;
-static shr_lock_t shr_lock;
+static mm_lock_t shr_lock;
 
 /* Returns true if list has only one entry. O(1) complexity. */
 static inline int list_has_one_entry(struct list_head *head)
@@ -93,43 +89,11 @@ static inline struct gfn_info* gfn_get_i
     return list_entry(list->next, struct gfn_info, list);
 }
 
-#define shr_lock_init(_i)                      \
-    do {                                       \
-        spin_lock_init(&shr_lock.lock);        \
-        shr_lock.locker = -1;                  \
-        shr_lock.locker_function = "nobody";   \
-    } while (0)
-
-#define shr_locked_by_me(_i)                   \
-    (current->processor == shr_lock.locker)
-
-#define shr_lock(_i)                                           \
-    do {                                                       \
-        if ( unlikely(shr_lock.locker == current->processor) ) \
-        {                                                      \
-            printk("Error: shr lock held by %s\n",             \
-                   shr_lock.locker_function);                  \
-            BUG();                                             \
-        }                                                      \
-        spin_lock(&shr_lock.lock);                             \
-        ASSERT(shr_lock.locker == -1);                         \
-        shr_lock.locker = current->processor;                  \
-        shr_lock.locker_function = __func__;                   \
-    } while (0)
-
-#define shr_unlock(_i)                                    \
-    do {                                                  \
-        ASSERT(shr_lock.locker == current->processor);    \
-        shr_lock.locker = -1;                             \
-        shr_lock.locker_function = "nobody";              \
-        spin_unlock(&shr_lock.lock);                      \
-    } while (0)
-
 static void __init mem_sharing_hash_init(void)
 {
     int i;
 
-    shr_lock_init();
+    mm_lock_init(&shr_lock);
     for(i=0; i<SHR_HASH_LENGTH; i++)
         shr_hash[i] = NULL;
 }
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/mm-locks.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/mm/mm-locks.h        Thu Jun 02 13:16:52 2011 +0100
@@ -0,0 +1,161 @@
+/******************************************************************************
+ * arch/x86/mm/mm-locks.h
+ *
+ * Spinlocks used by the code in arch/x86/mm.
+ *
+ * Copyright (c) 2011 Citrix Systems, inc. 
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Copyright (c) 2006-2007 XenSource Inc.
+ * Copyright (c) 2006 Michael A Fetterman
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _MM_LOCKS_H
+#define _MM_LOCKS_H
+
+/* Per-CPU variable for enforcing the lock ordering */
+DECLARE_PER_CPU(int, mm_lock_level);
+
+static inline void mm_lock_init(mm_lock_t *l)
+{
+    spin_lock_init(&l->lock);
+    l->locker = -1;
+    l->locker_function = "nobody";
+    l->unlock_level = 0;
+}
+
+static inline void _mm_lock(mm_lock_t *l, const char *func, int level)
+{
+    if ( unlikely(l->locker == current->processor) )
+        panic("mm lock held by %s\n", l->locker_function);
+    /* If you see this crash, the numbers printed are lines in this file 
+     * where the offending locks are declared. */
+    if ( unlikely(this_cpu(mm_lock_level) >= level) )
+        panic("mm locking order violation: %i >= %i\n", 
+              this_cpu(mm_lock_level), level);
+    spin_lock(&l->lock);
+    ASSERT(l->locker == -1);
+    l->locker = current->processor;
+    l->locker_function = func;
+    l->unlock_level = this_cpu(mm_lock_level);
+    this_cpu(mm_lock_level) = level;
+}
+/* This wrapper uses the line number to express the locking order below */
+#define declare_mm_lock(name)                                             \
+  static inline void mm_lock_##name(mm_lock_t *l, const char *func)       \
+  { _mm_lock(l, func, __LINE__); }
+/* This one captures the name of the calling function */
+#define mm_lock(name, l) mm_lock_##name(l, __func__)
+
+static inline void mm_unlock(mm_lock_t *l)
+{
+    ASSERT(l->locker == current->processor);
+    l->locker = -1;
+    l->locker_function = "nobody";
+    this_cpu(mm_lock_level) = l->unlock_level;
+    l->unlock_level = -1;
+    spin_unlock(&l->lock);
+}
+
+static inline int mm_locked_by_me(mm_lock_t *l) 
+{
+    return (current->processor == l->locker);
+}
+
+/************************************************************************
+ *                                                                      *
+ * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
+ * declared in this file.  The locking functions will enforce this.     *
+ *                                                                      *
+ ************************************************************************/
+
+/* Page-sharing lock (global) 
+ *
+ * A single global lock that protects the memory-sharing code's
+ * hash tables. */
+
+declare_mm_lock(shr)
+#define shr_lock()         mm_lock(shr, &shr_lock)
+#define shr_unlock()       mm_unlock(&shr_lock)
+#define shr_locked_by_me() mm_locked_by_me(&shr_lock)
+
+/* Nested P2M lock (per-domain)
+ *
+ * A per-domain lock that protects some of the nested p2m datastructures.
+ * TODO: find out exactly what needs to be covered by this lock */
+
+declare_mm_lock(nestedp2m)
+#define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
+#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
+
+/* P2M lock (per-p2m-table)
+ * 
+ * This protects all updates to the p2m table.  Updates are expected to
+ * be safe against concurrent reads, which do *not* require the lock. */
+
+declare_mm_lock(p2m)
+#define p2m_lock(p)         mm_lock(p2m, &(p)->lock)
+#define p2m_unlock(p)       mm_unlock(&(p)->lock)
+#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
+
+/* Shadow lock (per-domain)
+ *
+ * This lock is intended to allow us to make atomic updates to the
+ * software TLB that the shadow pagetables provide.
+ *
+ * Specifically, it protects:
+ *   - all changes to shadow page table pages
+ *   - the shadow hash table
+ *   - the shadow page allocator 
+ *   - all changes to guest page table pages
+ *   - all changes to the page_info->tlbflush_timestamp
+ *   - the page_info->count fields on shadow pages */
+
+declare_mm_lock(shadow)
+#define shadow_lock(d)         mm_lock(shadow, &(d)->arch.paging.shadow.lock)
+#define shadow_unlock(d)       mm_unlock(&(d)->arch.paging.shadow.lock)
+#define shadow_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.shadow.lock)
+
+/* HAP lock (per-domain)
+ * 
+ * Equivalent of the shadow lock for HAP.  Protects updates to the
+ * NPT and EPT tables, and the HAP page allocator. */
+
+declare_mm_lock(hap)
+#define hap_lock(d)         mm_lock(hap, &(d)->arch.paging.hap.lock)
+#define hap_unlock(d)       mm_unlock(&(d)->arch.paging.hap.lock)
+#define hap_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.hap.lock)
+
+/* Log-dirty lock (per-domain) 
+ * 
+ * Protects the log-dirty bitmap from concurrent accesses (and teardowns, etc).
+ *
+ * Because mark_dirty is called from a lot of places, the log-dirty lock
+ * may be acquired with the shadow or HAP locks already held.  When the
+ * log-dirty code makes callbacks into HAP or shadow code to reset
+ * various traps that will trigger the mark_dirty calls, it must *not*
+ * have the log-dirty lock held, or it risks deadlock.  Because the only
+ * purpose of those calls is to make sure that *guest* actions will
+ * cause mark_dirty to be called (hypervisor actions explictly call it
+ * anyway), it is safe to release the log-dirty lock before the callback
+ * as long as the domain is paused for the entire operation. */
+
+declare_mm_lock(log_dirty)
+#define log_dirty_lock(d) mm_lock(log_dirty, &(d)->arch.paging.log_dirty.lock)
+#define log_dirty_unlock(d) mm_unlock(&(d)->arch.paging.log_dirty.lock)
+
+
+#endif /* _MM_LOCKS_H */
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-ept.c Thu Jun 02 13:16:52 2011 +0100
@@ -32,6 +32,8 @@
 #include <xen/keyhandler.h>
 #include <xen/softirq.h>
 
+#include "mm-locks.h"
+
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
 #define atomic_write_ept_entry(__pepte, __epte)                     \
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:52 2011 +0100
@@ -32,7 +32,8 @@
 #include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
- 
+
+#include "mm-locks.h"
 
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
@@ -375,7 +376,7 @@ p2m_pod_empty_cache(struct domain *d)
 
     /* After this barrier no new PoD activities can happen. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&p2m->lock);
+    spin_barrier(&p2m->lock.lock);
 
     spin_lock(&d->page_alloc_lock);
 
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
@@ -38,6 +38,8 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+#include "mm-locks.h"
+
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -37,6 +37,8 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+#include "mm-locks.h"
+
 /* turn on/off 1GB host page table support for hap, default on */
 static bool_t __read_mostly opt_hap_1gb = 1;
 boolean_param("hap_1gb", opt_hap_1gb);
@@ -70,7 +72,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
 static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
     memset(p2m, 0, sizeof(*p2m));
-    p2m_lock_init(p2m);
+    mm_lock_init(&p2m->lock);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
     INIT_PAGE_LIST_HEAD(&p2m->pod.super);
     INIT_PAGE_LIST_HEAD(&p2m->pod.single);
@@ -95,7 +97,7 @@ p2m_init_nestedp2m(struct domain *d)
     uint8_t i;
     struct p2m_domain *p2m;
 
-    nestedp2m_lock_init(d);
+    mm_lock_init(&d->arch.nested_p2m_lock);
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         d->arch.nested_p2m[i] = p2m = xmalloc(struct p2m_domain);
         if (p2m == NULL)
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
@@ -30,6 +30,8 @@
 #include <xen/numa.h>
 #include <xsm/xsm.h>
 
+#include "mm-locks.h"
+
 /* Printouts */
 #define PAGING_PRINTK(_f, _a...)                                     \
     debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
@@ -41,9 +43,9 @@
             debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
     } while (0)
 
-/************************************************/
-/*              LOG DIRTY SUPPORT               */
-/************************************************/
+/* Per-CPU variable for enforcing the lock ordering */
+DEFINE_PER_CPU(int, mm_lock_level);
+
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
@@ -52,49 +54,9 @@
 #undef page_to_mfn
 #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
 
-/* The log-dirty lock.  This protects the log-dirty bitmap from
- * concurrent accesses (and teardowns, etc).
- *
- * Locking discipline: always acquire shadow or HAP lock before this one.
- *
- * Because mark_dirty is called from a lot of places, the log-dirty lock
- * may be acquired with the shadow or HAP locks already held.  When the
- * log-dirty code makes callbacks into HAP or shadow code to reset
- * various traps that will trigger the mark_dirty calls, it must *not*
- * have the log-dirty lock held, or it risks deadlock.  Because the only
- * purpose of those calls is to make sure that *guest* actions will
- * cause mark_dirty to be called (hypervisor actions explictly call it
- * anyway), it is safe to release the log-dirty lock before the callback
- * as long as the domain is paused for the entire operation. */
-
-#define log_dirty_lock_init(_d)                                   \
-    do {                                                          \
-        spin_lock_init(&(_d)->arch.paging.log_dirty.lock);        \
-        (_d)->arch.paging.log_dirty.locker = -1;                  \
-        (_d)->arch.paging.log_dirty.locker_function = "nobody";   \
-    } while (0)
-
-#define log_dirty_lock(_d)                                                   \
-    do {                                                                     \
-        if (unlikely((_d)->arch.paging.log_dirty.locker==current->processor))\
-        {                                                                    \
-            printk("Error: paging log dirty lock held by %s\n",              \
-                   (_d)->arch.paging.log_dirty.locker_function);             \
-            BUG();                                                           \
-        }                                                                    \
-        spin_lock(&(_d)->arch.paging.log_dirty.lock);                        \
-        ASSERT((_d)->arch.paging.log_dirty.locker == -1);                    \
-        (_d)->arch.paging.log_dirty.locker = current->processor;             \
-        (_d)->arch.paging.log_dirty.locker_function = __func__;              \
-    } while (0)
-
-#define log_dirty_unlock(_d)                                              \
-    do {                                                                  \
-        ASSERT((_d)->arch.paging.log_dirty.locker == current->processor); \
-        (_d)->arch.paging.log_dirty.locker = -1;                          \
-        (_d)->arch.paging.log_dirty.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.log_dirty.lock);                   \
-    } while (0)
+/************************************************/
+/*              LOG DIRTY SUPPORT               */
+/************************************************/
 
 static mfn_t paging_new_log_dirty_page(struct domain *d)
 {
@@ -671,7 +633,7 @@ void paging_log_dirty_init(struct domain
                            void   (*clean_dirty_bitmap)(struct domain *d))
 {
     /* We initialize log dirty lock first */
-    log_dirty_lock_init(d);
+    mm_lock_init(&d->arch.paging.log_dirty.lock);
 
     d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
     d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
@@ -45,7 +45,7 @@ DEFINE_PER_CPU(uint32_t,trace_shadow_pat
  * Called for every domain from arch_domain_create() */
 void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
 {
-    shadow_lock_init(d);
+    mm_lock_init(&d->arch.paging.shadow.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
@@ -3136,7 +3136,7 @@ static int sh_page_fault(struct vcpu *v,
     if ( unlikely(shadow_locked_by_me(d)) )
     {
         SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
-                     d->arch.paging.shadow.locker_function);
+                     d->arch.paging.shadow.lock.locker_function);
         return 0;
     }
 
diff -r 51f28a7cbb3d -r d6518e8670ab xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Thu Jun 02 13:16:52 2011 +0100
@@ -31,6 +31,7 @@
 #include <asm/x86_emulate.h>
 #include <asm/hvm/support.h>
 
+#include "../mm-locks.h"
 
 /******************************************************************************
  * Levels of self-test and paranoia
@@ -128,57 +129,6 @@ enum {
     TRCE_SFLAG_OOS_FIXUP_EVICT,
 };
 
-/******************************************************************************
- * The shadow lock.
- *
- * This lock is per-domain.  It is intended to allow us to make atomic
- * updates to the software TLB that the shadow tables provide.
- * 
- * Specifically, it protects:
- *   - all changes to shadow page table pages
- *   - the shadow hash table
- *   - the shadow page allocator 
- *   - all changes to guest page table pages
- *   - all changes to the page_info->tlbflush_timestamp
- *   - the page_info->count fields on shadow pages
- *   - the shadow dirty bit array and count
- */
-#ifndef CONFIG_SMP
-#error shadow.h currently requires CONFIG_SMP
-#endif
-
-#define shadow_lock_init(_d)                                   \
-    do {                                                       \
-        spin_lock_init(&(_d)->arch.paging.shadow.lock);        \
-        (_d)->arch.paging.shadow.locker = -1;                  \
-        (_d)->arch.paging.shadow.locker_function = "nobody";   \
-    } while (0)
-
-#define shadow_locked_by_me(_d)                     \
-    (current->processor == (_d)->arch.paging.shadow.locker)
-
-#define shadow_lock(_d)                                                       \
-    do {                                                                      \
-        if ( unlikely((_d)->arch.paging.shadow.locker == current->processor) )\
-        {                                                                     \
-            printk("Error: shadow lock held by %s\n",                         \
-                   (_d)->arch.paging.shadow.locker_function);                 \
-            BUG();                                                            \
-        }                                                                     \
-        spin_lock(&(_d)->arch.paging.shadow.lock);                            \
-        ASSERT((_d)->arch.paging.shadow.locker == -1);                        \
-        (_d)->arch.paging.shadow.locker = current->processor;                 \
-        (_d)->arch.paging.shadow.locker_function = __func__;                  \
-    } while (0)
-
-#define shadow_unlock(_d)                                              \
-    do {                                                               \
-        ASSERT((_d)->arch.paging.shadow.locker == current->processor); \
-        (_d)->arch.paging.shadow.locker = -1;                          \
-        (_d)->arch.paging.shadow.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.shadow.lock);                   \
-    } while (0)
-
 
 /* Size (in bytes) of a guest PTE */
 #if GUEST_PAGING_LEVELS >= 3
diff -r 51f28a7cbb3d -r d6518e8670ab xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/domain.h      Thu Jun 02 13:16:52 2011 +0100
@@ -91,9 +91,8 @@ void hypercall_page_initialise(struct do
 /*          shadow paging extension             */
 /************************************************/
 struct shadow_domain {
-    spinlock_t        lock;  /* shadow domain lock */
-    int               locker; /* processor which holds the lock */
-    const char       *locker_function; /* Func that took it */
+    mm_lock_t         lock;  /* shadow domain lock */
+
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     struct page_list_head pinned_shadows;
 
@@ -159,9 +158,7 @@ struct shadow_vcpu {
 /*            hardware assisted paging          */
 /************************************************/
 struct hap_domain {
-    spinlock_t        lock;
-    int               locker;
-    const char       *locker_function;
+    mm_lock_t         lock;
 
     struct page_list_head freelist;
     unsigned int      total_pages;  /* number of pages allocated */
@@ -174,9 +171,7 @@ struct hap_domain {
 /************************************************/
 struct log_dirty_domain {
     /* log-dirty lock */
-    spinlock_t     lock;
-    int            locker; /* processor that holds the lock */
-    const char    *locker_function; /* func that took it */
+    mm_lock_t     lock;
 
     /* log-dirty radix tree to record dirty pages */
     mfn_t          top;
@@ -280,9 +275,7 @@ struct arch_domain
 
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
-    spinlock_t nested_p2m_lock;
-    int nested_p2m_locker;
-    const char *nested_p2m_function;
+    mm_lock_t nested_p2m_lock;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff -r 51f28a7cbb3d -r d6518e8670ab xen/include/asm-x86/hap.h
--- a/xen/include/asm-x86/hap.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/hap.h Thu Jun 02 13:16:52 2011 +0100
@@ -47,41 +47,6 @@ hap_unmap_domain_page(void *p)
 }
 
 /************************************************/
-/*           locking for hap code               */
-/************************************************/
-#define hap_lock_init(_d)                                   \
-    do {                                                    \
-        spin_lock_init(&(_d)->arch.paging.hap.lock);        \
-        (_d)->arch.paging.hap.locker = -1;                  \
-        (_d)->arch.paging.hap.locker_function = "nobody";   \
-    } while (0)
-
-#define hap_locked_by_me(_d)                     \
-    (current->processor == (_d)->arch.paging.hap.locker)
-
-#define hap_lock(_d)                                                       \
-    do {                                                                   \
-        if ( unlikely((_d)->arch.paging.hap.locker == current->processor) )\
-        {                                                                  \
-            printk("Error: hap lock held by %s\n",                         \
-                   (_d)->arch.paging.hap.locker_function);                 \
-            BUG();                                                         \
-        }                                                                  \
-        spin_lock(&(_d)->arch.paging.hap.lock);                            \
-        ASSERT((_d)->arch.paging.hap.locker == -1);                        \
-        (_d)->arch.paging.hap.locker = current->processor;                 \
-        (_d)->arch.paging.hap.locker_function = __func__;                  \
-    } while (0)
-
-#define hap_unlock(_d)                                              \
-    do {                                                            \
-        ASSERT((_d)->arch.paging.hap.locker == current->processor); \
-        (_d)->arch.paging.hap.locker = -1;                          \
-        (_d)->arch.paging.hap.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.hap.lock);                   \
-    } while (0)
-
-/************************************************/
 /*        hap domain level functions            */
 /************************************************/
 void  hap_domain_init(struct domain *d);
diff -r 51f28a7cbb3d -r d6518e8670ab xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/mm.h  Thu Jun 02 13:16:52 2011 +0100
@@ -4,6 +4,7 @@
 
 #include <xen/config.h>
 #include <xen/list.h>
+#include <xen/spinlock.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
@@ -597,4 +598,12 @@ unsigned long domain_get_maximum_gpfn(st
 
 extern struct domain *dom_xen, *dom_io, *dom_cow;      /* for vmcoreinfo */
 
+/* Definition of an mm lock: spinlock with extra fields for debugging */
+typedef struct mm_lock {
+    spinlock_t         lock; 
+    int                unlock_level;
+    int                locker;          /* processor which holds the lock */
+    const char        *locker_function; /* func that took it */
+} mm_lock_t;
+
 #endif /* __ASM_X86_MM_H__ */
diff -r 51f28a7cbb3d -r d6518e8670ab xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -189,9 +189,7 @@ typedef enum {
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
-    spinlock_t         lock;
-    int                locker;   /* processor which holds the lock */
-    const char        *locker_function; /* Func that took it */
+    mm_lock_t          lock;
 
     /* Shadow translated domain: p2m mapping */
     pagetable_t        phys_table;
@@ -285,80 +283,6 @@ struct p2m_domain *p2m_get_p2m(struct vc
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
 
-/*
- * The P2M lock.  This protects all updates to the p2m table.
- * Updates are expected to be safe against concurrent reads,
- * which do *not* require the lock.
- *
- * Locking discipline: always acquire this lock before the shadow or HAP one
- */
-
-#define p2m_lock_init(_p2m)                     \
-    do {                                        \
-        spin_lock_init(&(_p2m)->lock);          \
-        (_p2m)->locker = -1;                    \
-        (_p2m)->locker_function = "nobody";     \
-    } while (0)
-
-#define p2m_lock(_p2m)                                          \
-    do {                                                        \
-        if ( unlikely((_p2m)->locker == current->processor) )   \
-        {                                                       \
-            printk("Error: p2m lock held by %s\n",              \
-                   (_p2m)->locker_function);                    \
-            BUG();                                              \
-        }                                                       \
-        spin_lock(&(_p2m)->lock);                               \
-        ASSERT((_p2m)->locker == -1);                           \
-        (_p2m)->locker = current->processor;                    \
-        (_p2m)->locker_function = __func__;                     \
-    } while (0)
-
-#define p2m_unlock(_p2m)                                \
-    do {                                                \
-        ASSERT((_p2m)->locker == current->processor);   \
-        (_p2m)->locker = -1;                            \
-        (_p2m)->locker_function = "nobody";             \
-        spin_unlock(&(_p2m)->lock);                     \
-    } while (0)
-
-#define p2m_locked_by_me(_p2m)                            \
-    (current->processor == (_p2m)->locker)
-
-
-#define nestedp2m_lock_init(_domain)                                  \
-    do {                                                              \
-        spin_lock_init(&(_domain)->arch.nested_p2m_lock);             \
-        (_domain)->arch.nested_p2m_locker = -1;                       \
-        (_domain)->arch.nested_p2m_function = "nobody";               \
-    } while (0)
-
-#define nestedp2m_locked_by_me(_domain)                \
-    (current->processor == (_domain)->arch.nested_p2m_locker)
-
-#define nestedp2m_lock(_domain)                                       \
-    do {                                                              \
-        if ( nestedp2m_locked_by_me(_domain) )                        \
-        {                                                             \
-            printk("Error: p2m lock held by %s\n",                    \
-                   (_domain)->arch.nested_p2m_function);              \
-            BUG();                                                    \
-        }                                                             \
-        spin_lock(&(_domain)->arch.nested_p2m_lock);                  \
-        ASSERT((_domain)->arch.nested_p2m_locker == -1);              \
-        (_domain)->arch.nested_p2m_locker = current->processor;       \
-        (_domain)->arch.nested_p2m_function = __func__;               \
-    } while (0)
-
-#define nestedp2m_unlock(_domain)                                      \
-    do {                                                               \
-        ASSERT(nestedp2m_locked_by_me(_domain));                       \
-        (_domain)->arch.nested_p2m_locker = -1;                        \
-        (_domain)->arch.nested_p2m_function = "nobody";                \
-        spin_unlock(&(_domain)->arch.nested_p2m_lock);                 \
-    } while (0)
-
-
 /* Read a particular P2M table, mapping pages as we go.  Most callers
  * should _not_ call this directly; use the other gfn_to_mfn_* functions
  * below unless you know you want to walk a p2m that isn't a domain's

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
