To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Clean up locking/init code around log-dirty interfaces
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 14 Jun 2007 12:52:13 -0700
Delivery-date: Thu, 14 Jun 2007 15:24:03 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1181569126 -3600
# Node ID 2c8c6ca1296b82e31bb0a50fcf9f63d0bfa11176
# Parent  3d5f39c610ad8ccc5097abbd15ab329a57ef0b6d
[XEN] Clean up locking/init code around log-dirty interfaces
to avoid deadlocks and make sure locks/functions are in place for
PV domains to be put in log-dirty mode if they're not already shadowed.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/hap/hap.c       |    8 ++--
 xen/arch/x86/mm/paging.c        |   77 ++++++++++++++++++++++++++++++----------
 xen/arch/x86/mm/shadow/common.c |    8 ++--
 3 files changed, 67 insertions(+), 26 deletions(-)
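
In outline, the deadlock being avoided is a two-lock ordering inversion: the
fault path takes the shadow or HAP lock and then the log-dirty lock when it
marks a page dirty, so no control path may hold the log-dirty lock while
calling back into shadow/HAP code. The toy pthreads program below illustrates
that rule only; the lock and function names are stand-ins, not Xen interfaces.

    /* Toy demo (not Xen code) of the lock ordering rule this patch
     * enforces: paging (shadow/HAP) lock first, log-dirty lock second.
     * Build with: cc demo.c -o demo -lpthread */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t paging_lock    = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t log_dirty_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Guest write fault: already holds the paging lock, then takes the
     * log-dirty lock to mark the page dirty (paging -> log-dirty). */
    static void *fault_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&paging_lock);
        pthread_mutex_lock(&log_dirty_lock);
        puts("fault path: page marked dirty");
        pthread_mutex_unlock(&log_dirty_lock);
        pthread_mutex_unlock(&paging_lock);
        return NULL;
    }

    /* Control path: does its bitmap work under the log-dirty lock, but
     * releases it *before* calling back into paging code.  Keeping it
     * held across the callback (log-dirty -> paging) would let the two
     * threads each hold the lock the other wants: deadlock. */
    static void *control_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&log_dirty_lock);
        puts("control path: bitmap updated");
        pthread_mutex_unlock(&log_dirty_lock);  /* drop it first... */

        pthread_mutex_lock(&paging_lock);       /* ...then call back */
        puts("control path: callback into paging code");
        pthread_mutex_unlock(&paging_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, fault_path, NULL);
        pthread_create(&b, NULL, control_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }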

diff -r 3d5f39c610ad -r 2c8c6ca1296b xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Mon Jun 11 14:38:46 2007 +0100
@@ -425,6 +425,10 @@ void hap_domain_init(struct domain *d)
 {
     hap_lock_init(d);
     INIT_LIST_HEAD(&d->arch.paging.hap.freelists);
+
+    /* This domain will use HAP for log-dirty mode */
+    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
+                          hap_clean_dirty_bitmap);
 }
 
 /* return 0 for success, -errno for failure */
@@ -454,10 +458,6 @@ int hap_enable(struct domain *d, u32 mod
             goto out;
         }
     }
-
-    /* initialize log dirty here */
-    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
-                          hap_clean_dirty_bitmap);
 
     /* allocate P2m table */
     if ( mode & PG_translate ) {
diff -r 3d5f39c610ad -r 2c8c6ca1296b xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/paging.c  Mon Jun 11 14:38:46 2007 +0100
@@ -53,6 +53,21 @@ boolean_param("hap", opt_hap_enabled);
 #undef page_to_mfn
 #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
 
+/* The log-dirty lock.  This protects the log-dirty bitmap from
+ * concurrent accesses (and teardowns, etc). 
+ * 
+ * Locking discipline: always acquire shadow or HAP lock before this one.
+ * 
+ * Because mark_dirty is called from a lot of places, the log-dirty lock
+ * may be acquired with the shadow or HAP locks already held.  When the
+ * log-dirty code makes callbacks into HAP or shadow code to reset
+ * various traps that will trigger the mark_dirty calls, it must *not*
+ * have the log-dirty lock held, or it risks deadlock.  Because the only
+ * purpose of those calls is to make sure that *guest* actions will
+ * cause mark_dirty to be called (hypervisor actions explicitly call it
+ * anyway), it is safe to release the log-dirty lock before the callback
+ * as long as the domain is paused for the entire operation. */
+
 #define log_dirty_lock_init(_d)                                   \
     do {                                                          \
         spin_lock_init(&(_d)->arch.paging.log_dirty.lock);        \
@@ -85,7 +100,9 @@ boolean_param("hap", opt_hap_enabled);
 /* allocate bitmap resources for log dirty */
 int paging_alloc_log_dirty_bitmap(struct domain *d)
 {
-    ASSERT(d->arch.paging.log_dirty.bitmap == NULL);
+    if ( d->arch.paging.log_dirty.bitmap != NULL )
+        return 0;
+
     d->arch.paging.log_dirty.bitmap_size =
         (domain_get_maximum_gpfn(d) + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
     d->arch.paging.log_dirty.bitmap = 
@@ -133,14 +150,21 @@ int paging_log_dirty_enable(struct domai
         goto out;
     }
 
-    ret = d->arch.paging.log_dirty.enable_log_dirty(d);
-    if ( ret != 0 )
-        paging_free_log_dirty_bitmap(d);
-
- out:
-    log_dirty_unlock(d);
+    log_dirty_unlock(d);
+
+    /* Safe because the domain is paused. */    
+    ret = d->arch.paging.log_dirty.enable_log_dirty(d);    
+
+    /* This may leave the bitmap allocated here on failure, but it
+     * will be tidied up on domain teardown. */
+
     domain_unpause(d);
     return ret;
+
+ out:
+    log_dirty_unlock(d);
+    domain_unpause(d);
+    return ret;
 }
 
 int paging_log_dirty_disable(struct domain *d)
@@ -148,8 +172,9 @@ int paging_log_dirty_disable(struct doma
     int ret;
 
     domain_pause(d);
+    /* Safe because the domain is paused. */
+    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
     log_dirty_lock(d);
-    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
     if ( !paging_mode_log_dirty(d) )
         paging_free_log_dirty_bitmap(d);
     log_dirty_unlock(d);
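
Distilled from the locking comment added above, the safe shape shared by
paging_log_dirty_enable() and paging_log_dirty_disable() is: pause the
domain, do the bitmap work under the log-dirty lock, drop the lock, and only
then follow the mode-specific callback. A simplified sketch (error handling
trimmed; the two functions above are the real instances):

    /* Sketch only: the pause / lock / unlock-before-callback pattern. */
    int control_op_sketch(struct domain *d)
    {
        int ret;

        domain_pause(d);      /* no vcpu runs, so no racing mark_dirty */
        log_dirty_lock(d);
        ret = paging_alloc_log_dirty_bitmap(d);  /* bitmap work locked */
        log_dirty_unlock(d);  /* must be dropped before the callback */

        if ( ret == 0 )
            /* May take the shadow or HAP lock internally; safe only
             * because the domain is still paused. */
            ret = d->arch.paging.log_dirty.enable_log_dirty(d);

        domain_unpause(d);
        return ret;
    }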
@@ -182,7 +207,10 @@ void paging_mark_dirty(struct domain *d,
      * Nothing to do here...
      */
     if ( unlikely(!VALID_M2P(pfn)) )
+    {
+        log_dirty_unlock(d);
         return;
+    }
 
     if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) ) 
     { 
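
The hunk above fixes a lock leak: the early return for an invalid M2P entry
previously left the log-dirty lock held. An equivalent single-exit shape
would be the following sketch (illustrative only; the patch keeps the
explicit unlock-and-return):

    /* Sketch: the same fix with one unlock site via goto. */
    void mark_dirty_sketch(struct domain *d, unsigned long pfn)
    {
        log_dirty_lock(d);

        if ( unlikely(!VALID_M2P(pfn)) )
            goto out;        /* invalid M2P entry: nothing to record */

        if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) )
            /* ... set the bit in d->arch.paging.log_dirty.bitmap ... */;

     out:
        log_dirty_unlock(d); /* every path now releases the lock */
    }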
@@ -237,11 +265,6 @@ int paging_log_dirty_op(struct domain *d
     {
         d->arch.paging.log_dirty.fault_count = 0;
         d->arch.paging.log_dirty.dirty_count = 0;
-
-        /* We need to further call clean_dirty_bitmap() functions of specific
-         * paging modes (shadow or hap).
-         */
-        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
 
     if ( guest_handle_is_null(sc->dirty_bitmap) )
@@ -280,6 +303,17 @@ int paging_log_dirty_op(struct domain *d
     }
 #undef CHUNK
 
+    log_dirty_unlock(d);
+
+    if ( clean )
+    {
+        /* We still need to call the clean_dirty_bitmap() function of the
+         * specific paging mode (shadow or HAP).  Safe because the domain
+         * is paused. */
+        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
+    }
+    domain_unpause(d);
+    return rv;
+
  out:
     log_dirty_unlock(d);
     domain_unpause(d);
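
Put together, the CLEAN operation now copies and resets the bitmap under the
log-dirty lock, drops the lock, and only then re-arms the write traps through
the mode-specific callback. In outline (a sketch of the resulting control
flow, not verbatim code):

    /* Outline of paging_log_dirty_op() after this patch. */
    domain_pause(d);
    log_dirty_lock(d);
    /* ... copy the bitmap out to sc->dirty_bitmap; if cleaning, zero
     * the bitmap and the fault/dirty counters ... */
    log_dirty_unlock(d);

    if ( clean )
        /* Re-arms the write traps; may take the shadow/HAP lock, so it
         * runs without the log-dirty lock, with the domain paused. */
        d->arch.paging.log_dirty.clean_dirty_bitmap(d);

    domain_unpause(d);
    return rv;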
@@ -291,6 +325,8 @@ int paging_log_dirty_op(struct domain *d
  * these functions for log dirty code to call. This function usually is 
  * invoked when paging is enabled. Check shadow_enable() and hap_enable() for 
  * reference.
+ *
+ * These function pointers must not be followed with the log-dirty lock held. 
  */
 void paging_log_dirty_init(struct domain *d,
                            int    (*enable_log_dirty)(struct domain *d),
@@ -319,8 +355,13 @@ void paging_domain_init(struct domain *d
 void paging_domain_init(struct domain *d)
 {
     p2m_init(d);
+
+    /* The order of the *_init calls below is important, as the later
+     * ones may rewrite some common fields.  Shadow pagetables are the
+     * default... */
     shadow_domain_init(d);
 
+    /* ... but we will use hardware assistance if it's available. */
     if ( opt_hap_enabled && is_hvm_domain(d) )
         hap_domain_init(d);
 }
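
Since both initialisers now register log-dirty callbacks through
paging_log_dirty_init(), this call order is what selects the implementation:
shadow_domain_init() installs the shadow callbacks for every domain, and
hap_domain_init() then overwrites them for HVM domains when HAP is enabled.
An illustrative check (not part of the patch) of the state after
paging_domain_init() on such a domain:

    /* Not in the patch: what the ordering implies for an HVM domain
     * created with HAP enabled. */
    ASSERT(d->arch.paging.log_dirty.enable_log_dirty  == hap_enable_log_dirty);
    ASSERT(d->arch.paging.log_dirty.disable_log_dirty == hap_disable_log_dirty);
    ASSERT(d->arch.paging.log_dirty.clean_dirty_bitmap == hap_clean_dirty_bitmap);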
@@ -397,13 +438,13 @@ int paging_domctl(struct domain *d, xen_
 /* Call when destroying a domain */
 void paging_teardown(struct domain *d)
 {
+    if ( opt_hap_enabled && is_hvm_domain(d) )
+        hap_teardown(d);
+    else
+        shadow_teardown(d);
+
     /* clean up log dirty resources. */
     paging_log_dirty_teardown(d);
-    
-    if ( opt_hap_enabled && is_hvm_domain(d) )
-        hap_teardown(d);
-    else
-        shadow_teardown(d);
 }
 
 /* Call once all of the references to the domain have gone away */
diff -r 3d5f39c610ad -r 2c8c6ca1296b xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Jun 11 14:38:46 2007 +0100
@@ -49,6 +49,10 @@ void shadow_domain_init(struct domain *d
         INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
     INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+
+    /* Use shadow pagetables for log-dirty support */
+    paging_log_dirty_init(d, shadow_enable_log_dirty, 
+                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
 }
 
 /* Set up the shadow-specific parts of a vcpu struct. Note: The most important
@@ -2453,10 +2457,6 @@ int shadow_enable(struct domain *d, u32 
         }        
     }
 
-    /* initialize log dirty here */
-    paging_log_dirty_init(d, shadow_enable_log_dirty, 
-                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
-
     /* Init the P2M table.  Must be done before we take the shadow lock 
      * to avoid possible deadlock. */
     if ( mode & PG_translate )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
