
[Xen-devel] [PATCH 21/27] libxl: Fix an ao completion bug; document locking policy

Document the concurrent access policies for libxl__ao and libxl__egc,
and their corresponding gcs.

Fix a violation of the policy:

If an ao was submitted with a callback requested, and the ao was
completed on another thread while the initiating function was still
running on the original thread, then the completing thread would
improperly access the ao concurrently with the initiating thread.

Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 tools/libxl/libxl_event.c    |    7 +++++++
 tools/libxl/libxl_internal.h |   23 ++++++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletions(-)

diff --git a/tools/libxl/libxl_event.c b/tools/libxl/libxl_event.c
index 3e784f0..b546471 100644
--- a/tools/libxl/libxl_event.c
+++ b/tools/libxl/libxl_event.c
@@ -901,6 +901,11 @@ void libxl__event_disaster(libxl__egc *egc, const char *msg, int errnoval,
 
 static void egc_run_callbacks(libxl__egc *egc)
 {
+    /*
+     * The callbacks must happen with the ctx unlocked.
+     * See the comment near #define EGC_GC in libxl_internal.h and
+     * those in the definitions of libxl__egc and libxl__ao.
+     */
     EGC_GC;
     libxl_event *ev, *ev_tmp;
 
@@ -914,9 +919,11 @@ static void egc_run_callbacks(libxl__egc *egc)
                              entry_for_callback, ao_tmp) {
         LIBXL_TAILQ_REMOVE(&egc->aos_for_callback, ao, entry_for_callback);
         ao->how.callback(CTX, ao->rc, ao->how.u.for_callback);
+        CTX_LOCK;
         ao->notified = 1;
         if (!ao->in_initiator)
             libxl__ao__destroy(CTX, ao);
+        CTX_UNLOCK;
     }
 }
 
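The completer's side above pairs with the initiator's side of the
handshake, which under the same lock clears in_initiator and, if the
callback has already been delivered, frees the ao itself.  Roughly (a
paraphrase; see libxl__ao_inprogress in libxl_event.c for the real
code):

    CTX_LOCK;
    ao->in_initiator = 0;
    if (ao->notified)
        /* The completer has already run the callback, so freeing
         * the ao is now the initiator's job. */
        libxl__ao__destroy(CTX, ao);
    CTX_UNLOCK;
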
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 9b4c273..f48cfdc 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -359,7 +359,8 @@ struct libxl__gc {
 };
 
 struct libxl__egc {
-    /* for event-generating functions only */
+    /* For event-generating functions only.
+     * The egc and its gc may be accessed only on the creating thread. */
     struct libxl__gc gc;
     struct libxl__event_list occurred_for_callback;
     LIBXL_TAILQ_HEAD(, libxl__ao) aos_for_callback;
@@ -369,6 +370,20 @@ struct libxl__egc {
 #define LIBXL__AO_MAGIC_DESTROYED    0xA0DEAD00ul
 
 struct libxl__ao {
+    /*
+     * An ao and its gc may be accessed only with the ctx lock held.
+     *
+     * Special exception: If an ao has been added to
+     * egc->aos_for_callback, the thread owning the egc may remove the
+     * ao from that list and make the callback without holding the
+     * lock.
+     *
+     * Corresponding restriction: An ao may be added only to one
+     * egc->aos_for_callback, once; rc and how must already have been
+     * set and may not be subsequently modified.  (This restriction is
+     * easily and obviously met since the ao is queued for callback
+     * only in libxl__ao_complete.)
+     */
     uint32_t magic;
     unsigned constructing:1, in_initiator:1, complete:1, notified:1;
     int rc;
@@ -1365,6 +1380,12 @@ libxl__device_model_version_running(libxl__gc *gc, uint32_t domid);
  * should in any case not find it necessary to call egc-creators from
  * within libxl.
  *
+ * The callbacks must all take place with the ctx unlocked because
+ * the application is entitled to reenter libxl from them.  This
+ * would be bad not because the lock is not recursive (it is) but
+ * because the application might make blocking libxl calls which
+ * would hold the lock unreasonably long.
+ *
  * For the same reason libxl__egc_cleanup (or EGC_FREE) must be called
  * with the ctx *unlocked*.  So the right pattern has the EGC_...
  * macro calls on the outside of the CTX_... ones.
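
As a sketch, the resulting nesting looks like this (libxl_frob and
do_work are invented names; EGC_INIT, EGC_FREE, CTX_LOCK and CTX_UNLOCK
are the macros from libxl_internal.h; compare libxl_event_check):

    extern int do_work(libxl__egc *egc);   /* invented helper */

    int libxl_frob(libxl_ctx *ctx)
    {
        int rc;
        EGC_INIT(ctx);       /* outermost: declares and sets up egc */
        CTX_LOCK;            /* strictly inside the EGC_... pair */
        rc = do_work(egc);   /* may queue events/ao callbacks on egc */
        CTX_UNLOCK;          /* drop the lock first...                */
        EGC_FREE;            /* ...then run queued callbacks unlocked */
        return rc;
    }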
-- 
1.7.2.5
