
[Xen-devel] Re: [PATCH] libxenlight: clone context to avoid GC corruption



All right, I've cut this patch. There is a function to clone a context, another that clones a context and also opens a new xenstore handle, and the corresponding 'discard' counterparts. Ack/nack them and I'll resubmit the remaining two patches still in the pipeline.
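
For reference, the intended calling pattern looks roughly like this. This is a sketch only, modelled on the libxl_devices_destroy hunk in the patch below; example_worker is an illustrative name, not part of the patch, and the real work is elided:

    /* Sketch of intended usage, modelled on the libxl_devices_destroy
     * hunk below; example_worker is not part of the patch. */
    #include "libxl.h"
    #include "libxl_internal.h"

    static int example_worker(struct libxl_ctx *ctx)
    {
        struct libxl_ctx clone;

        if (libxl_clone_context_xs(ctx, &clone)) {
            XL_LOG(ctx, XL_LOG_ERROR, "Out of memory when cloning context");
            return ERROR_NOMEM;
        }

        /* ... use &clone (and its private clone.xsh) instead of ctx ... */

        libxl_discard_cloned_context_xs(&clone);
        return 0;
    }
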
Thanks for reviewing everything else.
Andres
Stefano Stabellini wrote:
> On Thu, 3 Dec 2009, Stefano Stabellini wrote:
> > On Wed, 2 Dec 2009, Andres Lagar-Cavilla wrote:
> > > Provide a function to clone a context. This is necessary
> > > because simply copying the structs will eventually
> > > corrupt the GC: maxsize is updated in the cloned context
> > > but not in the originating one, yet they have the same
> > > array of referenced pointers alloc_ptrs.
> > >
> > > Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
> >
> > Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
>
> Actually I missed one thing: I think you should move
>
>     clone.xsh = xs_daemon_open();
>
> into libxl_clone_context and
>
>     xs_daemon_close(clone.xsh);
>
> into libxl_discard_cloned_context.
>
> The rest is fine, thanks for finding and fixing this bug!
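
To make the failure mode concrete, here is a small self-contained program. It is NOT the real libxl code: toy_ctx and toy_ptr_add are simplified stand-ins for struct libxl_ctx and libxl_ptr_add, with error checking omitted, but they show how a shallow copy that shares alloc_ptrs goes stale as soon as one side grows the array:

    /* Toy illustration of the bug, not the real libxl code. */
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_ctx {
        void **alloc_ptrs;   /* array of tracked allocations (the "GC") */
        int alloc_maxsize;   /* capacity of alloc_ptrs                  */
        int alloc_used;      /* slots currently in use                  */
    };

    static void toy_ptr_add(struct toy_ctx *ctx, void *ptr)
    {
        if (ctx->alloc_used == ctx->alloc_maxsize) {
            /* Grow the tracking array; only *this* struct sees the
             * new pointer and the new maxsize. */
            ctx->alloc_maxsize *= 2;
            ctx->alloc_ptrs = realloc(ctx->alloc_ptrs,
                                      ctx->alloc_maxsize * sizeof(void *));
        }
        ctx->alloc_ptrs[ctx->alloc_used++] = ptr;
    }

    int main(void)
    {
        struct toy_ctx orig = { calloc(2, sizeof(void *)), 2, 0 };
        struct toy_ctx copy = orig;      /* shallow copy, like 'clone = *ctx' */

        toy_ptr_add(&copy, malloc(1));
        toy_ptr_add(&copy, malloc(1));
        toy_ptr_add(&copy, malloc(1));   /* forces realloc() in the copy only */

        /* orig still believes the array holds 2 entries, and orig.alloc_ptrs
         * may point at memory that realloc() has already freed. */
        printf("orig.alloc_maxsize=%d, copy.alloc_maxsize=%d\n",
               orig.alloc_maxsize, copy.alloc_maxsize);

        for (int i = 0; i < copy.alloc_used; i++)
            free(copy.alloc_ptrs[i]);
        free(copy.alloc_ptrs);
        return 0;
    }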

# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1259848013 18000
# Node ID eeb2f35cd9a7a5a6c4e38c1ec5995524035375fa
# Parent  684a4d0abc86dbded4ceda68f7b5e1b4806ff1d7
Provide a function to clone a context. This is necessary
because simply copying the structs will eventually
corrupt the GC: maxsize is updated in the cloned context
but not in the originating one, yet they have the same array
of referenced pointers alloc_ptrs.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

diff -r 684a4d0abc86 -r eeb2f35cd9a7 libxl.c
--- a/libxl.c
+++ b/libxl.c
@@ -656,10 +656,17 @@ void dm_xenstore_record_pid(struct libxl
     struct libxl_device_model_starting *starting = for_spawn;
     struct libxl_ctx clone;
     char *kvs[3];
-    int rc;
+    int rc, cloned;
 
-    clone = *ctx;
-    clone.xsh = xs_daemon_open();
+    if (libxl_clone_context_xs(ctx, &clone)) {
+        XL_LOG(ctx, XL_LOG_ERROR, "Out of memory when cloning context");
+        /* Last-resort fallback: shallow copy, as before */
+        clone = *ctx;
+        clone.xsh = xs_daemon_open();
+        cloned = 0;
+    } else {
+        cloned = 1;
+    }
     /* we mustn't use the parent's handle in the child */
 
     kvs[0] = "image/device-model-pid";
@@ -669,7 +676,11 @@ void dm_xenstore_record_pid(struct libxl
     if (rc) XL_LOG_ERRNO(&clone, XL_LOG_ERROR,
                          "Couldn't record device model pid %ld at %s/%s",
                          (unsigned long)innerchild, starting->dom_path, kvs);
-    xs_daemon_close(clone.xsh);
+    if (cloned) {
+        libxl_discard_cloned_context_xs(&clone);
+    } else {
+        xs_daemon_close(clone.xsh);
+    }
 }
 
 static int libxl_vfb_and_vkb_from_device_model_info(struct libxl_ctx *ctx,
diff -r 684a4d0abc86 -r eeb2f35cd9a7 libxl_device.c
--- a/libxl_device.c
+++ b/libxl_device.c
@@ -212,15 +212,19 @@ int libxl_devices_destroy(struct libxl_c
     fd_set rfds;
     struct timeval tv;
     flexarray_t *toremove;
-    struct libxl_ctx clone = *ctx;
+    struct libxl_ctx clone;
 
-    clone.xsh = xs_daemon_open();
+    if (libxl_clone_context_xs(ctx, &clone)) {
+        XL_LOG(ctx, XL_LOG_ERROR, "Out of memory when cloning context");
+        return ERROR_NOMEM;
+    }
+
     toremove = flexarray_make(16, 1);
     path = libxl_sprintf(&clone, "/local/domain/%d/device", domid);
     l1 = libxl_xs_directory(&clone, XBT_NULL, path, &num1);
     if (!l1) {
         XL_LOG(&clone, XL_LOG_ERROR, "%s is empty", path);
-        xs_daemon_close(clone.xsh);
+        libxl_discard_cloned_context_xs(&clone);
         return -1;
     }
     for (i = 0; i < num1; i++) {
@@ -269,7 +273,7 @@ int libxl_devices_destroy(struct libxl_c
         xs_rm(clone.xsh, XBT_NULL, path);
     }
     flexarray_free(toremove);
-    xs_daemon_close(clone.xsh);
+    libxl_discard_cloned_context_xs(&clone);
     return 0;
 }
 
diff -r 684a4d0abc86 -r eeb2f35cd9a7 libxl_internal.c
--- a/libxl_internal.c
+++ b/libxl_internal.c
@@ -28,6 +28,28 @@ int libxl_error_set(struct libxl_ctx *ct
     return 0;
 }
 
+int libxl_clone_context(struct libxl_ctx *from, struct libxl_ctx *to)
+{
+    /* We could just copy the structs, but since
+     * maxsize is not a pointer we need to take care
+     * of our own GC. */
+    *to = *from;
+    to->alloc_ptrs = NULL;
+    to->alloc_maxsize = 256;
+    to->alloc_ptrs = calloc(to->alloc_maxsize, sizeof(void *));
+    if (!to->alloc_ptrs)
+        return ERROR_NOMEM;
+    return 0;
+}
+
+void libxl_discard_cloned_context(struct libxl_ctx *ctx)
+{
+    /* We only need to worry about GC-related fields */
+    (void)libxl_ctx_free(ctx);
+    if (ctx->alloc_ptrs)
+        free(ctx->alloc_ptrs);
+}
+
 int libxl_ptr_add(struct libxl_ctx *ctx, void *ptr)
 {
     int i;
diff -r 684a4d0abc86 -r eeb2f35cd9a7 libxl_internal.h
--- a/libxl_internal.h
+++ b/libxl_internal.h
@@ -63,6 +63,23 @@ typedef struct {
 #define PRINTF_ATTRIBUTE(x, y) __attribute__((format(printf, x, y)))
 
 /* memory allocation tracking/helpers */
+int libxl_clone_context(struct libxl_ctx *from, struct libxl_ctx *to);
+static inline int libxl_clone_context_xs(
+                struct libxl_ctx *from, struct libxl_ctx *to)
+{
+    int rc;
+    rc = libxl_clone_context(from, to);
+    if (rc) return rc;
+    to->xsh = xs_daemon_open();
+    return 0;
+}
+void libxl_discard_cloned_context(struct libxl_ctx *ctx);
+static inline void libxl_discard_cloned_context_xs(
+                struct libxl_ctx *ctx)
+{
+    libxl_discard_cloned_context(ctx);
+    xs_daemon_close(ctx->xsh);
+}
 int libxl_ptr_add(struct libxl_ctx *ctx, void *ptr);
 int libxl_free(struct libxl_ctx *ctx, void *ptr);
 int libxl_free_all(struct libxl_ctx *ctx);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

