[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1 of 6 RESEND] libxl: Remove frontend and backend devices from xenstore after destroy



On Sun, 2011-06-05 at 17:50 +0100, Marek Marczykowski wrote:
> # HG changeset patch
> # User Marek Marczykowski <marmarek@xxxxxxxxxxxx>
> # Date 1307143993 -7200
> # Node ID c32797243a6ba61dd2942a0307151e42fb7bf157
> # Parent  37c77bacb52aa7795978b994f9d371b979b2cb07
> libxl: Remove frontend and backend devices from xenstore after destroy
> 
> Clean up frontend and backend devices from xenstore for all device types - not
> only disks. Because the backend cleanup has moved into libxl__device_destroy,
> libxl__devices_destroy is somewhat simpler.
> 
> Signed-off-by: Marek Marczykowski <marmarek@xxxxxxxxxxxx>

Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

> 
> diff -r 37c77bacb52a -r c32797243a6b tools/libxl/libxl.c
> --- a/tools/libxl/libxl.c     Mon May 23 17:38:28 2011 +0100
> +++ b/tools/libxl/libxl.c     Sat Jun 04 01:33:13 2011 +0200
> @@ -1105,8 +1105,6 @@ int libxl_device_disk_del(libxl_ctx *ctx
>      device.devid            = devid;
>      device.kind             = DEVICE_VBD;
>      rc = libxl__device_del(&gc, &device, wait);
> -    xs_rm(ctx->xsh, XBT_NULL, libxl__device_backend_path(&gc, &device));
> -    xs_rm(ctx->xsh, XBT_NULL, libxl__device_frontend_path(&gc, &device));
>  out_free:
>      libxl__free_all(&gc);
>      return rc;
> diff -r 37c77bacb52a -r c32797243a6b tools/libxl/libxl_device.c
> --- a/tools/libxl/libxl_device.c      Mon May 23 17:38:28 2011 +0100
> +++ b/tools/libxl/libxl_device.c      Sat Jun 04 01:33:13 2011 +0200
> @@ -272,6 +272,8 @@ retry_transaction:
>      if (!force) {
>          xs_watch(ctx->xsh, state_path, be_path);
>          rc = 1;
> +    } else {
> +        xs_rm(ctx->xsh, XBT_NULL, be_path);
>      }
>  out:
>      return rc;
> @@ -311,10 +313,8 @@ int libxl__devices_destroy(libxl__gc *gc
>      char *path, *be_path, *fe_path;
>      unsigned int num1, num2;
>      char **l1 = NULL, **l2 = NULL;
> -    int i, j, n = 0, n_watches = 0;
> -    flexarray_t *toremove;
> +    int i, j, n_watches = 0;
>  
> -    toremove = flexarray_make(16, 1);
>      path = libxl__sprintf(gc, "/local/domain/%d/device", domid);
>      l1 = libxl__xs_directory(gc, XBT_NULL, path, &num1);
>      if (!l1) {
> @@ -338,7 +338,6 @@ int libxl__devices_destroy(libxl__gc *gc
>              if (be_path != NULL) {
>                  if (libxl__device_destroy(gc, be_path, force) > 0)
>                      n_watches++;
> -                flexarray_set(toremove, n++, libxl__dirname(gc, be_path));
>              } else {
>                  xs_rm(ctx->xsh, XBT_NULL, path);
>              }
> @@ -351,7 +350,6 @@ int libxl__devices_destroy(libxl__gc *gc
>      if (be_path && strcmp(be_path, "")) {
>          if (libxl__device_destroy(gc, be_path, force) > 0)
>              n_watches++;
> -        flexarray_set(toremove, n++, libxl__dirname(gc, be_path));
>      }
>  
>      if (!force) {
> @@ -371,17 +369,13 @@ int libxl__devices_destroy(libxl__gc *gc
>              }
>          }
>      }
> -    for (i = 0; i < n; i++) {
> -        flexarray_get(toremove, i, (void**) &path);
> -        xs_rm(ctx->xsh, XBT_NULL, path);
> -    }
>  out:
> -    flexarray_free(toremove);
>      return 0;
>  }
>  
>  int libxl__device_del(libxl__gc *gc, libxl__device *dev, int wait)
>  {
> +    libxl_ctx *ctx = libxl__gc_owner(gc);
>      char *backend_path;
>      int rc;
>  
> @@ -400,6 +394,7 @@ int libxl__device_del(libxl__gc *gc, lib
>          (void)wait_for_dev_destroy(gc, &tv);
>      }
>  
> +    xs_rm(ctx->xsh, XBT_NULL, libxl__device_frontend_path(gc, dev));
>      rc = 0;
>  
>  out:
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx
> http://lists.xensource.com/xen-devel



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.