
Re: [Xen-devel] [PATCH 6/6] libxl: support unmapping static shared memory areas during domain destruction



On Wed, 23 Aug 2017, Zhongze Liu wrote:
> Add libxl__sshm_del to unmap static shared memory areas mapped by
> libxl__sshm_add during domain creation. The unmapping process is:
> 
> * For a master: check whether it still has living slaves: 1) if yes, mark its
>   status as "zombie" and don't destroy the information under
>   /local/static_shm; 2) if no, simply clean up the related xs entries.
> * For a slave: unmap the shared pages and clean up the related xs entries. If
>   this is the only slave and its master is in the zombie state, also clean up
>   the master entries.
> 
> This is for the proposal "Allow setting up shared memory areas between VMs
> from xl config file" (see [1]).
> 
> [1] https://lists.xenproject.org/archives/html/xen-devel/2017-07/msg03047.html
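
For reference (my reading of the reads and writes in this patch plus the
commit message above, not checked against the rest of the series, so take
the exact node names as an assumption), the xenstore state being torn down
here looks roughly like:

    /local/domain/<domid>/static_shm/<id>/role = "master" | "slave"

    <sshm_path>/             (per-id path from libxl__xs_get_sshmpath(),
                              presumably under /local/static_shm)
        master               = <master domid>
        status               = set to "zombie" when the master is destroyed
                               while slaves remain
        slaves/<domid>/begin = <start of the shared range, hex>
        slaves/<domid>/end   = <end of the shared range, hex>
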
> 
> Signed-off-by: Zhongze Liu <blackskygg@xxxxxxxxx>
> 
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> Cc: xen-devel@xxxxxxxxxxxxx
> ---
>  tools/libxl/libxl_domain.c   |   5 ++
>  tools/libxl/libxl_internal.h |   2 +
>  tools/libxl/libxl_sshm.c     | 125 +++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 132 insertions(+)
> 
> diff --git a/tools/libxl/libxl_domain.c b/tools/libxl/libxl_domain.c
> index 08eccd082b..73ac856fb4 100644
> --- a/tools/libxl/libxl_domain.c
> +++ b/tools/libxl/libxl_domain.c
> @@ -1028,6 +1028,11 @@ void libxl__destroy_domid(libxl__egc *egc, libxl__destroy_domid_state *dis)
>          goto out;
>      }
>  
> +    rc = libxl__sshm_del(gc, domid);
> +    if (rc) {
> +        LOGD(ERROR, domid, "Deleting static shm failed.");
> +    }
> +
>      if (libxl__device_pci_destroy_all(gc, domid) < 0)
>          LOGD(ERROR, domid, "Pci shutdown failed");
>      rc = xc_domain_pause(ctx->xch, domid);
> diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> index 74bc0acb21..648eaee8c2 100644
> --- a/tools/libxl/libxl_internal.h
> +++ b/tools/libxl/libxl_internal.h
> @@ -4361,6 +4361,8 @@ static inline bool libxl__acpi_defbool_val(const libxl_domain_build_info *b_info
>  _hidden int libxl__sshm_add(libxl__gc *gc, uint32_t domid,
>                              libxl_static_shm *sshm, int len);
>  
> +_hidden int libxl__sshm_del(libxl__gc *gc, uint32_t domid);
> +
>  _hidden int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t domid,
>                                        libxl_static_shm *sshms, int len);
>  _hidden int libxl__sshm_setdefault(libxl__gc *gc, uint32_t domid,
> diff --git a/tools/libxl/libxl_sshm.c b/tools/libxl/libxl_sshm.c
> index e16c24ccb9..417a4cd0a4 100644
> --- a/tools/libxl/libxl_sshm.c
> +++ b/tools/libxl/libxl_sshm.c
> @@ -79,6 +79,131 @@ int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t domid,
>      return 0;
>  }
>  
> +/* Delete the sshm master information from xenstore. If there are still
> + * living slaves, mark the master as being in the zombie state but do not
> + * delete it; it will be fully deleted later, once all slaves are gone. */
> +static void libxl__sshm_del_master(libxl__gc *gc, xs_transaction_t xt,
> +                                   uint32_t domid, const char *id)
> +{
> +    char *sshm_path;
> +
> +    sshm_path = libxl__xs_get_sshmpath(gc, id);
> +
> +    /* we know that domid can't be both a master and a slave for one id
> +     * (enforced in the *_add_master() and *_add_slave() code),
> +     * so the number of slaves won't change during iteration. Simply check
> +     * sshm_path/slaves to tell if there are still living slaves. */
> +    if (libxl__xs_read(gc, xt, GCSPRINTF("%s/slaves", sshm_path))) {

Is it possible that "slaves" is present but empty? What does
libxl__xs_read return in that case?
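
If an empty "slaves" directory node can be left behind, one way to make
this check robust would be to count its children instead of testing for
the node's existence, along these lines (a rough, untested sketch that
reuses libxl__xs_directory the same way libxl__sshm_del does further down):

    unsigned int nb = 0;
    char **slaves = libxl__xs_directory(gc, xt,
                                        GCSPRINTF("%s/slaves", sshm_path),
                                        &nb);

    if (slaves && nb > 0) {
        /* still has living slaves: keep the entry and mark it zombie */
        SSHM_ERROR(domid, id,
                   "there are living slaves, won't be totally destroyed");
        libxl__xs_write_checked(gc, xt,
                                GCSPRINTF("%s/status", sshm_path),
                                "zombie");
    } else {
        /* no slaves left (or no "slaves" node at all): drop the subtree */
        libxl__xs_path_cleanup(gc, xt, sshm_path);
    }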


> +        SSHM_ERROR(domid, id,
> +                   "there are living slaves, won't be totally destroyed");
> +
> +        libxl__xs_write_checked(gc, xt,
> +                                GCSPRINTF("%s/status", sshm_path),
> +                                "zombie");
> +    } else {
> +        libxl__xs_path_cleanup(gc, xt, sshm_path);
> +    }
> +
> +    return;
> +}
> +
> +static void libxl__sshm_do_unmap(libxl__gc *gc, uint32_t domid, const char *id,
> +                                 uint64_t begin, uint64_t end)
> +{
> +    begin >>= 12;
> +    end >>= 12;
> +    for (; begin < end; ++begin) {
> +        if (xc_domain_remove_from_physmap(CTX->xch, domid, begin)) {
> +            SSHM_ERROR(domid, id,
> +                       "unable to unmap shared page at 0x%"PRIx64".",
> +                       begin);
> +        }
> +    }
> +}
> +
> +static void libxl__sshm_del_slave(libxl__gc *gc, xs_transaction_t xt,
> +                                  uint32_t domid, const char *id, bool isretry)
> +{
> +    char *sshm_path, *slave_path;
> +    char *master_stat, *mdomid_str, *begin_str, *end_str;
> +    uint64_t begin, end;
> +    uint32_t master_domid;
> +
> +    sshm_path = libxl__xs_get_sshmpath(gc, id);
> +    slave_path = GCSPRINTF("%s/slaves/%"PRIu32, sshm_path, domid);
> +
> +    begin_str = libxl__xs_read(gc, xt, GCSPRINTF("%s/begin", slave_path));
> +    end_str = libxl__xs_read(gc, xt, GCSPRINTF("%s/end", slave_path));
> +    begin = strtoull(begin_str, NULL, 16);
> +    end = strtoull(end_str, NULL, 16);
> +
> +    /* Avoid calling do_unmap many times in case of xs transaction retry */
> +    if (!isretry)
> +        libxl__sshm_do_unmap(gc, domid, id, begin, end);
> +
> +    libxl__xs_path_cleanup(gc, xt, slave_path);
> +
> +    /* Check whether the master is in the zombie state and now has no
> +     * slaves; if so, this is the time to destroy it. */
> +    master_stat = libxl__xs_read(gc, xt, GCSPRINTF("%s/status", sshm_path));
> +    if (!strncmp(master_stat, "zombie", 6) &&
> +        !libxl__xs_read(gc, xt, GCSPRINTF("%s/slaves", sshm_path)))

same here


> +    {
> +        mdomid_str = libxl__xs_read(gc, xt, GCSPRINTF("%s/master", sshm_path));
> +        master_domid = strtoul(mdomid_str, NULL, 10);
> +        libxl__sshm_del_master(gc, xt, master_domid, id);
> +    }
> +}
> +
> +/* Delete static_shm entries in the xenstore. */
> +int libxl__sshm_del(libxl__gc *gc,  uint32_t domid)
> +{
> +    int rc, i;
> +    bool isretry;
> +    xs_transaction_t xt = XBT_NULL;
> +    char *dom_path, *dom_sshm_path;
> +    const char *role;
> +    char **sshm_ents;
> +    unsigned int sshm_num;
> +
> +    dom_path = libxl__xs_get_dompath(gc, domid);
> +    dom_sshm_path = GCSPRINTF("%s/static_shm", dom_path);
> +
> +    isretry = false;
> +    for (;;) {
> +        rc = libxl__xs_transaction_start(gc, &xt);
> +        if (rc) goto out;
> +
> +        if (libxl__xs_read(gc, xt, dom_sshm_path)) {
> +            sshm_ents = libxl__xs_directory(gc, xt, dom_sshm_path, &sshm_num);
> +            if (!sshm_ents) continue;
> +
> +            for (i = 0; i < sshm_num; ++i) {
> +                role = libxl__xs_read(gc, xt,
> +                                      GCSPRINTF("%s/%s/role",
> +                                                dom_sshm_path,
> +                                                sshm_ents[i]));
> +                assert(role);
> +                if (!strncmp(role, "slave", 5)) {
> +                    libxl__sshm_del_slave(gc, xt, domid, sshm_ents[i], isretry);
> +                } else {
> +                    libxl__sshm_del_master(gc, xt, domid, sshm_ents[i]);
> +                }
> +            }
> +        }
> +
> +        rc = libxl__xs_transaction_commit(gc, &xt);
> +        if (!rc) break;
> +        if (rc < 0) goto out;
> +        isretry = true;
> +    }
> +
> +    rc = 0;
> +out:
> +    libxl__xs_transaction_abort(gc, &xt);
> +    return rc;
> +}
> +
>  /* The caller has to guarantee that sshm->begin < sshm->end */
>  static int libxl__sshm_do_map(libxl__gc *gc, uint32_t mid, uint32_t sid,
>                                libxl_static_shm *sshm,
> -- 
> 2.14.0
> 
