[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v8 4/8] libxl: support unmapping static shared memory areas during domain destruction



On Wed, Oct 24, 2018 at 2:49 PM Oleksandr Andrushchenko
<andr2000@xxxxxxxxx> wrote:
>
> On 10/09/2018 02:37 AM, Stefano Stabellini wrote:
> > From: Zhongze Liu <blackskygg@xxxxxxxxx>
> >
> > Author: Zhongze Liu <blackskygg@xxxxxxxxx>
> >
> > Add libxl__sshm_del to unmap static shared memory areas mapped by
> > libxl__sshm_add during domain creation. The unmapping process is:
> >
> > * For a master: decrease the refcount of the sshm region, if the refcount
> >    reaches 0, cleanup the whole sshm path.
> >
> > * For a slave:
> >    1) unmap the shared pages, and cleanup related xs entries. If the
> >       system works normally, all the shared pages will be unmapped, so there
> >       won't be page leaks. In case of errors, the unmapping process will go
> >       on and unmap all the other pages that can be unmapped, so the other
> >       pages won't be leaked, either.
> >    2) Decrease the refcount of the sshm region, if the refcount reaches
> >       0, cleanup the whole sshm path.
> >
> > This is for the proposal "Allow setting up shared memory areas between VMs
> > from xl config file" (see [1]).
> >
> > [1] https://lists.xen.org/archives/html/xen-devel/2017-08/msg03242.html
> >
> > Signed-off-by: Zhongze Liu <blackskygg@xxxxxxxxx>
> > Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>
> >
> > Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> > Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> > Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> > Cc: Julien Grall <julien.grall@xxxxxxx>
> > Cc: xen-devel@xxxxxxxxxxxxx
> >
> > ---
> > Changes in v5:
> > - fix typos
> > - add comments
> > - cannot move unmap before xenstore transaction because it needs to
> >    retrieve begin/size values from xenstore
> > ---
> >   tools/libxl/libxl_domain.c   |   5 ++
> >   tools/libxl/libxl_internal.h |   2 +
> >   tools/libxl/libxl_sshm.c     | 107 
> > +++++++++++++++++++++++++++++++++++++++++++
> >   3 files changed, 114 insertions(+)
> >
> > diff --git a/tools/libxl/libxl_domain.c b/tools/libxl/libxl_domain.c
> > index 3377bba..3f7ffa6 100644
> > --- a/tools/libxl/libxl_domain.c
> > +++ b/tools/libxl/libxl_domain.c
> > @@ -1060,6 +1060,11 @@ void libxl__destroy_domid(libxl__egc *egc, 
> > libxl__destroy_domid_state *dis)
> >           goto out;
> >       }
> >
> > +    rc = libxl__sshm_del(gc, domid);
> > +    if (rc) {
> > +        LOGD(ERROR, domid, "Deleting static shm failed.");
> > +    }
> Do you need brackets here?
> > +
> >       if (libxl__device_pci_destroy_all(gc, domid) < 0)
> >           LOGD(ERROR, domid, "Pci shutdown failed");
> >       rc = xc_domain_pause(ctx->xch, domid);
> > diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> > index 6f31a3d..e86d356 100644
> > --- a/tools/libxl/libxl_internal.h
> > +++ b/tools/libxl/libxl_internal.h
> > @@ -4442,6 +4442,8 @@ static inline const char 
> > *libxl__qemu_qmp_path(libxl__gc *gc, int domid)
> >   _hidden int libxl__sshm_add(libxl__gc *gc, uint32_t domid,
> >                               libxl_static_shm *sshm, int len);
> >
> > +_hidden int libxl__sshm_del(libxl__gc *gc, uint32_t domid);
> > +
> >   _hidden int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t domid,
> >                                         libxl_static_shm *sshms, int len);
> >   _hidden int libxl__sshm_setdefault(libxl__gc *gc, uint32_t domid,
> > diff --git a/tools/libxl/libxl_sshm.c b/tools/libxl/libxl_sshm.c
> > index f61b80c..9672056 100644
> > --- a/tools/libxl/libxl_sshm.c
> > +++ b/tools/libxl/libxl_sshm.c
> > @@ -94,6 +94,113 @@ int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t 
> > domid,
> >       return 0;
> >   }
> >
> > +/*
> > + * Decrease the refcount of an sshm. When refcount reaches 0,
> > + * clean up the whole sshm path.
> > + * Xenstore operations are done within the same transaction.
> > + */
> > +static void libxl__sshm_decref(libxl__gc *gc, xs_transaction_t xt,
> > +                               const char *sshm_path)
> > +{
> > +    int count;
> > +    const char *count_path, *count_string;
> > +
> > +    count_path = GCSPRINTF("%s/usercnt", sshm_path);
> > +    if (libxl__xs_read_checked(gc, xt, count_path, &count_string))
> > +        return;
> > +    count = atoi(count_string);
> > +
> > +    if (--count == 0) {
> > +        libxl__xs_path_cleanup(gc, xt, sshm_path);
> > +        return;
> > +    }
> > +
> > +    count_string = GCSPRINTF("%d", count);
> > +    libxl__xs_write_checked(gc, xt, count_path, count_string);
> > +
> > +    return;
> > +}
> > +
> > +static void libxl__sshm_do_unmap(libxl__gc *gc, uint32_t domid, const char 
> > *id,
> > +                                 uint64_t begin, uint64_t size)
> > +{
> > +    uint64_t end;
> > +    begin >>= XC_PAGE_SHIFT;
> > +    size >>= XC_PAGE_SHIFT;
> > +    end = begin + size;
> > +    for (; begin < end; ++begin) {
> > +        if (xc_domain_remove_from_physmap(CTX->xch, domid, begin)) {
What I was thinking is that xc_domain_remove_from_physmap_batch()
could speed up unmapping, if it were present.

> > +            SSHM_ERROR(domid, id,
> > +                       "unable to unmap shared page at 0x%"PRIx64".",
> > +                       begin);
> > +        }
> > +    }
> > +}
> > +
> > +static void libxl__sshm_del_slave(libxl__gc *gc, xs_transaction_t xt,
> > +                                  uint32_t domid, const char *id, bool 
> > isretry)
> isretry is not used, please remove
> > +{
> > +    const char *slave_path, *begin_str, *size_str;
> > +    uint64_t begin, size;
> > +
> > +    slave_path = GCSPRINTF("%s/slaves/%"PRIu32, SSHM_PATH(id), domid);
> > +
> > +    begin_str = libxl__xs_read(gc, xt, GCSPRINTF("%s/begin", slave_path));
> > +    size_str = libxl__xs_read(gc, xt, GCSPRINTF("%s/size", slave_path));
> > +    begin = strtoull(begin_str, NULL, 16);
> > +    size = strtoull(size_str, NULL, 16);
> > +
> > +    libxl__sshm_do_unmap(gc, domid, id, begin, size);
> > +    libxl__xs_path_cleanup(gc, xt, slave_path);
> > +}
> > +
> > +/* Delete static_shm entries in the xenstore. */
> > +int libxl__sshm_del(libxl__gc *gc,  uint32_t domid)
> > +{
> > +    int rc, i;
> > +    bool isretry;
> > +    xs_transaction_t xt = XBT_NULL;
> > +    const char *dom_path, *dom_sshm_path, *role;
> > +    char **sshm_ents;
> > +    unsigned int sshm_num;
> > +
> > +    dom_path = libxl__xs_get_dompath(gc, domid);
> > +    dom_sshm_path = GCSPRINTF("%s/static_shm", dom_path);
> > +
> > +    isretry = false;
> see above, isretry is not used
> > +    for (;;) {
> > +        rc = libxl__xs_transaction_start(gc, &xt);
> > +        if (rc) goto out;
> > +
> > +        if (libxl__xs_read(gc, xt, dom_sshm_path)) {
> > +            sshm_ents = libxl__xs_directory(gc, xt, dom_sshm_path, 
> > &sshm_num);
> > +            if (!sshm_ents) continue;
> > +
> > +            for (i = 0; i < sshm_num; ++i) {
> > +                role = libxl__xs_read(gc, xt,
> > +                                      GCSPRINTF("%s/%s/role",
> > +                                                dom_sshm_path,
> > +                                                sshm_ents[i]));
> > +                assert(role);
> > +                if (!strncmp(role, "slave", 5))
> > +                    libxl__sshm_del_slave(gc, xt, domid, sshm_ents[i], 
> > isretry);
> > +
> > +                libxl__sshm_decref(gc, xt, SSHM_PATH(sshm_ents[i]));
> > +            }
> > +        }
> > +
> > +        rc = libxl__xs_transaction_commit(gc, &xt);
> > +        if (!rc) break;
> > +        if (rc < 0) goto out;
> > +        isretry = true;
> > +    }
> > +
> > +    rc = 0;
> > +out:
> > +    libxl__xs_transaction_abort(gc, &xt);
> > +    return rc;
> > +}
> > +
> >   /*   libxl__sshm_do_map -- map pages into slave's physmap
> >    *
> >    *   This function maps
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxxx
> https://lists.xenproject.org/mailman/listinfo/xen-devel



-- 
Regards,

Oleksandr Tyshchenko

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.