
Re: [Xen-devel] [PATCH] drivers: xen-blkback: delay pending_req allocation to connect_ring



On 26/05/15 at 2:06, Bob Liu wrote:
> In connect_ring, we can know exactly how many pages are used for the shared
> ring and also whether feature-persistent is enabled, delay pending_req
> allocation here so that we won't waste too much memory.

I would very much prefer for this to be a pre-patch for your multipage
ring series. Do you think you can include it in the next iteration?

> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkback/common.h |  3 +-
>  drivers/block/xen-blkback/xenbus.c | 95 ++++++++++++++++++++------------------
>  2 files changed, 51 insertions(+), 47 deletions(-)
> 
> diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
> index 919a1ab..e1d605d 100644
> --- a/drivers/block/xen-blkback/common.h
> +++ b/drivers/block/xen-blkback/common.h
> @@ -249,7 +249,7 @@ struct backend_info;
>  #define PERSISTENT_GNT_WAS_ACTIVE    1
>  
>  /* Number of requests that we can fit in a ring */
> -#define XEN_MAX_BLKIF_REQS           (32 * XENBUS_MAX_RING_PAGES)
> +#define XEN_BLKIF_REQS                       32

This should be XEN_BLKIF_REQS_PER_PAGE (or a similar name of your choice
that makes it clear this is the number of requests per ring page).
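For instance (just a sketch, the exact name is of course up to you):

        /* Number of requests that fit in a single ring page. */
        #define XEN_BLKIF_REQS_PER_PAGE        32

so that the total size of the pool becomes
XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages, matching the WARN_ON
below.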

>  
>  struct persistent_gnt {
>       struct page *page;
> @@ -321,6 +321,7 @@ struct xen_blkif {
>       struct work_struct      free_work;
>       /* Thread shutdown wait queue. */
>       wait_queue_head_t       shutdown_wq;
> +     unsigned int nr_ring_pages;
>  };
>  
>  struct seg_buf {
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index bc33888..48336a3 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -125,8 +125,6 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
>  static struct xen_blkif *xen_blkif_alloc(domid_t domid)
>  {
>       struct xen_blkif *blkif;
> -     struct pending_req *req, *n;
> -     int i, j;
>  
>       BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
>  
> @@ -153,50 +151,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
>       INIT_LIST_HEAD(&blkif->pending_free);
>       INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
>  
> -     for (i = 0; i < XEN_MAX_BLKIF_REQS; i++) {
> -             req = kzalloc(sizeof(*req), GFP_KERNEL);
> -             if (!req)
> -                     goto fail;
> -             list_add_tail(&req->free_list,
> -                           &blkif->pending_free);
> -             for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
> -                     req->segments[j] = kzalloc(sizeof(*req->segments[0]),
> -                                                GFP_KERNEL);
> -                     if (!req->segments[j])
> -                             goto fail;
> -             }
> -             for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
> -             req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
> -                                              GFP_KERNEL);
> -                     if (!req->indirect_pages[j])
> -                             goto fail;
> -             }
> -     }
>       spin_lock_init(&blkif->pending_free_lock);
>       init_waitqueue_head(&blkif->pending_free_wq);
>       init_waitqueue_head(&blkif->shutdown_wq);
>  
>       return blkif;
> -
> -fail:
> -     list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
> -             list_del(&req->free_list);
> -             for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
> -                     if (!req->segments[j])
> -                             break;
> -                     kfree(req->segments[j]);
> -             }
> -             for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
> -                     if (!req->indirect_pages[j])
> -                             break;
> -                     kfree(req->indirect_pages[j]);
> -             }
> -             kfree(req);
> -     }
> -
> -     kmem_cache_free(xen_blkif_cachep, blkif);
> -
> -     return ERR_PTR(-ENOMEM);
>  }
>  
>  static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
> @@ -313,7 +272,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
>               i++;
>       }
>  
> -     WARN_ON(i != XEN_MAX_BLKIF_REQS);
> +     WARN_ON(i != XEN_BLKIF_REQS * blkif->nr_ring_pages);
>  
>       kmem_cache_free(xen_blkif_cachep, blkif);
>  }
> @@ -868,9 +827,10 @@ static int connect_ring(struct backend_info *be)
>       struct xenbus_device *dev = be->dev;
>       unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
>       unsigned int evtchn, nr_grefs, ring_page_order;
> -     unsigned int pers_grants;
> +     unsigned int pers_grants, i, j;
> +     struct pending_req *req, *n;
>       char protocol[64] = "";
> -     int err;
> +     int err, nr_indiret_pages, nr_segs;
                 ^ nr_indirect_pages (notice the missing 'c').
>  
>       pr_debug("%s %s\n", __func__, dev->otherend);
>  
> @@ -899,8 +859,6 @@ static int connect_ring(struct backend_info *be)
>               pr_info("%s:using single page: ring-ref %d\n", dev->otherend,
>                       ring_ref[0]);
>       } else {
> -             unsigned int i;
> -
>               if (ring_page_order > xen_blkif_max_ring_order) {
>                       err = -EINVAL;
>                       xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
> @@ -949,11 +907,39 @@ static int connect_ring(struct backend_info *be)
>  
>       be->blkif->vbd.feature_gnt_persistent = pers_grants;
>       be->blkif->vbd.overflow_max_grants = 0;
> +     be->blkif->nr_ring_pages = nr_grefs;
>  
>       pr_info("ring-pages:%d, event-channel %d, protocol %d (%s) %s\n",
>               nr_grefs, evtchn, be->blkif->blk_protocol, protocol,
>               pers_grants ? "persistent grants" : "");
>  
> +     if (!pers_grants) {
> +             nr_indiret_pages = 0;
> +             nr_segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
> +     } else {
> +             nr_indiret_pages = MAX_INDIRECT_PAGES;
> +             nr_segs = MAX_INDIRECT_SEGMENTS;
> +     }

This is not right: persistent grants and indirect descriptors are
features that can be used independently, so AFAICT this has to be:

        nr_indiret_pages = MAX_INDIRECT_PAGES;
        nr_segs = MAX_INDIRECT_SEGMENTS;
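That is, the per-request buffers always have to be sized for the
indirect maximums, because the frontend can use indirect descriptors
whether or not persistent grants were negotiated. Roughly (an untested
sketch, reusing the loop you removed from xen_blkif_alloc and the
_PER_PAGE name suggested above):

        for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        goto fail;
                list_add_tail(&req->free_list, &be->blkif->pending_free);
                for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
                        req->segments[j] = kzalloc(sizeof(*req->segments[0]),
                                                   GFP_KERNEL);
                        if (!req->segments[j])
                                goto fail;
                }
                for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
                        req->indirect_pages[j] =
                                kzalloc(sizeof(*req->indirect_pages[0]),
                                        GFP_KERNEL);
                        if (!req->indirect_pages[j])
                                goto fail;
                }
        }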

Roger.

