
Re: [Xen-devel] [PATCH V2 2/3] xen-blkfront: teach blkfront driver handle trim request



On Thu, Aug 18, 2011 at 05:34:30PM +0800, Li Dongyang wrote:
> The blkfront driver will now read feature-trim from xenstore
> and set up the request queue with the trim parameters, so that
> discard requests can be forwarded to the backend driver.
> 
> Signed-off-by: Li Dongyang <lidongyang@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkfront.c |   96 ++++++++++++++++++++++++++++++++----------
>  1 files changed, 73 insertions(+), 23 deletions(-)
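
Just for context (not part of the patch): once the queue advertises discard
support, one way to exercise this path from inside a guest is the BLKDISCARD
ioctl - the device path below is only an example:

	/* Minimal sketch: discard the first 1 MiB of a xen-blkfront disk. */
	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		uint64_t range[2] = { 0, 1 << 20 };	/* start, length in bytes */
		int fd = open("/dev/xvda", O_WRONLY);

		if (fd < 0 || ioctl(fd, BLKDISCARD, &range) < 0) {
			perror("BLKDISCARD");
			return 1;
		}
		printf("discarded %llu bytes\n", (unsigned long long)range[1]);
		close(fd);
		return 0;
	}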
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index b536a9c..c22fb07 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -98,6 +98,9 @@ struct blkfront_info
>       unsigned long shadow_free;
>       unsigned int feature_flush;
>       unsigned int flush_op;
> +     unsigned int feature_trim;
> +     unsigned int discard_granularity;
> +     unsigned int discard_alignment;
>       int is_ready;
>  };
>  
> @@ -302,29 +305,36 @@ static int blkif_queue_request(struct request *req)
>               ring_req->operation = info->flush_op;
>       }
>  
> -     ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
> -     BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
> +     if (unlikely(req->cmd_flags & REQ_DISCARD)) {
> +             /* id, sector_number and handle are set above. */
> +             ring_req->operation = BLKIF_OP_TRIM;
> +             ring_req->nr_segments = 0;
> +             ring_req->u.trim.nr_sectors = blk_rq_sectors(req);
> +     } else {
> +             ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
> +             BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
>  
> -     for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
> -             buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
> -             fsect = sg->offset >> 9;
> -             lsect = fsect + (sg->length >> 9) - 1;
> -             /* install a grant reference. */
> -             ref = gnttab_claim_grant_reference(&gref_head);
> -             BUG_ON(ref == -ENOSPC);
> +             for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
> +                     buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
> +                     fsect = sg->offset >> 9;
> +                     lsect = fsect + (sg->length >> 9) - 1;
> +                     /* install a grant reference. */
> +                     ref = gnttab_claim_grant_reference(&gref_head);
> +                     BUG_ON(ref == -ENOSPC);
>  
> -             gnttab_grant_foreign_access_ref(
> -                             ref,
> -                             info->xbdev->otherend_id,
> -                             buffer_mfn,
> -                             rq_data_dir(req) );
> -
> -             info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
> -             ring_req->u.rw.seg[i] =
> -                             (struct blkif_request_segment) {
> -                                     .gref       = ref,
> -                                     .first_sect = fsect,
> -                                     .last_sect  = lsect };
> +                     gnttab_grant_foreign_access_ref(
> +                                     ref,
> +                                     info->xbdev->otherend_id,
> +                                     buffer_mfn,
> +                                     rq_data_dir(req));
> +
> +                     info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
> +                     ring_req->u.rw.seg[i] =
> +                                     (struct blkif_request_segment) {
> +                                             .gref       = ref,
> +                                             .first_sect = fsect,
> +                                             .last_sect  = lsect };
> +             }
>       }
>  
>       info->ring.req_prod_pvt++;
> @@ -399,6 +409,7 @@ wait:
>  static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
>  {
>       struct request_queue *rq;
> +     struct blkfront_info *info = gd->private_data;
>  
>       rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
>       if (rq == NULL)
> @@ -406,6 +417,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
>  
>       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
>  
> +     if (info->feature_trim) {
> +             queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
> +             blk_queue_max_discard_sectors(rq, get_capacity(gd));
> +             rq->limits.discard_granularity = info->discard_granularity;
> +             rq->limits.discard_alignment = info->discard_alignment;
> +     }
> +
>       /* Hard sector size and max sectors impersonate the equiv. hardware. */
>       blk_queue_logical_block_size(rq, sector_size);
>       blk_queue_max_hw_sectors(rq, 512);
> @@ -722,6 +740,19 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>  
>               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
>               switch (bret->operation) {
> +             case BLKIF_OP_TRIM:
> +                     if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
> +                             struct request_queue *rq = info->rq;
> +                             printk(KERN_WARNING "blkfront: %s: trim op failed\n",
> +                                        info->gd->disk_name);
> +                             error = -EOPNOTSUPP;
> +                             info->feature_trim = 0;
> +                             spin_lock_irq(rq->queue_lock);
> +                             queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
> +                             spin_unlock_irq(rq->queue_lock);
> +                     }
> +                     __blk_end_request_all(req, error);
> +                     break;
>               case BLKIF_OP_FLUSH_DISKCACHE:
>               case BLKIF_OP_WRITE_BARRIER:
>                       if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
> @@ -1108,7 +1139,9 @@ static void blkfront_connect(struct blkfront_info *info)
>       unsigned long sector_size;
>       unsigned int binfo;
>       int err;
> -     int barrier, flush;
> +     unsigned int discard_granularity;
> +     unsigned int discard_alignment;
> +     int barrier, flush, trim;
>  
>       switch (info->connected) {
>       case BLKIF_STATE_CONNECTED:
> @@ -1178,7 +1211,24 @@ static void blkfront_connect(struct blkfront_info *info)
>               info->feature_flush = REQ_FLUSH;
>               info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
>       }
> -             
> +
> +     err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
> +                         "feature-trim", "%d", &trim,
> +                         NULL);
> +
> +     if (!err && trim) {
> +             info->feature_trim = 1;
> +             err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
> +                     "discard_granularity", "%u", &discard_granularity,
> +                     "discard_alignment", "%u", &discard_alignment,

I am just not sure why the '_' was chosen. It seems at odds
with the rest of what is put in XenBus.
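
Hyphens would match the existing keys ('feature-barrier', 'feature-flush-cache',
'sectors' and so on). Just as an illustration of what I mean - the exact names
are of course up for discussion - the gather would then read:

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-granularity", "%u", &discard_granularity,
			    "discard-alignment", "%u", &discard_alignment,
			    NULL);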

> +                     NULL);
> +             if (!err) {
> +                     info->discard_granularity = discard_granularity;
> +                     info->discard_alignment = discard_alignment;
> +             }
> +     } else
> +             info->feature_trim = 0;
> +
>       err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
>       if (err) {
>               xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
> -- 
> 1.7.6
> 
> 