
[Xen-devel] [PATCH 1/2] teach blkfront driver to handle discard requests



Teach the blkfront driver to read feature-discard from xenstore,
set the discard parameters on the request queue, and forward
discard requests to the backend driver.
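
For reference, connect() expects the backend to have advertised the
feature in its xenstore directory roughly as below; the values are
illustrative only and not part of this patch (the granularity and
alignment are assumed to be in bytes, matching the request queue
limits they are copied into):

    feature-discard       = "1"
    discard_granularity   = "4096"
    discard_alignment     = "0"

With feature-discard set, xlvbd_init_blk_queue() marks the queue with
QUEUE_FLAG_DISCARD, so discard requests issued in the guest (e.g. by
fstrim on a mounted filesystem or blkdiscard on the device, where
available) reach blkif_queue_request() as REQ_DISCARD and are
forwarded to the backend as BLKIF_OP_TRIM.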

Signed-off-by: Li Dongyang <lidongyang@xxxxxxxxxx>
---
 drivers/xen/blkfront/blkfront.c |   48 +++++++++++++++++++++++++++++++++++---
 drivers/xen/blkfront/block.h    |    3 ++
 drivers/xen/blkfront/vbd.c      |    8 ++++++
 3 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/drivers/xen/blkfront/blkfront.c b/drivers/xen/blkfront/blkfront.c
index 9410fae..8a32e27 100644
--- a/drivers/xen/blkfront/blkfront.c
+++ b/drivers/xen/blkfront/blkfront.c
@@ -328,7 +328,9 @@ static void connect(struct blkfront_info *info)
        unsigned long long sectors;
        unsigned long sector_size;
        unsigned int binfo;
-       int err, barrier;
+       unsigned int discard_granularity;
+       unsigned int discard_alignment;
+       int err, barrier, discard;
 
        switch (info->connected) {
        case BLKIF_STATE_CONNECTED:
@@ -377,6 +379,21 @@ static void connect(struct blkfront_info *info)
        else
                info->feature_flush = 0;
 
+       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                          "feature-discard", "%d", &discard);
+       if (err > 0 && discard) {
+               info->feature_discard = 1;
+               err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "discard_granularity", "%u", &discard_granularity,
+                           "discard_alignment", "%u", &discard_alignment,
+                           NULL);
+               if (!err) {
+                       info->discard_granularity = discard_granularity;
+                       info->discard_alignment = discard_alignment;
+               }
+       } else
+               info->feature_discard = 0;
+
        err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
@@ -704,9 +721,18 @@ static int blkif_queue_request(struct request *req)
        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ring_req->operation = BLKIF_OP_PACKET;
 
-       ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
-       BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
-       for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+       if (req->cmd_flags & REQ_DISCARD) {
+               /* id, sector_number and handle are initialized above. */
+               blkif_request_trim_t *trim_req =
+                       (blkif_request_trim_t *)ring_req;
+               trim_req->operation = BLKIF_OP_TRIM;
+               trim_req->reserved = 0;
+               trim_req->nr_sectors = blk_rq_sectors(req);
+       } else {
+               ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+               BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+               for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
                        buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT;
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
@@ -726,6 +752,7 @@ static int blkif_queue_request(struct request *req)
                                        .gref       = ref,
                                        .first_sect = fsect,
                                        .last_sect  = lsect };
+               }
        }
 
        info->ring.req_prod_pvt++;
@@ -821,6 +848,19 @@ static irqreturn_t blkif_int(int irq, void *dev_id)
 
                ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO;
                switch (bret->operation) {
+               case BLKIF_OP_TRIM:
+                       if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+                               struct request_queue *rq = info->rq;
+                               pr_warning("blkfront: %s: trim op failed\n",
+                                          info->gd->disk_name);
+                               ret = -EOPNOTSUPP;
+                               info->feature_discard = 0;
+                               spin_lock_irq(rq->queue_lock);
+                               queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+                               spin_unlock_irq(rq->queue_lock);
+                       }
+                       __blk_end_request_all(req, ret);
+                       break;
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                pr_warning("blkfront: %s:"
diff --git a/drivers/xen/blkfront/block.h b/drivers/xen/blkfront/block.h
index 8db27a3..25926f3 100644
--- a/drivers/xen/blkfront/block.h
+++ b/drivers/xen/blkfront/block.h
@@ -112,6 +112,9 @@ struct blkfront_info
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
        int feature_flush;
+       int feature_discard;
+       unsigned int discard_granularity;
+       unsigned int discard_alignment;
        int is_ready;
 
        /**
diff --git a/drivers/xen/blkfront/vbd.c b/drivers/xen/blkfront/vbd.c
index 2a98439..c0170cf 100644
--- a/drivers/xen/blkfront/vbd.c
+++ b/drivers/xen/blkfront/vbd.c
@@ -300,6 +300,7 @@ static int
 xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
        struct request_queue *rq;
+       struct blkfront_info *info = gd->private_data;
 
        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
        if (rq == NULL)
@@ -307,6 +308,13 @@ xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+       if (info->feature_discard) {
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+               blk_queue_max_discard_sectors(rq, get_capacity(gd));
+               rq->limits.discard_granularity = info->discard_granularity;
+               rq->limits.discard_alignment = info->discard_alignment;
+       }
 #endif
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
-- 
1.7.6


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel