[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] linux-2.6.18/blktap: don't let in-flight requests defer pending ones


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Thu, 22 Nov 2012 12:19:33 +0000
  • Delivery-date: Thu, 22 Nov 2012 12:20:23 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad
idea. It means that in-flight I/O is essentially blocking continued
batches. This effectively kills throughput on frontends which unplug
(or even just notify) early and rightfully assume additional requests
will be picked up in time, not synchronously.

Derived from a similar blkback patch by Daniel Stodden (see c/s
1118:c7c14595c18b).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/drivers/xen/blktap/blktap.c
+++ b/drivers/xen/blktap/blktap.c
@@ -1285,7 +1285,7 @@ irqreturn_t tap_blkif_be_int(int irq, vo
  * DOWNWARD CALLS -- These interface with the block-device layer proper.
  */
 static int print_dbug = 1;
-static int do_block_io_op(blkif_t *blkif)
+static int _do_block_io_op(blkif_t *blkif)
 {
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
@@ -1397,6 +1397,22 @@ static int do_block_io_op(blkif_t *blkif
        return more_to_do;
 }
 
+static int do_block_io_op(blkif_t *blkif)
+{
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
+       int more_to_do;
+
+       do {
+               more_to_do = _do_block_io_op(blkif);
+               if (more_to_do)
+                       break;
+
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+       } while (more_to_do);
+
+       return more_to_do;
+}
+
 static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
@@ -1647,7 +1663,6 @@ static void make_response(blkif_t *blkif
        blkif_response_t  resp;
        unsigned long     flags;
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-       int more_to_do = 0;
        int notify;
 
        resp.id        = id;
@@ -1678,20 +1693,7 @@ static void make_response(blkif_t *blkif
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 
-       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-               /*
-                * Tail check for pending requests. Allows frontend to avoid
-                * notifications if requests are already in flight (lower
-                * overheads and promotes batching).
-                */
-               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-               more_to_do = 1;
-       }
-
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-       if (more_to_do)
-               blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
 }



Attachment: xen-blktap-streamline.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.