
Re: [Xen-devel] [PATCH v5 00/10] xen-block: multi hardware-queues/rings support



On Thu, Nov 26, 2015 at 03:09:02PM +0800, Bob Liu wrote:
> 
> On 11/26/2015 10:57 AM, Konrad Rzeszutek Wilk wrote:
> > On Thu, Nov 26, 2015 at 10:28:10AM +0800, Bob Liu wrote:
> >>
> >> On 11/26/2015 06:12 AM, Konrad Rzeszutek Wilk wrote:
> >>> On Wed, Nov 25, 2015 at 03:56:03PM -0500, Konrad Rzeszutek Wilk wrote:
> >>>> On Wed, Nov 25, 2015 at 02:25:07PM -0500, Konrad Rzeszutek Wilk wrote:
> >>>>>>   xen/blkback: separate ring information out of struct xen_blkif
> >>>>>>   xen/blkback: pseudo support for multi hardware queues/rings
> >>>>>>   xen/blkback: get the number of hardware queues/rings from blkfront
> >>>>>>   xen/blkback: make pool of persistent grants and free pages per-queue
> >>>>>
> >>>>> OK, got to those as well. I have put them in 'devel/for-jens-4.5' and
> >>>>> am going to test them overnight before pushing them out.
> >>>>>
> >>>>> I see two bugs in the code that we MUST deal with:
> >>>>>
> >>>>>  - print_stats () is going to show zero values.
> >>>>>  - the sysfs code (VBD_SHOW) isn't converted over to fetch data
> >>>>>    from all the rings.
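
Something along these lines may work for the sysfs conversion (an untested
sketch, assuming the per-ring st_* counters and the blkif->rings array
introduced earlier in this series; names may differ from the real code):

/* Untested sketch: sum one per-ring counter over all rings so the
 * sysfs node keeps reporting a device-wide total.  Assumes st_oo_req
 * now lives in struct xen_blkif_ring. */
static ssize_t show_oo_req(struct device *_dev,
                           struct device_attribute *attr, char *buf)
{
        struct xenbus_device *dev = to_xenbus_device(_dev);
        struct backend_info *be = dev_get_drvdata(&dev->dev);
        unsigned long long sum = 0;
        unsigned int i;

        for (i = 0; i < be->blkif->nr_rings; i++)
                sum += be->blkif->rings[i].st_oo_req;

        return sprintf(buf, "%llu\n", sum);
}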
> >>>>
> >>>> - kthread_run can't handle the two "name, i" arguments. I see:
> >>>>
> >>>> root      5101     2  0 20:47 ?        00:00:00 [blkback.3.xvda-]
> >>>> root      5102     2  0 20:47 ?        00:00:00 [blkback.3.xvda-]
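
The names collide because task->comm is capped at TASK_COMM_LEN (16 bytes
including the NUL), so the trailing ring index is silently cut off:
"blkback.3.xvda-" is already 15 characters. One possible way around it,
with illustrative names (devname, ring_idx) rather than the series' code:

/* Illustrative sketch: pre-format the thread name with a shorter prefix
 * so the ring index survives the TASK_COMM_LEN limit, e.g. "blkbk.3.xvda-0". */
char name[TASK_COMM_LEN];

snprintf(name, sizeof(name), "blkbk.%u.%s-%u", blkif->domid, devname, ring_idx);
ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s", name);
if (IS_ERR(ring->xenblkd))
        return PTR_ERR(ring->xenblkd);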
> >>>
> >>> And doing save/restore:
> >>>
> >>> xl save <id> /tmp/A;
> >>> xl restore /tmp/A;
> >>>
> >>> ends up losing the proper state and not getting the ring setup back.
> >>> I see this in the backend:
> >>>
> >>> [ 2719.448600] vbd vbd-22-51712: -1 guest requested 0 queues, exceeding the maximum of 3.
> >>>
> >>> And XenStore agrees:
> >>> tool = ""
> >>>  xenstored = ""
> >>> local = ""
> >>>  domain = ""
> >>>   0 = ""
> >>>    domid = "0"
> >>>    name = "Domain-0"
> >>>    device-model = ""
> >>>     0 = ""
> >>>      state = "running"
> >>>    error = ""
> >>>     backend = ""
> >>>      vbd = ""
> >>>       2 = ""
> >>>        51712 = ""
> >>>         error = "-1 guest requested 0 queues, exceeding the maximum of 3."
> >>>
> >>> .. which also leads to a memory leak as xen_blkbk_remove never gets
> >>> called.
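
For context, that xenstore error is the backend's connect_ring() validation
firing: after the resume the frontend writes multi-queue-num-queues = 0,
since info->rinfo was freed in blkif_free() and never reallocated. Roughly
(paraphrased from the series; the exact code may differ):

/* Paraphrased sketch of the blkback-side check that produces the
 * "guest requested 0 queues" error above. */
err = xenbus_scanf(XBT_NIL, dev->otherend,
                   "multi-queue-num-queues", "%u", &requested_num_queues);
if (err < 0)
        requested_num_queues = 1;       /* frontend without multi-queue */
if (requested_num_queues > xenblk_max_queues || requested_num_queues == 0) {
        xenbus_dev_fatal(dev, err, "guest requested %u queues, exceeding the maximum of %u.",
                         requested_num_queues, xenblk_max_queues);
        return -ENOSYS;
}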
> >>
> >> I think that was already fixed by your patch:
> >> [PATCH RFC 2/2] xen/blkback: Free resources if connect_ring failed.
> > 
> > Nope. I get that with or without the patch.
> > 
> 
> The attached patch should fix this issue.

I reworked it a bit.

From 214635bd2d1c331d984a8170be30c7ba82a11fb2 Mon Sep 17 00:00:00 2001
From: Bob Liu <bob.liu@xxxxxxxxxx>
Date: Wed, 25 Nov 2015 17:52:55 -0500
Subject: [PATCH] xen/blkfront: realloc ring info in blkif_resume

The ring info must be reallocated in the resume path because info->rinfo was
freed in blkif_free(), and the 'multi-queue-max-queues' value reported by the
backend may have changed.

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
Reported-and-Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 drivers/block/xen-blkfront.c | 74 +++++++++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 29 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ef5ce43..4f77d36 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1676,6 +1676,43 @@ again:
        return err;
 }
 
+static int negotiate_mq(struct blkfront_info *info)
+{
+       unsigned int backend_max_queues = 0;
+       int err;
+       unsigned int i;
+
+       BUG_ON(info->nr_rings);
+
+       /* Check if backend supports multiple queues. */
+       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                          "multi-queue-max-queues", "%u", &backend_max_queues);
+       if (err < 0)
+               backend_max_queues = 1;
+
+       info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
+       /* We need at least one ring. */
+       if (!info->nr_rings)
+               info->nr_rings = 1;
+
+       info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
+       if (!info->rinfo) {
+               xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < info->nr_rings; i++) {
+               struct blkfront_ring_info *rinfo;
+
+               rinfo = &info->rinfo[i];
+               INIT_LIST_HEAD(&rinfo->indirect_pages);
+               INIT_LIST_HEAD(&rinfo->grants);
+               rinfo->dev_info = info;
+               INIT_WORK(&rinfo->work, blkif_restart_queue);
+               spin_lock_init(&rinfo->ring_lock);
+       }
+       return 0;
+}
 /**
  * Entry point to this code when a new device is created.  Allocate the basic
  * structures and the ring buffer for communication with the backend, and
@@ -1686,9 +1723,7 @@ static int blkfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
 {
        int err, vdevice;
-       unsigned int r_index;
        struct blkfront_info *info;
-       unsigned int backend_max_queues = 0;
 
        /* FIXME: Use dynamic device id if this is not set. */
        err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1739,33 +1774,10 @@ static int blkfront_probe(struct xenbus_device *dev,
        }
 
        info->xbdev = dev;
-       /* Check if backend supports multiple queues. */
-       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-                          "multi-queue-max-queues", "%u", &backend_max_queues);
-       if (err < 0)
-               backend_max_queues = 1;
-
-       info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
-       /* We need at least one ring. */
-       if (!info->nr_rings)
-               info->nr_rings = 1;
-
-       info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
-       if (!info->rinfo) {
-       xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
+       err = negotiate_mq(info);
+       if (err) {
                kfree(info);
-               return -ENOMEM;
-       }
-
-       for (r_index = 0; r_index < info->nr_rings; r_index++) {
-               struct blkfront_ring_info *rinfo;
-
-               rinfo = &info->rinfo[r_index];
-               INIT_LIST_HEAD(&rinfo->indirect_pages);
-               INIT_LIST_HEAD(&rinfo->grants);
-               rinfo->dev_info = info;
-               INIT_WORK(&rinfo->work, blkif_restart_queue);
-               spin_lock_init(&rinfo->ring_lock);
+               return err;
        }
 
        mutex_init(&info->mutex);
@@ -1926,12 +1938,16 @@ static int blkif_recover(struct blkfront_info *info)
 static int blkfront_resume(struct xenbus_device *dev)
 {
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
-       int err;
+       int err = 0;
 
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
+       err = negotiate_mq(info);
+       if (err)
+               return err;
+
        err = talk_to_blkback(dev, info);
 
        /*
-- 
2.5.0

