
Re: [Xen-devel] [PATCH v4 05/10] xen/blkfront: negotiate number of queues/rings to be used with backend



On Mon, Nov 02, 2015 at 12:21:41PM +0800, Bob Liu wrote:
> The number of hardware queues for xen/blkfront is set by parameter
> 'max_queues'(default 4), while the max value xen/blkback supported is notified
> through xenstore("multi-queue-max-queues").

That is not right.

s/The number/The max number/

The second part, ", while the max value xen/blkback supported is ..": I think
you are trying to say: "it is also capped by the max value that
xen/blkback exposes through the XenStore key 'multi-queue-max-queues'."

> 
> The negotiated number is the smaller one and would be written back to xenstore
> as "multi-queue-num-queues", blkback need to read this negotiated number.

s/blkback need to read/blkback needs to read/
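
For reference, after negotiation the relevant XenStore keys would look roughly
like this (values purely illustrative, assuming the backend advertises 8 and
the frontend is loaded with max_queues=4):

        <backend dir>/multi-queue-max-queues  = "8"    <- written by blkback
        <frontend dir>/multi-queue-num-queues = "4"    <- written by blkfront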

> 
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkfront.c | 166 +++++++++++++++++++++++++++++++------------
>  1 file changed, 120 insertions(+), 46 deletions(-)
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index 8cc5995..23096d7 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -98,6 +98,10 @@ static unsigned int xen_blkif_max_segments = 32;
>  module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
>  MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
>  
> +static unsigned int xen_blkif_max_queues = 4;
> +module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
> +MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
> +
>  /*
>   * Maximum order of pages to be used for the shared ring between front and
>   * backend, 4KB page granularity is used.
> @@ -113,6 +117,7 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
>   * characters are enough. Define to 20 to keep consist with backend.
>   */
>  #define RINGREF_NAME_LEN (20)
> +#define QUEUE_NAME_LEN (12)

A little bit of documentation please. Why 12? Why not 31415, for example?
I presume it is for 'queue-%u', in which case that is 7 ("queue-" plus the
trailing NUL) + 10 (UINT_MAX is 4294967295) = 17!
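
Something along these lines would do (just a sketch based on that presumption;
the exact wording is up to you):

        /*
         * Per-queue directory names are "queue-%u": 6 characters for "queue-",
         * up to 10 digits for a 32-bit queue index (UINT_MAX is 4294967295),
         * plus the terminating NUL => 17 bytes.
         */
        #define QUEUE_NAME_LEN (17)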


>  
>  /*
>   *  Per-ring info.
> @@ -695,7 +700,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
>  
>       memset(&info->tag_set, 0, sizeof(info->tag_set));
>       info->tag_set.ops = &blkfront_mq_ops;
> -     info->tag_set.nr_hw_queues = 1;
> +     info->tag_set.nr_hw_queues = info->nr_rings;
>       info->tag_set.queue_depth =  BLK_RING_SIZE(info);
>       info->tag_set.numa_node = NUMA_NO_NODE;
>       info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
> @@ -1352,6 +1357,51 @@ fail:
>       return err;
>  }
>  
> +static int write_per_ring_nodes(struct xenbus_transaction xbt,
> +                             struct blkfront_ring_info *rinfo, const char *dir)
> +{
> +     int err, i;

Please make 'i' an unsigned int, especially as you are using '%u' in the
snprintf.
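
I.e. just:

        unsigned int i;
        int err;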


> +     const char *message = NULL;
> +     struct blkfront_info *info = rinfo->dev_info;
> +
> +     if (info->nr_ring_pages == 1) {
> +             err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
> +             if (err) {
> +                     message = "writing ring-ref";
> +                     goto abort_transaction;
> +             }
> +             pr_info("%s: write ring-ref:%d\n", dir, rinfo->ring_ref[0]);

Ewww. No. Please drop this pr_info.

> +     } else {
> +             for (i = 0; i < info->nr_ring_pages; i++) {
> +                     char ring_ref_name[RINGREF_NAME_LEN];
> +
> +                     snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
> +                     err = xenbus_printf(xbt, dir, ring_ref_name,
> +                                         "%u", rinfo->ring_ref[i]);
> +                     if (err) {
> +                             message = "writing ring-ref";
> +                             goto abort_transaction;
> +                     }
> +                     pr_info("%s: write ring-ref:%d\n", dir, rinfo->ring_ref[i]);

No no please.

> +             }
> +     }
> +
> +     err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);

That is not right.

That only creates one. But blkif.h says (and the example agrees)
that there are N 'event-channel' keys for N rings.

Shouldn't this be part of the above loop?

> +     if (err) {
> +             message = "writing event-channel";
> +             goto abort_transaction;
> +     }
> +     pr_info("%s: write event-channel:%d\n", dir, rinfo->evtchn);

Please no.

> +
> +     return 0;
> +
> +abort_transaction:
> +     xenbus_transaction_end(xbt, 1);
> +     if (message)
> +             xenbus_dev_fatal(info->xbdev, err, "%s", message);
> +
> +     return err;
> +}
>  
>  /* Common code used when first setting up, and when resuming. */
>  static int talk_to_blkback(struct xenbus_device *dev,
> @@ -1362,7 +1412,6 @@ static int talk_to_blkback(struct xenbus_device *dev,
>       int err, i;
>       unsigned int max_page_order = 0;
>       unsigned int ring_page_order = 0;
> -     struct blkfront_ring_info *rinfo;
>  
>       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
>                          "max-ring-page-order", "%u", &max_page_order);
> @@ -1374,7 +1423,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
>       }
>  
>       for (i = 0; i < info->nr_rings; i++) {
> -             rinfo = &info->rinfo[i];
> +             struct blkfront_ring_info *rinfo = &info->rinfo[i];
> +
>               /* Create shared ring, alloc event channel. */
>               err = setup_blkring(dev, rinfo);
>               if (err)
> @@ -1388,45 +1438,51 @@ again:
>               goto destroy_blkring;
>       }
>  
> -     if (info->nr_rings == 1) {
> -             rinfo = &info->rinfo[0];
> -             if (info->nr_ring_pages == 1) {
> -                     err = xenbus_printf(xbt, dev->nodename,
> -                                         "ring-ref", "%u", rinfo->ring_ref[0]);
> -                     if (err) {
> -                             message = "writing ring-ref";
> -                             goto abort_transaction;
> -                     }
> -             } else {
> -                     err = xenbus_printf(xbt, dev->nodename,
> -                                         "ring-page-order", "%u", ring_page_order);
> -                     if (err) {
> -                             message = "writing ring-page-order";
> -                             goto abort_transaction;
> -                     }
> +     if (info->nr_ring_pages > 1) {
> +             err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
> +                                 ring_page_order);
> +             if (err) {
> +                     message = "writing ring-page-order";
> +                     goto abort_transaction;
> +             }
> +     }
>  
> -                     for (i = 0; i < info->nr_ring_pages; i++) {
> -                             char ring_ref_name[RINGREF_NAME_LEN];
> +     /* We already got the number of queues/rings in _probe */
> +     if (info->nr_rings == 1) {
> +             err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
> +             if (err)
> +                     goto destroy_blkring;
> +     } else {
> +             char *path;
> +             size_t pathsize;
>  
> -                             snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
> -                             err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
> -                                                 "%u", rinfo->ring_ref[i]);
> -                             if (err) {
> -                                     message = "writing ring-ref";
> -                                     goto abort_transaction;
> -                             }
> -                     }
> -             }
> -             err = xenbus_printf(xbt, dev->nodename,
> -                                 "event-channel", "%u", rinfo->evtchn);
> +             err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
> +                                 info->nr_rings);
>               if (err) {
> -                     message = "writing event-channel";
> +                     message = "writing multi-queue-num-queues";
>                       goto abort_transaction;
>               }
> -     } else {
> -             /* Not supported at this stage */
> -             goto abort_transaction;
> +
> +             pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
> +             path = kmalloc(pathsize, GFP_KERNEL);
> +             if (!path) {
> +                     err = -ENOMEM;
> +                     message = "ENOMEM while writing ring references";
> +                     goto abort_transaction;
> +             }
> +
> +             for (i = 0; i < info->nr_rings; i++) {
> +                     memset(path, 0, pathsize);
> +                     snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
> +                     err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
> +                     if (err) {
> +                             kfree(path);
> +                             goto destroy_blkring;
> +                     }
> +             }
> +             kfree(path);
>       }
> +
>       err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
>                           XEN_IO_PROTO_ABI_NATIVE);
>       if (err) {
> @@ -1449,7 +1505,7 @@ again:
>  
>       for (i = 0; i < info->nr_rings; i++) {
>               int j;
> -             rinfo = &info->rinfo[i];
> +             struct blkfront_ring_info *rinfo = &info->rinfo[i];
>  
>               for (j = 0; j < BLK_RING_SIZE(info); j++)
>                       rinfo->shadow[j].req.u.rw.id = j + 1;
> @@ -1480,6 +1536,7 @@ static int blkfront_probe(struct xenbus_device *dev,
>  {
>       int err, vdevice, r_index;
>       struct blkfront_info *info;
> +     unsigned int backend_max_queues = 0;
>  
>       /* FIXME: Use dynamic device id if this is not set. */
>       err = xenbus_scanf(XBT_NIL, dev->nodename,
> @@ -1529,7 +1586,25 @@ static int blkfront_probe(struct xenbus_device *dev,
>               return -ENOMEM;
>       }
>  
> -     info->nr_rings = 1;
> +     mutex_init(&info->mutex);
> +     spin_lock_init(&info->dev_lock);
> +     info->xbdev = dev;
> +     info->vdevice = vdevice;
> +     INIT_LIST_HEAD(&info->grants);
> +     info->connected = BLKIF_STATE_DISCONNECTED;
> +
> +     /* Check if backend supports multiple queues */

Missing full stop.

> +     err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
> +                        "multi-queue-max-queues", "%u", &backend_max_queues);
> +     if (err < 0)
> +             backend_max_queues = 1;
> +
> +     info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
> +     if (info->nr_rings <= 0)

It can't be < 0. 'nr_rings' is unsigned int.

Perhaps 'if (!info->nr_rings)'?
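
That is, something like:

        info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
        if (!info->nr_rings)
                info->nr_rings = 1;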

> +             info->nr_rings = 1;
> +     pr_debug("Number of queues to be used:%u, though backend supports max-queues:%u\n",
> +              info->nr_rings, backend_max_queues);
> +

How about putting this in 'xlvbd_flush'? It can report that there.

And I am not sure if there is any need to include the backend_max_queues
value. One can find that from XenStore.
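
Something along these lines in xlvbd_flush() would do (just a sketch; it could
also be folded into the existing pr_info there):

        pr_info("blkfront: %s: %u hardware queues/rings\n",
                info->gd->disk_name, info->nr_rings);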

>       info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
>       if (!info->rinfo) {
>               xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
> @@ -1547,14 +1622,6 @@ static int blkfront_probe(struct xenbus_device *dev,
>               spin_lock_init(&rinfo->ring_lock);
>       }
>  
> -     mutex_init(&info->mutex);
> -     spin_lock_init(&info->dev_lock);
> -     info->xbdev = dev;
> -     info->vdevice = vdevice;
> -     INIT_LIST_HEAD(&info->grants);
> -     info->persistent_gnts_c = 0;
> -     info->connected = BLKIF_STATE_DISCONNECTED;
> -
>       /* Front end dir is a number, which is used as the id. */
>       info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
>       dev_set_drvdata(&dev->dev, info);
> @@ -2216,6 +2283,7 @@ static struct xenbus_driver blkfront_driver = {
>  static int __init xlblk_init(void)
>  {
>       int ret;
> +     int nr_cpus = num_online_cpus();
>  
>       if (!xen_domain())
>               return -ENODEV;
> @@ -2226,6 +2294,12 @@ static int __init xlblk_init(void)
>               xen_blkif_max_ring_order = 0;
>       }
>  
> +     if (xen_blkif_max_queues > nr_cpus) {
> +             pr_info("Invalid max_queues (%d), will use default max: %d.\n",
> +                     xen_blkif_max_queues, nr_cpus);
> +             xen_blkif_max_queues = nr_cpus;
> +     }
> +
>       if (!xen_has_pv_disk_devices())
>               return -ENODEV;
>  
> -- 
> 1.8.3.1
> 
