
Re: [Xen-devel] [PATCH v3 8/9] xen/blkback: pseudo support for multi hardware queues/rings



On 10/05/2015 11:08 PM, Roger Pau Monné wrote:
> On 05/09/15 at 14.39, Bob Liu wrote:
>> Prepare patch for multi hardware queues/rings, the ring number was set to 1 by force.
> 
> This should be:
> 
> Preparatory patch for multiple hardware queues (rings). The number of
> rings is unconditionally set to 1.
> 
> But frankly this description is not helpful at all, you should describe
> the preparatory changes and why you need them.
> 

Will update; the purpose is just to make each patch smaller and more readable.

>>
>> Signed-off-by: Arianna Avanzini <avanzini.arianna@xxxxxxxxx>
>> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
>> ---
>>  drivers/block/xen-blkback/common.h |    3 +-
>>  drivers/block/xen-blkback/xenbus.c |  328 +++++++++++++++++++++++-------------
>>  2 files changed, 209 insertions(+), 122 deletions(-)
>>
>> diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
>> index cc253d4..ba058a0 100644
>> --- a/drivers/block/xen-blkback/common.h
>> +++ b/drivers/block/xen-blkback/common.h
>> @@ -339,7 +339,8 @@ struct xen_blkif {
>>      unsigned long long                      st_wr_sect;
>>      unsigned int nr_ring_pages;
>>      /* All rings for this device */
>> -    struct xen_blkif_ring ring;
>> +    struct xen_blkif_ring *rings;
>> +    unsigned int nr_rings;
>>  };
>>  
>>  struct seg_buf {
>> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
>> index 6482ee3..04b8d0d 100644
>> --- a/drivers/block/xen-blkback/xenbus.c
>> +++ b/drivers/block/xen-blkback/xenbus.c
>> @@ -26,6 +26,7 @@
>>  /* Enlarge the array size in order to fully show blkback name. */
>>  #define BLKBACK_NAME_LEN (20)
>>  #define RINGREF_NAME_LEN (20)
>> +#define RINGREF_NAME_LEN (20)
> 
> Duplicate define?
> 

Will update.

>>  
>>  struct backend_info {
>>      struct xenbus_device    *dev;
>> @@ -84,11 +85,13 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
>>  
>>  static void xen_update_blkif_status(struct xen_blkif *blkif)
>>  {
>> -    int err;
>> +    int err, i;
>>      char name[BLKBACK_NAME_LEN];
>> +    struct xen_blkif_ring *ring;
>> +    char per_ring_name[BLKBACK_NAME_LEN + 2];
> 
> Hm, why don't you just add + 2 to the place where BLKBACK_NAME_LEN is
> defined and use the same character array ("name")? This is just a waste
> of stack.
> 

per_ring_name is name plus the queue number.
If I reuse name[], I'm not sure whether
snprintf(name, BLKBACK_NAME_LEN + 2, "%s-%d", name, i);
can work, since the source and destination buffers would overlap.
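
For what it's worth, here is a minimal userspace sketch (mock code, not the
blkback code itself; BLKBACK_NAME_LEN and the names are reused only for
illustration) of why I kept a separate buffer: the C standard leaves
snprintf() undefined when the source and destination objects overlap, so
passing name[] both as the destination and as the "%s" argument is not
guaranteed to work.

#include <stdio.h>

#define BLKBACK_NAME_LEN (20)

int main(void)
{
        char name[BLKBACK_NAME_LEN + 2] = "blkback.1.xvda";
        char per_ring_name[BLKBACK_NAME_LEN + 2];
        int i = 3;

        /* Separate destination buffer, as done in the patch: well defined. */
        snprintf(per_ring_name, sizeof(per_ring_name), "%s-%d", name, i);

        /*
         * Reusing name[] as both the destination and the "%s" source would
         * overlap, which the C standard leaves undefined:
         *
         *     snprintf(name, sizeof(name), "%s-%d", name, i);
         */
        printf("%s\n", per_ring_name);
        return 0;
}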

>>  
>>      /* Not ready to connect? */
>> -    if (!blkif->ring.irq || !blkif->vbd.bdev)
>> +    if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
>>              return;
>>  
>>      /* Already connected? */
>> @@ -113,19 +116,68 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
>>      }
>>      invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
>>  
>> -    blkif->ring.xenblkd = kthread_run(xen_blkif_schedule, &blkif->ring, "%s", name);
>> -    if (IS_ERR(blkif->ring.xenblkd)) {
>> -            err = PTR_ERR(blkif->ring.xenblkd);
>> -            blkif->ring.xenblkd = NULL;
>> -            xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
>> -            return;
>> +    if (blkif->nr_rings == 1) {
>> +            blkif->rings[0].xenblkd = kthread_run(xen_blkif_schedule, &blkif->rings[0], "%s", name);
>> +            if (IS_ERR(blkif->rings[0].xenblkd)) {
>> +                    err = PTR_ERR(blkif->rings[0].xenblkd);
>> +                    blkif->rings[0].xenblkd = NULL;
>> +                    xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
>> +                    return;
>> +            }
>> +    } else {
>> +            for (i = 0; i < blkif->nr_rings; i++) {
>> +                    snprintf(per_ring_name, BLKBACK_NAME_LEN + 2, "%s-%d", name, i);
>> +                    ring = &blkif->rings[i];
>> +                    ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s", per_ring_name);
>> +                    if (IS_ERR(ring->xenblkd)) {
>> +                            err = PTR_ERR(ring->xenblkd);
>> +                            ring->xenblkd = NULL;
>> +                            xenbus_dev_error(blkif->be->dev, err,
>> +                                            "start %s xenblkd", per_ring_name);
>> +                            return;
>> +                    }
>> +            }
>> +    }
>> +}
>> +
>> +static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
>> +{
>> +    struct xen_blkif_ring *ring;
>> +    int r;
>> +
>> +    blkif->rings = kzalloc(blkif->nr_rings * sizeof(struct xen_blkif_ring), GFP_KERNEL);
>> +    if (!blkif->rings)
>> +            return -ENOMEM;
>> +
>> +    for (r = 0; r < blkif->nr_rings; r++) {
>> +            ring = &blkif->rings[r];
>> +
>> +            spin_lock_init(&ring->blk_ring_lock);
>> +            init_waitqueue_head(&ring->wq);
>> +            ring->st_print = jiffies;
>> +            ring->persistent_gnts.rb_node = NULL;
>> +            spin_lock_init(&ring->free_pages_lock);
>> +            INIT_LIST_HEAD(&ring->free_pages);
>> +            INIT_LIST_HEAD(&ring->persistent_purge_list);
>> +            ring->free_pages_num = 0;
>> +            atomic_set(&ring->persistent_gnt_in_use, 0);
>> +            atomic_set(&ring->inflight, 0);
>> +            INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
>> +            INIT_LIST_HEAD(&ring->pending_free);
>> +
>> +            spin_lock_init(&ring->pending_free_lock);
>> +            init_waitqueue_head(&ring->pending_free_wq);
>> +            init_waitqueue_head(&ring->shutdown_wq);
> 
> I've already commented on the previous patch, but a bunch of this needs
> to be per-device rather than per-ring.
> 

Will update, and address all the other comments as well.
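
To make sure we mean the same split, here is a rough standalone sketch (mock
types and field names, not the real structures, and the exact set of fields
that moves up is still to be decided) of what I understand "per-device rather
than per-ring" to mean: state shared by all queues lives once in the device
structure, while each ring only keeps its own queue state plus a back-pointer.

#include <stdio.h>
#include <stdlib.h>

struct blk_device;

/* Per-ring state only: what each hardware queue really owns. */
struct blk_ring {
        struct blk_device *dev;   /* back-pointer to the shared state */
        unsigned int id;          /* queue number */
        unsigned int inflight;    /* requests in flight on this ring */
};

/* Per-device state, shared by every ring of the device. */
struct blk_device {
        unsigned int free_pages_num;  /* e.g. a common page pool */
        unsigned int nr_rings;
        struct blk_ring *rings;
};

int main(void)
{
        struct blk_device dev = { .free_pages_num = 1024, .nr_rings = 4 };
        unsigned int i;

        dev.rings = calloc(dev.nr_rings, sizeof(*dev.rings));
        if (!dev.rings)
                return 1;

        for (i = 0; i < dev.nr_rings; i++) {
                dev.rings[i].dev = &dev;   /* all rings see one shared pool */
                dev.rings[i].id = i;
        }

        printf("%u rings sharing a pool of %u pages\n",
               dev.nr_rings, dev.rings[0].dev->free_pages_num);
        free(dev.rings);
        return 0;
}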

Thanks
-Bob

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

