
Re: [Xen-devel] [PATCH 1/6] xen-blkfront: avoid to use start/stop queue



On Wed, Jul 12, 2017 at 02:20:58AM +0800, Ming Lei wrote:
> These interfaces will be removed soon, so use quiesce and
> unquiesce instead, which should be more safe.

'should be'? That does not sound very encouraging.
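
For reference, my reading of the difference between the two calls, as a
minimal sketch (generic blk-mq usage, not blkfront code; the helper names
are made up):

#include <linux/blk-mq.h>

/* Hypothetical helpers, for illustration only. */
static void stop_hw_queues_example(struct request_queue *q)
{
        /*
         * Only marks the hw queues as stopped; a ->queue_rq() that is
         * already running on another CPU may still be touching the ring
         * when this returns.
         */
        blk_mq_stop_hw_queues(q);
}

static void quiesce_example(struct request_queue *q)
{
        /*
         * Marks the queue quiesced and waits until every in-flight
         * ->queue_rq() call has finished, so nothing can race with the
         * teardown that follows.
         */
        blk_mq_quiesce_queue(q);
}

If that reading is right, the commit message could state outright why
quiesce is safer instead of 'should be'.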

> 
> The only remaining usage will be removed in the following
> congestion control patches.
> 
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
> Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Cc: Juergen Gross <jgross@xxxxxxxx>
> Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkfront.c | 22 ++++++++--------------
>  1 file changed, 8 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index c852ed3c01d5..1578befda635 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -1187,7 +1187,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
>               return;
>  
>       /* No more blkif_request(). */
> -     blk_mq_stop_hw_queues(info->rq);
> +     blk_mq_quiesce_queue(info->rq);
>  
>       for (i = 0; i < info->nr_rings; i++) {
>               struct blkfront_ring_info *rinfo = &info->rinfo[i];
> @@ -1216,8 +1216,10 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
>  /* Already hold rinfo->ring_lock. */
>  static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
>  {
> -     if (!RING_FULL(&rinfo->ring))
> +     if (!RING_FULL(&rinfo->ring)) {
>               blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
> +             blk_mq_kick_requeue_list(rinfo->dev_info->rq);
> +     }
>  }
>  
>  static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
> @@ -1225,7 +1227,8 @@ static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
>       unsigned long flags;
>  
>       spin_lock_irqsave(&rinfo->ring_lock, flags);
> -     kick_pending_request_queues_locked(rinfo);
> +     if (!RING_FULL(&rinfo->ring))
> +             blk_mq_run_hw_queues(rinfo->dev_info->rq, true);
>       spin_unlock_irqrestore(&rinfo->ring_lock, flags);
>  }
>  
> @@ -1346,7 +1349,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>               BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
>       /* No more blkif_request(). */
>       if (info->rq)
> -             blk_mq_stop_hw_queues(info->rq);
> +             blk_mq_quiesce_queue(info->rq);
>  
>       for (i = 0; i < info->nr_rings; i++)
>               blkif_free_ring(&info->rinfo[i]);
> @@ -2018,22 +2021,13 @@ static int blkif_recover(struct blkfront_info *info)
>       /* Now safe for us to use the shared ring */
>       info->connected = BLKIF_STATE_CONNECTED;
>  
> -     for (r_index = 0; r_index < info->nr_rings; r_index++) {
> -             struct blkfront_ring_info *rinfo;
> -
> -             rinfo = &info->rinfo[r_index];
> -             /* Kick any other new requests queued since we resumed */
> -             kick_pending_request_queues(rinfo);
> -     }
> -
>       list_for_each_entry_safe(req, n, &info->requests, queuelist) {
>               /* Requeue pending requests (flush or discard) */
>               list_del_init(&req->queuelist);
>               BUG_ON(req->nr_phys_segments > segs);
>               blk_mq_requeue_request(req, false);
>       }
> -     blk_mq_start_stopped_hw_queues(info->rq, true);
> -     blk_mq_kick_requeue_list(info->rq);
> +     blk_mq_unquiesce_queue(info->rq);
>  
>       while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
>               /* Traverse the list of pending bios and re-queue them */
> -- 
> 2.9.4
> 
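
On the blkif_recover() change: the requests above are parked with
blk_mq_requeue_request(req, false), so here is the suspend/resume pairing
as I read the generic blk-mq API (sketch only, helper names made up):

#include <linux/blk-mq.h>

/* Hypothetical pairing, for illustration only. */
static void suspend_example(struct request_queue *q, struct request *rq)
{
        blk_mq_quiesce_queue(q);                /* no more ->queue_rq() */
        blk_mq_requeue_request(rq, false);      /* park rq, no kick yet */
}

static void resume_example(struct request_queue *q)
{
        blk_mq_unquiesce_queue(q);      /* clear the flag, rerun hw queues */
        blk_mq_kick_requeue_list(q);    /* push parked requests back in */
}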

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

