[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 10/19] rnbd-srv: simplify bio mapping in process_rdma



On Wed, Jan 19, 2022 at 1:20 AM Jinpu Wang <jinpu.wang@xxxxxxxxx> wrote:
>
> Hi Christoph,
>
> Thanks for the patch.
>
> On Tue, Jan 18, 2022 at 8:20 AM Christoph Hellwig <hch@xxxxxx> wrote:
> >
> > The memory mapped in process_rdma is contiguous, so there is no need
> > to loop over bio_add_page.  Remove rnbd_bio_map_kern and just open code
> > the bio allocation and mapping in the caller.
> >
> > Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> > ---
> >  drivers/block/rnbd/rnbd-srv-dev.c | 57 -------------------------------
> >  drivers/block/rnbd/rnbd-srv-dev.h |  5 ---
> >  drivers/block/rnbd/rnbd-srv.c     | 20 ++++++++---
> >  3 files changed, 15 insertions(+), 67 deletions(-)
> >
> > diff --git a/drivers/block/rnbd/rnbd-srv-dev.c 
> > b/drivers/block/rnbd/rnbd-srv-dev.c
> > index b241a099aeae2..98d3e591a0885 100644
> > --- a/drivers/block/rnbd/rnbd-srv-dev.c
> > +++ b/drivers/block/rnbd/rnbd-srv-dev.c
> > @@ -44,60 +44,3 @@ void rnbd_dev_close(struct rnbd_dev *dev)
> >         blkdev_put(dev->bdev, dev->blk_open_flags);
> >         kfree(dev);
> >  }
> > -
> > -void rnbd_dev_bi_end_io(struct bio *bio)
> > -{
> > -       struct rnbd_dev_blk_io *io = bio->bi_private;
> > -
> > -       rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
> > -       bio_put(bio);
> > -}
> > -
> > -/**
> > - *     rnbd_bio_map_kern       -       map kernel address into bio
> > - *     @data: pointer to buffer to map
> > - *     @bs: bio_set to use.
> > - *     @len: length in bytes
> > - *     @gfp_mask: allocation flags for bio allocation
> > - *
> > - *     Map the kernel address into a bio suitable for io to a block
> > - *     device. Returns an error pointer in case of error.
> > - */
> > -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
> > -                             unsigned int len, gfp_t gfp_mask)
> > -{
> > -       unsigned long kaddr = (unsigned long)data;
> > -       unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
> > -       unsigned long start = kaddr >> PAGE_SHIFT;
> > -       const int nr_pages = end - start;
> > -       int offset, i;
> > -       struct bio *bio;
> > -
> > -       bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
> > -       if (!bio)
> > -               return ERR_PTR(-ENOMEM);
> > -
> > -       offset = offset_in_page(kaddr);
> > -       for (i = 0; i < nr_pages; i++) {
> > -               unsigned int bytes = PAGE_SIZE - offset;
> > -
> > -               if (len <= 0)
> > -                       break;
> > -
> > -               if (bytes > len)
> > -                       bytes = len;
> > -
> > -               if (bio_add_page(bio, virt_to_page(data), bytes,
> > -                                   offset) < bytes) {
> > -                       /* we don't support partial mappings */
> > -                       bio_put(bio);
> > -                       return ERR_PTR(-EINVAL);
> > -               }
> > -
> > -               data += bytes;
> > -               len -= bytes;
> > -               offset = 0;
> > -       }
> > -
> > -       return bio;
> > -}
> > diff --git a/drivers/block/rnbd/rnbd-srv-dev.h 
> > b/drivers/block/rnbd/rnbd-srv-dev.h
> > index 0eb23850afb95..1a14ece0be726 100644
> > --- a/drivers/block/rnbd/rnbd-srv-dev.h
> > +++ b/drivers/block/rnbd/rnbd-srv-dev.h
> > @@ -41,11 +41,6 @@ void rnbd_dev_close(struct rnbd_dev *dev);
> >
> >  void rnbd_endio(void *priv, int error);
> >
> > -void rnbd_dev_bi_end_io(struct bio *bio);
> > -
> > -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
> > -                             unsigned int len, gfp_t gfp_mask);
> > -
> >  static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
> >  {
> >         return queue_max_segments(bdev_get_queue(dev->bdev));
> > diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
> > index 1ee808fc600cf..65c670e96075b 100644
> > --- a/drivers/block/rnbd/rnbd-srv.c
> > +++ b/drivers/block/rnbd/rnbd-srv.c
> > @@ -114,6 +114,14 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session 
> > *srv_sess)
> >         return sess_dev;
> >  }
> >
> > +static void rnbd_dev_bi_end_io(struct bio *bio)
> > +{
> > +       struct rnbd_dev_blk_io *io = bio->bi_private;
> > +
> > +       rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
> > +       bio_put(bio);
> > +}
> > +
> >  static int process_rdma(struct rnbd_srv_session *srv_sess,
> >                         struct rtrs_srv_op *id, void *data, u32 datalen,
> >                         const void *usr, size_t usrlen)
> > @@ -144,11 +152,11 @@ static int process_rdma(struct rnbd_srv_session 
> > *srv_sess,
> >         priv->sess_dev = sess_dev;
> >         priv->id = id;
> >
> > -       /* Generate bio with pages pointing to the rdma buffer */
> > -       bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, 
> > datalen, GFP_KERNEL);
> > -       if (IS_ERR(bio)) {
> > -               err = PTR_ERR(bio);
> > -               rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", 
> > err);
> > +       bio = bio_alloc_bioset(GFP_KERNEL, 1, 
> > sess_dev->rnbd_dev->ibd_bio_set);
> > +       if (bio_add_page(bio, virt_to_page(data), datalen,
> > +                       offset_in_page(data))) {
> This change leads to IO errors every time, because bio_add_page returns the number of bytes added (len) on success.
> We need  if (bio_add_page(bio, virt_to_page(data), datalen,
>                      offset_in_page(data)) < datalen)
>
> Thanks!
> > +               rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
> > +               err = -EINVAL;
> >                 goto sess_dev_put;
> >         }
> >
> > @@ -170,6 +178,8 @@ static int process_rdma(struct rnbd_srv_session 
> > *srv_sess,
> >
> >         return 0;
> >
> > +bio_put:
> > +       bio_put(bio);
Also, the bio_put: label is never jumped to; the bio_put(bio); call should
be moved below instead, so there is no need for a bio_put: label.
> >  sess_dev_put:
> >         rnbd_put_sess_dev(sess_dev);
> >  err:
> > --
> > 2.30.2
> >



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.