[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v1 10/13] xen/pvcalls: implement poll command



On Mon, 24 Jul 2017, Juergen Gross wrote:
> On 22/07/17 02:12, Stefano Stabellini wrote:
> > For active sockets, check the indexes and use the inflight_conn_req
> > waitqueue to wait.
> > 
> > For passive sockets, send PVCALLS_POLL to the backend. Use the
> > inflight_accept_req waitqueue if an accept is outstanding. Otherwise use
> > the inflight_req waitqueue: inflight_req is awakened when a new response
> > is received; on wakeup we check whether the POLL response has arrived by
> > looking at the PVCALLS_FLAG_POLL_RET flag. We set the flag from
> > pvcalls_front_event_handler, if the response was for a POLL command.
> > 
> > In pvcalls_front_event_handler, get the struct socket pointer from the
> > poll id (we previously converted struct socket* to uint64_t and used it
> > as id).
> > 
> > Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> > CC: boris.ostrovsky@xxxxxxxxxx
> > CC: jgross@xxxxxxxx
> > ---
> >  drivers/xen/pvcalls-front.c | 123 
> > ++++++++++++++++++++++++++++++++++++++++----
> >  drivers/xen/pvcalls-front.h |   3 ++
> >  2 files changed, 115 insertions(+), 11 deletions(-)
> > 
> > diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> > index 3d1041a..b6cfb7d 100644
> > --- a/drivers/xen/pvcalls-front.c
> > +++ b/drivers/xen/pvcalls-front.c
> > @@ -128,17 +128,29 @@ static irqreturn_t pvcalls_front_event_handler(int 
> > irq, void *dev_id)
> >             rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
> >  
> >             req_id = rsp->req_id;
> > -           src = (uint8_t *)&bedata->rsp[req_id];
> > -           src += sizeof(rsp->req_id);
> > -           dst = (uint8_t *)rsp;
> > -           dst += sizeof(rsp->req_id);
> > -           memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
> > -           /*
> > -            * First copy the rest of the data, then req_id. It is
> > -            * paired with the barrier when accessing bedata->rsp.
> > -            */
> > -           smp_wmb();
> > -           WRITE_ONCE(bedata->rsp[req_id].req_id, rsp->req_id);
> > +           if (rsp->cmd == PVCALLS_POLL) {
> > +                   struct socket *sock = (struct socket *) rsp->u.poll.id;
> > +                   struct sock_mapping *map =
> > +                           (struct sock_mapping *)
> > +                           READ_ONCE(sock->sk->sk_send_head);
> > +
> > +                   set_bit(PVCALLS_FLAG_POLL_RET,
> > +                           (void *)&map->passive.flags);
> 
> Add a barrier here to make sure PVCALLS_FLAG_POLL_INFLIGHT is cleared
> _after_ setting PVCALLS_FLAG_POLL_RET?

Yes, good point, I'll add an smp_wmb() here. A barrier is unnecessary at
the other end (the beginning of pvcalls_front_poll_passive) because of
the conditional instructions creating control dependencies. I'll add a
comment.


> > +                   clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
> > +                             (void *)&map->passive.flags);
> > +           } else {
> > +                   src = (uint8_t *)&bedata->rsp[req_id];
> > +                   src += sizeof(rsp->req_id);
> > +                   dst = (uint8_t *)rsp;
> > +                   dst += sizeof(rsp->req_id);
> > +                   memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
> > +                   /*
> > +                    * First copy the rest of the data, then req_id. It is
> > +                    * paired with the barrier when accessing bedata->rsp.
> > +                    */
> > +                   smp_wmb();
> > +                   WRITE_ONCE(bedata->rsp[req_id].req_id, rsp->req_id);
> > +           }
> >  
> >             bedata->ring.rsp_cons++;
> >             wake_up(&bedata->inflight_req);
> > @@ -704,6 +716,95 @@ int pvcalls_front_accept(struct socket *sock, struct 
> > socket *newsock, int flags)
> >     return ret;
> >  }
> >  
> > +static unsigned int pvcalls_front_poll_passive(struct file *file,
> > +                                          struct pvcalls_bedata *bedata,
> > +                                          struct sock_mapping *map,
> > +                                          poll_table *wait)
> > +{
> > +   int notify, req_id;
> > +   struct xen_pvcalls_request *req;
> > +
> > +   if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
> > +                (void *)&map->passive.flags)) {
> > +           poll_wait(file, &map->passive.inflight_accept_req, wait);
> > +           return 0;
> > +   }
> > +
> > +   if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
> > +                          (void *)&map->passive.flags))
> > +           return POLLIN;
> > +
> > +   if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
> > +                        (void *)&map->passive.flags)) {
> > +           poll_wait(file, &bedata->inflight_req, wait);
> > +           return 0;
> > +   }
> > +
> > +   spin_lock(&bedata->pvcallss_lock);
> > +   req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
> > +   BUG_ON(req_id >= PVCALLS_NR_REQ_PER_RING);
> > +   if (RING_FULL(&bedata->ring) ||
> > +       READ_ONCE(bedata->rsp[req_id].req_id) != PVCALLS_INVALID_ID) {
> > +           spin_unlock(&bedata->pvcallss_lock);
> > +           return -EAGAIN;
> > +   }
> > +   req = RING_GET_REQUEST(&bedata->ring, req_id);
> > +   req->req_id = req_id;
> > +   req->cmd = PVCALLS_POLL;
> > +   req->u.poll.id = (uint64_t) map->sock;
> > +
> > +   bedata->ring.req_prod_pvt++;
> > +   RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
> > +   spin_unlock(&bedata->pvcallss_lock);
> > +   if (notify)
> > +           notify_remote_via_irq(bedata->irq);
> > +
> > +   poll_wait(file, &bedata->inflight_req, wait);
> > +   return 0;
> > +}
> > +
> > +static unsigned int pvcalls_front_poll_active(struct file *file,
> > +                                         struct pvcalls_bedata *bedata,
> > +                                         struct sock_mapping *map,
> > +                                         poll_table *wait)
> > +{
> > +   unsigned int mask = 0;
> > +   int32_t in_error, out_error;
> > +   struct pvcalls_data_intf *intf = map->active.ring;
> > +
> > +   out_error = intf->out_error;
> > +   in_error = intf->in_error;
> > +
> > +   poll_wait(file, &map->active.inflight_conn_req, wait);
> > +   if (pvcalls_front_write_todo(map))
> > +           mask |= POLLOUT | POLLWRNORM;
> > +   if (pvcalls_front_read_todo(map))
> > +           mask |= POLLIN | POLLRDNORM;
> > +   if (in_error != 0 || out_error != 0)
> > +           mask |= POLLERR;
> > +
> > +   return mask;
> > +}
> > +
> > +unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
> > +                          poll_table *wait)
> > +{
> > +   struct pvcalls_bedata *bedata;
> > +   struct sock_mapping *map;
> > +
> > +   if (!pvcalls_front_dev)
> > +           return POLLNVAL;
> > +   bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> > +
> > +   map = (struct sock_mapping *) READ_ONCE(sock->sk->sk_send_head);
> > +   if (!map)
> > +           return POLLNVAL;
> > +   if (map->active_socket)
> > +           return pvcalls_front_poll_active(file, bedata, map, wait);
> > +   else
> > +           return pvcalls_front_poll_passive(file, bedata, map, wait);
> > +}
> > +
> >  static const struct xenbus_device_id pvcalls_front_ids[] = {
> >     { "pvcalls" },
> >     { "" }
> > diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
> > index de24041..25e05b8 100644
> > --- a/drivers/xen/pvcalls-front.h
> > +++ b/drivers/xen/pvcalls-front.h
> > @@ -20,5 +20,8 @@ int pvcalls_front_recvmsg(struct socket *sock,
> >                       struct msghdr *msg,
> >                       size_t len,
> >                       int flags);
> > +unsigned int pvcalls_front_poll(struct file *file,
> > +                           struct socket *sock,
> > +                           poll_table *wait);
> >  
> >  #endif
> > 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.