
[Xen-devel] [PATCH v4 08/13] xen/pvcalls: implement accept command



Introduce a waitqueue to allow only one outstanding accept command at
any given time and to implement polling on the passive socket. Introduce
a flags field to keep track of in-flight accept and poll commands.

Send PVCALLS_ACCEPT to the backend. Allocate a new active socket. Make
sure that only one accept command is executed at any given time by
setting PVCALLS_FLAG_ACCEPT_INFLIGHT and waiting on the
inflight_accept_req waitqueue.
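
For illustration, the single-in-flight gate boils down to the pattern
below. This is a simplified, self-contained sketch of the
test_and_set_bit()/waitqueue idiom used by pvcalls_front_accept()
further down, not a drop-in replacement: accept_gate, accept_gate_take
and accept_gate_release are made-up names for the example, and the
flags word is an unsigned long here rather than the uint8_t used in
the patch.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0

/* One flags word and one waitqueue per passive socket. */
struct accept_gate {
        unsigned long flags;
        wait_queue_head_t inflight_accept_req;
};

static int accept_gate_take(struct accept_gate *gate, bool nonblock)
{
        /* The first caller atomically claims the slot and proceeds. */
        if (!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, &gate->flags))
                return 0;
        /* Another accept is already in flight. */
        if (nonblock)
                return -EAGAIN;
        /* Sleep until the bit is cleared, then claim it ourselves. */
        if (wait_event_interruptible(gate->inflight_accept_req,
                        !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                                          &gate->flags)))
                return -EINTR;
        return 0;
}

static void accept_gate_release(struct accept_gate *gate)
{
        clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, &gate->flags);
        wake_up(&gate->inflight_accept_req);
}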

Convert the new struct sock_mapping pointer into a uint64_t and use it
as the id for the new socket to pass to the backend.
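
That conversion is just a cast: the kernel address of the mapping is
used as an opaque 64-bit id by which the socket is identified to the
backend. Concretely, from the hunk below:

req->u.accept.id = (uint64_t) map;        /* the listening socket  */
req->u.accept.id_new = (uint64_t) map2;   /* the new active socket */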

Check whether the accept call is non-blocking: in that case, after
sending the ACCEPT command to the backend, store the new struct
sock_mapping pointer and the in-flight req_id, then return -EAGAIN
(the backend replies only when there is something to accept). The next
time accept is called, we check whether the ACCEPT command has been
answered: if so, we pick up where we left off; otherwise we return
-EAGAIN again.
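
Seen from user space, this is the standard non-blocking accept retry
loop. The helper below is purely illustrative (it is not part of this
patch, and it assumes a socket family wired up to this frontend plus
the poll support on passive sockets that this series also introduces):

/* Hypothetical user space caller: listen_fd is a non-blocking
 * (O_NONBLOCK) listening socket. accept() keeps failing with EAGAIN
 * until the backend has answered the parked ACCEPT request. */
#include <errno.h>
#include <poll.h>
#include <stddef.h>
#include <sys/socket.h>

static int accept_retry(int listen_fd)
{
        for (;;) {
                int fd = accept(listen_fd, NULL, NULL);

                if (fd >= 0 || (errno != EAGAIN && errno != EWOULDBLOCK))
                        return fd; /* new connection, or a real error */

                /* Nothing to accept yet: wait for the passive socket to
                 * become readable, then call accept() again. */
                struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };

                if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
                        return -1;
        }
}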

Note that, unlike the other commands, we can use
wait_event_interruptible (instead of wait_event) in the case of accept
because we are able to track the req_id of the ACCEPT response that we
are waiting for.
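
In other words, each response lands in a dedicated slot indexed by
req_id, and the slot holds the answer until it is explicitly marked
reusable, so an interrupted waiter can return -EINTR without losing
the reply: a later call simply re-checks the slot. A minimal sketch of
that per-request wait (ring_state, rsp_slot and RSP_SLOTS are
illustrative names, not from the patch):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

#define RSP_SLOTS 128                   /* illustrative ring size */

struct rsp_slot {
        uint32_t req_id;                /* set to the request's id once answered */
        int32_t ret;
};

struct ring_state {
        wait_queue_head_t inflight_req; /* woken when a response arrives */
        struct rsp_slot rsp[RSP_SLOTS];
};

/*
 * Wait for the response to one specific request. Interruption is safe:
 * the slot keeps the answer until its req_id is overwritten with
 * PVCALLS_INVALID_ID, so a later caller can still pick it up.
 */
static int wait_for_response(struct ring_state *rs, uint32_t req_id)
{
        if (wait_event_interruptible(rs->inflight_req,
                        READ_ONCE(rs->rsp[req_id].req_id) == req_id))
                return -EINTR;
        return 0;
}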

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-front.c | 135 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |   3 +
 2 files changed, 138 insertions(+)

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 4ecc1e5..414eafd 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -78,6 +78,16 @@ struct sock_mapping {
 #define PVCALLS_STATUS_BIND          1
 #define PVCALLS_STATUS_LISTEN        2
                        uint8_t status;
+               /*
+                * Internal state-machine flags.
+                * Only one accept operation can be inflight for a socket.
+                * Only one poll operation can be inflight for a given socket.
+                */
+#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
+                       uint8_t flags;
+                       uint32_t inflight_req_id;
+                       struct sock_mapping *accept_map;
+                       wait_queue_head_t inflight_accept_req;
                } passive;
        };
 };
@@ -470,6 +480,131 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
        return ret;
 }
 
+int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+       struct pvcalls_bedata *bedata;
+       struct sock_mapping *map;
+       struct sock_mapping *map2 = NULL;
+       struct xen_pvcalls_request *req;
+       int notify, req_id, ret, evtchn, nonblock;
+
+       pvcalls_enter;
+       if (!pvcalls_front_dev) {
+               pvcalls_exit;
+               return -ENOTCONN;
+       }
+       bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+       map = (struct sock_mapping *) sock->sk->sk_send_head;
+       if (!map) {
+               pvcalls_exit;
+               return -ENOTSOCK;
+       }
+
+       if (map->passive.status != PVCALLS_STATUS_LISTEN) {
+               pvcalls_exit;
+               return -EINVAL;
+       }
+
+       nonblock = flags & SOCK_NONBLOCK;
+       /*
+        * Backend only supports 1 inflight accept request, will return
+        * errors for the others
+        */
+       if (test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+                            (void *)&map->passive.flags)) {
+               req_id = READ_ONCE(map->passive.inflight_req_id);
+               if (req_id != PVCALLS_INVALID_ID &&
+                   READ_ONCE(bedata->rsp[req_id].req_id) == req_id) {
+                       map2 = map->passive.accept_map;
+                       goto received;
+               }
+               if (nonblock) {
+                       pvcalls_exit;
+                       return -EAGAIN;
+               }
+               if (wait_event_interruptible(map->passive.inflight_accept_req,
+                       !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+                                         (void *)&map->passive.flags))) {
+                       pvcalls_exit;
+                       return -EINTR;
+               }
+       }
+
+       spin_lock(&bedata->socket_lock);
+       ret = get_request(bedata, &req_id);
+       if (ret < 0) {
+               spin_unlock(&bedata->socket_lock);
+               pvcalls_exit;
+               return ret;
+       }
+       map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+       if (map2 == NULL) {
+               spin_unlock(&bedata->socket_lock);
+               pvcalls_exit;
+               return -ENOMEM;
+       }
+       ret = create_active(map2, &evtchn);
+       if (ret < 0) {
+               kfree(map2);
+               spin_unlock(&bedata->socket_lock);
+               pvcalls_exit;
+               return -ENOMEM;
+       }
+       list_add_tail(&map2->list, &bedata->socket_mappings);
+
+       req = RING_GET_REQUEST(&bedata->ring, req_id);
+       req->req_id = req_id;
+       req->cmd = PVCALLS_ACCEPT;
+       req->u.accept.id = (uint64_t) map;
+       req->u.accept.ref = map2->active.ref;
+       req->u.accept.id_new = (uint64_t) map2;
+       req->u.accept.evtchn = evtchn;
+       map->passive.accept_map = map2;
+
+       bedata->ring.req_prod_pvt++;
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+       spin_unlock(&bedata->socket_lock);
+       if (notify)
+               notify_remote_via_irq(bedata->irq);
+       /* We could check if we have received a response before returning. */
+       if (nonblock) {
+               WRITE_ONCE(map->passive.inflight_req_id, req_id);
+               pvcalls_exit;
+               return -EAGAIN;
+       }
+
+       if (wait_event_interruptible(bedata->inflight_req,
+               READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
+               pvcalls_exit;
+               return -EINTR;
+       }
+
+received:
+       map2->sock = newsock;
+       newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+       if (!newsock->sk) {
+               WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+               WRITE_ONCE(map->passive.inflight_req_id, PVCALLS_INVALID_ID);
+               pvcalls_front_free_map(bedata, map2);
+               kfree(map2);
+               pvcalls_exit;
+               return -ENOMEM;
+       }
+       newsock->sk->sk_send_head = (void *)map2;
+
+       clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
+       wake_up(&map->passive.inflight_accept_req);
+
+       ret = bedata->rsp[req_id].ret;
+       /* read ret, then set this rsp slot to be reused */
+       smp_mb();
+       WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+       WRITE_ONCE(map->passive.inflight_req_id, PVCALLS_INVALID_ID);
+       pvcalls_exit;
+       return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
        { "pvcalls" },
        { "" }
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index aa8fe10..ab4f1da 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -10,5 +10,8 @@ int pvcalls_front_bind(struct socket *sock,
                       struct sockaddr *addr,
                       int addr_len);
 int pvcalls_front_listen(struct socket *sock, int backlog);
+int pvcalls_front_accept(struct socket *sock,
+                        struct socket *newsock,
+                        int flags);
 
 #endif
-- 
1.9.1

