[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v5 17/18] xen/pvcalls: implement write



When the other end notifies us that there is data to be written
(pvcalls_back_conn_event), increment the io and write counters, and
schedule the ioworker.

Implement the write function, called by the ioworker: read the data from
the data ring and write it to the socket by calling inet_sendmsg.

Set out_error on error.

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-back.c | 74 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 73 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index ccceabd..424dcac 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -179,7 +179,66 @@ static void pvcalls_conn_back_read(void *opaque)
 
 static int pvcalls_conn_back_write(struct sock_mapping *map)
 {
-       return 0;
+       struct pvcalls_data_intf *intf = map->ring;
+       struct pvcalls_data *data = &map->data;
+       struct msghdr msg;
+       struct kvec vec[2];
+       RING_IDX cons, prod, size, array_size;
+       int ret;
+
+       cons = intf->out_cons;
+       prod = intf->out_prod;
+       /* read the indexes before dealing with the data */
+       virt_mb();
+
+       array_size = XEN_FLEX_RING_SIZE(map->ring_order);
+       size = pvcalls_queued(prod, cons, array_size);
+       if (size == 0)
+               return 0;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.msg_flags |= MSG_DONTWAIT;
+       msg.msg_iter.type = ITER_KVEC|READ;
+       msg.msg_iter.count = size;
+       if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
+               vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+               vec[0].iov_len = size;
+               msg.msg_iter.kvec = vec;
+               msg.msg_iter.nr_segs = 1;
+       } else {
+               vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+               vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
+               vec[1].iov_base = data->out;
+               vec[1].iov_len = size - vec[0].iov_len;
+               msg.msg_iter.kvec = vec;
+               msg.msg_iter.nr_segs = 2;
+       }
+
+       atomic_set(&map->write, 0);
+       ret = inet_sendmsg(map->sock, &msg, size);
+       if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
+               atomic_inc(&map->write);
+               atomic_inc(&map->io);
+       }
+       if (ret == -EAGAIN)
+               return ret;
+
+       /* write the data, then update the indexes */
+       virt_wmb();
+       if (ret < 0) {
+               intf->out_error = ret;
+       } else {
+               intf->out_error = 0;
+               intf->out_cons = cons + ret;
+               prod = intf->out_prod;
+       }
+       /* update the indexes, then notify the other end */
+       virt_wmb();
+       if (prod != cons + ret)
+               atomic_inc(&map->write);
+       notify_remote_via_irq(map->irq);
+
+       return ret;
 }
 
 static void pvcalls_back_ioworker(struct work_struct *work)
@@ -849,6 +908,19 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
 
 static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
 {
+       struct sock_mapping *map = sock_map;
+       struct pvcalls_ioworker *iow;
+
+       if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
+               map->sock->sk->sk_user_data != map)
+               return IRQ_HANDLED;
+
+       iow = &map->ioworker;
+
+       atomic_inc(&map->write);
+       atomic_inc(&map->io);
+       queue_work(iow->wq, &iow->register_work);
+
        return IRQ_HANDLED;
 }
 
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.