
Re: [Xen-devel] [PATCH v3 17/18] xen/pvcalls: implement write



On 02/06/17 21:31, Stefano Stabellini wrote:
> When the other end notifies us that there is data to be written
> (pvcalls_back_conn_event), increment the io and write counters, and
> schedule the ioworker.
> 
> Implement the write function called by the ioworker: read the data from
> the data ring and write it to the socket by calling inet_sendmsg.
> 
> Set out_error on error.
> 
> Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> CC: boris.ostrovsky@xxxxxxxxxx
> CC: jgross@xxxxxxxx
> ---
>  drivers/xen/pvcalls-back.c | 74 +++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 73 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> index e7d2b85..fe3e70f 100644
> --- a/drivers/xen/pvcalls-back.c
> +++ b/drivers/xen/pvcalls-back.c
> @@ -180,7 +180,66 @@ static void pvcalls_conn_back_read(unsigned long opaque)
>  
>  static int pvcalls_conn_back_write(struct sock_mapping *map)
>  {
> -     return 0;
> +     struct pvcalls_data_intf *intf = map->ring;
> +     struct pvcalls_data *data = &map->data;
> +     struct msghdr msg;
> +     struct kvec vec[2];
> +     RING_IDX cons, prod, size, ring_size;
> +     int ret;
> +
> +     cons = intf->out_cons;
> +     prod = intf->out_prod;
> +     /* read the indexes before dealing with the data */
> +     virt_mb();
> +
> +     ring_size = XEN_FLEX_RING_SIZE(map->ring_order);
> +     size = pvcalls_queued(prod, cons, ring_size);
> +     if (size == 0)
> +             return 0;
> +
> +     memset(&msg, 0, sizeof(msg));
> +     msg.msg_flags |= MSG_DONTWAIT;
> +     msg.msg_iter.type = ITER_KVEC|READ;
> +     msg.msg_iter.count = size;
> +     if (pvcalls_mask(prod, ring_size) > pvcalls_mask(cons, ring_size)) {
> +             vec[0].iov_base = data->out + pvcalls_mask(cons, ring_size);
> +             vec[0].iov_len = size;
> +             msg.msg_iter.kvec = vec;
> +             msg.msg_iter.nr_segs = 1;
> +     } else {
> +             vec[0].iov_base = data->out + pvcalls_mask(cons, ring_size);
> +             vec[0].iov_len = ring_size - pvcalls_mask(cons, ring_size);
> +             vec[1].iov_base = data->out;
> +             vec[1].iov_len = size - vec[0].iov_len;
> +             msg.msg_iter.kvec = vec;
> +             msg.msg_iter.nr_segs = 2;
> +     }
> +
> +     atomic_set(&map->write, 0);
> +     ret = inet_sendmsg(map->sock, &msg, size);
> +     if (ret == -EAGAIN || ret < size) {

Do you really want to do this for all errors?
Or did you mean:
        if ((ret >= 0 && ret < size) || ret == -EAGAIN)
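
With that condition only a short write or -EAGAIN requeues the work;
other errors fall through to the out_error handling below. As a sketch
(just the hunk above with that condition swapped in, not a tested change):

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if ((ret >= 0 && ret < size) || ret == -EAGAIN) {
		/* short write or would-block: let the ioworker retry */
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	if (ret == -EAGAIN)
		return ret;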


Juergen

> +             atomic_inc(&map->write);
> +             atomic_inc(&map->io);
> +     }
> +     if (ret == -EAGAIN)
> +             return ret;
> +
> +     /* write the data, then update the indexes */
> +     virt_wmb();
> +     if (ret < 0) {
> +             intf->out_error = ret;
> +     } else {
> +             intf->out_error = 0;
> +             intf->out_cons = cons + ret;
> +             prod = intf->out_prod;
> +     }
> +     /* update the indexes, then notify the other end */
> +     virt_wmb();
> +     if (prod != cons + ret)
> +             atomic_inc(&map->write);
> +     notify_remote_via_irq(map->irq);
> +
> +     return ret;
>  }
>  
>  static void pvcalls_back_ioworker(struct work_struct *work)
> @@ -837,6 +896,19 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
>  
>  static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
>  {
> +     struct sock_mapping *map = sock_map;
> +     struct pvcalls_ioworker *iow;
> +
> +     if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
> +             map->sock->sk->sk_user_data != map)
> +             return IRQ_HANDLED;
> +
> +     iow = &map->ioworker;
> +
> +     atomic_inc(&map->write);
> +     atomic_inc(&map->io);
> +     queue_work_on(iow->cpu, iow->wq, &iow->register_work);
> +
>       return IRQ_HANDLED;
>  }
>  
> 
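
For readers new to the flex-ring handling above: the vec[0]/vec[1] setup is
the usual split of a power-of-two circular buffer into at most two contiguous
segments. The userspace sketch below re-implements that split with stand-in
helpers (RING_SIZE, MASK and QUEUED instead of XEN_FLEX_RING_SIZE,
pvcalls_mask and pvcalls_queued); it is an illustration only, not kernel code.

/*
 * Userspace sketch of the wrap-around split used by pvcalls_conn_back_write().
 * The macros below are illustrative stand-ins, not the Xen definitions.
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16u                          /* must be a power of two */
#define MASK(i)   ((i) & (RING_SIZE - 1))
#define QUEUED(prod, cons) ((prod) - (cons))   /* indexes are free-running */

struct seg { const char *base; unsigned len; };

/* Split the queued range [cons, prod) into at most two contiguous segments. */
static int split_ring(const char *buf, unsigned cons, unsigned prod,
		      struct seg seg[2])
{
	unsigned size = QUEUED(prod, cons);

	if (MASK(prod) > MASK(cons)) {
		/* data does not wrap: one segment, like vec[0] alone */
		seg[0].base = buf + MASK(cons);
		seg[0].len = size;
		return 1;
	}
	/* data wraps: tail of the buffer plus its head, like vec[0] + vec[1] */
	seg[0].base = buf + MASK(cons);
	seg[0].len = RING_SIZE - MASK(cons);
	seg[1].base = buf;
	seg[1].len = size - seg[0].len;
	return 2;
}

int main(void)
{
	char ring[RING_SIZE];
	struct seg seg[2];
	unsigned cons = 12, prod = 22;		/* 10 queued bytes crossing the end */
	int n, i;

	memcpy(ring + MASK(cons), "abcd", 4);	/* offsets 12..15 */
	memcpy(ring, "efghij", 6);		/* offsets 0..5 after the wrap */

	n = split_ring(ring, cons, prod, seg);
	for (i = 0; i < n; i++)
		printf("segment %d: %.*s\n", i, (int)seg[i].len, seg[i].base);
	return 0;
}

It prints "segment 0: abcd" and "segment 1: efghij", i.e. the wrapped case
that needs two kvecs in the patch.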

