Re: [Xen-devel] [PATCH v3 6/7] xen/9pfs: receive responses



On 14/03/17 00:50, Stefano Stabellini wrote:
> Upon receiving a notification from the backend, schedule the
> p9_xen_response work_struct. p9_xen_response checks if any responses are
> available; if so, it reads them one by one, calling p9_client_cb to send
> them up to the 9p layer (p9_client_cb completes the request). Handle the
> ring following the Xen 9pfs specification.
> 
> Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: jgross@xxxxxxxx
> CC: Eric Van Hensbergen <ericvh@xxxxxxxxx>
> CC: Ron Minnich <rminnich@xxxxxxxxxx>
> CC: Latchesar Ionkov <lucho@xxxxxxxxxx>
> CC: v9fs-developer@xxxxxxxxxxxxxxxxxxxxx
> ---
>  net/9p/trans_xen.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 55 insertions(+)
> 
> diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> index b40bbcb..1a7eb52 100644
> --- a/net/9p/trans_xen.c
> +++ b/net/9p/trans_xen.c
> @@ -168,6 +168,61 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
>  
>  static void p9_xen_response(struct work_struct *work)
>  {
> +     struct xen_9pfs_front_priv *priv;
> +     struct xen_9pfs_dataring *ring;
> +     RING_IDX cons, prod, masked_cons, masked_prod;
> +     struct xen_9pfs_header h;
> +     struct p9_req_t *req;
> +     int status;
> +
> +     ring = container_of(work, struct xen_9pfs_dataring, work);
> +     priv = ring->priv;
> +
> +     while (1) {
> +             cons = ring->intf->in_cons;
> +             prod = ring->intf->in_prod;
> +             virt_rmb();
> +
> +             if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> +                     notify_remote_via_irq(ring->irq);
> +                     return;
> +             }
> +
> +             masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> +             masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
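
Aside, since the index arithmetic may not be obvious to everyone reading
along: xen_9pfs_queued() and xen_9pfs_mask() are generated by
DEFINE_XEN_FLEX_RING(xen_9pfs). Conceptually they reduce to something like
the sketch below -- simplified for illustration, not the exact macros from
xen/interface/io/ring.h:

	/* Indices are free-running 32-bit counters; ring_size is a
	 * power of two, so masking yields the offset into the buffer. */
	static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
	{
		return idx & (ring_size - 1);
	}

	static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
					       RING_IDX ring_size)
	{
		/* Unsigned wrap-around makes prod - cons the number of
		 * queued bytes, given the invariant that the producer
		 * never runs more than ring_size bytes ahead of the
		 * consumer. */
		return prod - cons;
	}
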
> +
> +             /* First, read just the header */
> +             xen_9pfs_read_packet(ring->data.in,
> +                             masked_prod, &masked_cons,
> +                             XEN_9PFS_RING_SIZE, &h, sizeof(h));
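
Not an issue with the patch, but for readers: xen_9pfs_read_packet() is
generated by the same macro. Roughly -- this is a sketch inferred from the
call sites here, not the exact generated helper -- it copies size bytes out
of the ring, splitting the memcpy in two when the data wraps past the end
of the buffer, and advances the masked consumer index:

	static inline void xen_9pfs_read_packet(const unsigned char *buf,
						RING_IDX masked_prod,
						RING_IDX *masked_cons,
						RING_IDX ring_size,
						void *opaque, size_t size)
	{
		if (*masked_cons < masked_prod ||
		    size <= ring_size - *masked_cons) {
			/* Requested bytes are contiguous in the ring. */
			memcpy(opaque, buf + *masked_cons, size);
		} else {
			/* Data wraps: copy the tail, then the head. */
			memcpy(opaque, buf + *masked_cons,
			       ring_size - *masked_cons);
			memcpy((unsigned char *)opaque +
			       (ring_size - *masked_cons), buf,
			       size - (ring_size - *masked_cons));
		}
		*masked_cons = xen_9pfs_mask(*masked_cons + size, ring_size);
	}
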
> +
> +             req = p9_tag_lookup(priv->client, h.tag);
> +             if (!req || req->status != REQ_STATUS_SENT) {
> +                     dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
> +                     cons += h.size;
> +                     virt_mb();
> +                     ring->intf->in_cons = cons;
> +                     continue;
> +             }
> +
> +             memcpy(req->rc, &h, sizeof(h));
> +             req->rc->offset = 0;
> +
> +             masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +             /* Then, read the whole packet (including the header) */
> +             xen_9pfs_read_packet(ring->data.in,
> +                             masked_prod, &masked_cons,
> +                             XEN_9PFS_RING_SIZE, req->rc->sdata, h.size);

Please align the parameters to the same column.
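I.e. something like:

		xen_9pfs_read_packet(ring->data.in, masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE, req->rc->sdata,
				     h.size);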

> +
> +             virt_mb();
> +             cons += h.size;
> +             ring->intf->in_cons = cons;
> +
> +             status = (req->status != REQ_STATUS_ERROR) ?
> +                     REQ_STATUS_RCVD : REQ_STATUS_ERROR;
> +
> +             p9_client_cb(priv->client, req, status);
> +     }
>  }
>  
>  static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
> 
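One more contextual note: the commit message says the p9_xen_response
work_struct gets scheduled upon receiving a notification from the backend.
That scheduling presumably lives in the event handler whose signature
closes this hunk; a minimal sketch of what such a handler would look like
(its body is not part of this patch, so treat this as illustrative):

	static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
	{
		struct xen_9pfs_dataring *ring = r;

		/* Ignore spurious events before the client is set up. */
		if (!ring || !ring->priv->client)
			return IRQ_HANDLED;

		/* Defer ring processing to the p9_xen_response work item. */
		schedule_work(&ring->work);

		return IRQ_HANDLED;
	}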


Juergen
