Re: [Xen-devel] [PATCH v4 4/7] xen/9pfs: connect to the backend



On 15/03/17 20:23, Stefano Stabellini wrote:
> Implement functions to handle the xenbus handshake. Upon connection,
> allocate the rings according to the protocol specification.
> 
> Initialize a work_struct and a wait_queue. The work_struct will be used
> to schedule work upon receiving an event channel notification from the
> backend. The wait_queue will be used to wait when the ring is full and
> we need to send a new request.
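
As an aside, to make the description above concrete: the event channel
interrupt handler is expected to do nothing but schedule the work item,
while request submitters sleep on the wait queue until the backend frees
ring space. A rough sketch of that pattern, with illustrative names that
need not match the rest of the series:

	/* one instance per ring; other fields (ring pages, grant ref,
	 * event channel, irq, lock) omitted here */
	struct xen_9pfs_dataring {
		struct xen_9pfs_front_priv *priv;
		struct work_struct work;	/* bottom half processing the ring */
		wait_queue_head_t wq;		/* writers wait here while the ring is full */
	};

	static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
	{
		struct xen_9pfs_dataring *ring = r;

		/* defer the real work out of interrupt context */
		schedule_work(&ring->work);
		return IRQ_HANDLED;
	}
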
> 
> Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> CC: groug@xxxxxxxx
> CC: boris.ostrovsky@xxxxxxxxxx
> CC: jgross@xxxxxxxx
> CC: Eric Van Hensbergen <ericvh@xxxxxxxxx>
> CC: Ron Minnich <rminnich@xxxxxxxxxx>
> CC: Latchesar Ionkov <lucho@xxxxxxxxxx>
> CC: v9fs-developer@xxxxxxxxxxxxxxxxxxxxx
> ---
>  net/9p/trans_xen.c | 248 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 248 insertions(+)
> 
> diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> index b1333b2..ada2b0c 100644
> --- a/net/9p/trans_xen.c
> +++ b/net/9p/trans_xen.c
>  static int xen_9pfs_front_probe(struct xenbus_device *dev,
>               const struct xenbus_device_id *id)
>  {
> +     int ret, i;
> +     struct xenbus_transaction xbt;
> +     struct xen_9pfs_front_priv *priv = NULL;
> +     char *versions;
> +     unsigned int max_rings, max_ring_order, len;
> +
> +     versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
> +     if (!len)
> +             return -EINVAL;
> +     if (strcmp(versions, "1")) {
> +             kfree(versions);
> +             return -EINVAL;
> +     }
> +     kfree(versions);
> +     max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
> +     if (max_rings < XEN_9PFS_NUM_RINGS)
> +             return -EINVAL;
> +     max_ring_order = xenbus_read_unsigned(dev->otherend, "max-ring-page-order", 0);
> +     if (max_ring_order < XEN_9PFS_RING_ORDER)
> +             return -EINVAL;
> +
> +
> +     priv = kzalloc(sizeof(struct xen_9pfs_front_priv), GFP_KERNEL);
> +     if (!priv)
> +             return -ENOMEM;
> +
> +     priv->dev = dev;
> +     priv->num_rings = XEN_9PFS_NUM_RINGS;
> +     priv->rings = kzalloc(sizeof(struct xen_9pfs_dataring) * priv->num_rings,
> +                           GFP_KERNEL);
> +     if (!priv->rings) {
> +             kfree(priv);
> +             return -ENOMEM;
> +     }
> +
> +     for (i = 0; i < priv->num_rings; i++) {
> +             priv->rings[i].priv = priv;
> +             ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
> +             if (ret < 0)
> +                     goto error;
> +     }
> +
> + again:
> +     ret = xenbus_transaction_start(&xbt);
> +     if (ret) {
> +             xenbus_dev_fatal(dev, ret, "starting transaction");
> +             goto error;
> +     }
> +     ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
> +     if (ret)
> +             goto error_xenbus;
> +     ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u", priv->num_rings);
> +     if (ret)
> +             goto error_xenbus;
> +     for (i = 0; i < priv->num_rings; i++) {
> +             char str[16];
> +
> +             BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
> +             sprintf(str, "ring-ref%u", i);
> +             ret = xenbus_printf(xbt, dev->nodename, str, "%d", priv->rings[i].ref);
> +             if (ret)
> +                     goto error_xenbus;
> +
> +             sprintf(str, "event-channel-%u", i);
> +             ret = xenbus_printf(xbt, dev->nodename, str, "%u", priv->rings[i].evtchn);
> +             if (ret)
> +                     goto error_xenbus;
> +     }
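
Side note: the quoting stops a few lines further down, but presumably the
transaction is then closed with the usual retry-on-EAGAIN pattern matching
the "again:" label above, roughly:

	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}
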
> +     priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
> +     if (ret)
> +             goto error_xenbus;

Shouldn't you test priv->tag instead? At this point ret still holds the
return value of the last xenbus_printf() above, and xenbus_read() reports
failure through an ERR_PTR-encoded pointer rather than by setting ret.
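Something along these lines, perhaps (untested sketch only):

	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
	if (IS_ERR(priv->tag)) {
		ret = PTR_ERR(priv->tag);
		goto error_xenbus;
	}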


Juergen

