[Xen-devel] [PATCH v2 4/4] 9pfs: introduce init_out/in_iov_from_pdu



Not all 9pfs transports share memory between request and response. For
those that don't, the transport needs to know up front how much memory
the response will require.

Split the existing init_iov_from_pdu function into two:
init_out_iov_from_pdu (for writes) and init_in_iov_from_pdu (for reads).
init_in_iov_from_pdu takes an additional size parameter specifying how
much memory the response message requires.

Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 hw/9pfs/9p.c               |  6 +++++-
 hw/9pfs/9p.h               |  6 ++++--
 hw/9pfs/virtio-9p-device.c | 28 ++++++++++++++++++----------
 3 files changed, 27 insertions(+), 13 deletions(-)
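
For context (not part of the patch): below is a minimal sketch of how a
transport that does not share request/response memory might implement
the new init_in_iov_from_pdu hook, using the size hint to reserve a
response buffer. The transport name, the example_bufs array, and the
fixed ring size are hypothetical, purely for illustration:

    #include "qemu/osdep.h"
    #include "hw/9pfs/9p.h"   /* V9fsPDU, V9fsTransport */

    /* Hypothetical per-request response buffer, one per outstanding PDU. */
    typedef struct ExamplePDUBuf {
        struct iovec iov;                /* single-element response sg list */
    } ExamplePDUBuf;

    static ExamplePDUBuf example_bufs[128];  /* assumes pdu->idx < 128 */

    static void example_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
                                             unsigned int *pniov, size_t size)
    {
        ExamplePDUBuf *buf = &example_bufs[pdu->idx];

        /* No guest-posted in_sg to hand back: allocate 'size' bytes locally;
         * push_and_notify would later copy the reply onto the transport. */
        buf->iov.iov_base = g_malloc(size);
        buf->iov.iov_len  = size;

        *piov  = &buf->iov;
        *pniov = 1;
    }

virtio can ignore the size argument because the guest already posts
in_sg buffers large enough for the reply, which is why
virtio_init_in_iov_from_pdu below leaves it unused.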

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 79d7201..ce1f9d9 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -1652,7 +1652,11 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
     struct iovec *iov;
     unsigned int niov;
 
-    pdu->s->transport->init_iov_from_pdu(pdu, &iov, &niov, is_write);
+    if (is_write) {
+        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
+    } else {
+        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size);
+    }
 
     qemu_iovec_init_external(&elem, iov, niov);
     qemu_iovec_init(qiov, niov);
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index c8c9aa8..4c4feaf 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -349,8 +349,10 @@ struct V9fsTransport {
                                 va_list ap);
     ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
                                   va_list ap);
-    void        (*init_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
-                                     unsigned int *pniov, bool is_write);
+    void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
+                                        unsigned int *pniov, size_t size);
+    void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
+                                         unsigned int *pniov);
     void        (*push_and_notify)(V9fsPDU *pdu);
 };
 
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 273425b..27a4a32 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -171,26 +171,34 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
     return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
 }
 
-static void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
-                                     unsigned int *pniov, bool is_write)
+/* The size parameter is used by other transports. Do not drop it. */
+static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
+                                        unsigned int *pniov, size_t size)
 {
     V9fsState *s = pdu->s;
     V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
     VirtQueueElement *elem = v->elems[pdu->idx];
 
-    if (is_write) {
-        *piov = elem->out_sg;
-        *pniov = elem->out_num;
-    } else {
-        *piov = elem->in_sg;
-        *pniov = elem->in_num;
-    }
+    *piov = elem->in_sg;
+    *pniov = elem->in_num;
+}
+
+static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
+                                         unsigned int *pniov)
+{
+    V9fsState *s = pdu->s;
+    V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
+    VirtQueueElement *elem = v->elems[pdu->idx];
+
+    *piov = elem->out_sg;
+    *pniov = elem->out_num;
 }
 
 static const struct V9fsTransport virtio_9p_transport = {
     .pdu_vmarshal = virtio_pdu_vmarshal,
     .pdu_vunmarshal = virtio_pdu_vunmarshal,
-    .init_iov_from_pdu = virtio_init_iov_from_pdu,
+    .init_in_iov_from_pdu = virtio_init_in_iov_from_pdu,
+    .init_out_iov_from_pdu = virtio_init_out_iov_from_pdu,
     .push_and_notify = virtio_9p_push_and_notify,
 };
 
-- 
1.9.1

