
[Xen-devel] [PATCH net-next v2] xen-netback: make copy batch size configurable



Commit eb1723a29b9a ("xen-netback: refactor guest rx") refactored Rx
handling and, as a result, decreased the maximum number of grant copy
ops per batch from 4352 to 64. Before that commit we would drain the
rx_queue (while there were enough slots in the ring to put packets),
then copy to all pages and write responses on the ring. With the
refactor we do almost the same, except that the last two steps are
performed every COPY_BATCH_SIZE (64) copies.
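For illustration only, here is a minimal user-space sketch of that
batch-and-flush pattern (the names are hypothetical stand-ins; in the
real driver the queued items are struct gnttab_copy ops, and the flush
step issues the grant copy hypercall and writes responses on the ring,
cf. xenvif_rx_copy_add()/xenvif_rx_copy_flush() in the diff below):

#include <stdio.h>

#define BATCH_SIZE 64

struct copy_batch {
	int op[BATCH_SIZE];	/* stand-in for struct gnttab_copy op[] */
	unsigned int num;	/* ops currently queued */
};

/* Stand-in for xenvif_rx_copy_flush(): here the driver would issue
 * the grant copy hypercall for all queued ops, then write the
 * corresponding responses on the shared ring.
 */
static void flush(struct copy_batch *b)
{
	printf("flushing %u copies\n", b->num);
	b->num = 0;
}

/* Stand-in for xenvif_rx_copy_add(): flush first when the batch is
 * full, then queue one more copy op.
 */
static void add(struct copy_batch *b, int o)
{
	if (b->num == BATCH_SIZE)
		flush(b);
	b->op[b->num++] = o;
}

int main(void)
{
	struct copy_batch b = { .num = 0 };
	int i;

	for (i = 0; i < 200; i++)	/* 200 ops -> flushes at 64, 128, 192 */
		add(&b, i);
	if (b.num)			/* final partial batch (8 ops) */
		flush(&b);
	return 0;
}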

For big packets, a batch of 64 means copying at best 3 packets per
batch (17 copies per packet, floor(64/17) = 3) and at worst only 1
packet (34 copies per packet, floor(64/34) = 1, i.e. when every frag
plus the head crosses the 4k grant boundary), which can happen when
packets originate from a local backend process.

Instead of hardcoding it at 64 grant copies, let the user select the
value (keeping the current one as the default) by introducing a
`copy_batch_size` module parameter. This allows users to select larger
batches (e.g. for better throughput with big packets), as was possible
prior to the above-mentioned commit.
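
With the patch applied, the parameter can be set at module load time
or, since it is declared with mode 0644, adjusted at runtime through
sysfs. A usage sketch (256 is only an illustrative value; per the rx.c
hunk below, a runtime change takes effect at the next copy-op flush):

  modprobe xen-netback copy_batch_size=256

  echo 256 > /sys/module/xen_netback/parameters/copy_batch_size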

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
Changes since v1:
 * move rx_copy.{idx,op} reallocation to separate helper
 Addressed Paul's comments:
 * rename xenvif_copy_state#size field to batch_size
 * argument `size` should be unsigned int
 * vfree is safe with NULL
 * realloc rx_copy.{idx,op} after copy op flush
---
 drivers/net/xen-netback/common.h    |  7 +++++--
 drivers/net/xen-netback/interface.c | 16 +++++++++++++++-
 drivers/net/xen-netback/netback.c   |  5 +++++
 drivers/net/xen-netback/rx.c        | 35 ++++++++++++++++++++++++++++++++++-
 4 files changed, 59 insertions(+), 4 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..8e4eaf3a507d 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -129,8 +129,9 @@ struct xenvif_stats {
 #define COPY_BATCH_SIZE 64
 
 struct xenvif_copy_state {
-       struct gnttab_copy op[COPY_BATCH_SIZE];
-       RING_IDX idx[COPY_BATCH_SIZE];
+       struct gnttab_copy *op;
+       RING_IDX *idx;
+       unsigned int batch_size;
        unsigned int num;
        struct sk_buff_head *completed;
 };
@@ -358,6 +359,7 @@ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
 void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+int xenvif_rx_copy_realloc(struct xenvif_queue *queue, unsigned int size);
 
 void xenvif_carrier_on(struct xenvif *vif);
 
@@ -381,6 +383,7 @@ extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_stall_timeout_msecs;
 extern unsigned int xenvif_max_queues;
 extern unsigned int xenvif_hash_cache_size;
+extern unsigned int xenvif_copy_batch_size;
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *xen_netback_dbg_root;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe494fef0..e12eb64ab0a9 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -518,6 +518,12 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 {
        int err, i;
 
+       err = xenvif_rx_copy_realloc(queue, xenvif_copy_batch_size);
+       if (err) {
+               netdev_err(queue->vif->dev, "Could not alloc rx_copy\n");
+               goto err;
+       }
+
        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec  = 0UL;
        timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
@@ -544,7 +550,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
-               return -ENOMEM;
+               goto err;
        }
 
        for (i = 0; i < MAX_PENDING_REQS; i++) {
@@ -556,6 +562,11 @@ int xenvif_init_queue(struct xenvif_queue *queue)
        }
 
        return 0;
+
+err:
+       vfree(queue->rx_copy.op);
+       vfree(queue->rx_copy.idx);
+       return -ENOMEM;
 }
 
 void xenvif_carrier_on(struct xenvif *vif)
@@ -788,6 +799,9 @@ void xenvif_disconnect_ctrl(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
+       vfree(queue->rx_copy.op);
+       vfree(queue->rx_copy.idx);
+       queue->rx_copy.batch_size = 0;
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a27daa23c9dc..3a5e1d7ac2f4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,11 @@ unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
+/* This is the maximum batch of grant copies on Rx */
+unsigned int xenvif_copy_batch_size = COPY_BATCH_SIZE;
+module_param_named(copy_batch_size, xenvif_copy_batch_size, uint, 0644);
+MODULE_PARM_DESC(copy_batch_size, "Maximum batch of grant copies on Rx");
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);
 
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b1cf7c6f407a..07eebd75e668 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -130,6 +130,36 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
        }
 }
 
+int xenvif_rx_copy_realloc(struct xenvif_queue *queue, unsigned int size)
+{
+       void *op = NULL, *idx = NULL;
+
+       if (!size || queue->rx_copy.num)
+               return -EINVAL;
+
+       op = vzalloc(size * sizeof(struct gnttab_copy));
+       if (!op)
+               goto err;
+
+       idx = vzalloc(size * sizeof(RING_IDX));
+       if (!idx)
+               goto err;
+
+       vfree(queue->rx_copy.op);
+       vfree(queue->rx_copy.idx);
+
+       queue->rx_copy.op = op;
+       queue->rx_copy.idx = idx;
+       queue->rx_copy.batch_size = size;
+       netdev_dbg(queue->vif->dev, "Reallocated rx_copy for batch size %u\n",
+                  size);
+       return 0;
+
+err:
+       vfree(op);
+       return -ENOMEM;
+}
+
 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 {
        unsigned int i;
@@ -162,6 +192,9 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
                notify_remote_via_irq(queue->rx_irq);
 
        __skb_queue_purge(queue->rx_copy.completed);
+
+       if (unlikely(xenvif_copy_batch_size != queue->rx_copy.batch_size))
+               xenvif_rx_copy_realloc(queue, xenvif_copy_batch_size);
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
@@ -172,7 +205,7 @@ static void xenvif_rx_copy_add(struct xenvif_queue *queue,
        struct page *page;
        struct xen_page_foreign *foreign;
 
-       if (queue->rx_copy.num == COPY_BATCH_SIZE)
+       if (queue->rx_copy.num == queue->rx_copy.batch_size)
                xenvif_rx_copy_flush(queue);
 
        op = &queue->rx_copy.op[queue->rx_copy.num];
-- 
2.11.0

