
[Xen-devel] [PATCH RFC V2] xen-netback: remove guest RX path dependence on MAX_SKB_FRAGS



This dependence is undesirable and logically incorrect.

It's undesirable because the Xen network protocol should not depend on an
OS-specific constant.

It's incorrect because the number of ring slots required doesn't correspond
to the number of frags an SKB has (consider compound page frags).

This patch removes this dependence by correctly counting the ring slots
required.
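
As an illustration, here is a minimal sketch of the counting idea (a
hedged sketch only, not the actual xen_netbk_count_skb_slots
implementation, which also accounts for page offsets and GSO metadata;
sketch_count_skb_slots is a made-up name):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Sketch: count slots from byte lengths, not from nr_frags.  A single
 * compound-page frag can span several PAGE_SIZE ring slots; e.g. with
 * 4KiB pages, one 64KiB frag counts as 1 frag but needs 16 slots.
 */
static unsigned int sketch_count_skb_slots(const struct sk_buff *skb)
{
	/* The linear area may itself span multiple pages. */
	unsigned int slots = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		slots += DIV_ROUND_UP(
			skb_frag_size(&skb_shinfo(skb)->frags[i]),
			PAGE_SIZE);

	return slots;
}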

Changes in V2:
Don't play with rx_req_cons_peek; just store the number of slots in
vif->packet_slots.
Calculate the number of ring slots only once and store it in the SKB
control block for later use.
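
For reference, the control-block pattern the second change relies on
looks roughly like the sketch below (get_cb() and the _sketch suffix
are illustrative only; the patch itself casts skb->cb directly):

#include <linux/bug.h>
#include <linux/skbuff.h>

/* Per-packet state lives in skb->cb, the 48-byte scratch area a
 * driver owns while it holds the skb.
 */
struct skb_cb_overlay_sketch {
	int meta_slots_used;
	int slots_required;
};

static inline struct skb_cb_overlay_sketch *get_cb(struct sk_buff *skb)
{
	/* Guard against the overlay outgrowing skb->cb. */
	BUILD_BUG_ON(sizeof(struct skb_cb_overlay_sketch) > sizeof(skb->cb));
	return (struct skb_cb_overlay_sketch *)skb->cb;
}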

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/net/xen-netback/common.h    |    7 ++++++
 drivers/net/xen-netback/interface.c |   33 ++++++++++++++++++++++++----
 drivers/net/xen-netback/netback.c   |   41 ++++++++++++-----------------------
 3 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77e..305b40c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -86,6 +86,8 @@ struct xenvif {
         * once all queued skbs are put on the ring.
         */
        RING_IDX rx_req_cons_peek;
+#define XEN_NETBK_MIN_PACKET_SLOT 1 /* packet must occupy at least 1 slot */
+       unsigned int packet_slots;
 
        /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
        unsigned long   credit_bytes;
@@ -104,6 +106,11 @@ struct xenvif {
        wait_queue_head_t waiting_to_free;
 };
 
+struct skb_cb_overlay {
+       int meta_slots_used;
+       int slots_required;
+};
+
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
 {
        return to_xenbus_device(vif->dev->dev.parent);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db..b6393ce 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -96,20 +96,45 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
+       struct skb_cb_overlay *sco = (struct skb_cb_overlay *)skb->cb;
+       unsigned int ring_slots_required;
 
        BUG_ON(skb->dev != dev);
 
        if (vif->netbk == NULL)
                goto drop;
 
-       /* Drop the packet if the target domain has no receive buffers. */
-       if (!xenvif_rx_schedulable(vif))
+       ring_slots_required = xen_netbk_count_skb_slots(vif, skb);
+       sco->slots_required = ring_slots_required;
+
+       /* vif->packet_slots stores the number of ring slots required
+        * by this SKB; it affects xen_netbk_rx_ring_full. After we
+        * determine what to do with this packet (either queue it or
+        * drop it), packet_slots must be reset to
+        * XEN_NETBK_MIN_PACKET_SLOT (1).
+        */
+       vif->packet_slots = ring_slots_required;
+
+       /* Drop the packet if the target domain has no receive
+        * buffers. This check must go *after* setting
+        * vif->packet_slots as xenvif_rx_schedulable calls
+        * xen_netbk_rx_ring_full.
+        */
+       if (!xenvif_rx_schedulable(vif)) {
+               vif->packet_slots = XEN_NETBK_MIN_PACKET_SLOT;
                goto drop;
+       }
 
-       /* Reserve ring slots for the worst-case number of fragments. */
-       vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+       /* Reserve ring slots for the fragments. */
+       vif->rx_req_cons_peek += ring_slots_required;
        xenvif_get(vif);
 
+       /* Resetting packet_slots must go *before*
+        * xen_netbk_must_stop_queue, which calls
+        * xen_netbk_rx_ring_full.
+        */
+       vif->packet_slots = XEN_NETBK_MIN_PACKET_SLOT;
+
        if (vif->can_queue && xen_netbk_must_stop_queue(vif))
                netif_stop_queue(dev);
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de..2b9f1f3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -289,21 +289,10 @@ static void xen_netbk_kick_thread(struct xen_netbk *netbk)
        wake_up(&netbk->wq);
 }
 
-static int max_required_rx_slots(struct xenvif *vif)
-{
-       int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
-
-       /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-       if (vif->can_sg || vif->gso || vif->gso_prefix)
-               max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
-       return max;
-}
-
 int xen_netbk_rx_ring_full(struct xenvif *vif)
 {
        RING_IDX peek   = vif->rx_req_cons_peek;
-       RING_IDX needed = max_required_rx_slots(vif);
+       RING_IDX needed = vif->packet_slots;
 
        return ((vif->rx.sring->req_prod - peek) < needed) ||
               ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
@@ -314,8 +303,7 @@ int xen_netbk_must_stop_queue(struct xenvif *vif)
        if (!xen_netbk_rx_ring_full(vif))
                return 0;
 
-       vif->rx.sring->req_event = vif->rx_req_cons_peek +
-               max_required_rx_slots(vif);
+       vif->rx.sring->req_event = vif->rx_req_cons_peek + vif->packet_slots;
        mb(); /* request notification /then/ check the queue */
 
        return xen_netbk_rx_ring_full(vif);
@@ -661,10 +649,6 @@ static void netbk_add_frag_responses(struct xenvif *vif, int status,
        }
 }
 
-struct skb_cb_overlay {
-       int meta_slots_used;
-};
-
 static void xen_netbk_rx_action(struct xen_netbk *netbk)
 {
        struct xenvif *vif = NULL, *tmp;
@@ -675,7 +659,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
        struct sk_buff *skb;
        LIST_HEAD(notify);
        int ret;
-       int nr_frags;
        int count;
        unsigned long offset;
        struct skb_cb_overlay *sco;
@@ -690,20 +673,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
        count = 0;
 
        while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+               unsigned int ring_slots_required;
                vif = netdev_priv(skb->dev);
-               nr_frags = skb_shinfo(skb)->nr_frags;
 
                sco = (struct skb_cb_overlay *)skb->cb;
-               sco->meta_slots_used = netbk_gop_skb(skb, &npo);
 
-               count += nr_frags + 1;
+               ring_slots_required = sco->slots_required;
 
-               __skb_queue_tail(&rxq, skb);
-
-               /* Filled the batch queue? */
-               /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-               if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+               if (count + ring_slots_required > XEN_NETIF_RX_RING_SIZE) {
+                       skb_queue_head(&netbk->rx_queue, skb);
                        break;
+               }
+
+               count += ring_slots_required;
+
+               sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+
+               __skb_queue_tail(&rxq, skb);
        }
 
        BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
@@ -1872,6 +1858,7 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
        BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
        vif->rx_req_cons_peek = 0;
+       vif->packet_slots = XEN_NETBK_MIN_PACKET_SLOT;
 
        return 0;
 
-- 
1.7.10.4

