
[Xen-devel] [PATCH net-next v4 3/4] xen-netback: pass hash value to the frontend



My recent patch to include/xen/interface/io/netif.h defines a new extra
info type that can be used to pass hash values between the backend and
the guest frontend.

This patch adds code to xen-netback to pass hash values calculated for
guest receive-side packets (i.e. the netback transmit side) to the
frontend.
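
For illustration, here is roughly how a frontend could apply the hash
carried in the new extra segment to its own skb. This is a minimal
sketch, not part of this series: set_skb_hash_from_extra() is a
hypothetical helper, but the constants and fields are those defined in
netif.h and skb_set_hash() is the existing kernel primitive.

  /* Sketch: map a hash extra segment onto the skb hash fields.
   * Assumes 'extra' has already been checked to be of type
   * XEN_NETIF_EXTRA_TYPE_HASH with algorithm
   * XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ.
   */
  static void set_skb_hash_from_extra(struct sk_buff *skb,
                                      struct xen_netif_extra_info *extra)
  {
          /* The backend writes the 32-bit hash into the value[]
           * byte array; see the xenvif_rx_action() changes below.
           */
          u32 hash = *(u32 *)extra->u.hash.value;

          switch (extra->u.hash.type) {
          case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
          case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
                  /* Ports were included in the hash input. */
                  skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
                  break;
          case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
          case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
                  /* Hash over addresses only. */
                  skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
                  break;
          }
  }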

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/net/xen-netback/interface.c | 13 ++++++-
 drivers/net/xen-netback/netback.c   | 78 +++++++++++++++++++++++++++++++------
 2 files changed, 77 insertions(+), 14 deletions(-)
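
Note that with this change a single rx response can carry two chained
extra info segments (GSO followed by hash), linked by
XEN_NETIF_EXTRA_FLAG_MORE. Roughly, a frontend consumes them like this
(a sketch modelled on the existing extras loop in xen-netfront; queue,
cons and skb are assumed to be in scope, and set_skb_hash_from_extra()
is the hypothetical helper above):

  /* Sketch: consume the chain of extra info segments following an
   * rx response that has XEN_NETRXF_extra_info set. Each segment
   * occupies one ring slot; XEN_NETIF_EXTRA_FLAG_MORE indicates
   * that another segment follows.
   */
  struct xen_netif_extra_info *extra;

  do {
          extra = (struct xen_netif_extra_info *)
                  RING_GET_RESPONSE(&queue->rx, ++cons);

          if (extra->type == XEN_NETIF_EXTRA_TYPE_HASH)
                  set_skb_hash_from_extra(skb, extra);
          /* ... other types (e.g. XEN_NETIF_EXTRA_TYPE_GSO) are
           * handled as before ...
           */
  } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);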

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5a39cdb..1c7f49b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -158,8 +158,17 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
 
-       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-               return fallback(dev, skb) % dev->real_num_tx_queues;
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+               u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+               /* Make sure there is no hash information in the socket
+                * buffer, otherwise it would be incorrectly forwarded
+                * to the frontend.
+                */
+               skb_clear_hash(skb);
+
+               return index;
+       }
 
        xenvif_set_skb_hash(vif, skb);
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6509d11..7c72510 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -168,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
                needed++;
+       if (skb->sw_hash)
+               needed++;
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -285,6 +287,8 @@ struct gop_frag_copy {
        struct xenvif_rx_meta *meta;
        int head;
        int gso_type;
+       int protocol;
+       int hash_present;
 
        struct page *page;
 };
@@ -331,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
        npo->copy_off += *len;
        info->meta->size += *len;
 
+       if (!info->head)
+               return;
+
        /* Leave a gap for the GSO descriptor. */
-       if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+       if ((1 << info->gso_type) & queue->vif->gso_mask)
+               queue->rx.req_cons++;
+
+       /* Leave a gap for the hash extra segment. */
+       if (info->hash_present)
                queue->rx.req_cons++;
 
        info->head = 0; /* There must be something in this buffer now */
@@ -367,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                .npo = npo,
                .head = *head,
                .gso_type = XEN_NETIF_GSO_TYPE_NONE,
+               /* xenvif_set_skb_hash() will have either set a s/w
+                * hash or cleared the hash depending on
+                * whether the frontend wants a hash for this skb.
+                */
+               .hash_present = skb->sw_hash,
        };
        unsigned long bytes;
 
@@ -555,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+       struct xenvif *vif = queue->vif;
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
@@ -590,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               struct xen_netif_extra_info *extra = NULL;
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_prefix_mask) {
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&queue->rx,
                                                 queue->rx.rsp_prod_pvt++);
 
@@ -610,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;
 
-               status = xenvif_check_gop(queue->vif,
+               status = xenvif_check_gop(vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);
 
@@ -632,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                                        flags);
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_mask) {
-                       struct xen_netif_extra_info *gso =
-                               (struct xen_netif_extra_info *)
+                   vif->gso_mask) {
+                       extra = (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
                                                  queue->rx.rsp_prod_pvt++);
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
-                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.pad = 0;
-                       gso->u.gso.features = 0;
+                       extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+                       extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+                       extra->u.gso.pad = 0;
+                       extra->u.gso.features = 0;
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+                       extra->flags = 0;
+               }
+
+               if (skb->sw_hash) {
+                       /* Since the skb got here via xenvif_select_queue(),
+                        * we know that the hash has been re-calculated
+                        * according to a configuration set by the frontend
+                        * and therefore it is legitimate to pass it on
+                        * to the frontend.
+                        */
+                       if (resp->flags & XEN_NETRXF_extra_info)
+                               extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+                       else
+                               resp->flags |= XEN_NETRXF_extra_info;
+
+                       extra = (struct xen_netif_extra_info *)
+                               RING_GET_RESPONSE(&queue->rx,
+                                                 queue->rx.rsp_prod_pvt++);
 
-                       gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-                       gso->flags = 0;
+                       extra->u.hash.algorithm =
+                               XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+                       if (skb->l4_hash)
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+                       else
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+                       *(uint32_t *)extra->u.hash.value =
+                               skb_get_hash_raw(skb);
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+                       extra->flags = 0;
                }
 
                xenvif_add_frag_responses(queue, status,
-- 
2.1.4

