[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 4/4] xen/netback: fix grant copy across page boundary



From: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>

When KASAN (or SLUB_DEBUG) is turned on, there is a higher chance that
non-power-of-two allocations are not aligned to the next power of 2 of
the size. Therefore, handle grant copies that cross page boundaries.

Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Acked-by: Paul Durrant <paul@xxxxxxx>
---
v2 --> v3:
- Added Acked-by: Paul Durrant <paul@xxxxxxx>
CC: "David S. Miller" <davem@xxxxxxxxxxxxx>
CC: netdev@xxxxxxxxxxxxxxx

v1 --> v2:
- Use sizeof_field(struct sk_buff, cb) instead of magic number 48
- Slightly update commit message

RFC --> v1:
- Added BUILD_BUG_ON to the netback patch
- xenvif_idx_release() now located outside the loop

CC: Wei Liu <wei.liu@xxxxxxxxxx>
CC: Paul Durrant <paul@xxxxxxx>
---
 drivers/net/xen-netback/common.h  |  2 +-
 drivers/net/xen-netback/netback.c | 60 +++++++++++++++++++++++++------
 2 files changed, 50 insertions(+), 12 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 05847eb91a1b..e57684415edd 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -155,7 +155,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-       struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+       struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS * 2];
        struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
        struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
        /* passed to gnttab_[un]map_refs with pages under (un)mapping */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 315dfc6ea297..41054de38a62 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -320,6 +320,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 struct xenvif_tx_cb {
        u16 pending_idx;
+       u8 copies;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -439,6 +440,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       u8 copies = XENVIF_TX_CB(skb)->copies;
        /* This always points to the shinfo of the skb being checked, which
         * could be either the first or the one on the frag_list
         */
@@ -450,23 +452,26 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
        int nr_frags = shinfo->nr_frags;
        const bool sharedslot = nr_frags &&
                                frag_get_pending_idx(&shinfo->frags[0]) ==
                                pending_idx;
-       int i, err;
+       int i, err = 0;
 
-       /* Check status of header. */
-       err = (*gopp_copy)->status;
-       if (unlikely(err)) {
-               if (net_ratelimit())
-                       netdev_dbg(queue->vif->dev,
+       while (copies) {
+               /* Check status of header. */
+               int newerr = (*gopp_copy)->status;
+               if (unlikely(newerr)) {
+                       if (net_ratelimit())
+                               netdev_dbg(queue->vif->dev,
                                   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
-               /* The first frag might still have this slot mapped */
-               if (!sharedslot)
-                       xenvif_idx_release(queue, pending_idx,
-                                          XEN_NETIF_RSP_ERROR);
+                       err = newerr;
+               }
+               (*gopp_copy)++;
+               copies--;
        }
-       (*gopp_copy)++;
+       /* The first frag might still have this slot mapped */
+       if (unlikely(err) && !sharedslot)
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -910,6 +915,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                        break;
                }
+               XENVIF_TX_CB(skb)->copies = 0;
 
                skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size)
@@ -933,6 +939,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                                   "Can't allocate the frag_list skb.\n");
                                break;
                        }
+                       XENVIF_TX_CB(nskb)->copies = 0;
                }
 
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
@@ -990,6 +997,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                queue->tx_copy_ops[*copy_ops].len = data_len;
                queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+               XENVIF_TX_CB(skb)->copies++;
+
+               if (offset_in_page(skb->data) + data_len > XEN_PAGE_SIZE) {
+                       unsigned int extra_len = offset_in_page(skb->data) +
+                                            data_len - XEN_PAGE_SIZE;
+
+                       queue->tx_copy_ops[*copy_ops].len -= extra_len;
+                       (*copy_ops)++;
+
+                       queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+                       queue->tx_copy_ops[*copy_ops].source.domid =
+                               queue->vif->domid;
+                       queue->tx_copy_ops[*copy_ops].source.offset =
+                               txreq.offset + data_len - extra_len;
+
+                       queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
+                               virt_to_gfn(skb->data + data_len - extra_len);
+                       queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+                       queue->tx_copy_ops[*copy_ops].dest.offset = 0;
+
+                       queue->tx_copy_ops[*copy_ops].len = extra_len;
+                       queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+
+                       XENVIF_TX_CB(skb)->copies++;
+               }
 
                (*copy_ops)++;
 
@@ -1688,5 +1720,11 @@ static void __exit netback_fini(void)
 }
 module_exit(netback_fini);
 
+static void __init __maybe_unused build_assertions(void)
+{
+       BUILD_BUG_ON(sizeof(struct xenvif_tx_cb) >
+                    sizeof_field(struct sk_buff, cb));
+}
+
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("xen-backend:vif");
-- 
2.17.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.