[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 07/10] net: only allow paged fragments with the same destructor to be coalesced.



Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Alexey Kuznetsov <kuznet@xxxxxxxxxxxxx>
Cc: "Pekka Savola (ipv6)" <pekkas@xxxxxxxxxx>
Cc: James Morris <jmorris@xxxxxxxxx>
Cc: Hideaki YOSHIFUJI <yoshfuji@xxxxxxxxxxxxxx>
Cc: Patrick McHardy <kaber@xxxxxxxxx>
Cc: Eric Dumazet <eric.dumazet@xxxxxxxxx>
Cc: "Michał Mirosław" <mirq-linux@xxxxxxxxxxxx>
Cc: netdev@xxxxxxxxxxxxxxx
---
 include/linux/skbuff.h |    7 +++++--
 net/core/skbuff.c      |    1 +
 net/ipv4/ip_output.c   |    2 +-
 net/ipv4/tcp.c         |    4 ++--
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6ac283e..8593ac2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2014,13 +2014,16 @@ static inline int skb_add_data(struct sk_buff *skb,
 }
 
 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-                                  const struct page *page, int off)
+                                  const struct page *page,
+                                  const struct skb_frag_destructor *destroy,
+                                  int off)
 {
        if (i) {
        const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
 
                return page == skb_frag_page(frag) &&
-                      off == frag->page_offset + skb_frag_size(frag);
+                      off == frag->page_offset + skb_frag_size(frag) &&
+                      frag->page.destructor == destroy;
        }
        return 0;
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9ec88ce..e63a4a6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2323,6 +2323,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
         */
        if (!to ||
            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+                             fragfrom->page.destructor,
                              fragfrom->page_offset)) {
                merge = -1;
        } else {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ff302bd..9e4eca6 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1243,7 +1243,7 @@ ssize_t  ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
-               if (skb_can_coalesce(skb, i, page, offset)) {
+               if (skb_can_coalesce(skb, i, page, NULL, offset)) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cfd7edd..b1612e9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -804,7 +804,7 @@ new_segment:
                        copy = size;
 
                i = skb_shinfo(skb)->nr_frags;
-               can_coalesce = skb_can_coalesce(skb, i, page, offset);
+               can_coalesce = skb_can_coalesce(skb, i, page, NULL, offset);
                if (!can_coalesce && i >= MAX_SKB_FRAGS) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
@@ -1013,7 +1013,7 @@ new_segment:
 
                                off = sk->sk_sndmsg_off;
 
-                               if (skb_can_coalesce(skb, i, page, off) &&
+                               if (skb_can_coalesce(skb, i, page, NULL, off) &&
                                    off != PAGE_SIZE) {
                                        /* We can extend the last page
                                         * fragment. */
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.