
Re: [Xen-devel] kernel BUG at net/core/dev.c:1133!



On Sat, Jul 08, 2006 at 12:03:42PM +1000, herbert wrote:
> 
> I'll send a patch soon once I've tested it.

Here you go.  This should fix the problem.

[NET] linux: Import net-gso2.patch

Here is the original changelog:

   [NET] gso: Fix up GSO packets with broken checksums

   Certain subsystems in the stack (e.g., netfilter) can break the partial
   checksum on GSO packets.  Until they're fixed, this patch allows this to
   work by recomputing the partial checksums through the GSO mechanism.

   Once they've all been converted to update the partial checksum instead of
   clearing it, this workaround can be removed.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
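
For readers following along, here is a minimal userspace sketch (not
kernel code, and not part of the patch) of the pseudo-header seeding
that tcp_v4_gso_send_check() below performs.  The csum_partial() and
csum_fold() helpers are reimplemented here purely for illustration,
and the addresses and lengths are made-up examples:

/*
 * Illustration only: userspace reimplementation of the RFC 1071
 * ones'-complement arithmetic that the kernel's csum_partial() and
 * csum_fold() provide.  Field values below are arbitrary examples.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Accumulate 16-bit big-endian words into a 32-bit running sum. */
static uint32_t csum_partial(const void *buf, size_t len, uint32_t sum)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold the 32-bit sum to 16 bits and complement, like csum_fold(). */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* TCP pseudo-header: saddr, daddr, zero, protocol, TCP length. */
	struct {
		uint32_t saddr, daddr;
		uint8_t  zero, proto;
		uint16_t len;
	} __attribute__((packed)) ph = {
		.saddr = htonl(0xc0a80001),	/* 192.168.0.1 (example) */
		.daddr = htonl(0xc0a80002),	/* 192.168.0.2 (example) */
		.zero  = 0,
		.proto = 6,			/* IPPROTO_TCP */
		.len   = htons(20 + 1460),	/* TCP header + payload */
	};
	uint32_t sum = csum_partial(&ph, sizeof(ph), 0);

	/* ~csum_fold() mirrors the seed that tcp_v4_gso_send_check()
	 * stores in th->check before the skb reaches the GSO/TSO path. */
	printf("th->check seed: 0x%04x\n",
	       (unsigned)(uint16_t)~csum_fold(sum));
	return 0;
}

The value stored in th->check is the folded pseudo-header sum without
the final complement, so hardware TSO or the software GSO path only has
to add the TCP header and payload of each segment and complement the
result.  Callers that later modify the packet can adjust that partial
sum incrementally instead of clearing ip_summed, which is the long-term
fix the changelog refers to.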

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff -r 51252bc644da -r 521480565732 linux-2.6-xen-sparse/include/linux/skbuff.h
--- a/linux-2.6-xen-sparse/include/linux/skbuff.h       Fri Jul 07 23:38:57 2006 +1000
+++ b/linux-2.6-xen-sparse/include/linux/skbuff.h       Sat Jul 08 14:30:02 2006 +1000
@@ -1412,5 +1412,10 @@ static inline void nf_reset(struct sk_bu
 static inline void nf_reset(struct sk_buff *skb) {}
 #endif /* CONFIG_NETFILTER */
 
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_size;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
diff -r 51252bc644da -r 521480565732 linux-2.6-xen-sparse/net/core/dev.c
--- a/linux-2.6-xen-sparse/net/core/dev.c       Fri Jul 07 23:38:57 2006 +1000
+++ b/linux-2.6-xen-sparse/net/core/dev.c       Sat Jul 08 14:30:02 2006 +1000
@@ -1089,9 +1089,17 @@ int skb_checksum_help(struct sk_buff *sk
        unsigned int csum;
        int ret = 0, offset = skb->h.raw - skb->data;
 
-       if (inward) {
-               skb->ip_summed = CHECKSUM_NONE;
-               goto out;
+       if (inward)
+               goto out_set_summed;
+
+       if (unlikely(skb_shinfo(skb)->gso_size)) {
+               static int warned;
+
+               WARN_ON(!warned);
+               warned = 1;
+
+               /* Let GSO fix up the checksum. */
+               goto out_set_summed;
        }
 
        if (skb_cloned(skb)) {
@@ -1108,6 +1116,8 @@ int skb_checksum_help(struct sk_buff *sk
        BUG_ON(skb->csum + 2 > offset);
 
        *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
 out:   
        return ret;
@@ -1128,17 +1138,35 @@ struct sk_buff *skb_gso_segment(struct s
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        int type = skb->protocol;
+       int err;
 
        BUG_ON(skb_shinfo(skb)->frag_list);
-       BUG_ON(skb->ip_summed != CHECKSUM_HW);
 
        skb->mac.raw = skb->data;
        skb->mac_len = skb->nh.raw - skb->data;
        __skb_pull(skb, skb->mac_len);
 
+       if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+               static int warned;
+
+               WARN_ON(!warned);
+               warned = 1;
+
+               if (skb_header_cloned(skb) &&
+                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+                       return ERR_PTR(err);
+       }
+
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+                       if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+                               err = ptype->gso_send_check(skb);
+                               segs = ERR_PTR(err);
+                               if (err || skb_gso_ok(skb, features))
+                                       break;
+                               __skb_push(skb, skb->data - skb->nh.raw);
+                       }
                        segs = ptype->gso_segment(skb, features);
                        break;
                }
diff -r 51252bc644da -r 521480565732 patches/linux-2.6.16.13/net-gso2.patch
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/linux-2.6.16.13/net-gso2.patch    Sat Jul 08 14:30:02 2006 +1000
@@ -0,0 +1,473 @@
+diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
+index b5e39a1..29d9218 100644
+--- a/drivers/net/bnx2.c
++++ b/drivers/net/bnx2.c
+@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
+               skb = tx_buf->skb;
+ #ifdef BCM_TSO 
+               /* partial BD completions possible with TSO packets */
+-              if (skb_shinfo(skb)->gso_size) {
++              if (skb_is_gso(skb)) {
+                       u16 last_idx, last_ring_idx;
+ 
+                       last_idx = sw_cons +
+diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
+index 7b7d360..7d72e16 100644
+--- a/drivers/net/chelsio/sge.c
++++ b/drivers/net/chelsio/sge.c
+@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
+       struct cpl_tx_pkt *cpl;
+ 
+ #ifdef NETIF_F_TSO
+-      if (skb_shinfo(skb)->gso_size) {
++      if (skb_is_gso(skb)) {
+               int eth_type;
+               struct cpl_tx_pkt_lso *hdr;
+ 
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+index 681d284..96ddc24 100644
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2526,7 +2526,7 @@ #ifdef NETIF_F_TSO
+       uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+       int err;
+ 
+-      if (skb_shinfo(skb)->gso_size) {
++      if (skb_is_gso(skb)) {
+               if (skb_header_cloned(skb)) {
+                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+                       if (err)
+@@ -2651,7 +2651,7 @@ #ifdef NETIF_F_TSO
+                * tso gets written back prematurely before the data is fully
+                * DMAd to the controller */
+               if (!skb->data_len && tx_ring->last_tx_tso &&
+-                              !skb_shinfo(skb)->gso_size) {
++                  !skb_is_gso(skb)) {
+                       tx_ring->last_tx_tso = 0;
+                       size -= 4;
+               }
+@@ -2934,8 +2934,7 @@ #endif
+ 
+ #ifdef NETIF_F_TSO
+       /* Controller Erratum workaround */
+-      if (!skb->data_len && tx_ring->last_tx_tso &&
+-              !skb_shinfo(skb)->gso_size)
++      if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+               count++;
+ #endif
+ 
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index c35f16e..c6ca459 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -1105,7 +1105,7 @@ static int nv_start_xmit(struct sk_buff 
+       np->tx_skbuff[nr] = skb;
+ 
+ #ifdef NETIF_F_TSO
+-      if (skb_shinfo(skb)->gso_size)
++      if (skb_is_gso(skb))
+               tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+       else
+ #endif
+diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
+index bdab369..7d187d0 100644
+--- a/drivers/net/ixgb/ixgb_main.c
++++ b/drivers/net/ixgb/ixgb_main.c
+@@ -1163,7 +1163,7 @@ #ifdef NETIF_F_TSO
+       uint16_t ipcse, tucse, mss;
+       int err;
+ 
+-      if(likely(skb_shinfo(skb)->gso_size)) {
++      if (likely(skb_is_gso(skb))) {
+               if (skb_header_cloned(skb)) {
+                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+                       if (err)
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 9bcaa80..3843e0a 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -139,7 +139,7 @@ #ifndef LOOPBACK_MUST_CHECKSUM
+ #endif
+ 
+ #ifdef LOOPBACK_TSO
+-      if (skb_shinfo(skb)->gso_size) {
++      if (skb_is_gso(skb)) {
+               BUG_ON(skb->protocol != htons(ETH_P_IP));
+               BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
+ 
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 2a55eb3..aa06a82 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
+       count = sizeof(dma_addr_t) / sizeof(u32);
+       count += skb_shinfo(skb)->nr_frags * count;
+ 
+-      if (skb_shinfo(skb)->gso_size)
++      if (skb_is_gso(skb))
+               ++count;
+ 
+       if (skb->ip_summed == CHECKSUM_HW)
+diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
+index 30c48c9..3d62abc 100644
+--- a/drivers/net/typhoon.c
++++ b/drivers/net/typhoon.c
+@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st
+        * If problems develop with TSO, check this first.
+        */
+       numDesc = skb_shinfo(skb)->nr_frags + 1;
+-      if(skb_tso_size(skb))
++      if (skb_is_gso(skb))
+               numDesc++;
+ 
+       /* When checking for free space in the ring, we need to also
+@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st
+                               TYPHOON_TX_PF_VLAN_TAG_SHIFT);
+       }
+ 
+-      if(skb_tso_size(skb)) {
++      if (skb_is_gso(skb)) {
+               first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
+               first_txd->numDesc++;
+ 
+diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
+index d9cc997..a3ea8e0 100644
+--- a/drivers/s390/net/qeth_main.c
++++ b/drivers/s390/net/qeth_main.c
+@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
+       queue = card->qdio.out_qs
+               [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+ 
+-      if (skb_shinfo(skb)->gso_size)
++      if (skb_is_gso(skb))
+               large_send = card->options.large_send;
+ 
+       /*are we able to do TSO ? If so ,prepare and send it from here */
+@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
+               card->stats.tx_packets++;
+               card->stats.tx_bytes += skb->len;
+ #ifdef CONFIG_QETH_PERF_STATS
+-              if (skb_shinfo(skb)->gso_size &&
+-                 !(large_send == QETH_LARGE_SEND_NO)) {
++              if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) {
+                       card->perf_stats.large_send_bytes += skb->len;
+                       card->perf_stats.large_send_cnt++;
+               }
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 47b0965..9865736 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -541,6 +541,7 @@ struct packet_type {
+                                        struct net_device *);
+       struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
+                                               int features);
++      int                     (*gso_send_check)(struct sk_buff *skb);
+       void                    *af_packet_priv;
+       struct list_head        list;
+ };
+@@ -1001,14 +1002,15 @@ extern void linkwatch_run_queue(void);
+ 
+ static inline int skb_gso_ok(struct sk_buff *skb, int features)
+ {
+-      int feature = skb_shinfo(skb)->gso_size ?
+-                    skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
++      int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
+       return (features & feature) == feature;
+ }
+ 
+ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+ {
+-      return !skb_gso_ok(skb, dev->features);
++      return skb_is_gso(skb) &&
++             (!skb_gso_ok(skb, dev->features) ||
++              unlikely(skb->ip_summed != CHECKSUM_HW));
+ }
+ 
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index b19d45d..adfe3a8 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1403,5 +1403,10 @@ #else /* CONFIG_NETFILTER */
+ static inline void nf_reset(struct sk_buff *skb) {}
+ #endif /* CONFIG_NETFILTER */
+ 
++static inline int skb_is_gso(const struct sk_buff *skb)
++{
++      return skb_shinfo(skb)->gso_size;
++}
++
+ #endif        /* __KERNEL__ */
+ #endif        /* _LINUX_SKBUFF_H */
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 0d2dcdb..d516c58 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -37,6 +37,7 @@ #define MAX_INET_PROTOS      256             /* Must be 
+ struct net_protocol {
+       int                     (*handler)(struct sk_buff *skb);
+       void                    (*err_handler)(struct sk_buff *skb, u32 info);
++      int                     (*gso_send_check)(struct sk_buff *skb);
+       struct sk_buff         *(*gso_segment)(struct sk_buff *skb,
+                                              int features);
+       int                     no_policy;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 70e1d5f..22dbbac 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1063,6 +1063,7 @@ extern struct request_sock_ops tcp_reque
+ 
+ extern int tcp_v4_destroy_sock(struct sock *sk);
+ 
++extern int tcp_v4_gso_send_check(struct sk_buff *skb);
+ extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+ 
+ #ifdef CONFIG_PROC_FS
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 00b1128..b34e76f 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -32,7 +32,7 @@ static inline int should_deliver(const s
+ int br_dev_queue_push_xmit(struct sk_buff *skb)
+ {
+       /* drop mtu oversized packets except tso */
+-      if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
++      if (skb->len > skb->dev->mtu && !skb_is_gso(skb))
+               kfree_skb(skb);
+       else {
+ #ifdef CONFIG_BRIDGE_NETFILTER
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 588207f..b2dba74 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
+ {
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb->len > skb->dev->mtu &&
+-          !skb_shinfo(skb)->gso_size)
++          !skb_is_gso(skb))
+               return ip_fragment(skb, br_dev_queue_push_xmit);
+       else
+               return br_dev_queue_push_xmit(skb);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 32e1056..e814a89 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1083,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk
+       unsigned int csum;
+       int ret = 0, offset = skb->h.raw - skb->data;
+ 
+-      if (inward) {
+-              skb->ip_summed = CHECKSUM_NONE;
+-              goto out;
++      if (inward)
++              goto out_set_summed;
++
++      if (unlikely(skb_shinfo(skb)->gso_size)) {
++              static int warned;
++
++              WARN_ON(!warned);
++              warned = 1;
++
++              /* Let GSO fix up the checksum. */
++              goto out_set_summed;
+       }
+ 
+       if (skb_cloned(skb)) {
+@@ -1102,6 +1110,8 @@ int skb_checksum_help(struct sk_buff *sk
+       BUG_ON(skb->csum + 2 > offset);
+ 
+       *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
++
++out_set_summed:
+       skb->ip_summed = CHECKSUM_NONE;
+ out:  
+       return ret;
+@@ -1122,17 +1132,35 @@ struct sk_buff *skb_gso_segment(struct s
+       struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+       struct packet_type *ptype;
+       int type = skb->protocol;
++      int err;
+ 
+       BUG_ON(skb_shinfo(skb)->frag_list);
+-      BUG_ON(skb->ip_summed != CHECKSUM_HW);
+ 
+       skb->mac.raw = skb->data;
+       skb->mac_len = skb->nh.raw - skb->data;
+       __skb_pull(skb, skb->mac_len);
+ 
++      if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
++              static int warned;
++
++              WARN_ON(!warned);
++              warned = 1;
++
++              if (skb_header_cloned(skb) &&
++                  (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++                      return ERR_PTR(err);
++      }
++
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
+               if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
++                      if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
++                              err = ptype->gso_send_check(skb);
++                              segs = ERR_PTR(err);
++                              if (err || skb_gso_ok(skb, features))
++                                      break;
++                              __skb_push(skb, skb->data - skb->nh.raw);
++                      }
+                       segs = ptype->gso_segment(skb, features);
+                       break;
+               }
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 5ba719e..0a8c559 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1085,6 +1085,40 @@ int inet_sk_rebuild_header(struct sock *
+ 
+ EXPORT_SYMBOL(inet_sk_rebuild_header);
+ 
++static int inet_gso_send_check(struct sk_buff *skb)
++{
++      struct iphdr *iph;
++      struct net_protocol *ops;
++      int proto;
++      int ihl;
++      int err = -EINVAL;
++
++      if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
++              goto out;
++
++      iph = skb->nh.iph;
++      ihl = iph->ihl * 4;
++      if (ihl < sizeof(*iph))
++              goto out;
++
++      if (unlikely(!pskb_may_pull(skb, ihl)))
++              goto out;
++
++      skb->h.raw = __skb_pull(skb, ihl);
++      iph = skb->nh.iph;
++      proto = iph->protocol & (MAX_INET_PROTOS - 1);
++      err = -EPROTONOSUPPORT;
++
++      rcu_read_lock();
++      ops = rcu_dereference(inet_protos[proto]);
++      if (likely(ops && ops->gso_send_check))
++              err = ops->gso_send_check(skb);
++      rcu_read_unlock();
++
++out:
++      return err;
++}
++
+ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+ {
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+@@ -1142,6 +1176,7 @@ #endif
+ static struct net_protocol tcp_protocol = {
+       .handler =      tcp_v4_rcv,
+       .err_handler =  tcp_v4_err,
++      .gso_send_check = tcp_v4_gso_send_check,
+       .gso_segment =  tcp_tso_segment,
+       .no_policy =    1,
+ };
+@@ -1188,6 +1223,7 @@ static int ipv4_proc_init(void);
+ static struct packet_type ip_packet_type = {
+       .type = __constant_htons(ETH_P_IP),
+       .func = ip_rcv,
++      .gso_send_check = inet_gso_send_check,
+       .gso_segment = inet_gso_segment,
+ };
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 19c3c73..2de887c 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -210,7 +210,7 @@ #if defined(CONFIG_NETFILTER) && defined
+               return dst_output(skb);
+       }
+ #endif
+-      if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
++      if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
+               return ip_fragment(skb, ip_finish_output2);
+       else
+               return ip_finish_output2(skb);
+@@ -1095,7 +1095,7 @@ ssize_t  ip_append_page(struct sock *sk, 
+       while (size > 0) {
+               int i;
+ 
+-              if (skb_shinfo(skb)->gso_size)
++              if (skb_is_gso(skb))
+                       len = size;
+               else {
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 233bdf2..b4240b4 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 
+       }
+ }
+ 
++int tcp_v4_gso_send_check(struct sk_buff *skb)
++{
++      struct iphdr *iph;
++      struct tcphdr *th;
++
++      if (!pskb_may_pull(skb, sizeof(*th)))
++              return -EINVAL;
++
++      iph = skb->nh.iph;
++      th = skb->h.th;
++
++      th->check = 0;
++      th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
++      skb->csum = offsetof(struct tcphdr, check);
++      skb->ip_summed = CHECKSUM_HW;
++      return 0;
++}
++
+ /*
+  *    This routine will send an RST to the other tcp.
+  *
+diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
+index 737c1db..62ead52 100644
+--- a/net/ipv4/xfrm4_output.c
++++ b/net/ipv4/xfrm4_output.c
+@@ -189,7 +189,7 @@ #ifdef CONFIG_NETFILTER
+       }
+ #endif
+ 
+-      if (!skb_shinfo(skb)->gso_size)
++      if (!skb_is_gso(skb))
+               return xfrm4_output_finish2(skb);
+ 
+       skb->protocol = htons(ETH_P_IP);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index cf5d17e..33a5850 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
+ 
+ int ip6_output(struct sk_buff *skb)
+ {
+-      if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
++      if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
+                               dst_allfrag(skb->dst))
+               return ip6_fragment(skb, ip6_output2);
+       else
+diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
+index 39bdeec..e9ea338 100644
+--- a/net/ipv6/xfrm6_output.c
++++ b/net/ipv6/xfrm6_output.c
+@@ -179,7 +179,7 @@ static int xfrm6_output_finish(struct sk
+ {
+       struct sk_buff *segs;
+ 
+-      if (!skb_shinfo(skb)->gso_size)
++      if (!skb_is_gso(skb))
+               return xfrm6_output_finish2(skb);
+ 
+       skb->protocol = htons(ETH_P_IP);



 

