Merge branch 'gso_encap_fixes'

Florian Westphal says:

====================
net: minor gso encapsulation fixes

The following series fixes a minor bug in the gso segmentation handlers
when encapsulation offload is used.

Theoretically this could cause a kernel panic when the stack tries
to software-segment such a GRE offload packet, but it looks like there
is only one affected call site (the tbf scheduler), and it handles a NULL
return value.

I've included a followup patch to add IS_ERR_OR_NULL checks where needed.

While looking into this, I also found that the size computation of the
individual segments is incorrect if skb->encapsulation is set.

Please see individual patches for delta vs. v1.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Merge commit: d10845fc85

+ 10 - 3
net/core/skbuff.c

@@ -4070,15 +4070,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int thlen = 0;

-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		return tcp_hdrlen(skb) + shinfo->gso_size;
+	if (skb->encapsulation) {
+		thlen = skb_inner_transport_header(skb) -
+			skb_transport_header(skb);

+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+			thlen += inner_tcp_hdrlen(skb);
+	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		thlen = tcp_hdrlen(skb);
+	}
 	/* UFO sets gso_size to the size of the fragmentation
 	 * payload, i.e. the size of the L4 (UDP) header is already
 	 * accounted for.
 	 */
-	return shinfo->gso_size;
+	return thlen + shinfo->gso_size;
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
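To make the size fix above concrete, here is a hedged, userspace-style worked example of the new arithmetic. All header lengths are assumed illustrative values for TCP over GRE/IPv4 and are not taken from the patch.

/* Standalone illustration (plain C, not kernel code) of what
 * skb_gso_transport_seglen() now accounts for on an encapsulated skb. */
#include <stdio.h>

int main(void)
{
	unsigned int outer_gre_hdr  = 4;    /* outer transport (GRE) header */
	unsigned int inner_ipv4_hdr = 20;   /* inner network header         */
	unsigned int inner_tcp_hdr  = 20;   /* inner transport header       */
	unsigned int gso_size       = 1400; /* payload per segment          */

	/* skb_inner_transport_header() - skb_transport_header() spans the
	 * tunnel header plus the inner network header ... */
	unsigned int thlen = outer_gre_hdr + inner_ipv4_hdr;

	/* ... and inner_tcp_hdrlen() adds the inner TCP header. */
	thlen += inner_tcp_hdr;

	/* The old code applied tcp_hdrlen() to the outer transport header,
	 * which is not the inner TCP header for tunneled skbs, so the
	 * per-segment length came out wrong. */
	printf("per-segment L4 length: %u\n", thlen + gso_size); /* 1444 */

	return 0;
}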
 
 

+ 1 - 1
net/ipv4/af_inet.c

@@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,

 	encap = SKB_GSO_CB(skb)->encap_level > 0;
 	if (encap)
-		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+		features &= skb->dev->hw_enc_features;
 	SKB_GSO_CB(skb)->encap_level += ihl;

 	skb_reset_transport_header(skb);
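The same one-line change repeats in the gre, udp, ipv6 and mpls handlers below: each handler now narrows the feature set it was handed instead of recomputing it with netif_skb_features() on an skb whose headers have already been adjusted for inner segmentation. A hedged sketch of the resulting pattern follows; example_tunnel_gso() is a placeholder name, not a kernel function.

/*
 * Hedged sketch of the feature handling the series converges on: the
 * caller derives 'features' once for the original skb, and the tunnel
 * GSO handler only masks it further with the device's hw_enc_features.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_tunnel_gso(struct sk_buff *skb,
					  netdev_features_t features)
{
	netdev_features_t enc_features;

	/* Narrow, do not recompute: re-running netif_skb_features(skb)
	 * here would look at headers the handler has already pulled or
	 * rewritten for the inner packet. */
	enc_features = skb->dev->hw_enc_features & features;

	return skb_mac_gso_segment(skb, enc_features);
}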

+ 1 - 1
net/ipv4/gre_offload.c

@@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	skb->mac_len = skb_inner_network_offset(skb);

 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	enc_features = skb->dev->hw_enc_features & features;
 	segs = skb_mac_gso_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);

+ 1 - 1
net/ipv4/ip_output.c

@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
 	 */
 	features = netif_skb_features(skb);
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-	if (IS_ERR(segs)) {
+	if (IS_ERR_OR_NULL(segs)) {
 		kfree_skb(skb);
 		return -ENOMEM;
 	}

+ 1 - 1
net/ipv4/udp_offload.c

@@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 		skb->encap_hdr_csum = 1;

 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	enc_features = skb->dev->hw_enc_features & features;
 	segs = gso_inner_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,

+ 1 - 1
net/ipv6/ip6_offload.c

@@ -90,7 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,

 	encap = SKB_GSO_CB(skb)->encap_level > 0;
 	if (encap)
-		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+		features &= skb->dev->hw_enc_features;
 	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

 	ipv6h = ipv6_hdr(skb);

+ 1 - 1
net/mpls/mpls_gso.c

@@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
 	__skb_push(skb, skb->mac_len);

 	/* Segment inner packet. */
-	mpls_features = skb->dev->mpls_features & netif_skb_features(skb);
+	mpls_features = skb->dev->mpls_features & features;
 	segs = skb_mac_gso_segment(skb, mpls_features);
 
 

+ 1 - 1
net/netfilter/nfnetlink_queue_core.c

@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
 	 * mean 'ignore this hook'.
 	 */
-	if (IS_ERR(segs))
+	if (IS_ERR_OR_NULL(segs))
 		goto out_err;
 	queued = 0;
 	err = 0;

+ 2 - 0
net/openvswitch/datapath.c

@@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
 	if (IS_ERR(segs))
 		return PTR_ERR(segs);
+	if (segs == NULL)
+		return -EINVAL;

 	/* Queue all of the segments. */
 	skb = segs;

+ 2 - 0
net/xfrm/xfrm_output.c

@@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb)
 	kfree_skb(skb);
 	if (IS_ERR(segs))
 		return PTR_ERR(segs);
+	if (segs == NULL)
+		return -EINVAL;

 	do {
 		struct sk_buff *nskb = segs->next;
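In the openvswitch and xfrm hunks above the two failure cases stay separate because the ERR_PTR errno has to be propagated; only the NULL case gets a fixed -EINVAL. A minimal sketch of that variant follows; example_check_segs() is a placeholder name.

/*
 * Hedged sketch, not from the series: keep IS_ERR() and the NULL check
 * separate when the caller must propagate the original error code.
 * -EINVAL mirrors the value used in the hunks above.
 */
#include <linux/err.h>
#include <linux/skbuff.h>

static int example_check_segs(struct sk_buff *segs)
{
	if (IS_ERR(segs))
		return PTR_ERR(segs);	/* propagate the real errno */
	if (segs == NULL)
		return -EINVAL;		/* nothing to walk; fail explicitly */
	return 0;			/* safe for the caller to iterate */
}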