|
@@ -3097,11 +3097,31 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
|
|
|
sg = !!(features & NETIF_F_SG);
|
|
|
csum = !!can_checksum_protocol(features, proto);
|
|
|
|
|
|
- /* GSO partial only requires that we trim off any excess that
|
|
|
- * doesn't fit into an MSS sized block, so take care of that
|
|
|
- * now.
|
|
|
- */
|
|
|
- if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
|
|
|
+ if (sg && csum && (mss != GSO_BY_FRAGS)) {
|
|
|
+ if (!(features & NETIF_F_GSO_PARTIAL)) {
|
|
|
+ struct sk_buff *iter;
|
|
|
+
|
|
|
+ if (!list_skb ||
|
|
|
+ !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
|
|
|
+ goto normal;
|
|
|
+
|
|
|
+ /* Split the buffer at the frag_list pointer.
|
|
|
+ * This is based on the assumption that all
|
|
|
+ * buffers in the chain excluding the last
|
|
|
+					 * contain the same amount of data.
|
|
|
+ */
|
|
|
+ skb_walk_frags(head_skb, iter) {
|
|
|
+ if (skb_headlen(iter))
|
|
|
+ goto normal;
|
|
|
+
|
|
|
+ len -= iter->len;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* GSO partial only requires that we trim off any excess that
|
|
|
+ * doesn't fit into an MSS sized block, so take care of that
|
|
|
+ * now.
|
|
|
+ */
|
|
|
partial_segs = len / mss;
|
|
|
if (partial_segs > 1)
|
|
|
mss *= partial_segs;
|
|
@@ -3109,6 +3129,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
|
|
|
partial_segs = 0;
|
|
|
}
|
|
|
|
|
|
+normal:
|
|
|
headroom = skb_headroom(head_skb);
|
|
|
pos = skb_headlen(head_skb);
|
|
|
|
|
@@ -3300,21 +3321,29 @@ perform_csum_check:
|
|
|
*/
|
|
|
segs->prev = tail;
|
|
|
|
|
|
- /* Update GSO info on first skb in partial sequence. */
|
|
|
if (partial_segs) {
|
|
|
+ struct sk_buff *iter;
|
|
|
int type = skb_shinfo(head_skb)->gso_type;
|
|
|
+ unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
|
|
|
|
|
|
/* Update type to add partial and then remove dodgy if set */
|
|
|
- type |= SKB_GSO_PARTIAL;
|
|
|
+ type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
|
|
|
type &= ~SKB_GSO_DODGY;
|
|
|
|
|
|
/* Update GSO info and prepare to start updating headers on
|
|
|
* our way back down the stack of protocols.
|
|
|
*/
|
|
|
- skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
|
|
|
- skb_shinfo(segs)->gso_segs = partial_segs;
|
|
|
- skb_shinfo(segs)->gso_type = type;
|
|
|
- SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
|
|
|
+ for (iter = segs; iter; iter = iter->next) {
|
|
|
+ skb_shinfo(iter)->gso_size = gso_size;
|
|
|
+ skb_shinfo(iter)->gso_segs = partial_segs;
|
|
|
+ skb_shinfo(iter)->gso_type = type;
|
|
|
+ SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (tail->len - doffset <= gso_size)
|
|
|
+ skb_shinfo(tail)->gso_size = 0;
|
|
|
+ else if (tail != segs)
|
|
|
+ skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
|
|
|
}
|
|
|
|
|
|
/* Following permits correct backpressure, for protocols
|