@@ -363,15 +363,17 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
  */
 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 {
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum = 0;
 
 	TCP_SKB_CB(skb)->tcp_flags = flags;
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	skb_shinfo(skb)->gso_segs = 1;
-	skb_shinfo(skb)->gso_size = 0;
-	skb_shinfo(skb)->gso_type = 0;
+	shinfo->gso_segs = 1;
+	shinfo->gso_size = 0;
+	shinfo->gso_type = 0;
 
 	TCP_SKB_CB(skb)->seq = seq;
 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -986,6 +988,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
 {
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));
 
@@ -993,13 +997,13 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
-		skb_shinfo(skb)->gso_segs = 1;
-		skb_shinfo(skb)->gso_size = 0;
-		skb_shinfo(skb)->gso_type = 0;
+		shinfo->gso_segs = 1;
+		shinfo->gso_size = 0;
+		shinfo->gso_type = 0;
	} else {
-		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
-		skb_shinfo(skb)->gso_size = mss_now;
-		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
+		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+		shinfo->gso_size = mss_now;
+		shinfo->gso_type = sk->sk_gso_type;
	}
 }
 
@@ -1146,6 +1150,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
  */
 static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
+	struct skb_shared_info *shinfo;
	int i, k, eat;
 
	eat = min_t(int, len, skb_headlen(skb));
@@ -1157,23 +1162,24 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
	}
	eat = len;
	k = 0;
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+	shinfo = skb_shinfo(skb);
+	for (i = 0; i < shinfo->nr_frags; i++) {
+		int size = skb_frag_size(&shinfo->frags[i]);
 
		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
-			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+			shinfo->frags[k] = shinfo->frags[i];
			if (eat) {
-				skb_shinfo(skb)->frags[k].page_offset += eat;
-				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
+				shinfo->frags[k].page_offset += eat;
+				skb_frag_size_sub(&shinfo->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
-	skb_shinfo(skb)->nr_frags = k;
+	shinfo->nr_frags = k;
 
	skb_reset_tail_pointer(skb);
	skb->data_len -= len;