@@ -2776,7 +2776,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
 	max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
 	tcp_for_write_queue_from(skb, sk) {
-		__u8 sacked = TCP_SKB_CB(skb)->sacked;
+		__u8 sacked;
 		int segs;
 
 		if (skb == tcp_send_head(sk))
@@ -2788,6 +2788,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
 		if (segs <= 0)
 			return;
+		sacked = TCP_SKB_CB(skb)->sacked;
 		/* In case tcp_shift_skb_data() have aggregated large skbs,
 		 * we need to make sure not sending too bigs TSO packets
 		 */
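The only functional change in these hunks is that the read of TCP_SKB_CB(skb)->sacked is deferred until after the congestion-window check, so the SACK flags are fetched only for an skb the loop will actually go on to consider. As a rough standalone sketch of that reordering (not kernel code: the struct pkt type and the cwnd_space() and retransmit_one() helpers are hypothetical, invented purely to illustrate the before/after shape of the loop body):

#include <stdio.h>

/* Hypothetical stand-in for the skb control-block state read in the hunk. */
struct pkt {
	unsigned char sacked;	/* plays the role of TCP_SKB_CB(skb)->sacked */
};

/* Mirrors the shape of tp->snd_cwnd - tcp_packets_in_flight(tp). */
static int cwnd_space(int cwnd, int in_flight)
{
	return cwnd - in_flight;
}

/* After the patch: bail out when there is no window space before
 * touching pkt->sacked, exactly where the added "+" line sits. */
static void retransmit_one(struct pkt *pkt, int cwnd, int in_flight)
{
	unsigned char sacked;
	int segs;

	segs = cwnd_space(cwnd, in_flight);
	if (segs <= 0)
		return;			/* early return: sacked is never read */

	sacked = pkt->sacked;		/* deferred read, as in the patch */
	printf("would retransmit, sacked=0x%02x, segs=%d\n", sacked, segs);
}

int main(void)
{
	struct pkt p = { .sacked = 0x02 };

	retransmit_one(&p, 10, 12);	/* no window space: returns early */
	retransmit_one(&p, 10, 4);	/* space available: reads sacked */
	return 0;
}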