@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
 	if ((1 << sk->sk_state) &
 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
 	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+			       0, GFP_ATOMIC);
 }
 /*
  * One tasklet per cpu tries to send more skbs.
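The first hunk fixes nonagle handling: the TSQ tasklet used to call tcp_write_xmit() with nonagle == 0, so a transmit deferred by TCP Small Queues could ignore the socket's TCP_NODELAY/TCP_CORK state. Passing tcp_sk(sk)->nonagle makes the deferred path behave like the regular transmit path. For context, here is a minimal userspace sketch (standard POSIX sockets; the fd parameter and helper name are ours, not from the patch) of how an application sets the state that ends up in tp->nonagle:

/* Minimal sketch: how TCP_NODELAY reaches tp->nonagle.
 * Standard POSIX sockets API; error handling trimmed.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int disable_nagle(int fd)
{
	int one = 1;

	/* In the kernel this sets TCP_NAGLE_OFF in tp->nonagle;
	 * with the hunk above, the TSQ tasklet now honors it too.
	 */
	return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}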
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,

 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
-			break;
+			/* It is possible TX completion already happened
+			 * before we set TSQ_THROTTLED, so we must
+			 * test again the condition.
+			 * We abuse smp_mb__after_clear_bit() because
+			 * there is no smp_mb__after_set_bit() yet
+			 */
+			smp_mb__after_clear_bit();
+			if (atomic_read(&sk->sk_wmem_alloc) > limit)
+				break;
 		}

 		limit = mss_now;
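The second hunk closes a race window: a TX completion can free wmem after the first atomic_read() but before the TSQ_THROTTLED store becomes visible, in which case the completion path sees no throttle bit and never reschedules the tasklet, while this path has already decided to break. Setting the bit, forcing a full barrier, then re-testing the condition guarantees that at least one side makes progress. Below is a standalone sketch of the same set-flag/barrier/recheck pattern in C11 atomics; all names (struct queue, inflight, throttled, completion) are hypothetical, not the kernel's API:

/* Standalone sketch of the set-flag / full-barrier / re-check
 * pattern used in the patch, written with C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct queue {
	atomic_int  inflight;   /* bytes queued to the device */
	atomic_bool throttled;  /* producer parked; consumer must wake it */
};

/* Producer: decide whether to stop because too much is in flight. */
static bool producer_should_stop(struct queue *q, int limit)
{
	if (atomic_load_explicit(&q->inflight, memory_order_relaxed) <= limit)
		return false;

	atomic_store_explicit(&q->throttled, true, memory_order_relaxed);
	/* Full barrier: make the throttled store visible before we
	 * re-read inflight, mirroring smp_mb__after_clear_bit() above
	 * (abused in the patch as a barrier after set_bit()).
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* A completion may have drained the queue while we set the
	 * flag; re-test so we do not park with room available.
	 */
	return atomic_load_explicit(&q->inflight, memory_order_relaxed) > limit;
}

/* Consumer (completion path): drop inflight, wake a parked producer. */
static void completion(struct queue *q, int done)
{
	atomic_fetch_sub_explicit(&q->inflight, done, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_exchange_explicit(&q->throttled, false,
				     memory_order_relaxed)) {
		/* reschedule the producer here, e.g. tasklet_schedule() */
	}
}

Relaxed accesses plus seq_cst fences mirror the kernel pattern, where set_bit() itself is unordered and smp_mb__after_clear_bit() supplies the full barrier; the paired fences ensure either the producer sees the drained queue or the consumer sees the throttle flag.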