@@ -2091,6 +2091,15 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit <<= factor;
 
 	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+		/* Always send the 1st or 2nd skb in write queue.
+		 * No need to wait for TX completion to call us back,
+		 * after softirq/tasklet schedule.
+		 * This helps when TX completions are delayed too much.
+		 */
+		if (skb == sk->sk_write_queue.next ||
+		    skb->prev == sk->sk_write_queue.next)
+			return false;
+
 		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must