@@ -2816,8 +2816,10 @@ begin_fwd:
  * connection tear down and (memory) recovery.
  * Otherwise tcp_send_fin() could be tempted to either delay FIN
  * or even be forced to close flow without any FIN.
+ * In general, we want to allow one skb per socket to avoid hangs
+ * with edge trigger epoll()
  */
-static void sk_forced_wmem_schedule(struct sock *sk, int size)
+void sk_forced_mem_schedule(struct sock *sk, int size)
 {
 	int amt, status;
 
@@ -2864,7 +2866,7 @@ coalesce:
 		return;
 	}
 	skb_reserve(skb, MAX_TCP_HEADER);
-	sk_forced_wmem_schedule(sk, skb->truesize);
+	sk_forced_mem_schedule(sk, skb->truesize);
 	/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 	tcp_init_nondata_skb(skb, tp->write_seq,
 			     TCPHDR_ACK | TCPHDR_FIN);
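
For context, the hunks above only show the rename and the call site; the body of the helper is not part of this excerpt. The idea is that the FIN skb's memory is charged to the socket unconditionally, bypassing the usual memory-pressure checks, so tcp_send_fin() never has to delay or drop the FIN and an edge-triggered epoll() waiter is not left hanging. A minimal sketch of what such a body looks like, assuming the sk_mem_pages()/SK_MEM_QUANTUM/sk_memory_allocated_add() helpers of this kernel generation; it is illustrative only, not a verbatim copy of the committed function:

void sk_forced_mem_schedule(struct sock *sk, int size)
{
	int amt, status;

	/* Nothing to do if the socket already has enough forward allocation. */
	if (size <= sk->sk_forward_alloc)
		return;

	/* Charge the pages unconditionally, ignoring memory-pressure limits,
	 * so the FIN skb can always be accounted for.
	 */
	amt = sk_mem_pages(size);
	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	sk_memory_allocated_add(sk, amt, &status);
}

As the second hunk shows, tcp_send_fin() passes skb->truesize, so at most one forced skb per socket is charged this way.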