@@ -2812,6 +2812,21 @@ begin_fwd:
 	}
 }
 
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could loop forever.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+	int amt, status;
+
+	if (size <= sk->sk_forward_alloc)
+		return;
+	amt = sk_mem_pages(size);
+	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+	sk_memory_allocated_add(sk, amt, &status);
+}
+
 /* Send a fin. The caller locks the socket for us. This cannot be
  * allowed to fail queueing a FIN frame under any circumstances.
  */
@@ -2834,11 +2849,14 @@ void tcp_send_fin(struct sock *sk)
 	} else {
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+			skb = alloc_skb_fclone(MAX_TCP_HEADER,
+					       sk->sk_allocation);
 			if (skb)
 				break;
 			yield();
 		}
+		skb_reserve(skb, MAX_TCP_HEADER);
+		sk_forced_wmem_schedule(sk, skb->truesize);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
 				     TCPHDR_ACK | TCPHDR_FIN);