@@ -1289,6 +1289,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
 	int nsize, old_factor;
+	long limit;
 	int nlen;
 	u8 flags;
 
@@ -1299,8 +1300,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	if (nsize < 0)
 		nsize = 0;
 
-	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
-		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
+	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+	 * We need some allowance to not penalize applications setting small
+	 * SO_SNDBUF values.
+	 * Also allow first and last skb in retransmit queue to be split.
+	 */
+	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
+		     skb != tcp_rtx_queue_head(sk) &&
+		     skb != tcp_rtx_queue_tail(sk))) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
 		return -ENOMEM;
 	}
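
For a sense of scale, here is a rough userspace-only sketch of the arithmetic behind the new limit. It is not kernel code: it assumes GSO_MAX_SIZE is 64 KB (the usual kernel value) and substitutes a made-up SKB_OVERHEAD constant for the sk_buff/skb_shared_info metadata that the real SKB_TRUESIZE() macro accounts for, so the numbers are only approximate.

	#include <stdio.h>

	#define GSO_MAX_SIZE	65536			/* 64 KB, the usual kernel value */
	#define SKB_OVERHEAD	512			/* assumed per-skb metadata overhead */
	#define SKB_TRUESIZE(x)	((x) + SKB_OVERHEAD)	/* stand-in for the kernel macro */

	int main(void)
	{
		long sndbuf = 16 * 1024;		/* a deliberately small SO_SNDBUF */
		long limit  = sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

		/* The patch compares sk_wmem_queued >> 1 against the limit, so a
		 * split is refused only once queued memory exceeds about 2 * limit.
		 */
		printf("sndbuf=%ld limit=%ld extra allowance=%ld KB\n",
		       sndbuf, limit, (limit - sndbuf) / 1024);
		return 0;
	}

Under these assumptions the allowance works out to a bit over 128 KB on top of sk_sndbuf, which is the slack the patch comment describes: enough headroom that applications with small SO_SNDBUF values are not penalized when tcp_sendmsg() overshoots sk_wmem_queued by one full-size skb.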