@@ -4942,6 +4942,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *    freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4949,20 +4950,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_to_skb(node)->truesize;
 		tcp_drop(sk, rb_to_skb(node));
-		sk_mem_reclaim(sk);
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-		    !tcp_under_memory_pressure(sk))
-			break;
+		if (!prev || goal <= 0) {
+			sk_mem_reclaim(sk);
+			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			    !tcp_under_memory_pressure(sk))
+				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_to_skb(prev);