@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
 	p = rb_first(&tp->out_of_order_queue);
 	while (p) {
-		skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = rb_to_skb(p);
 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;
 
@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node **p, *q, *parent;
+	struct rb_node **p, *parent;
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 	bool fragstolen;
@@ -4458,7 +4458,7 @@ coalesce_done:
 	parent = NULL;
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
 			p = &parent->rb_left;
 			continue;
@@ -4503,9 +4503,7 @@ insert:
 
 merge_right:
 	/* Remove other segments covered by skb. */
-	while ((q = rb_next(&skb->rbnode)) != NULL) {
-		skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+	while ((skb1 = skb_rb_next(skb)) != NULL) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
 			break;
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4520,7 +4518,7 @@ merge_right:
 		tcp_drop(sk, skb1);
 	}
 	/* If there is no skb after us, we are the last_skb ! */
-	if (!q)
+	if (!skb1)
 		tp->ooo_last_skb = skb;
 
 add_sack:
@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
 	if (list)
 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
 
-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+	return skb_rb_next(skb);
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
 
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
 			p = &parent->rb_left;
 		else
@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb, *head;
-	struct rb_node *p;
 	u32 start, end;
 
-	p = rb_first(&tp->out_of_order_queue);
-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
+	skb = skb_rb_first(&tp->out_of_order_queue);
 new_range:
 	if (!skb) {
-		p = rb_last(&tp->out_of_order_queue);
-		/* Note: This is possible p is NULL here. We do not
-		 * use rb_entry_safe(), as ooo_last_skb is valid only
-		 * if rbtree is not empty.
-		 */
-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
 		return;
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
 
 	for (head = skb;;) {
-		skb = tcp_skb_next(skb, NULL);
+		skb = skb_rb_next(skb);
 
 		/* Range is terminated when we see a gap or when
 		 * we are at the queue end.
@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+		tcp_drop(sk, rb_to_skb(node));
 		sk_mem_reclaim(sk);
 		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 		    !tcp_under_memory_pressure(sk))
 			break;
 		node = prev;
 	} while (node);
-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+	tp->ooo_last_skb = rb_to_skb(prev);
 
 	/* Reset SACK state. A conforming SACK implementation will
 	 * do the same at a timeout based retransmit. When a connection
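
For reference, the helpers these hunks switch to are thin wrappers over the open-coded rb_entry()/rb_next() calls they replace. A minimal sketch of their definitions, assuming they match the skb_rb_* macros introduced in include/linux/skbuff.h alongside this change (reproduced from memory, not from this diff), looks like:

	/* Map an rb_node embedded in an sk_buff back to the sk_buff.
	 * rb_entry_safe() returns NULL when handed a NULL node, so every
	 * helper below yields NULL on an empty tree or past the last node.
	 */
	#define rb_to_skb(rb)      rb_entry_safe(rb, struct sk_buff, rbnode)

	/* First/last sk_buff in an rb tree of skbs, or NULL if the tree is empty. */
	#define skb_rb_first(root) rb_to_skb(rb_first(root))
	#define skb_rb_last(root)  rb_to_skb(rb_last(root))

	/* In-order successor of an sk_buff, or NULL at the end of the tree. */
	#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))

Because rb_to_skb() is built on rb_entry_safe(), a NULL rb_node maps to a NULL sk_buff; that is what lets tcp_collapse_ofo_queue() drop its local struct rb_node *p together with the old comment about rb_entry() versus rb_entry_safe(), and lets the merge_right: loop test skb1 instead of q.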