@@ -3496,6 +3496,16 @@ static void tcp_xmit_recovery(struct sock *sk, int rexmit)
 	tcp_xmit_retransmit_queue(sk);
 }
 
+/* Returns the number of packets newly acked or sacked by the current ACK */
+static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 delivered;
+
+	delivered = tp->delivered - prior_delivered;
+	return delivered;
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3619,7 +3629,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
 		sk_dst_confirm(sk);
 
-	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
+	delivered = tcp_newly_delivered(sk, delivered, flag);
 	lost = tp->lost - lost;			/* freshly marked lost */
 	rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
 	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
@@ -3629,9 +3639,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
-	if (flag & FLAG_DSACKING_ACK)
+	if (flag & FLAG_DSACKING_ACK) {
 		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
 				      &rexmit);
+		tcp_newly_delivered(sk, delivered, flag);
+	}
 	/* If this ack opens up a zero window, clear backoff. It was
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.
@@ -3655,6 +3667,7 @@ old_ack:
 						&sack_state);
 		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
 				      &rexmit);
+		tcp_newly_delivered(sk, delivered, flag);
 		tcp_xmit_recovery(sk, rexmit);
 	}
 