@@ -2840,8 +2840,12 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *stats;
 	struct tcp_info info;
+	u64 rate64;
+	u32 rate;
 
-	stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
+	stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
+			  3 * nla_total_size(sizeof(u32)) +
+			  2 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
 	if (!stats)
 		return NULL;
 
@@ -2856,6 +2860,20 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
 			  tp->data_segs_out, TCP_NLA_PAD);
 	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
 			  tp->total_retrans, TCP_NLA_PAD);
+
+	rate = READ_ONCE(sk->sk_pacing_rate);
+	rate64 = rate != ~0U ? rate : ~0ULL;
+	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
+
+	rate64 = tcp_compute_delivery_rate(tp);
+	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
+
+	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
+	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
+	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
+
+	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
+	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
 	return stats;
 }
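
For context rather than as part of the patch: with SOF_TIMESTAMPING_OPT_STATS enabled, the attributes added above reach userspace as a flat sequence of netlink TLVs in an SCM_TIMESTAMPING_OPT_STATS control message read from the socket error queue. The sketch below is a hypothetical dump_tcp_opt_stats() helper (recvmsg()/CMSG handling omitted) showing one way such a payload could be walked; the attribute widths mirror the nla_put_*() calls in the patch.

/* Hypothetical userspace helper: walk the nlattr TLVs found in an
 * SCM_TIMESTAMPING_OPT_STATS cmsg payload and print a few of the
 * fields added by this patch.  Unknown attribute types (including the
 * TCP_NLA_PAD alignment attributes) are simply skipped.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static void dump_tcp_opt_stats(const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len >= NLA_HDRLEN) {
		const struct nlattr *nla = (const struct nlattr *)p;
		size_t alen = NLA_ALIGN(nla->nla_len);
		const void *data = p + NLA_HDRLEN;
		uint64_t v64;
		uint32_t v32;

		if (nla->nla_len < NLA_HDRLEN || nla->nla_len > len)
			break;	/* malformed or truncated attribute */

		switch (nla->nla_type) {
		case TCP_NLA_PACING_RATE:
			memcpy(&v64, data, sizeof(v64));
			printf("pacing rate: %llu bytes/s\n",
			       (unsigned long long)v64);
			break;
		case TCP_NLA_DELIVERY_RATE:
			memcpy(&v64, data, sizeof(v64));
			printf("delivery rate: %llu bytes/s\n",
			       (unsigned long long)v64);
			break;
		case TCP_NLA_SND_CWND:
			memcpy(&v32, data, sizeof(v32));
			printf("snd_cwnd: %u packets\n", (unsigned int)v32);
			break;
		case TCP_NLA_MIN_RTT:
			memcpy(&v32, data, sizeof(v32));
			printf("min RTT: %u us\n", (unsigned int)v32);
			break;
		/* TCP_NLA_REORDERING, TCP_NLA_RECUR_RETRANS and
		 * TCP_NLA_DELIVERY_RATE_APP_LMT follow the same pattern
		 * with u32 and u8 payloads respectively.
		 */
		}

		if (alen >= len)
			break;
		p += alen;
		len -= alen;
	}
}

memcpy() is used for the multi-byte payloads so the walker does not depend on the buffer's alignment; strictly, the TCP_NLA_PAD attributes emitted by nla_put_u64_64bit() exist to keep the 64-bit values naturally aligned within the attribute stream.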