@@ -729,9 +729,9 @@ static void ip_vs_trash_cleanup(struct net *net)
 }
 
 static void
-ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
+ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src)
 {
-#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
+#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c
 
 	spin_lock_bh(&src->lock);
 
@@ -746,6 +746,21 @@ ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
 	spin_unlock_bh(&src->lock);
 }
 
+static void
+ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src)
+{
+	dst->conns = (u32)src->conns;
+	dst->inpkts = (u32)src->inpkts;
+	dst->outpkts = (u32)src->outpkts;
+	dst->inbytes = src->inbytes;
+	dst->outbytes = src->outbytes;
+	dst->cps = (u32)src->cps;
+	dst->inpps = (u32)src->inpps;
+	dst->outpps = (u32)src->outpps;
+	dst->inbps = (u32)src->inbps;
+	dst->outbps = (u32)src->outbps;
+}
+
 static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
@@ -753,7 +768,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
 	spin_lock_bh(&stats->lock);
 
 	/* get current counters as zero point, rates are zeroed */
-#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
+#define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c
 
 	IP_VS_ZERO_STATS_COUNTER(conns);
 	IP_VS_ZERO_STATS_COUNTER(inpkts);
@@ -2044,7 +2059,7 @@ static const struct file_operations ip_vs_info_fops = {
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq_file_single_net(seq);
-	struct ip_vs_stats_user show;
+	struct ip_vs_kstats show;
 
 /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
@@ -2053,17 +2068,22 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
 		 " Conns Packets Packets Bytes Bytes\n");
 
 	ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
-	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
-		   show.inpkts, show.outpkts,
-		   (unsigned long long) show.inbytes,
-		   (unsigned long long) show.outbytes);
-
-/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+	seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n",
+		   (unsigned long long)show.conns,
+		   (unsigned long long)show.inpkts,
+		   (unsigned long long)show.outpkts,
+		   (unsigned long long)show.inbytes,
+		   (unsigned long long)show.outbytes);
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567*/
 	seq_puts(seq,
-		 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
-	seq_printf(seq, "%8X %8X %8X %16X %16X\n",
-		   show.cps, show.inpps, show.outpps,
-		   show.inbps, show.outbps);
+		 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+	seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n",
+		   (unsigned long long)show.cps,
+		   (unsigned long long)show.inpps,
+		   (unsigned long long)show.outpps,
+		   (unsigned long long)show.inbps,
+		   (unsigned long long)show.outbps);
 
 	return 0;
 }
@@ -2086,7 +2106,7 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 	struct net *net = seq_file_single_net(seq);
 	struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
 	struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats;
-	struct ip_vs_stats_user rates;
+	struct ip_vs_kstats kstats;
 	int i;
 
 /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
@@ -2098,41 +2118,41 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 	for_each_possible_cpu(i) {
 		struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
 		unsigned int start;
-		__u64 inbytes, outbytes;
+		u64 conns, inpkts, outpkts, inbytes, outbytes;
 
 		do {
 			start = u64_stats_fetch_begin_irq(&u->syncp);
-			inbytes = u->ustats.inbytes;
-			outbytes = u->ustats.outbytes;
+			conns = u->cnt.conns;
+			inpkts = u->cnt.inpkts;
+			outpkts = u->cnt.outpkts;
+			inbytes = u->cnt.inbytes;
+			outbytes = u->cnt.outbytes;
 		} while (u64_stats_fetch_retry_irq(&u->syncp, start));
 
-		seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
-			   i, u->ustats.conns, u->ustats.inpkts,
-			   u->ustats.outpkts, (__u64)inbytes,
-			   (__u64)outbytes);
+		seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
+			   i, (u64)conns, (u64)inpkts,
+			   (u64)outpkts, (u64)inbytes,
+			   (u64)outbytes);
 	}
 
-	spin_lock_bh(&tot_stats->lock);
-
-	seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
-		   tot_stats->ustats.conns, tot_stats->ustats.inpkts,
-		   tot_stats->ustats.outpkts,
-		   (unsigned long long) tot_stats->ustats.inbytes,
-		   (unsigned long long) tot_stats->ustats.outbytes);
-
-	ip_vs_read_estimator(&rates, tot_stats);
+	ip_vs_copy_stats(&kstats, tot_stats);
 
-	spin_unlock_bh(&tot_stats->lock);
+	seq_printf(seq, " ~ %8LX %8LX %8LX %16LX %16LX\n\n",
+		   (unsigned long long)kstats.conns,
+		   (unsigned long long)kstats.inpkts,
+		   (unsigned long long)kstats.outpkts,
+		   (unsigned long long)kstats.inbytes,
+		   (unsigned long long)kstats.outbytes);
 
-/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+/* ... 01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
-		 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
-	seq_printf(seq, " %8X %8X %8X %16X %16X\n",
-		   rates.cps,
-		   rates.inpps,
-		   rates.outpps,
-		   rates.inbps,
-		   rates.outbps);
+		 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+	seq_printf(seq, " %8LX %8LX %8LX %16LX %16LX\n",
+		   kstats.cps,
+		   kstats.inpps,
+		   kstats.outpps,
+		   kstats.inbps,
+		   kstats.outbps);
 
 	return 0;
 }
@@ -2400,6 +2420,7 @@ static void
 ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
 	struct ip_vs_scheduler *sched;
+	struct ip_vs_kstats kstats;
 
 	sched = rcu_dereference_protected(src->scheduler, 1);
 	dst->protocol = src->protocol;
@@ -2411,7 +2432,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 	dst->timeout = src->timeout / HZ;
 	dst->netmask = src->netmask;
 	dst->num_dests = src->num_dests;
-	ip_vs_copy_stats(&dst->stats, &src->stats);
+	ip_vs_copy_stats(&kstats, &src->stats);
+	ip_vs_export_stats_user(&dst->stats, &kstats);
 }
 
 static inline int
@@ -2485,6 +2507,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
 	int count = 0;
 	struct ip_vs_dest *dest;
 	struct ip_vs_dest_entry entry;
+	struct ip_vs_kstats kstats;
 
 	memset(&entry, 0, sizeof(entry));
 	list_for_each_entry(dest, &svc->destinations, n_list) {
@@ -2506,7 +2529,8 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
 		entry.activeconns = atomic_read(&dest->activeconns);
 		entry.inactconns = atomic_read(&dest->inactconns);
 		entry.persistconns = atomic_read(&dest->persistconns);
-		ip_vs_copy_stats(&entry.stats, &dest->stats);
+		ip_vs_copy_stats(&kstats, &dest->stats);
+		ip_vs_export_stats_user(&entry.stats, &kstats);
 		if (copy_to_user(&uptr->entrytable[count],
 				 &entry, sizeof(entry))) {
 			ret = -EFAULT;
@@ -2798,25 +2822,51 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
 };
 
 static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
-				 struct ip_vs_stats *stats)
+				 struct ip_vs_kstats *kstats)
+{
+	struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
+	if (!nl_stats)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps))
+		goto nla_put_failure;
+	nla_nest_end(skb, nl_stats);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nl_stats);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
+				   struct ip_vs_kstats *kstats)
 {
-	struct ip_vs_stats_user ustats;
 	struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
 	if (!nl_stats)
 		return -EMSGSIZE;
 
-	ip_vs_copy_stats(&ustats, stats);
-
-	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
-	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+	if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
 		goto nla_put_failure;
 	nla_nest_end(skb, nl_stats);
 
@@ -2835,6 +2885,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	struct nlattr *nl_service;
 	struct ip_vs_flags flags = { .flags = svc->flags,
 				     .mask = ~0 };
+	struct ip_vs_kstats kstats;
 
 	nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
 	if (!nl_service)
@@ -2860,7 +2911,10 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
 	    nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
 		goto nla_put_failure;
-	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
+	ip_vs_copy_stats(&kstats, &svc->stats);
+	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats))
+		goto nla_put_failure;
+	if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nl_service);
@@ -3032,6 +3086,7 @@ static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
 static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
 {
 	struct nlattr *nl_dest;
+	struct ip_vs_kstats kstats;
 
 	nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
 	if (!nl_dest)
@@ -3054,7 +3109,10 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
 			atomic_read(&dest->persistconns)) ||
 	    nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
 		goto nla_put_failure;
-	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
+	ip_vs_copy_stats(&kstats, &dest->stats);
+	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats))
+		goto nla_put_failure;
+	if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nl_dest);
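
A minimal userspace-compilable sketch (illustrative only, not part of the patch) of why the export path is split above: the kernel now keeps counters and rates as 64-bit values (struct ip_vs_kstats), while the legacy sockopt/netlink layout (struct ip_vs_stats_user and the old u32 IPVS_STATS_ATTR_* attributes) can only carry the low 32 bits, which ip_vs_export_stats_user() and ip_vs_genl_fill_stats() produce with truncating (u32) casts; ip_vs_genl_fill_stats64() is added so newer userspace can read the untruncated values. The struct and field names below are simplified stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ip_vs_kstats / ip_vs_stats_user. */
struct kstats { uint64_t inbps; };   /* 64-bit kernel-side rate */
struct ustats { uint32_t inbps; };   /* legacy 32-bit user-visible rate */

/* Mirrors the (u32) truncation done by ip_vs_export_stats_user(). */
static void export_stats_user(struct ustats *dst, const struct kstats *src)
{
	dst->inbps = (uint32_t)src->inbps;
}

int main(void)
{
	/* ~40 Gbit/s expressed in bytes/s: fits in u64 but overflows a u32. */
	struct kstats k = { .inbps = 5000000000ULL };
	struct ustats u;

	export_stats_user(&u, &k);
	printf("64-bit inbps=%llu, legacy u32 inbps=%u (wrapped)\n",
	       (unsigned long long)k.inbps, u.inbps);
	return 0;
}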