@@ -4726,18 +4726,24 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
 }
 
 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
-					int items, int bytes, size_t syncpoff)
+					int bytes, size_t syncpoff)
 {
-	int i;
-	int pad = bytes - sizeof(u64) * items;
+	int i, c;
+	u64 buff[IPSTATS_MIB_MAX];
+	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
+
 	BUG_ON(pad < 0);
 
-	/* Use put_unaligned() because stats may not be aligned for u64. */
-	put_unaligned(items, &stats[0]);
-	for (i = 1; i < items; i++)
-		put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+	memset(buff, 0, sizeof(buff));
+	buff[0] = IPSTATS_MIB_MAX;
 
-	memset(&stats[items], 0, pad);
+	for_each_possible_cpu(c) {
+		for (i = 1; i < IPSTATS_MIB_MAX; i++)
+			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
+	}
+
+	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
+	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
 }
 
 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
@@ -4745,8 +4751,8 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
 {
 	switch (attrtype) {
 	case IFLA_INET6_STATS:
-		__snmp6_fill_stats64(stats, idev->stats.ipv6,
-			IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
+		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
+			offsetof(struct ipstats_mib, syncp));
 		break;
 	case IFLA_INET6_ICMP6STATS:
 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
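
For reference, the hunk above changes the folding order: instead of calling
snmp_fold_field64() once per counter (each call walking every possible CPU),
the new code walks each possible CPU once and accumulates all IPSTATS_MIB_MAX
counters into an on-stack buffer before copying the result into the netlink
buffer. Below is a minimal, self-contained user-space sketch of that
aggregation order; it only illustrates the loop structure, and the names
NUM_FIELDS, NUM_CPUS, percpu_mib and fold_per_cpu are hypothetical stand-ins,
not kernel identifiers.

/* Hypothetical stand-ins: NUM_FIELDS plays the role of IPSTATS_MIB_MAX,
 * percpu_mib the role of the per-cpu MIB data.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_FIELDS 8
#define NUM_CPUS   4

static uint64_t percpu_mib[NUM_CPUS][NUM_FIELDS];

/* Ordering used by the patch: visit each CPU once and accumulate every
 * counter into a local buffer, rather than re-walking all CPUs for each
 * individual counter.
 */
static void fold_per_cpu(uint64_t *stats)
{
	uint64_t buff[NUM_FIELDS];
	int i, c;

	memset(buff, 0, sizeof(buff));
	buff[0] = NUM_FIELDS;	/* mirrors buff[0] = IPSTATS_MIB_MAX */

	for (c = 0; c < NUM_CPUS; c++)
		for (i = 1; i < NUM_FIELDS; i++)
			buff[i] += percpu_mib[c][i];

	memcpy(stats, buff, sizeof(buff));
}

int main(void)
{
	uint64_t stats[NUM_FIELDS];
	int i, c;

	for (c = 0; c < NUM_CPUS; c++)
		for (i = 0; i < NUM_FIELDS; i++)
			percpu_mib[c][i] = c * 10 + i;

	fold_per_cpu(stats);
	for (i = 0; i < NUM_FIELDS; i++)
		printf("field %d = %llu\n", i, (unsigned long long)stats[i]);
	return 0;
}

Visiting each CPU's counters in one pass keeps that CPU's data resident in
cache for the whole inner loop, which is presumably the motivation for
folding per CPU rather than per field.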