@@ -1452,38 +1452,51 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
 	int i;
 
 	for_each_possible_cpu(i)
-		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
+		res += snmp_get_cpu_field(mib, i, offt);
 	return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
+			 size_t syncp_offset)
+{
+	void *bhptr;
+	struct u64_stats_sync *syncp;
+	u64 v;
+	unsigned int start;
+
+	bhptr = per_cpu_ptr(mib, cpu);
+	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+	do {
+		start = u64_stats_fetch_begin_irq(syncp);
+		v = *(((u64 *)bhptr) + offt);
+	} while (u64_stats_fetch_retry_irq(syncp, start));
+
+	return v;
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
+
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
 	u64 res = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr;
-		struct u64_stats_sync *syncp;
-		u64 v;
-		unsigned int start;
-
-		bhptr = per_cpu_ptr(mib, cpu);
-		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin_irq(syncp);
-			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_irq(syncp, start));
-
-		res += v;
+		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
 	}
 	return res;
 }
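
Not part of the patch itself, but for illustration: once snmp_get_cpu_field() is exported, a caller can read one counter slot per CPU instead of only the folded total from snmp_fold_field(). A minimal sketch follows; dump_per_cpu_counter() and its pr_info() output are hypothetical, used only for this example, and "mib" is assumed to be a per-cpu MIB block of the kind snmp_fold_field() already operates on.

/*
 * Illustrative sketch only -- not kernel code from this patch.
 * Reads a single counter slot on each possible CPU via the new helper.
 */
static void dump_per_cpu_counter(void __percpu *mib, int offt)
{
	int cpu;

	for_each_possible_cpu(cpu)
		pr_info("cpu%d: %llu\n", cpu,
			(unsigned long long)snmp_get_cpu_field(mib, cpu, offt));
}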