
proc: Reduce cache miss in xfrm_statistics_seq_show

Use the generic interfaces snmp_get_cpu_field{,64}_batch to aggregate the
data, going through all the items of each cpu sequentially.

Signed-off-by: Jia He <hejianet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jia He authored 8 years ago
commit 07613873f1
1 changed file with 8 additions and 2 deletions

net/xfrm/xfrm_proc.c  (+8 -2)

@@ -50,12 +50,18 @@ static const struct snmp_mib xfrm_mib_list[] = {
 
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
+	unsigned long buff[LINUX_MIB_XFRMMAX];
 	struct net *net = seq->private;
 	int i;
+
+	memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+
+	snmp_get_cpu_field_batch(buff, xfrm_mib_list,
+				 net->mib.xfrm_statistics);
 	for (i = 0; xfrm_mib_list[i].name; i++)
 		seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-			   snmp_fold_field(net->mib.xfrm_statistics,
-					   xfrm_mib_list[i].entry));
+						buff[i]);
+
 	return 0;
 }
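
To make the loop-order idea behind the patch concrete, here is a minimal
user-space C sketch of the two aggregation patterns. It is not the kernel's
snmp_get_cpu_field{,64}_batch implementation; the names per_cpu_counters,
NUM_CPUS, NUM_FIELDS, fold_field and get_field_batch are hypothetical,
illustration-only stand-ins. The point it shows is the one the commit message
describes: the old code folds one field at a time and so revisits every CPU's
counter area once per field, while the batched version walks each CPU's
counters once and accumulates all fields, touching each per-CPU region in a
single sequential pass.

#include <stdio.h>
#include <string.h>

/* Hypothetical sizes, for illustration only. */
#define NUM_CPUS   4
#define NUM_FIELDS 8

/* Stand-in for per-CPU SNMP counters: one row of fields per CPU. */
static unsigned long per_cpu_counters[NUM_CPUS][NUM_FIELDS];

/* Old pattern: fold a single field across all CPUs.  Called once per
 * field, so the per-CPU data is traversed NUM_FIELDS times. */
static unsigned long fold_field(int field)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		sum += per_cpu_counters[cpu][field];
	return sum;
}

/* Batched pattern: walk each CPU's counters once and accumulate every
 * field into buff[], reading each per-CPU region sequentially in a
 * single pass (fewer cache misses on the per-CPU data). */
static void get_field_batch(unsigned long *buff)
{
	int cpu, field;

	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		for (field = 0; field < NUM_FIELDS; field++)
			buff[field] += per_cpu_counters[cpu][field];
}

int main(void)
{
	unsigned long buff[NUM_FIELDS];
	int field;

	/* Fake some counter values. */
	for (field = 0; field < NUM_FIELDS; field++)
		per_cpu_counters[field % NUM_CPUS][field] = field + 1;

	memset(buff, 0, sizeof(buff));
	get_field_batch(buff);

	/* Both orders produce the same totals; only the memory access
	 * pattern differs. */
	for (field = 0; field < NUM_FIELDS; field++)
		printf("field %d: batch=%lu fold=%lu\n",
		       field, buff[field], fold_field(field));
	return 0;
}

This mirrors the diff above: the seq_show path zeroes a local buff[], fills it
with one batched pass over the per-CPU statistics, and then prints from buff[i]
instead of calling a per-field fold inside the print loop.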