proc: Reduce cache miss in xfrm_statistics_seq_show

This is to use the generic interfaces snmp_get_cpu_field{,64}_batch to
aggregate the data by going through all the items of each cpu sequentially.

Signed-off-by: Jia He <hejianet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Jia He 2016-09-30 11:29:02 +08:00 committed by David S. Miller
parent 7d64a94be2
commit 07613873f1
1 changed file with 8 additions and 2 deletions

View File

@@ -50,12 +50,18 @@ static const struct snmp_mib xfrm_mib_list[] = {
static int xfrm_statistics_seq_show(struct seq_file *seq, void *v) static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
{ {
unsigned long buff[LINUX_MIB_XFRMMAX];
struct net *net = seq->private; struct net *net = seq->private;
int i; int i;
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++) for (i = 0; xfrm_mib_list[i].name; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
snmp_fold_field(net->mib.xfrm_statistics, buff[i]);
xfrm_mib_list[i].entry));
return 0; return 0;
} }