author    Eric Dumazet <edumazet@google.com>    2021-09-30 04:03:32 +0300
committer David S. Miller <davem@davemloft.net> 2021-09-30 16:17:10 +0300
commit    59f09ae8fac4a990070fc6bdc889d0e0118664ea (patch)
tree      b2a2ae1c36876b2ef1aea66848d36b9b51cc41f2 /include/net/ip.h
parent    dee3b2d0fa4b51a079f7d12159b42240f795bf64 (diff)
download  linux-59f09ae8fac4a990070fc6bdc889d0e0118664ea.tar.xz
net: snmp: inline snmp_get_cpu_field()
This trivial function is called ~90,000 times on 256-CPU hosts when
reading /proc/net/netstat, and that number keeps growing. Inlining it
saves many cycles.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
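For context on where those calls come from: each SNMP counter is a
per-cpu array of unsigned longs, and every value shown by the proc file
is folded across all possible CPUs, so the accessor runs once per
(counter, CPU) pair. A minimal sketch of the fold, following the shape
of snmp_fold_field() in net/ipv4/af_inet.c:

	unsigned long snmp_fold_field(void __percpu *mib, int offt)
	{
		unsigned long res = 0;
		int cpu;

		/* One accessor call per possible CPU -- 256 of them
		 * on the hosts cited above. */
		for_each_possible_cpu(cpu)
			res += snmp_get_cpu_field(mib, cpu, offt);
		return res;
	}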
Diffstat (limited to 'include/net/ip.h')
-rw-r--r--  include/net/ip.h  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964..cf229a531194 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -291,7 +291,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
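For reference, the read path that generates the call volume: the
netstat seq_file code batches per-cpu reads by walking CPUs in the
outer loop and counters in the inner one, invoking snmp_get_cpu_field()
once per (CPU, counter) pair. A sketch of that pattern using a
hypothetical helper (the real code is netstat_seq_show() and its
helpers in net/ipv4/proc.c, which accumulate into a fixed-size buffer
such as unsigned long buff[LINUX_MIB_MAX]):

	/* Hypothetical illustration of the netstat access pattern. */
	static void fold_mib_list(struct seq_file *seq, void __percpu *mib,
				  const struct snmp_mib *list, int cnt,
				  unsigned long *buff)
	{
		int cpu, i;

		/* cnt * nr_possible_cpus accessor calls per read of
		 * the proc file; with this patch each one is a single
		 * per-cpu load instead of a function call. */
		for_each_possible_cpu(cpu)
			for (i = 0; i < cnt; i++)
				buff[i] += snmp_get_cpu_field(mib, cpu,
							      list[i].entry);
		for (i = 0; i < cnt; i++)
			seq_printf(seq, " %lu", buff[i]);
	}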