author	Eric Dumazet <edumazet@google.com>	2021-11-15 20:11:49 +0300
committer	David S. Miller <davem@davemloft.net>	2021-11-16 16:20:45 +0300
commit	4199bae10c49e24bc2c5d8c06a68820d56640000 (patch)
tree	b2961c173f6371d1e6c6025c8dc8d7cee442dc7f /net/core/sock.c
parent	d477eb9004845cb2dc92ad5eed79a437738a868a (diff)
download	linux-4199bae10c49e24bc2c5d8c06a68820d56640000.tar.xz
net: merge net->core.prot_inuse and net->core.sock_inuse
net->core.sock_inuse is a per-cpu variable (a single int), while net->core.prot_inuse is another per-cpu variable holding 64 integers. The per-cpu allocator tends to place them far apart. Grouping them together makes sense, since updates can become faster when they hit the same cache line.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
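A minimal sketch of what the merged layout and the write side could look like after this change. The field name "all" is taken from the reader side in the diff below; the array size PROTO_INUSE_NR, the "val" member, and the sock_inuse_add() helper are assumptions about the surrounding code, not shown in this patch hunk:

	/* Sketch only: the merged per-cpu structure. The per-net socket
	 * counter ("all") now sits next to the existing per-protocol
	 * counters, so both live in the same per-cpu allocation and can
	 * share a cache line.
	 */
	struct prot_inuse {
		int all;                  /* replaces the separate net->core.sock_inuse */
		int val[PROTO_INUSE_NR];  /* existing per-protocol counters (assumed name/size) */
	};

	/* Fast-path increment on socket creation/destruction (assumed helper). */
	static inline void sock_inuse_add(const struct net *net, int val)
	{
		this_cpu_add(net->core.prot_inuse->all, val);
	}

With this layout, sock_inuse_get() only has to walk one per-cpu allocation and read the "all" field on each CPU, as the first hunk below shows.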
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--	net/core/sock.c	12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index a9bd22b883b9..d7fc8b5e2569 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3553,7 +3553,7 @@ int sock_inuse_get(struct net *net)
 	int cpu, res = 0;
 
 	for_each_possible_cpu(cpu)
-		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+		res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
 
 	return res;
 }
@@ -3565,22 +3565,12 @@ static int __net_init sock_inuse_init_net(struct net *net)
 	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
 	if (net->core.prot_inuse == NULL)
 		return -ENOMEM;
-
-	net->core.sock_inuse = alloc_percpu(int);
-	if (net->core.sock_inuse == NULL)
-		goto out;
-
 	return 0;
-
-out:
-	free_percpu(net->core.prot_inuse);
-	return -ENOMEM;
 }
 
 static void __net_exit sock_inuse_exit_net(struct net *net)
 {
 	free_percpu(net->core.prot_inuse);
-	free_percpu(net->core.sock_inuse);
 }
 
 static struct pernet_operations net_inuse_ops = {