author		Tonghao Zhang <xiangxia.m.yue@gmail.com>	2017-12-14 16:51:58 +0300
committer	David S. Miller <davem@davemloft.net>	2017-12-19 17:58:14 +0300
commit		648845ab7e200993dccd3948c719c858368c91e7 (patch)
tree		ca35bba9338cb8bca9cadfa1badd2e755277beda /net/socket.c
parent		08fc7f8140730d2f8499c91b5abad44581b74635 (diff)
download	linux-648845ab7e200993dccd3948c719c858368c91e7.tar.xz
sock: Move the socket inuse to namespace.
In some cases we want to know how many sockets are in use in different net namespaces; it is a key resource metric.

This patch adds a member to struct netns_core that counts the sockets in use in a net namespace. The counter is incremented and decremented in sk_alloc, sk_clone_lock and __sk_free. Sockets created in the kernel are not counted, since it is not very useful for userspace to know how many kernel sockets have been created.

The main reasons for doing it this way are:

1. When Linux calls do_exit for a process, exit_task_namespaces and exit_task_work are called sequentially. exit_task_namespaces may already have destroyed the net namespace, while sock_release, called from exit_task_work, would still need the net namespace if we counted socket-inuse in sock_release.

2. socket and sock come in pairs and, more importantly, sock holds the net namespace. Counting socket-inuse in sock avoids holding the net namespace again in socket, which keeps the code easy to maintain.

Signed-off-by: Martin Zhang <zhangjunweimartin@didichuxing.com>
Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
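The counting side of this change lives outside the net/socket.c diff shown below (in include/net/netns/core.h and net/core/sock.c). A minimal sketch of what the per-namespace counter and its increment helper might look like, based only on the description above; the member name sock_inuse and the helper sock_inuse_add() are assumptions here:

/* include/net/netns/core.h (sketch): per-cpu socket-inuse counter per netns */
struct netns_core {
	/* ... existing members ... */
#ifdef CONFIG_PROC_FS
	int __percpu *sock_inuse;	/* sockets in use in this namespace */
#endif
};

/* net/core/sock.c (sketch): called with val = 1 from sk_alloc()/sk_clone_lock()
 * and val = -1 from __sk_free(), but only for sockets that are not kernel sockets.
 */
static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}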
Diffstat (limited to 'net/socket.c')
-rw-r--r--	net/socket.c	21
1 file changed, 2 insertions(+), 19 deletions(-)
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..bbd2e9ceb692 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -163,12 +163,6 @@ static DEFINE_SPINLOCK(net_family_lock);
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
- * Statistics counters of the socket lists
- */
-
-static DEFINE_PER_CPU(int, sockets_in_use);
-
-/*
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -578,7 +572,6 @@ struct socket *sock_alloc(void)
 	inode->i_gid = current_fsgid();
 	inode->i_op = &sockfs_inode_ops;
 
-	this_cpu_add(sockets_in_use, 1);
 	return sock;
 }
 EXPORT_SYMBOL(sock_alloc);
@@ -605,7 +598,6 @@ void sock_release(struct socket *sock)
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		pr_err("%s: fasync list not empty!\n", __func__);
 
-	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;
@@ -2622,17 +2614,8 @@ core_initcall(sock_init); /* early initcall */
 
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
-	int cpu;
-	int counter = 0;
-
-	for_each_possible_cpu(cpu)
-		counter += per_cpu(sockets_in_use, cpu);
-
-	/* It can be negative, by the way. 8) */
-	if (counter < 0)
-		counter = 0;
-
-	seq_printf(seq, "sockets: used %d\n", counter);
+	seq_printf(seq, "sockets: used %d\n",
+		   sock_inuse_get(seq->private));
 }
 #endif /* CONFIG_PROC_FS */
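With the per-cpu global gone, socket_seq_show() reports the value through sock_inuse_get(seq->private), where seq->private carries the struct net of the namespace reading /proc/net/sockstat. A sketch of what that read-side summation could look like, using the same assumed per-cpu member as in the sketch above (this hunk also lives in net/core/sock.c, outside this file's diff):

/* net/core/sock.c (sketch): sum the namespace's per-cpu counter at read time */
int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}

The practical effect is that "cat /proc/net/sockstat" inside a net namespace now reports only that namespace's sockets instead of a system-wide count.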