author     Kirill Tkhai <ktkhai@virtuozzo.com>    2018-02-19 12:58:45 +0300
committer  David S. Miller <davem@davemloft.net>  2018-02-20 21:23:27 +0300
commit     65b7b5b90fcd17b25ef43b0cd02bda47bf286675 (patch)
tree       b850ae90d607134d8e5dced4c0d5533abfd24a71 /net/core
parent     19efbd93e6fb05eab81856b4fc8d64211dd37088 (diff)
download   linux-65b7b5b90fcd17b25ef43b0cd02bda47bf286675.tar.xz
net: Make cleanup_list and net::cleanup_list of llist type
This simplifies cleanup queueing and converts the cleanup lists to
llist primitives. Since llist has its own cmpxchg() ordering,
cleanup_list_lock is no longer needed. Also, struct llist_node is
smaller than struct list_head, so we save some bytes in struct net
with this patch.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
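As a reference for readers unfamiliar with llist: the pattern this patch
moves to can be sketched as below. This is a minimal, hypothetical
example; struct item, item_queue() and items_drain() are illustrative
names, not from the patch. Producers push nodes with llist_add(), which
is a lock-free cmpxchg() loop, and a single consumer atomically detaches
the whole list with llist_del_all() and walks it without taking any lock.

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct item {
            int payload;
            struct llist_node node;
    };

    static LLIST_HEAD(pending);     /* lock-free singly linked list */

    /* Producer side: safe to call concurrently from any context,
     * since llist_add() is a cmpxchg() loop, not a lock. */
    static void item_queue(struct item *it)
    {
            llist_add(&it->node, &pending);
    }

    /* Consumer side: atomically snapshot everything queued so far,
     * then process the snapshot with no lock held. */
    static void items_drain(void)
    {
            struct llist_node *list = llist_del_all(&pending);
            struct item *it, *tmp;

            llist_for_each_entry_safe(it, tmp, list, node)
                    kfree(it);
    }

This is exactly the split in the diff below: __put_net() plays the
producer and cleanup_net() the consumer. Note that llist is a LIFO
stack, so llist_del_all() hands back entries in reverse insertion order.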
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/net_namespace.c | 20 ++++++--------------
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e89a516620dd..abf8a46e94e2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -481,21 +481,18 @@ static void unhash_nsid(struct net *net, struct net *last)
 	spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
 	struct net *net, *tmp, *last;
-	struct list_head net_kill_list;
+	struct llist_node *net_kill_list;
 	LIST_HEAD(net_exit_list);
 	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
-	spin_lock_irq(&cleanup_list_lock);
-	list_replace_init(&cleanup_list, &net_kill_list);
-	spin_unlock_irq(&cleanup_list_lock);
+	net_kill_list = llist_del_all(&cleanup_list);
 again:
 	write = READ_ONCE(nr_sync_pernet_ops);
 	if (write)
@@ -510,7 +507,7 @@ again:
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	llist_for_each_entry(net, net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@
 	last = list_last_entry(&net_namespace_list, struct net, list);
 	rtnl_unlock();
 
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		unhash_nsid(net, last);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
@@ -585,12 +582,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	unsigned long flags;
-
-	spin_lock_irqsave(&cleanup_list_lock, flags);
-	list_add(&net->cleanup_list, &cleanup_list);
-	spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
+	llist_add(&net->cleanup_list, &cleanup_list);
 	queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);
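On the space saving mentioned in the log: struct llist_node holds only a
next pointer, while struct list_head holds both next and prev, so the
switch trims one pointer from struct net. A standalone sanity check,
using simplified stand-ins rather than the real kernel headers:

    #include <stdio.h>

    /* Simplified stand-ins mirroring the kernel's two list types. */
    struct list_head { struct list_head *next, *prev; };
    struct llist_node { struct llist_node *next; };

    int main(void)
    {
            /* On a 64-bit build this prints 16 and 8: one pointer
             * saved per struct net by the llist conversion. */
            printf("list_head:  %zu bytes\n", sizeof(struct list_head));
            printf("llist_node: %zu bytes\n", sizeof(struct llist_node));
            return 0;
    }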