author		Tejun Heo <tj@kernel.org>	2017-02-23 02:41:21 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-23 03:41:27 +0300
commit		bc2791f857e1984b7548d2a2de2ffb1a913dee62
tree		1cf8a24fe4da02037d1b37de3e49eb7dec708834 /mm/slab_common.c
parent		9eeadc8b6e0e31f9aea1f8886ef472f62c2b7f55
download	linux-bc2791f857e1984b7548d2a2de2ffb1a913dee62.tar.xz
slab: link memcg kmem_caches on their associated memory cgroup
With kmem cgroup support enabled, kmem_caches can be created and destroyed frequently, and a great number of near-empty kmem_caches can accumulate if there are a lot of transient cgroups and the system is not under memory pressure. When memory reclaim starts under such conditions, it can lead to consecutive deactivation and destruction of many kmem_caches, easily hundreds of thousands on moderately large systems, exposing scalability issues in the current slab management code. This is one of the patches to address the issue.

While a memcg kmem_cache is listed on its root cache's ->children list, there is no direct way to iterate all kmem_caches which are associated with a memory cgroup. The only way to iterate them is walking all caches while filtering out caches which don't match, which would be most of them. This makes memcg destruction operations O(N^2) where N is the total number of slab caches, which can be huge. Combined with the synchronous RCU operations, this can tie up a CPU and affect the whole machine for many hours when memory reclaim triggers offlining and destruction of the stale memcgs.

This patch adds the mem_cgroup->kmem_caches list, which goes through memcg_cache_params->kmem_caches_node of all kmem_caches associated with the memcg. All memcg-specific iterations, including stat file access, are updated to use the new list instead.

Link: http://lkml.kernel.org/r/20170117235411.9408-6-tj@kernel.org
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jay Vana <jsvana@fb.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
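For orientation, below is a minimal sketch (not part of the patch) of the per-memcg iteration the new list enables. The helper name walk_memcg_caches is hypothetical; the list head memcg->kmem_caches and the link memcg_params.kmem_caches_node are the fields introduced by this patch, and slab_mutex is the existing global slab lock:

	#include <linux/list.h>
	#include <linux/memcontrol.h>
	#include <linux/slab.h>
	#include "slab.h"	/* mm-internal header: slab_mutex, memcg_params (assumed built in-tree) */

	/*
	 * Illustrative sketch only: walk every kmem_cache that belongs to
	 * @memcg via the new memcg->kmem_caches list, instead of scanning
	 * the global slab_caches list and filtering out non-matching
	 * caches.  Caller must hold slab_mutex, as in the patch.
	 */
	static void walk_memcg_caches(struct mem_cgroup *memcg)
	{
		struct kmem_cache *s, *s2;

		lockdep_assert_held(&slab_mutex);

		list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
					 memcg_params.kmem_caches_node) {
			/* shrink, deactivate or destroy @s here */
		}
	}

This turns each per-memcg walk from a scan over all slab caches into a scan over only that memcg's caches, which is what removes the O(N^2) behaviour during memcg destruction.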
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	36
1 file changed, 29 insertions(+), 7 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c3885032dbce..c3bbeddaeaaf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -154,6 +154,7 @@ static int init_memcg_params(struct kmem_cache *s,
s->memcg_params.root_cache = root_cache;
s->memcg_params.memcg = memcg;
INIT_LIST_HEAD(&s->memcg_params.children_node);
+ INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
return 0;
}
@@ -224,6 +225,7 @@ int memcg_update_all_caches(int num_memcgs)
static void unlink_memcg_cache(struct kmem_cache *s)
{
list_del(&s->memcg_params.children_node);
+ list_del(&s->memcg_params.kmem_caches_node);
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
@@ -596,6 +598,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
list_add(&s->memcg_params.children_node,
&root_cache->memcg_params.children);
+ list_add(&s->memcg_params.kmem_caches_node, &memcg->kmem_caches);
/*
* Since readers won't lock (see cache_from_memcg_idx()), we need a
@@ -651,9 +654,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
get_online_mems();
mutex_lock(&slab_mutex);
- list_for_each_entry_safe(s, s2, &slab_caches, list) {
- if (is_root_cache(s) || s->memcg_params.memcg != memcg)
- continue;
+ list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
+ memcg_params.kmem_caches_node) {
/*
* The cgroup is about to be freed and therefore has no charges
* left. Hence, all its caches must be empty by now.
@@ -1201,15 +1203,35 @@ static int slab_show(struct seq_file *m, void *p)
}
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+void *memcg_slab_start(struct seq_file *m, loff_t *pos)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+ mutex_lock(&slab_mutex);
+ return seq_list_start(&memcg->kmem_caches, *pos);
+}
+
+void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+ return seq_list_next(p, &memcg->kmem_caches, pos);
+}
+
+void memcg_slab_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&slab_mutex);
+}
+
int memcg_slab_show(struct seq_file *m, void *p)
{
- struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
+ struct kmem_cache *s = list_entry(p, struct kmem_cache,
+ memcg_params.kmem_caches_node);
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- if (p == slab_caches.next)
+ if (p == memcg->kmem_caches.next)
print_slabinfo_header(m);
- if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
- cache_show(s, m);
+ cache_show(s, m);
return 0;
}
#endif
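The seq_file iterators added above are consumed on the memcg side; that hook-up lives in mm/memcontrol.c and is outside this diff (limited to mm/slab_common.c). A hedged sketch of how the callbacks would be wired into a legacy cgroup file, using the real struct cftype fields; the exact file entry shown is an assumption, not taken from this patch:

	/*
	 * Sketch of the consumer side (mm/memcontrol.c, not in this diff):
	 * the kmem.slabinfo cgroup file iterates the per-memcg cache list
	 * via the callbacks defined above.  Entry shown is illustrative.
	 */
	static struct cftype mem_cgroup_legacy_files[] = {
		/* ... other entries ... */
		{
			.name = "kmem.slabinfo",
			.seq_start = memcg_slab_start,
			.seq_next = memcg_slab_next,
			.seq_stop = memcg_slab_stop,
			.seq_show = memcg_slab_show,
		},
		{ },	/* terminator */
	};

With this wiring, reading the per-cgroup slabinfo file walks only memcg->kmem_caches under slab_mutex, matching the "stat file access" update mentioned in the commit message.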