From e6100a4590bfc0a91c8e882f73538c9c0d987a46 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 26 Mar 2024 11:37:39 +0100
Subject: mm, slab: move slab_memcg hooks to mm/memcontrol.c

The hooks make multiple calls to functions in mm/memcontrol.c, including
to the current_obj_cgroup() marked __always_inline. It might be faster to
make a single call to the hook in mm/memcontrol.c instead. The hooks also
don't use almost anything from mm/slub.c. obj_full_size() can move with
the hooks, and cache_vmstat_idx() to the internal mm/slab.h

Link: https://lkml.kernel.org/r/20240326-slab-memcg-v3-2-d85d2563287a@suse.cz
Signed-off-by: Vlastimil Babka
Reviewed-by: Roman Gushchin
Cc: Al Viro
Cc: Chengming Zhou
Cc: Christian Brauner
Cc: Christoph Lameter
Cc: Chuck Lever
Cc: David Rientjes
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Jan Kara
Cc: Jeff Layton
Cc: Johannes Weiner
Cc: Joonsoo Kim
Cc: Josh Poimboeuf
Cc: Kees Cook
Cc: Linus Torvalds
Cc: Michal Hocko
Cc: Muchun Song
Cc: Pekka Enberg
Cc: Shakeel Butt
Signed-off-by: Andrew Morton
---
 mm/memcontrol.c |  90 +++++++++++++++++++++++++++++++++++++++++++++++++
 mm/slab.h       |  13 +++++++
 mm/slub.c       | 103 ++------------------------------------------------------
 3 files changed, 105 insertions(+), 101 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2aa2ff6cf77..29fa5b17a1ef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3556,6 +3556,96 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 	refill_obj_stock(objcg, size, true);
 }
 
+static inline size_t obj_full_size(struct kmem_cache *s)
+{
+	/*
+	 * For each accounted object there is an extra space which is used
+	 * to store obj_cgroup membership. Charge it too.
+	 */
+	return s->size + sizeof(struct obj_cgroup *);
+}
+
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+				  gfp_t flags, size_t size, void **p)
+{
+	struct obj_cgroup *objcg;
+	struct slab *slab;
+	unsigned long off;
+	size_t i;
+
+	/*
+	 * The obtained objcg pointer is safe to use within the current scope,
+	 * defined by current task or set_active_memcg() pair.
+	 * obj_cgroup_get() is used to get a permanent reference.
+	 */
+	objcg = current_obj_cgroup();
+	if (!objcg)
+		return true;
+
+	/*
+	 * slab_alloc_node() avoids the NULL check, so we might be called with a
+	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
+	 * the whole requested size.
+	 * return success as there's nothing to free back
+	 */
+	if (unlikely(*p == NULL))
+		return true;
+
+	flags &= gfp_allowed_mask;
+
+	if (lru) {
+		int ret;
+		struct mem_cgroup *memcg;
+
+		memcg = get_mem_cgroup_from_objcg(objcg);
+		ret = memcg_list_lru_alloc(memcg, lru, flags);
+		css_put(&memcg->css);
+
+		if (ret)
+			return false;
+	}
+
+	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
+		return false;
+
+	for (i = 0; i < size; i++) {
+		slab = virt_to_slab(p[i]);
+
+		if (!slab_obj_exts(slab) &&
+		    alloc_slab_obj_exts(slab, s, flags, false)) {
+			obj_cgroup_uncharge(objcg, obj_full_size(s));
+			continue;
+		}
+
+		off = obj_to_index(s, slab, p[i]);
+		obj_cgroup_get(objcg);
+		slab_obj_exts(slab)[off].objcg = objcg;
+		mod_objcg_state(objcg, slab_pgdat(slab),
+				cache_vmstat_idx(s), obj_full_size(s));
+	}
+
+	return true;
+}
+
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+			    void **p, int objects, struct slabobj_ext *obj_exts)
+{
+	for (int i = 0; i < objects; i++) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
+
+		off = obj_to_index(s, slab, p[i]);
+		objcg = obj_exts[off].objcg;
+		if (!objcg)
+			continue;
+
+		obj_exts[off].objcg = NULL;
+		obj_cgroup_uncharge(objcg, obj_full_size(s));
+		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
+				-obj_full_size(s));
+		obj_cgroup_put(objcg);
+	}
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
index 1343bfa12cee..411251b9bdd1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -558,6 +558,9 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
 }
 
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			gfp_t gfp, bool new_slab);
+
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
@@ -567,7 +570,17 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
+static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+				  gfp_t flags, size_t size, void **p);
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+			    void **p, int objects, struct slabobj_ext *obj_exts);
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		     enum node_stat_item idx, int nr);
 #endif
diff --git a/mm/slub.c b/mm/slub.c
index f83efdbe15b7..3e33ff900d35 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1859,12 +1859,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 #endif
 #endif /* CONFIG_SLUB_DEBUG */
 
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
-	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
@@ -1923,8 +1917,8 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
 				__GFP_ACCOUNT | __GFP_NOFAIL)
 
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-			       gfp_t gfp, bool new_slab)
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			gfp_t gfp, bool new_slab)
 {
 	unsigned int objects = objs_per_slab(s, slab);
 	unsigned long new_exts;
@@ -2083,78 +2077,6 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEMCG_KMEM
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
-	/*
-	 * For each accounted object there is an extra space which is used
-	 * to store obj_cgroup membership. Charge it too.
-	 */
-	return s->size + sizeof(struct obj_cgroup *);
-}
-
-static bool __memcg_slab_post_alloc_hook(struct kmem_cache *s,
-					 struct list_lru *lru,
-					 gfp_t flags, size_t size,
-					 void **p)
-{
-	struct obj_cgroup *objcg;
-	struct slab *slab;
-	unsigned long off;
-	size_t i;
-
-	/*
-	 * The obtained objcg pointer is safe to use within the current scope,
-	 * defined by current task or set_active_memcg() pair.
-	 * obj_cgroup_get() is used to get a permanent reference.
-	 */
-	objcg = current_obj_cgroup();
-	if (!objcg)
-		return true;
-
-	/*
-	 * slab_alloc_node() avoids the NULL check, so we might be called with a
-	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
-	 * the whole requested size.
-	 * return success as there's nothing to free back
-	 */
-	if (unlikely(*p == NULL))
-		return true;
-
-	flags &= gfp_allowed_mask;
-
-	if (lru) {
-		int ret;
-		struct mem_cgroup *memcg;
-
-		memcg = get_mem_cgroup_from_objcg(objcg);
-		ret = memcg_list_lru_alloc(memcg, lru, flags);
-		css_put(&memcg->css);
-
-		if (ret)
-			return false;
-	}
-
-	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
-		return false;
-
-	for (i = 0; i < size; i++) {
-		slab = virt_to_slab(p[i]);
-
-		if (!slab_obj_exts(slab) &&
-		    alloc_slab_obj_exts(slab, s, flags, false)) {
-			obj_cgroup_uncharge(objcg, obj_full_size(s));
-			continue;
-		}
-
-		off = obj_to_index(s, slab, p[i]);
-		obj_cgroup_get(objcg);
-		slab_obj_exts(slab)[off].objcg = objcg;
-		mod_objcg_state(objcg, slab_pgdat(slab),
-				cache_vmstat_idx(s), obj_full_size(s));
-	}
-
-	return true;
-}
 
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
 
@@ -2181,27 +2103,6 @@ bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 	return false;
 }
 
-static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-				   void **p, int objects,
-				   struct slabobj_ext *obj_exts)
-{
-	for (int i = 0; i < objects; i++) {
-		struct obj_cgroup *objcg;
-		unsigned int off;
-
-		off = obj_to_index(s, slab, p[i]);
-		objcg = obj_exts[off].objcg;
-		if (!objcg)
-			continue;
-
-		obj_exts[off].objcg = NULL;
-		obj_cgroup_uncharge(objcg, obj_full_size(s));
-		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
-				-obj_full_size(s));
-		obj_cgroup_put(objcg);
-	}
-}
-
 static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			  int objects)
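
A note for readers coming to this from outside the slab allocator (not part of the
patch itself): the accounting performed by the relocated hooks boils down to charging
obj_full_size(s) = s->size + sizeof(struct obj_cgroup *) per accounted object, taken
once for a whole bulk allocation in the post-alloc hook and released object by object
in the free hook. The stand-alone C sketch below models only that arithmetic; here
struct kmem_cache is stubbed down to the single field the math needs, and the object
size and count are hypothetical values, so this is an illustration rather than kernel
code.

/*
 * Stand-alone model of the charge/uncharge arithmetic used by the moved
 * memcg slab hooks.  Not kernel code: "struct kmem_cache" is a stub and
 * the numbers are made up for demonstration.
 */
#include <stdio.h>
#include <stddef.h>

struct obj_cgroup;		/* opaque; only the pointer size matters here */

struct kmem_cache {
	size_t size;		/* per-object size of the cache */
};

/*
 * Mirrors obj_full_size(): each accounted object is charged for itself
 * plus the obj_cgroup pointer kept in the slab's obj_exts vector.
 */
static size_t obj_full_size(const struct kmem_cache *s)
{
	return s->size + sizeof(struct obj_cgroup *);
}

int main(void)
{
	struct kmem_cache s = { .size = 192 };	/* hypothetical cache */
	size_t objects = 16;			/* one bulk allocation */
	size_t charged, uncharged = 0;

	/* post-alloc hook: one charge covers the whole bulk request */
	charged = objects * obj_full_size(&s);

	/* free hook: each object is uncharged individually */
	for (size_t i = 0; i < objects; i++)
		uncharged += obj_full_size(&s);

	printf("charged   %zu bytes for %zu objects\n", charged, objects);
	printf("uncharged %zu bytes on free\n", uncharged);
	return charged != uncharged;	/* 0 when the books balance */
}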