From b347aa7b57477f71c740e2bbc6d1078a7109ba23 Mon Sep 17 00:00:00 2001
From: Vasily Averin
Date: Fri, 3 Jun 2022 06:21:49 +0300
Subject: mm/tracing: add 'accounted' entry into output of allocation tracepoints

Slab caches marked with SLAB_ACCOUNT force accounting for every
allocation from such a cache even if the __GFP_ACCOUNT flag is not
passed. Unfortunately, at the moment this flag is not visible in the
ftrace output, which makes it difficult to analyze the accounted
allocations.

This patch adds a boolean "accounted" entry into the trace output and
sets it to 'true' for calls that use the __GFP_ACCOUNT flag and for
allocations from caches marked with SLAB_ACCOUNT. It is set to 'false'
when accounting is disabled in the config.

Signed-off-by: Vasily Averin
Acked-by: Shakeel Butt
Acked-by: Roman Gushchin
Acked-by: Muchun Song
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lore.kernel.org/r/c418ed25-65fe-f623-fbf8-1676528859ed@openvz.org
Signed-off-by: Vlastimil Babka
---
 mm/slob.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'mm/slob.c')

diff --git a/mm/slob.c b/mm/slob.c
index f47811f09aca..56421fe461c4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
 					    PAGE_SIZE << get_order(c->size),
 					    flags, node);
 	}
--
cgit v1.2.3
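The new 'accounted' value is computed in the tracepoint definitions rather than in mm/slob.c, so it does not appear in the diff above (this view is limited to 'mm/slob.c'). A minimal sketch of how that assignment can look is shown below; the TP_fast_assign() placement, the __entry->accounted field name and the local variable names are illustrative assumptions, not lines taken from this patch:

	/*
	 * Sketch: the entry is true when the caller passed __GFP_ACCOUNT or
	 * the cache carries SLAB_ACCOUNT; IS_ENABLED(CONFIG_MEMCG_KMEM)
	 * forces it to false when kernel memory accounting is compiled out.
	 */
	__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
			     ((gfp_flags & __GFP_ACCOUNT) ||
			      (s && s->flags & SLAB_ACCOUNT)) : false;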
From 3041808b522031dccfbd898e520109569f039860 Mon Sep 17 00:00:00 2001
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date: Wed, 15 Jun 2022 00:26:35 +0900
Subject: mm/slab_common: move generic bulk alloc/free functions to SLOB

Now that only SLOB uses __kmem_cache_{alloc,free}_bulk(), move them to
SLOB. No functional change intended.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 mm/slab.h        |  9 ---------
 mm/slab_common.c | 27 ---------------------------
 mm/slob.c        | 25 +++++++++++++++++++++----
 3 files changed, 21 insertions(+), 40 deletions(-)

(limited to 'mm/slob.c')

diff --git a/mm/slab.h b/mm/slab.h
index a8d5eb1c323f..4ec82bec15ec 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 {
 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6c9aac5d8f4a..17996649cfe3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -104,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 }
 #endif
 
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		if (s)
-			kmem_cache_free(s, p[i]);
-		else
-			kfree(p[i]);
-	}
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
-			    void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		void *x = p[i] = kmem_cache_alloc(s, flags);
-		if (!x) {
-			__kmem_cache_free_bulk(s, i, p);
-			return 0;
-		}
-	}
-	return i;
-}
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
diff --git a/mm/slob.c b/mm/slob.c
index 56421fe461c4..2bd4f476c340 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,16 +692,33 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 {
-	__kmem_cache_free_bulk(s, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		if (s)
+			kmem_cache_free(s, p[i]);
+		else
+			kfree(p[i]);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 			  void **p)
 {
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+
+		if (!x) {
+			kmem_cache_free_bulk(s, i, p);
+			return 0;
+		}
+	}
+	return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
--
cgit v1.2.3
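For context, a typical caller of the bulk API whose generic fallback is folded into SLOB here looks roughly like the sketch below. This is an illustrative sketch only: use_bulk_api(), my_cache, NR_OBJS and init_object() are placeholders, not names from the patch.

	#define NR_OBJS 16

	static int use_bulk_api(struct kmem_cache *my_cache)
	{
		void *objs[NR_OBJS];
		int i, nr;

		/*
		 * kmem_cache_alloc_bulk() returns the number of objects
		 * allocated, or 0 on failure; as the SLOB code above shows,
		 * any partially allocated objects are freed before 0 is
		 * returned, so no extra cleanup is needed here.
		 */
		nr = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR_OBJS, objs);
		if (!nr)
			return -ENOMEM;

		for (i = 0; i < nr; i++)
			init_object(objs[i]);	/* placeholder per-object init */

		kmem_cache_free_bulk(my_cache, nr, objs);
		return 0;
	}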