path: root/mm/slob.c
author    Hyeonggon Yoo <42.hyeyoo@gmail.com>  2022-08-17 13:18:24 +0300
committer Vlastimil Babka <vbabka@suse.cz>     2022-09-01 12:44:26 +0300
commit    2c1d697fb8ba6d2d44f914d4268ae1ccdf025f1b (patch)
tree      a2e3b945b6a96e59c319083739023ba52e932fcb /mm/slob.c
parent    11e9734bcb6a7361943f993eba4e97f5812120d8 (diff)
download  linux-2c1d697fb8ba6d2d44f914d4268ae1ccdf025f1b.tar.xz
mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using
Drop the kmem_alloc event class, and define kmalloc and kmem_cache_alloc
using the TRACE_EVENT() macro. This patch then does the following:

 - Do not pass a pointer to struct kmem_cache to trace_kmalloc.
   The gfp flag is enough to know if the allocation is accounted or not.
 - Avoid dereferencing s->object_size and s->size when not using the
   kmem_cache_alloc event.
 - Avoid dereferencing s->name when not using the kmem_cache_free event.
 - Adjust s->size to SLOB_UNITS(s->size) * SLOB_UNIT in SLOB.

Cc: Vasily Averin <vasily.averin@linux.dev>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
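Moving kmalloc and kmem_cache_alloc out of the shared kmem_alloc event class
and into standalone TRACE_EVENT() definitions is what lets the kmalloc
tracepoint stop taking a struct kmem_cache pointer: whether an allocation is
accounted can be derived from the gfp flags alone. Below is a condensed,
illustrative sketch of what the reworked kmalloc event can look like; the
authoritative definition lives in include/trace/events/kmem.h, and the exact
field layout there may differ:

/*
 * Sketch only: TRACE_EVENT() expands via the tracepoint machinery in
 * include/trace/ and is not standalone-compilable outside the kernel tree.
 */
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(unsigned long,	call_site)
		__field(const void *,	ptr)
		__field(size_t,		bytes_req)
		__field(size_t,		bytes_alloc)
		__field(unsigned long,	gfp_flags)
		__field(int,		node)
		__field(bool,		accounted)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		/* No kmem_cache dereference: accounting is visible in gfp */
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  (gfp_flags & __GFP_ACCOUNT) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		  (void *)__entry->call_site, __entry->ptr,
		  __entry->bytes_req, __entry->bytes_alloc,
		  show_gfp_flags(__entry->gfp_flags), __entry->node,
		  __entry->accounted ? "true" : "false")
);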
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 3208c56d8f82..771af84576bf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,8 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc(caller, ret, NULL, size,
-			      size + minalign, gfp, node);
+		trace_kmalloc(caller, ret, size, size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -516,8 +515,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc(caller, ret, NULL, size,
-			      PAGE_SIZE << order, gfp, node);
+		trace_kmalloc(caller, ret, size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
@@ -594,6 +592,9 @@ int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
+
+	/* Actual size allocated */
+	c->size = SLOB_UNITS(c->size) * SLOB_UNIT;
 	c->flags = flags;
 	return 0;
 }
@@ -608,14 +609,10 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
-				       SLOB_UNITS(c->size) * SLOB_UNIT,
-				       flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
-				       PAGE_SIZE << get_order(c->size),
-				       flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
 	}
 
 	if (b && c->ctor) {
@@ -671,7 +668,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
-	trace_kmem_cache_free(_RET_IP_, b, c->name);
+	trace_kmem_cache_free(_RET_IP_, b, c);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
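The rounding added to __kmem_cache_create() is what makes the simplified
trace_kmem_cache_alloc() calls above safe: c->size now already holds the size
SLOB will actually hand out, so the event can derive it from the cache pointer
instead of each call site recomputing SLOB_UNITS(c->size) * SLOB_UNIT. A
minimal userspace sketch of that arithmetic, assuming the usual mm/slob.c
definitions (SLOB_UNIT == sizeof(slob_t), a two-byte unit when page offsets
fit in an s16):

#include <stdio.h>

/* Mirrors mm/slob.c for illustration only */
typedef short slobidx_t;
typedef struct { slobidx_t units; } slob_t;

#define SLOB_UNIT		sizeof(slob_t)
#define SLOB_UNITS(size)	(((size) + SLOB_UNIT - 1) / SLOB_UNIT)

int main(void)
{
	size_t object_size = 13;	/* hypothetical cache object size */

	/* Round up to a whole number of slob units once, at cache
	 * creation time, as the patch now stores into c->size. */
	size_t allocated = SLOB_UNITS(object_size) * SLOB_UNIT;

	printf("requested=%zu allocated=%zu\n", object_size, allocated);
	/* With 2-byte units: requested=13 allocated=14 */
	return 0;
}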