author		Ingo Molnar <mingo@elte.hu>	2009-04-01 23:54:19 +0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-02 02:49:02 +0400
commit		8302294f43250dc337108c51882a6007f2b1e2e0 (patch)
tree		85acd4440799c46a372df9cad170fa0c21e59096 /mm/slob.c
parent		4fe70410d9a219dabb47328effccae7e7f2a6e26 (diff)
parent		2e572895bf3203e881356a4039ab0fa428ed2639 (diff)
download	linux-8302294f43250dc337108c51882a6007f2b1e2e0.tar.xz
Merge branch 'tracing/core-v2' into tracing-for-linus
Conflicts:
	include/linux/slub_def.h
	lib/Kconfig.debug
	mm/slob.c
	mm/slub.c
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	35
1 files changed, 30 insertions, 5 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 7a3411524dac..4dd6516447f2 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
         unsigned int *m;
         int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+        void *ret;
 
         lockdep_trace_alloc(gfp);
 
@@ -482,12 +484,17 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         return ZERO_SIZE_PTR;
 
                 m = slob_alloc(size + align, gfp, align, node);
+
                 if (!m)
                         return NULL;
                 *m = size;
-                return (void *)m + align;
+                ret = (void *)m + align;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, size + align, gfp, node);
         } else {
-                void *ret;
+                unsigned int order = get_order(size);
 
                 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
                 if (ret) {
@@ -495,8 +502,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         page = virt_to_page(ret);
                         page->private = size;
                 }
-                return ret;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << order, gfp, node);
         }
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
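Note: pieced together, the three hunks above leave __kmalloc_node reading roughly as sketched below. This is a reconstruction for readability, not part of the diff: the outer size test, the !size early return, and the struct page declaration come from context lines the hunks do not show and are assumed here.

/*
 * Reconstruction sketch of __kmalloc_node after this merge.
 * Lines marked "assumed" are inferred from unshown context, not from the diff.
 */
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
        unsigned int *m;
        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        void *ret;

        lockdep_trace_alloc(gfp);

        if (size < PAGE_SIZE - align) {         /* assumed: small requests stay in slob pages */
                if (!size)                      /* assumed */
                        return ZERO_SIZE_PTR;

                m = slob_alloc(size + align, gfp, align, node);

                if (!m)
                        return NULL;
                *m = size;
                ret = (void *)m + align;

                /* report requested size vs. size actually reserved */
                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
                                          _RET_IP_, ret,
                                          size, size + align, gfp, node);
        } else {
                unsigned int order = get_order(size);

                ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
                if (ret) {
                        struct page *page;      /* assumed */
                        page = virt_to_page(ret);
                        page->private = size;
                }

                /* whole pages were handed out, so report PAGE_SIZE << order */
                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
                                          _RET_IP_, ret,
                                          size, PAGE_SIZE << order, gfp, node);
        }

        return ret;
}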
@@ -514,6 +526,8 @@ void kfree(const void *block)
                 slob_free(m, *m + align);
         } else
                 put_page(&sp->page);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -583,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
         void *b;
 
-        if (c->size < PAGE_SIZE)
+        if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-        else
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          SLOB_UNITS(c->size) * SLOB_UNIT,
+                                          flags, node);
+        } else {
                 b = slob_new_pages(flags, get_order(c->size), node);
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          PAGE_SIZE << get_order(c->size),
+                                          flags, node);
+        }
 
         if (c->ctor)
                 c->ctor(b);
@@ -622,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
         } else {
                 __kmem_cache_free(b, c->size);
         }
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
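Note: every hunk follows one pattern. Each allocation path reports the call site (_RET_IP_), the returned object, the size the caller asked for, the size slob actually handed out, the gfp flags and the NUMA node; each free path reports only the call site and the pointer. The shapes below are inferred from the call sites in this diff; the enum name, parameter names and exact types are assumptions, not copied from <trace/kmemtrace.h>.

/*
 * Hook shapes as implied by the call sites above (sketch, not the header).
 * Only the two type constants used in this diff are listed; the real enum
 * may carry more values.
 */
enum kmemtrace_type_id {
        KMEMTRACE_TYPE_KMALLOC,         /* kmalloc()/kfree() events */
        KMEMTRACE_TYPE_CACHE,           /* kmem_cache_alloc()/free() events */
};

void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
                               unsigned long call_site,  /* _RET_IP_ */
                               const void *ptr,          /* object returned to the caller */
                               size_t bytes_req,         /* size requested */
                               size_t bytes_alloc,       /* size actually reserved */
                               gfp_t gfp_flags,
                               int node);

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
                         unsigned long call_site,
                         const void *ptr);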