-rw-r--r--	mm/slob.c	24
1 file changed, 10 insertions, 14 deletions
diff --git a/mm/slob.c b/mm/slob.c
index a08e4681fd0d..06a5ec70e728 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
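
For reference, the detection this comment describes lives in kfree(). A minimal sketch of that dispatch, simplified from the function as it stood around this commit and assuming the slob helpers already in this file (slob_free() and the alignment header); kfree_sketch() is a hypothetical name:

/* Sketch: how kfree() tells small slob objects from big compound pages. */
void kfree_sketch(const void *block)
{
	struct page *sp = virt_to_page(block);

	if (PageSlab(sp)) {
		/* Small object: the 4-byte size header sits just before it. */
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);

		slob_free(m, *m + align);
	} else {
		/* PAGE_SIZE or larger: came straight from alloc_pages(). */
		put_page(sp);
	}
}

The upstream kfree() wraps the same dispatch in NULL/ZERO_SIZE_PTR checks and tracing.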
@@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
-		if (ret) {
-			struct page *page;
-			page = virt_to_page(ret);
-			page->private = size;
-		}

 		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
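
With __GFP_COMP set just above, the allocated pages form a compound page whose order is recorded in the page metadata, so the page-level size can be recomputed on demand instead of being cached in page->private. A minimal sketch of that recomputation, with big_kmalloc_size() as a hypothetical helper name:

/* Sketch: with __GFP_COMP, the size is recoverable from the page itself,
 * so caching it in page->private is redundant.
 */
static size_t big_kmalloc_size(const void *block)
{
	struct page *sp = virt_to_page(block);

	return PAGE_SIZE << compound_order(sp);
}

Note this yields the whole-pages size rather than the byte-exact request, which is what ksize() is meant to report: the usable size of the allocation.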
@@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree);

 size_t ksize(const void *block)
 {
 	struct page *sp;
+	int align;
+	unsigned int *m;

 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;

 	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		return SLOB_UNITS(*m) * SLOB_UNIT;
-	} else
-		return sp->private;
+	if (unlikely(!PageSlab(sp)))
+		return PAGE_SIZE << compound_order(sp);
+
+	align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	m = (unsigned int *)(block - align);
+	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
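
A usage sketch of the new ksize() behaviour for a larger-than-page allocation; ksize_example() is a hypothetical caller and the arithmetic assumes PAGE_SIZE == 4096:

/* Sketch: after this patch, ksize() on a big allocation reports the
 * full compound-page size, not the requested byte count.
 */
void ksize_example(void)
{
	char *p = kmalloc(5000, GFP_KERNEL);	/* > PAGE_SIZE: compound page */

	if (!p)
		return;
	/* get_order(5000) == 1, so ksize(p) returns 8192 here. */
	pr_info("usable size: %zu\n", ksize(p));
	kfree(p);
}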