path: root/mm/slab.h
author    Hyeonggon Yoo <42.hyeyoo@gmail.com>    2022-08-17 13:18:19 +0300
committer Vlastimil Babka <vbabka@suse.cz>       2022-08-24 17:11:41 +0300
commit    d6a71648dbc0ca5520cba16a8fdce8d37ae74218 (patch)
tree      b15d1f620d72682e4c169024d798307f5792a62d /mm/slab.h
parent    c4cab557521a73bd803e5c6f613b4e00bd3c4662 (diff)
download  linux-d6a71648dbc0ca5520cba16a8fdce8d37ae74218.tar.xz
mm/slab: kmalloc: pass requests larger than order-1 page to page allocator
There is not much benefit to serving large objects from kmalloc(). Let's pass large requests to the page allocator, as SLUB does, for better maintenance of common code.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
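To illustrate the idea (this sketch is not part of the patch): a request larger than KMALLOC_MAX_CACHE_SIZE, i.e. larger than an order-1 page, can be handed straight to the page allocator instead of a per-size kmalloc cache, the way SLUB already handles large requests via kmalloc_large(). The function name example_alloc below is purely hypothetical.

#include <linux/slab.h>

/*
 * Illustrative sketch only, not the patch itself: route oversized
 * requests to the page allocator, keep small ones in kmalloc caches.
 */
void *example_alloc(size_t size, gfp_t flags)
{
	if (size > KMALLOC_MAX_CACHE_SIZE)
		/* Larger than an order-1 page: served by the page allocator. */
		return kmalloc_large(size, flags);

	/* Small requests keep using the per-size kmalloc caches. */
	return kmalloc(size, flags);
}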
Diffstat (limited to 'mm/slab.h')
-rw-r--r--    mm/slab.h    3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 801a207a5cd7..9808d537f6ba 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -660,6 +660,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+
+void free_large_kmalloc(struct folio *folio, void *object);
+
 #endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
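The hunk above only adds the declaration; the definition lands in common SL*B code elsewhere in this series. A minimal sketch of what such a helper does, assuming the usual folio, KASAN/kmemleak and vmstat helpers; the exact bookkeeping shown here is an assumption for illustration, not a copy of the patch:

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>

void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	/* Tell the debugging hooks the object is gone. */
	kmemleak_free(object);
	kasan_kfree_large(object);

	/* Undo the slab memory accounting done at allocation time. */
	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));

	/* Return the pages straight to the page allocator. */
	__free_pages(folio_page(folio, 0), order);
}

Keeping this helper in common code is what lets both SLAB and SLUB free such objects through the same path once large requests bypass the kmalloc caches.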