path: root/include/linux/slub_def.h
author     Christoph Lameter <cl@linux.com>        2013-01-10 23:14:19 +0400
committer  Pekka Enberg <penberg@kernel.org>       2013-02-01 14:32:07 +0400
commit     95a05b428cc675694321c8f762591984f3fd2b1e (patch)
tree       3a74205955201dd5e1abb0a85104d95cafa49df6 /include/linux/slub_def.h
parent     6a67368c36e2c0c2578ba62f6264ab739af08cce (diff)
download   linux-95a05b428cc675694321c8f762591984f3fd2b1e.tar.xz
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest object kept in the kmalloc arrays for SLAB and SLUB.

Differentiate between the maximum size for which a slab cache is used (KMALLOC_MAX_CACHE_SIZE) and the maximum allocatable size (KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER).

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
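For context, a minimal sketch of what the companion definitions this patch relies on in include/linux/slab.h might look like for SLUB; the exact expressions are assumptions inferred from the limits the diff below removes (SLUB_MAX_SIZE == 2 * PAGE_SIZE, SLUB_PAGE_SHIFT == PAGE_SHIFT + 2):

#include <linux/mm.h>	/* PAGE_SHIFT, PAGE_SIZE, MAX_ORDER */

/* Assumed values, not the verbatim patch.
 *
 * Largest allocation still served from a kmalloc slab cache:
 * 2 * PAGE_SIZE under SLUB, matching the old SLUB_MAX_SIZE. */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)

/* Largest allocation kmalloc() supports at all; bigger requests
 * fall through to the page allocator. */
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)

#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

With these values, the new kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] array in the diff has the same PAGE_SHIFT + 2 slots as the old kmalloc_caches[SLUB_PAGE_SHIFT].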
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  19
1 file changed, 3 insertions(+), 16 deletions(-)
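The inline kmalloc() fast path changed below branches on the new constant at compile time. A hypothetical caller, to illustrate where the boundary falls (the function example_alloc is invented for this sketch):

#include <linux/slab.h>

static void example_alloc(void)
{
	/* Constant size <= KMALLOC_MAX_CACHE_SIZE (2 * PAGE_SIZE under
	 * SLUB): resolved at compile time to a slab cache allocation. */
	void *small = kmalloc(512, GFP_KERNEL);

	/* Constant size > KMALLOC_MAX_CACHE_SIZE: the inline wrapper
	 * calls kmalloc_large(), i.e. the page allocator path. */
	void *big = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

	kfree(small);	/* kfree(NULL) is a no-op, so no error checks here */
	kfree(big);
}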
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 99c3e05ff1f0..032028ef9a34 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
@@ -139,7 +126,7 @@ struct kmem_cache {
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
/*
* Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
- if (size > SLUB_MAX_SIZE)
+ if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) &&
- size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
+ size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)