author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 22:56:07 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 22:56:07 +0400
commit    ae664dba2724e59ddd66291b895f7370e28b9a7a (patch)
tree      d6e214bdc9999bcb8b0a067053aa6934cfd9d60e /include
parent    a2faf2fc534f57ba26bc4d613795236ed4f5fb1c (diff)
parent    08afe22c68d8c07e8e31ee6491c37f36199ba14b (diff)
download  linux-ae664dba2724e59ddd66291b895f7370e28b9a7a.tar.xz
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "This contains preparational work from Christoph Lameter and Glauber
  Costa for SLAB memcg and cleanups and improvements from Ezequiel
  Garcia and Joonsoo Kim.

  Please note that the SLOB cleanup commit from Arnd Bergmann already
  appears in your tree but I had also merged it myself which is why it
  shows up in the shortlog."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm/sl[aou]b: Common alignment code
  slab: Use the new create_boot_cache function to simplify bootstrap
  slub: Use statically allocated kmem_cache boot structure for bootstrap
  mm, sl[au]b: create common functions for boot slab creation
  slab: Simplify bootstrap
  slub: Use correct cpu_slab on dead cpu
  mm: fix slab.c kernel-doc warnings
  mm/slob: use min_t() to compare ARCH_SLAB_MINALIGN
  slab: Ignore internal flags in cache creation
  mm/slob: Use free_page instead of put_page for page-size kmalloc allocations
  mm/sl[aou]b: Move common kmem_cache_size() to slab.h
  mm/slob: Use object_size field in kmem_cache_size()
  mm/slob: Drop usage of page->private for storing page-sized allocations
  slub: Commonize slab_cache field in struct page
  sl[au]b: Process slabinfo_show in common code
  mm/sl[au]b: Move print_slabinfo_header to slab_common.c
  mm/sl[au]b: Move slabinfo processing to slab_common.c
  slub: remove one code path and reduce lock contention in __slab_free()
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h  7
-rw-r--r--  include/linux/slab.h      9
-rw-r--r--  include/linux/slab_def.h  6
3 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7d9ebb7cc982..f8f5162a3571 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -128,10 +128,7 @@ struct page {
 		};
 
 		struct list_head list;	/* slobs list of pages */
-		struct {		/* slab fields */
-			struct kmem_cache *slab_cache;
-			struct slab *slab_page;
-		};
+		struct slab *slab_page; /* slab fields */
 	};
 
 	/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
 #if USE_SPLIT_PTLOCKS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
 
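The struct page change above is the payoff of the "Commonize slab_cache field" patch: SLAB and SLUB now share a single page->slab_cache field instead of SLUB's private slab pointer, so object-to-cache lookups can live in allocator-independent code. A minimal sketch of that pattern (obj_to_cache() is a hypothetical name, not from this series):

#include <linux/mm.h>		/* virt_to_head_page() */
#include <linux/mm_types.h>	/* struct page */
#include <linux/slab.h>		/* struct kmem_cache */

/*
 * Illustrative helper, not part of this diff: map an allocated object
 * back to the cache it came from.  After the commonization above,
 * page->slab_cache is valid under both SLAB and SLUB, so this needs
 * no per-allocator #ifdef.
 */
static inline struct kmem_cache *obj_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	return page->slab_cache;
}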
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 83d1a1454b7e..743a10415122 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }
 
+/*
+ * Determine the size of a slab object
+ */
+static inline unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+	return s->object_size;
+}
+
 void __init kmem_cache_init_late(void);
 
 #endif	/* _LINUX_SLAB_H */
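With kmem_cache_size() now a static inline in slab.h returning the cache's object_size, all three allocators (SLAB, SLUB, SLOB) report the size through one common definition. A hedged usage sketch; the "widget" cache and all names below are hypothetical, not from this diff:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct widget {				/* hypothetical object type */
	int id;
	char name[16];
};

static struct kmem_cache *widget_cache;	/* hypothetical cache */

static int __init widget_cache_init(void)
{
	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!widget_cache)
		return -ENOMEM;

	/* Reports sizeof(struct widget): the usable object size, not
	 * the (possibly larger) aligned allocation size. */
	pr_info("widget object size: %u\n", kmem_cache_size(widget_cache));
	return 0;
}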
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cc290f0bdb34..45c0356fdc8c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -89,9 +89,13 @@ struct kmem_cache {
 	 * (see kmem_cache_init())
 	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
 	 * is statically defined, so we reserve the max number of cpus.
+	 *
+	 * We also need to guarantee that the list is able to accommodate a
+	 * pointer for each node since "nodelists" uses the remainder of
+	 * available pointers.
 	 */
 	struct kmem_list3 **nodelists;
-	struct array_cache *array[NR_CPUS];
+	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
 	/*
 	 * Do not add fields after array[]
 	 */
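The enlarged array[] is what lets SLAB bootstrap itself before kmalloc() works: the MAX_NUMNODES tail of the statically sized array holds the per-node nodelists pointers for the boot cache. Roughly the move mm/slab.c makes with it, reduced to a sketch (boot_cache_setup() is an illustrative name; the real logic lives in kmem_cache_init() and may differ in detail):

/*
 * Sketch: a boot-time kmem_cache is statically defined, so its
 * nodelists cannot be allocated yet.  The extra MAX_NUMNODES slots
 * reserved in array[] above give early init a place to point
 * nodelists at, directly behind the per-cpu array_cache pointers.
 */
static void __init boot_cache_setup(struct kmem_cache *cachep)
{
	cachep->nodelists =
		(struct kmem_list3 **)&cachep->array[nr_cpu_ids];
}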