path: root/mm/slub.c
author    Alexey Dobriyan <adobriyan@gmail.com>  2017-11-16 04:32:18 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 05:21:01 +0300
commit    d50112edde1d0c621520e53747044009f11c656b (patch)
tree      ca4092f2126ac85a63647a48e43ecbf34bb69782  /mm/slub.c
parent    a3ba074447824625d3a267a5fffd2ea21556ebf4 (diff)
download  linux-d50112edde1d0c621520e53747044009f11c656b.tar.xz
slab, slub, slob: add slab_flags_t
Add sparse-checked slab_flags_t for struct kmem_cache::flags (SLAB_POISON,
etc).  SLAB is bloated temporarily by switching to "unsigned long", but
only temporarily.

Link: http://lkml.kernel.org/r/20171021100225.GA22428@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
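The point of the change is that slab flags become a distinct, sparse-checked
integer type rather than a bare unsigned long, which is why the
__OBJECT_POISON and __CMPXCHG_DOUBLE definitions in the diff gain __force
casts.  The sketch below is a minimal, self-contained illustration of that
mechanism, not the kernel's actual headers: the demo_* names and flag values
are invented for the example, and the real slab_flags_t typedef is introduced
elsewhere in this series (the diff shown here covers only mm/slub.c).

/*
 * Minimal sketch of a sparse-checked "bitwise" flag type, in the spirit
 * of slab_flags_t (or gfp_t).  This is NOT the kernel's code: the
 * demo_* names and values are made up for illustration.  Under sparse
 * (__CHECKER__), the typedef becomes a distinct type, so mixing it with
 * plain integers warns unless the constant is cast with __force --
 * which is what the new __OBJECT_POISON / __CMPXCHG_DOUBLE macros do.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise demo_flags_t;

/* Each flag is a __force-cast constant of the bitwise type. */
#define DEMO_POISON	((demo_flags_t __force)0x80000000U)
#define DEMO_RED_ZONE	((demo_flags_t __force)0x00000400U)

/* Helpers take the typed flags, never a bare integer. */
static int demo_poison_enabled(demo_flags_t flags)
{
	return (flags & DEMO_POISON) != 0;
}

int main(void)
{
	demo_flags_t flags = DEMO_POISON | DEMO_RED_ZONE;

	/*
	 * gcc would also accept demo_poison_enabled(0x80000000U), but
	 * sparse warns about passing a plain integer where demo_flags_t
	 * is expected -- that extra checking is what the patch buys.
	 */
	return demo_poison_enabled(flags) ? 0 : 1;
}

With the type in place, the diff below is largely mechanical: every
"unsigned long" used for cache flags in mm/slub.c becomes slab_flags_t.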
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 025bbb540f3d..482d1daa9088 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -193,8 +193,10 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000UL /* Poison object */
-#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
+/* Poison object */
+#define __OBJECT_POISON ((slab_flags_t __force)0x80000000UL)
+/* Use cmpxchg_double */
+#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000UL)
/*
* Tracking user of a slab.
@@ -485,9 +487,9 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
-static int slub_debug;
+static slab_flags_t slub_debug;
#endif
static char *slub_debug_slabs;
@@ -1289,8 +1291,8 @@ out:
__setup("slub_debug", setup_slub_debug);
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
/*
@@ -1322,8 +1324,8 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -3477,7 +3479,7 @@ static void set_cpu_partial(struct kmem_cache *s)
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
- unsigned long flags = s->flags;
+ slab_flags_t flags = s->flags;
size_t size = s->object_size;
int order;
@@ -3593,7 +3595,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
return !!oo_objects(s->oo);
}
-static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
+static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
@@ -4245,7 +4247,7 @@ void __init kmem_cache_init_late(void)
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s, *c;
@@ -4275,7 +4277,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
int err;