path: root/mm/slab.h
author	Vlastimil Babka <vbabka@suse.cz>	2023-10-03 10:59:48 +0300
committer	Vlastimil Babka <vbabka@suse.cz>	2023-12-06 13:57:21 +0300
commit	19975f83412fbb9b1458f3dfbf16ca043a57788a (patch)
tree	5b3a19dca7077e2e06926a20c588802695b64a7d /mm/slab.h
parent	7ef08ae8277c66657127844179912214c67fb4bc (diff)
download	linux-19975f83412fbb9b1458f3dfbf16ca043a57788a.tar.xz
mm/slab: move the rest of slub_def.h to mm/slab.h
mm/slab.h is the only place to include include/linux/slub_def.h which has
allowed switching between SLAB and SLUB. Now we can simply move the contents
over and remove slub_def.h.

Use this opportunity to fix up some whitespace (alignment) issues.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slab.h')
-rw-r--r--	mm/slab.h	138
1 file changed, 137 insertions(+), 1 deletion(-)
diff --git a/mm/slab.h b/mm/slab.h
index 014c36ea51fa..3a8d13c099fa 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -209,7 +209,143 @@ static inline size_t slab_size(const struct slab *slab)
return PAGE_SIZE << slab_order(slab);
}
-#include <linux/slub_def.h>
+#include <linux/kfence.h>
+#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
+#include <linux/local_lock.h>
+
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c) ((c)->partial)
+
+#define slub_set_percpu_partial(c, p) \
+({ \
+ slub_percpu_partial(c) = (p)->next; \
+})
+
+#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c) NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c) NULL
+#endif // CONFIG_SLUB_CPU_PARTIAL
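These macros hide whether a per-CPU partial list exists at all. When CONFIG_SLUB_CPU_PARTIAL is set, c->partial is the head of a singly linked list of slabs chained through slab->next, so slub_set_percpu_partial(c, p) advances the head past p. A minimal sketch of the resulting pop-the-head pattern (locking omitted; the helper name take_percpu_partial() is invented for illustration and is not part of this patch):

	/* c is the per-CPU structure referenced by cpu_slab further down */
	static inline struct slab *take_percpu_partial(struct kmem_cache_cpu *c)
	{
		struct slab *slab = slub_percpu_partial(c);

		if (slab)
			slub_set_percpu_partial(c, slab);	/* c->partial = slab->next */
		return slab;
	}
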
+
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+ unsigned int x;
+};
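Both quantities share the single word x. A minimal sketch of the packing, modeled on the oo_order()/oo_objects() helpers in mm/slub.c (the 16-bit split and the oo_pack() name are shown for illustration and are not added by this patch):

	#define OO_SHIFT	16
	#define OO_MASK		((1U << OO_SHIFT) - 1)

	/* pack a page order and an object count into one word */
	static inline struct kmem_cache_order_objects oo_pack(unsigned int order,
							       unsigned int objects)
	{
		struct kmem_cache_order_objects oo = { (order << OO_SHIFT) | objects };

		return oo;
	}

	static inline unsigned int oo_order(struct kmem_cache_order_objects oo)
	{
		return oo.x >> OO_SHIFT;	/* page order of a slab */
	}

	static inline unsigned int oo_objects(struct kmem_cache_order_objects oo)
	{
		return oo.x & OO_MASK;		/* objects per slab of that order */
	}

Because everything fits in one unsigned int, a value of this type can be read or updated with a single plain store, which is what the comment above means by "atomically updated or read".
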
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+#ifndef CONFIG_SLUB_TINY
+ struct kmem_cache_cpu __percpu *cpu_slab;
+#endif
+ /* Used for retrieving partial slabs, etc. */
+ slab_flags_t flags;
+ unsigned long min_partial;
+ unsigned int size; /* Object size including metadata */
+ unsigned int object_size; /* Object size without metadata */
+ struct reciprocal_value reciprocal_size;
+ unsigned int offset; /* Free pointer offset */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
+ /* Number of per cpu partial slabs to keep around */
+ unsigned int cpu_partial_slabs;
+#endif
+ struct kmem_cache_order_objects oo;
+
+ /* Allocation and freeing of slabs */
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+ int refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *object); /* Object constructor */
+ unsigned int inuse; /* Offset to metadata */
+ unsigned int align; /* Alignment */
+ unsigned int red_left_pad; /* Left redzone padding size */
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ unsigned long random;
+#endif
+
+#ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+ */
+ unsigned int remote_node_defrag_ratio;
+#endif
+
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ unsigned int *random_seq;
+#endif
+
+#ifdef CONFIG_KASAN_GENERIC
+ struct kasan_cache kasan_info;
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+#endif
+
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
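Most of these fields are filled in at cache creation time. As a hedged illustration of how the constructor and usercopy members (ctor, useroffset, usersize) relate to the public API, here is a sketch using kmem_cache_create_usercopy(); the foo names are hypothetical and the snippet is not part of this patch:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/stddef.h>

	struct foo {
		spinlock_t lock;
		char payload[64];	/* only this region may be copied to/from user space */
	};

	static void foo_ctor(void *object)
	{
		struct foo *f = object;

		spin_lock_init(&f->lock);
	}

	static struct kmem_cache *foo_cache;

	static int foo_cache_init(void)
	{
		foo_cache = kmem_cache_create_usercopy("foo",
				sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct foo, payload),
				sizeof_field(struct foo, payload),
				foo_ctor);
		return foo_cache ? 0 : -ENOMEM;
	}

The name, size, alignment, flags, usercopy window and constructor passed here end up in the corresponding members of struct kmem_cache above.
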
+
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *s);
+void sysfs_slab_release(struct kmem_cache *s);
+#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
+static inline void sysfs_slab_release(struct kmem_cache *s) { }
+#endif
+
+void *fixup_red_left(struct kmem_cache *s, void *p);
+
+static inline void *nearest_obj(struct kmem_cache *cache,
+ const struct slab *slab, void *x)
+{
+ void *object = x - (x - slab_address(slab)) % cache->size;
+ void *last_object = slab_address(slab) +
+ (slab->objects - 1) * cache->size;
+ void *result = (unlikely(object > last_object)) ? last_object : object;
+
+ result = fixup_red_left(cache, result);
+ return result;
+}
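nearest_obj() maps an arbitrary pointer within a slab back to the start of the object containing it, clamps past-the-end pointers to the last object, and then skips any left red zone via fixup_red_left(). A standalone worked example of the core rounding step, using made-up addresses and sizes rather than anything from this patch:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uintptr_t slab_base = 0x1000;	/* stand-in for slab_address(slab) */
		unsigned int size = 96;		/* stand-in for cache->size */
		uintptr_t x = slab_base + 250;	/* pointer 250 bytes into the slab */

		/* 250 % 96 == 58, so x rounds down to offset 192, the third object */
		uintptr_t object = x - (x - slab_base) % size;

		assert(object == slab_base + 2 * size);
		return 0;
	}
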
+
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
+{
+ if (is_kfence_address(obj))
+ return 0;
+ return __obj_to_index(cache, slab_address(slab), obj);
+}
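__obj_to_index() avoids a hardware division by using the reciprocal_size field precomputed in struct kmem_cache: reciprocal_divide(offset, reciprocal_size) yields offset / size via a multiply and shifts (see include/linux/reciprocal_div.h). A sketch of where that field would come from, assuming it is set once the final object size is known; the helper name below is invented for illustration:

	#include <linux/reciprocal_div.h>

	static void init_reciprocal_size(struct kmem_cache *s)
	{
		/*
		 * After this, reciprocal_divide(off, s->reciprocal_size)
		 * equals off / s->size for any offset seen inside a slab.
		 */
		s->reciprocal_size = reciprocal_value(s->size);
	}
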
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ return slab->objects;
+}
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>