Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  |  69
1 file changed, 38 insertions(+), 31 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index fac07382d3a6..2ef88bbf56a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -975,20 +975,20 @@ static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned int orig_size)
{
void *p = kasan_reset_tag(object);
+ unsigned int kasan_meta_size;
if (!slub_debug_orig_size(s))
return;
-#ifdef CONFIG_KASAN_GENERIC
/*
- * KASAN could save its free meta data in object's data area at
- * offset 0, if the size is larger than 'orig_size', it will
- * overlap the data redzone in [orig_size+1, object_size], and
- * the check should be skipped.
+ * KASAN can save its free meta data inside of the object at offset 0.
+ * If this meta data size is larger than 'orig_size', it will overlap
+ * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+ * 'orig_size' to be at least as big as KASAN's meta data.
*/
- if (kasan_metadata_size(s, true) > orig_size)
- orig_size = s->object_size;
-#endif
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size > orig_size)
+ orig_size = kasan_meta_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
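The hunk above can drop the CONFIG_KASAN_GENERIC #ifdef because kasan_metadata_size(s, true) returns 0 when no free meta is stored inside the object; and instead of disabling the whole data redzone (the old orig_size = s->object_size), orig_size is only raised far enough to clear KASAN's meta. A minimal sketch of the resulting clamp, assuming the usual layout with the free meta at object offset 0 (clamp_orig_size() is a hypothetical helper, not part of the patch):

/*
 * Object layout sketch with slub_debug data redzone and generic KASAN:
 *
 *   offset 0                adjusted orig_size         s->object_size
 *   [ KASAN free meta ..... ][ data redzone .......... ]
 *
 * The redzone starts no earlier than kasan_metadata_size(s, true),
 * so redzone poisoning never overwrites KASAN's meta.
 */
static unsigned int clamp_orig_size(struct kmem_cache *s,
				    unsigned int orig_size)
{
	/* 0 when no in-object free meta is stored */
	unsigned int kasan_meta_size = kasan_metadata_size(s, true);

	return max(orig_size, kasan_meta_size);
}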
@@ -1297,7 +1297,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
{
u8 *p = object;
u8 *endobject = object + s->object_size;
- unsigned int orig_size;
+ unsigned int orig_size, kasan_meta_size;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1327,12 +1327,23 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
}
if (s->flags & SLAB_POISON) {
- if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
- (!check_bytes_and_report(s, slab, p, "Poison", p,
- POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, slab, p, "End Poison",
- p + s->object_size - 1, POISON_END, 1)))
- return 0;
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
+ /*
+ * KASAN can save its free meta data inside of the
+ * object at offset 0. Thus, skip checking the part of
+ * the redzone that overlaps with the meta data.
+ */
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size < s->object_size - 1 &&
+ !check_bytes_and_report(s, slab, p, "Poison",
+ p + kasan_meta_size, POISON_FREE,
+ s->object_size - kasan_meta_size - 1))
+ return 0;
+ if (kasan_meta_size < s->object_size &&
+ !check_bytes_and_report(s, slab, p, "End Poison",
+ p + s->object_size - 1, POISON_END, 1))
+ return 0;
+ }
/*
* check_pad_bytes cleans up on its own.
*/
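With the split above, the POISON_FREE scan now starts after any in-object KASAN free meta, and only the final byte is still expected to hold POISON_END; each sub-check is skipped entirely when the meta covers its whole range. A standalone sketch of the same range logic (poison_range_ok() is hypothetical and stands in for the kernel's check_bytes_and_report()):

#include <linux/types.h>
#include <linux/poison.h>	/* POISON_FREE == 0x6b, POISON_END == 0xa5 */

/*
 * Hypothetical stand-in for the checks above: bytes [meta, size - 1)
 * must be POISON_FREE and the last byte POISON_END; everything below
 * 'meta' belongs to KASAN's free meta and is skipped.
 */
static bool poison_range_ok(const u8 *obj, size_t size, size_t meta)
{
	size_t i;

	for (i = meta; i + 1 < size; i++)
		if (obj[i] != POISON_FREE)
			return false;

	return meta >= size || obj[size - 1] == POISON_END;
}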
@@ -2159,9 +2170,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
setup_object_debug(s, object);
object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
+ kasan_unpoison_new_object(s, object);
s->ctor(object);
- kasan_poison_object_data(s, object);
+ kasan_poison_new_object(s, object);
}
return object;
}
@@ -2176,11 +2187,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct slab *slab;
unsigned int order = oo_order(oo);
- if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_pages(flags, order);
- else
- folio = (struct folio *)__alloc_pages_node(node, flags, order);
-
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
if (!folio)
return NULL;
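The removed branch is redundant because alloc_pages_node() itself falls back to the local memory node when it is passed NUMA_NO_NODE. A rough sketch of that fallback, written from the helper in include/linux/gfp.h (alloc_pages_node_sketch() is only illustrative):

/*
 * Roughly what alloc_pages_node() does; with this fallback, callers no
 * longer need their own NUMA_NO_NODE special case.
 */
static inline struct page *alloc_pages_node_sketch(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();	/* nearest memory node of this CPU */

	return __alloc_pages_node(nid, gfp_mask, order);
}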
@@ -3908,7 +3915,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
*/
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- struct page *page;
+ struct folio *folio;
void *ptr = NULL;
unsigned int order = get_order(size);
@@ -3916,10 +3923,10 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
flags = kmalloc_fix_flags(flags);
flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, order);
- if (page) {
- ptr = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
+ if (folio) {
+ ptr = folio_address(folio);
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
PAGE_SIZE << order);
}
@@ -4368,9 +4375,9 @@ static void free_large_kmalloc(struct folio *folio, void *object)
kasan_kfree_large(object);
kmsan_kfree_large(object);
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
+ folio_put(folio);
}
/**
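The two hunks above convert the large-kmalloc alloc/free pair from raw pages to folios: a __GFP_COMP allocation returns the head page of a compound page, which can be used as a folio, so the cast is valid, and folio_address(), lruvec_stat_mod_folio() and folio_put() replace page_address(), mod_lruvec_page_state() and __free_pages() one for one. A minimal sketch of the paired bookkeeping (the *_sketch() helpers are hypothetical, not part of the patch):

/*
 * The same byte count, PAGE_SIZE << order, is added to
 * NR_SLAB_UNRECLAIMABLE_B at alloc time and subtracted at free time.
 */
static void *large_alloc_sketch(gfp_t flags, int node, unsigned int order)
{
	struct folio *folio;

	/* __GFP_COMP: one compound allocation, head page usable as a folio */
	folio = (struct folio *)alloc_pages_node(node, flags | __GFP_COMP, order);
	if (!folio)
		return NULL;

	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
			      PAGE_SIZE << order);
	return folio_address(folio);
}

static void large_free_sketch(struct folio *folio, unsigned int order)
{
	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	folio_put(folio);	/* frees the folio once the last reference drops */
}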
@@ -4783,7 +4790,7 @@ static inline int calculate_order(unsigned int size)
* Doh this slab cannot be placed using slub_max_order.
*/
order = get_order(size);
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
return order;
return -ENOSYS;
}
@@ -5311,7 +5318,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, (int *)&slub_max_order);
- slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
+ slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
if (slub_min_order > slub_max_order)
slub_min_order = slub_max_order;