From 022012dcf44209074af97b6ae531a10c08736b31 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:13 +0100 Subject: lib/stackdepot, kasan: add flags to __stack_depot_save and rename Change the bool can_alloc argument of __stack_depot_save to a u32 argument that accepts a set of flags. The following patch will add another flag to stack_depot_save_flags besides the existing STACK_DEPOT_FLAG_CAN_ALLOC. Also rename the function to stack_depot_save_flags, as __stack_depot_save is a cryptic name, Link: https://lkml.kernel.org/r/645fa15239621eebbd3a10331e5864b718839512.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 8b06bab5c406..b29d46b83d1f 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -368,7 +368,7 @@ static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { } #endif -depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc); +depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); void kasan_set_track(struct kasan_track *track, gfp_t flags); void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags); void kasan_save_free_info(struct kmem_cache *cache, void *object); -- cgit v1.2.3 From 5d4c6ac94694096cdfb528f05a3019a1a423b3a4 Mon Sep 17 00:00:00 2001 From: Juntong Deng Date: Mon, 27 Nov 2023 21:17:31 +0000 Subject: kasan: record and report more information Record and report more information to help us find the cause of the bug and to help us correlate the error with other system events. 
This patch adds recording and showing CPU number and timestamp at allocation and free (controlled by CONFIG_KASAN_EXTRA_INFO). The timestamps in the report use the same format and source as printk. Error occurrence timestamp is already implicit in the printk log, and CPU number is already shown by dump_stack_lvl, so there is no need to add it. In order to record CPU number and timestamp at allocation and free, corresponding members need to be added to the relevant data structures, which will lead to increased memory consumption. In Generic KASAN, members are added to struct kasan_track. Since in most cases, alloc meta is stored in the redzone and free meta is stored in the object or the redzone, memory consumption will not increase much. In SW_TAGS KASAN and HW_TAGS KASAN, members are added to struct kasan_stack_ring_entry. Memory consumption increases as the size of struct kasan_stack_ring_entry increases (this part of the memory is allocated by memblock), but since this is configurable, it is up to the user to choose. Link: https://lkml.kernel.org/r/VI1P193MB0752BD991325D10E4AB1913599BDA@VI1P193MB0752.EURP193.PROD.OUTLOOK.COM Signed-off-by: Juntong Deng Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- lib/Kconfig.kasan | 21 +++++++++++++++++++++ mm/kasan/common.c | 8 ++++++++ mm/kasan/kasan.h | 8 ++++++++ mm/kasan/report.c | 12 ++++++++++++ mm/kasan/report_tags.c | 15 +++++++++++++++ mm/kasan/tags.c | 15 +++++++++++++++ 6 files changed, 79 insertions(+) (limited to 'mm/kasan/kasan.h') diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 935eda08b1e1..8653f5c38be7 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -207,4 +207,25 @@ config KASAN_MODULE_TEST A part of the KASAN test suite that is not integrated with KUnit. Incompatible with Hardware Tag-Based KASAN. 
+config KASAN_EXTRA_INFO + bool "Record and report more information" + depends on KASAN + help + Record and report more information to help us find the cause of the + bug and to help us correlate the error with other system events. + + Currently, the CPU number and timestamp are additionally + recorded for each heap block at allocation and free time, and + 8 bytes will be added to each metadata structure that records + allocation or free information. + + In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add + 16 bytes of additional memory consumption, and each kmalloc-32 + object will add 8 bytes of additional memory consumption, not + affecting other larger objects. + + In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size + boot parameter, it will add 8 * stack_ring_size bytes of additional + memory consumption. + endif # KASAN diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b5d8bd26fced..fe6c4b43ad9f 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -49,6 +50,13 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) void kasan_set_track(struct kasan_track *track, gfp_t flags) { +#ifdef CONFIG_KASAN_EXTRA_INFO + u32 cpu = raw_smp_processor_id(); + u64 ts_nsec = local_clock(); + + track->cpu = cpu; + track->timestamp = ts_nsec >> 3; +#endif /* CONFIG_KASAN_EXTRA_INFO */ track->pid = current->pid; track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index b29d46b83d1f..5e298e3ac909 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -187,6 +187,10 @@ static inline bool kasan_requires_meta(void) struct kasan_track { u32 pid; depot_stack_handle_t stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 cpu:20; + u64 timestamp:44; +#endif /* CONFIG_KASAN_EXTRA_INFO */ }; enum kasan_report_type { @@ -278,6 +282,10 @@ struct 
kasan_stack_ring_entry { u32 pid; depot_stack_handle_t stack; bool is_free; +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 cpu:20; + u64 timestamp:44; +#endif /* CONFIG_KASAN_EXTRA_INFO */ }; struct kasan_stack_ring { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index e77facb62900..a938237f6882 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -262,7 +262,19 @@ static void print_error_description(struct kasan_report_info *info) static void print_track(struct kasan_track *track, const char *prefix) { +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 ts_nsec = track->timestamp; + unsigned long rem_usec; + + ts_nsec <<= 3; + rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000; + + pr_err("%s by task %u on cpu %d at %lu.%06lus:\n", + prefix, track->pid, track->cpu, + (unsigned long)ts_nsec, rem_usec); +#else pr_err("%s by task %u:\n", prefix, track->pid); +#endif /* CONFIG_KASAN_EXTRA_INFO */ if (track->stack) stack_depot_print(track->stack); else diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 55154743f915..979f284c2497 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -27,6 +27,15 @@ static const char *get_common_bug_type(struct kasan_report_info *info) return "invalid-access"; } +#ifdef CONFIG_KASAN_EXTRA_INFO +static void kasan_complete_extra_report_info(struct kasan_track *track, + struct kasan_stack_ring_entry *entry) +{ + track->cpu = entry->cpu; + track->timestamp = entry->timestamp; +} +#endif /* CONFIG_KASAN_EXTRA_INFO */ + void kasan_complete_mode_report_info(struct kasan_report_info *info) { unsigned long flags; @@ -73,6 +82,9 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) info->free_track.pid = entry->pid; info->free_track.stack = entry->stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + kasan_complete_extra_report_info(&info->free_track, entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ free_found = true; /* @@ -88,6 +100,9 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) info->alloc_track.pid = 
entry->pid; info->alloc_track.stack = entry->stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + kasan_complete_extra_report_info(&info->alloc_track, entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ alloc_found = true; /* diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 739ae997463d..c13b198b8302 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +94,17 @@ void __init kasan_init_tags(void) } } +#ifdef CONFIG_KASAN_EXTRA_INFO +static void save_extra_info(struct kasan_stack_ring_entry *entry) +{ + u32 cpu = raw_smp_processor_id(); + u64 ts_nsec = local_clock(); + + entry->cpu = cpu; + entry->timestamp = ts_nsec >> 3; +} +#endif /* CONFIG_KASAN_EXTRA_INFO */ + static void save_stack_info(struct kmem_cache *cache, void *object, gfp_t gfp_flags, bool is_free) { @@ -128,6 +140,9 @@ next: entry->pid = current->pid; entry->stack = stack; entry->is_free = is_free; +#ifdef CONFIG_KASAN_EXTRA_INFO + save_extra_info(entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ entry->ptr = object; -- cgit v1.2.3 From a414d4286f3400aa05631c4931eb3feba83e29e8 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 22:19:51 +0100 Subject: kasan: handle concurrent kasan_record_aux_stack calls kasan_record_aux_stack can be called concurrently on the same object. This might lead to a race condition when rotating the saved aux stack trace handles, which in turns leads to incorrect accounting of stack depot handles and refcount underflows in the stack depot code. Fix by introducing a raw spinlock to protect the aux stack trace handles in kasan_record_aux_stack. 
Link: https://lkml.kernel.org/r/1606b960e2f746862d1f459515972f9695bf448a.1703020707.git.andreyknvl@google.com Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") Signed-off-by: Andrey Konovalov Reported-by: Tetsuo Handa Reported-by: syzbot+186b55175d8360728234@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000784b1c060b0074a2@google.com/ Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 32 +++++++++++++++++++++++++++++--- mm/kasan/kasan.h | 8 ++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 54e20b2bc3e1..55e6b5db2cae 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -471,8 +472,18 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object) struct kasan_free_meta *free_meta; alloc_meta = kasan_get_alloc_meta(cache, object); - if (alloc_meta) + if (alloc_meta) { __memset(alloc_meta, 0, sizeof(*alloc_meta)); + + /* + * Temporarily disable KASAN bug reporting to allow instrumented + * raw_spin_lock_init to access aux_lock, which resides inside + * of a redzone. 
+ */ + kasan_disable_current(); + raw_spin_lock_init(&alloc_meta->aux_lock); + kasan_enable_current(); + } free_meta = kasan_get_free_meta(cache, object); if (free_meta) __memset(free_meta, 0, sizeof(*free_meta)); @@ -502,6 +513,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) struct kmem_cache *cache; struct kasan_alloc_meta *alloc_meta; void *object; + depot_stack_handle_t new_handle, old_handle; + unsigned long flags; if (is_kfence_address(addr) || !slab) return; @@ -512,9 +525,22 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) if (!alloc_meta) return; - stack_depot_put(alloc_meta->aux_stack[1]); + new_handle = kasan_save_stack(0, depot_flags); + + /* + * Temporarily disable KASAN bug reporting to allow instrumented + * spinlock functions to access aux_lock, which resides inside of a + * redzone. + */ + kasan_disable_current(); + raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags); + old_handle = alloc_meta->aux_stack[1]; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); + alloc_meta->aux_stack[0] = new_handle; + raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags); + kasan_enable_current(); + + stack_depot_put(old_handle); } void kasan_record_aux_stack(void *addr) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 5e298e3ac909..69e4f5e58e33 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) @@ -249,6 +250,13 @@ struct kasan_global { struct kasan_alloc_meta { struct kasan_track alloc_track; /* Free track is stored in kasan_free_meta. */ + /* + * aux_lock protects aux_stack from accesses from concurrent + * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping + * on RT kernels, as kasan_record_aux_stack_noalloc can be called from + * non-sleepable contexts. 
+ */ + raw_spinlock_t aux_lock; depot_stack_handle_t aux_stack[2]; }; -- cgit v1.2.3 From c20e3feadd4505c46a87dcabef5b129a97992466 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:45 +0100 Subject: kasan: improve kasan_non_canonical_hook Make kasan_non_canonical_hook to be more sure in its report (i.e. say "probably" instead of "maybe") if the address belongs to the shadow memory region for kernel addresses. Also use the kasan_shadow_to_mem helper to calculate the original address. Also improve the comments in kasan_non_canonical_hook. Link: https://lkml.kernel.org/r/af94ef3cb26f8c065048b3158d9f20f6102bfaaa.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 6 ++++++ mm/kasan/report.c | 34 ++++++++++++++++++++-------------- 2 files changed, 26 insertions(+), 14 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 69e4f5e58e33..0e209b823b2c 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -307,6 +307,12 @@ struct kasan_stack_ring { #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) +static __always_inline bool addr_in_shadow(const void *addr) +{ + return addr >= (void *)KASAN_SHADOW_START && + addr < (void *)KASAN_SHADOW_END; +} + #ifndef kasan_shadow_to_mem static inline const void *kasan_shadow_to_mem(const void *shadow_addr) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index a938237f6882..4bc7ac9fb37d 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -635,37 +635,43 @@ void kasan_report_async(void) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) /* - * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high - * canonical half of the address space) cause out-of-bounds shadow memory reads - * before the actual access. 
For addresses in the low canonical half of the - * address space, as well as most non-canonical addresses, that out-of-bounds - * shadow memory access lands in the non-canonical part of the address space. - * Help the user figure out what the original bogus pointer was. + * With compiler-based KASAN modes, accesses to bogus pointers (outside of the + * mapped kernel address space regions) cause faults when KASAN tries to check + * the shadow memory before the actual memory access. This results in cryptic + * GPF reports, which are hard for users to interpret. This hook helps users to + * figure out what the original bogus pointer was. */ void kasan_non_canonical_hook(unsigned long addr) { unsigned long orig_addr; const char *bug_type; + /* + * All addresses that came as a result of the memory-to-shadow mapping + * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET. + */ if (addr < KASAN_SHADOW_OFFSET) return; - orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT; + orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr); + /* * For faults near the shadow address for NULL, we can be fairly certain * that this is a KASAN shadow memory access. - * For faults that correspond to shadow for low canonical addresses, we - * can still be pretty sure - that shadow region is a fairly narrow - * chunk of the non-canonical address space. - * But faults that look like shadow for non-canonical addresses are a - * really large chunk of the address space. In that case, we still - * print the decoded address, but make it clear that this is not - * necessarily what's actually going on. + * For faults that correspond to the shadow for low or high canonical + * addresses, we can still be pretty sure: these shadow regions are a + * fairly narrow chunk of the address space. + * But the shadow for non-canonical addresses is a really large chunk + * of the address space. 
For this case, we still print the decoded + * address, but make it clear that this is not necessarily what's + * actually going on. */ if (orig_addr < PAGE_SIZE) bug_type = "null-ptr-deref"; else if (orig_addr < TASK_SIZE) bug_type = "probably user-memory-access"; + else if (addr_in_shadow((void *)addr)) + bug_type = "probably wild-memory-access"; else bug_type = "maybe wild-memory-access"; pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type, -- cgit v1.2.3 From 3067b919ed81e17b8b8fb932c43f200775b1d545 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:46 +0100 Subject: kasan: clean up kasan_requires_meta Currently, for Generic KASAN mode, kasan_requires_meta is defined to return kasan_stack_collection_enabled. Even though the Generic mode does not support disabling stack trace collection, kasan_requires_meta was implemented in this way to make it easier to implement the disabling for the Generic mode in the future. However, for the Generic mode, the per-object metadata also stores the quarantine link. So even if disabling stack collection is implemented, the per-object metadata will still be required. Fix kasan_requires_meta to return true for the Generic mode and update the related comments. This change does not fix any observable bugs but rather just brings the code to a cleaner state. 
Link: https://lkml.kernel.org/r/8086623407095ac1c82377a2107dcc5845f99cfa.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 0e209b823b2c..38af25b9c89c 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -101,21 +101,21 @@ static inline bool kasan_sample_page_alloc(unsigned int order) #ifdef CONFIG_KASAN_GENERIC -/* Generic KASAN uses per-object metadata to store stack traces. */ +/* + * Generic KASAN uses per-object metadata to store alloc and free stack traces + * and the quarantine link. + */ static inline bool kasan_requires_meta(void) { - /* - * Technically, Generic KASAN always collects stack traces right now. - * However, let's use kasan_stack_collection_enabled() in case the - * kasan.stacktrace command-line argument is changed to affect - * Generic KASAN. - */ - return kasan_stack_collection_enabled(); + return true; } #else /* CONFIG_KASAN_GENERIC */ -/* Tag-based KASAN modes do not use per-object metadata. */ +/* + * Tag-based KASAN modes do not use per-object metadata: they use the stack + * ring to store alloc and free stack traces and do not use qurantine. + */ static inline bool kasan_requires_meta(void) { return false; -- cgit v1.2.3 From 1a55836a1b002298714dc84032b3d19d9bbc2d66 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:47 +0100 Subject: kasan: update kasan_poison documentation comment The comment for kasan_poison says that the size argument gets aligned by the function to KASAN_GRANULE_SIZE, which is wrong: the argument must be already aligned when it is passed to the function. Remove the invalid part of the comment. 
Link: https://lkml.kernel.org/r/992a302542059fc40d86ea560eac413ecb31b6a1.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 38af25b9c89c..1c34511090d7 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -513,8 +513,6 @@ static inline bool kasan_byte_accessible(const void *addr) * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range * @init - whether to initialize the memory range (only for hardware tag-based) - * - * The size gets aligned to KASAN_GRANULE_SIZE before marking the range. */ void kasan_poison(const void *addr, size_t size, u8 value, bool init); -- cgit v1.2.3 From 99f3fe416c71aa3d5aba69174c274309ededfd42 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:48 +0100 Subject: kasan: clean up is_kfence_address checks 1. Do not untag addresses that are passed to is_kfence_address: it tolerates tagged addresses. 2. Move is_kfence_address checks from internal KASAN functions (kasan_poison/unpoison, etc.) to external-facing ones. Note that kasan_poison/unpoison are never called outside of KASAN/slab code anymore; the comment is wrong, so drop it. 3. Simplify/reorganize the code around the updated checks. 
Link: https://lkml.kernel.org/r/1065732315ef4e141b6177d8f612232d4d5bc0ab.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 26 +++++++++++++++++--------- mm/kasan/kasan.h | 16 ++-------------- mm/kasan/shadow.c | 12 ------------ 3 files changed, 19 insertions(+), 35 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index f4255e807b74..86adf80cc11a 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -79,6 +79,9 @@ EXPORT_SYMBOL(kasan_disable_current); void __kasan_unpoison_range(const void *address, size_t size) { + if (is_kfence_address(address)) + return; + kasan_unpoison(address, size, false); } @@ -218,9 +221,6 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, tagged_object = object; object = kasan_reset_tag(object); - if (is_kfence_address(object)) - return false; - if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) { kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE); return true; @@ -247,7 +247,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) { - bool buggy_object = poison_slab_object(cache, object, ip, init); + bool buggy_object; + + if (is_kfence_address(object)) + return false; + + buggy_object = poison_slab_object(cache, object, ip, init); return buggy_object ? true : kasan_quarantine_put(cache, object); } @@ -359,7 +364,7 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object if (unlikely(object == NULL)) return NULL; - if (is_kfence_address(kasan_reset_tag(object))) + if (is_kfence_address(object)) return (void *)object; /* The object has already been unpoisoned by kasan_slab_alloc(). 
*/ @@ -417,7 +422,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag if (unlikely(object == ZERO_SIZE_PTR)) return (void *)object; - if (is_kfence_address(kasan_reset_tag(object))) + if (is_kfence_address(object)) return (void *)object; /* @@ -483,6 +488,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) return true; } + if (is_kfence_address(ptr)) + return false; + slab = folio_slab(folio); return !poison_slab_object(slab->slab_cache, ptr, ip, false); } @@ -492,9 +500,6 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) struct slab *slab; gfp_t flags = 0; /* Might be executing under a lock. */ - if (is_kfence_address(kasan_reset_tag(ptr))) - return; - slab = virt_to_slab(ptr); /* @@ -507,6 +512,9 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) return; } + if (is_kfence_address(ptr)) + return; + /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ unpoison_slab_object(slab->slab_cache, ptr, size, flags); diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 1c34511090d7..5fbcc1b805bc 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -466,35 +466,23 @@ static inline u8 kasan_random_tag(void) { return 0; } static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init) { - addr = kasan_reset_tag(addr); - - /* Skip KFENCE memory if called explicitly outside of sl*b. */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; if (WARN_ON(size & KASAN_GRANULE_MASK)) return; - hw_set_mem_tag_range((void *)addr, size, value, init); + hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init); } static inline void kasan_unpoison(const void *addr, size_t size, bool init) { u8 tag = get_tag(addr); - addr = kasan_reset_tag(addr); - - /* Skip KFENCE memory if called explicitly outside of sl*b. 
*/ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; size = round_up(size, KASAN_GRANULE_SIZE); - hw_set_mem_tag_range((void *)addr, size, tag, init); + hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init); } static inline bool kasan_byte_accessible(const void *addr) diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 0154d200be40..30625303d01a 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -135,10 +135,6 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init) */ addr = kasan_reset_tag(addr); - /* Skip KFENCE memory if called explicitly outside of sl*b. */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; if (WARN_ON(size & KASAN_GRANULE_MASK)) @@ -175,14 +171,6 @@ void kasan_unpoison(const void *addr, size_t size, bool init) */ addr = kasan_reset_tag(addr); - /* - * Skip KFENCE memory if called explicitly outside of sl*b. Also note - * that calls to ksize(), where size is not a multiple of machine-word - * size, would otherwise poison the invalid portion of the word. - */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; -- cgit v1.2.3 From 58ee788cb23738abd57d6327d0b5096df5a53d31 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:49 +0100 Subject: kasan: respect CONFIG_KASAN_VMALLOC for kasan_flag_vmalloc Never enable the kasan_flag_vmalloc static branch unless CONFIG_KASAN_VMALLOC is enabled. This does not fix any observable bugs (vmalloc annotations for the HW_TAGS mode are no-op with CONFIG_KASAN_VMALLOC disabled) but rather just cleans up the code. 
Link: https://lkml.kernel.org/r/3e5c933c8f6b59bd587efb05c407964be951772c.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/hw_tags.c | 7 +++++++ mm/kasan/kasan.h | 1 + 2 files changed, 8 insertions(+) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 06141bbc1e51..80f11a3eccd5 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -57,7 +57,11 @@ enum kasan_mode kasan_mode __ro_after_init; EXPORT_SYMBOL_GPL(kasan_mode); /* Whether to enable vmalloc tagging. */ +#ifdef CONFIG_KASAN_VMALLOC DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc); +#else +DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc); +#endif #define PAGE_ALLOC_SAMPLE_DEFAULT 1 #define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3 @@ -119,6 +123,9 @@ static int __init early_kasan_flag_vmalloc(char *arg) if (!arg) return -EINVAL; + if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) + return 0; + if (!strcmp(arg, "off")) kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF; else if (!strcmp(arg, "on")) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 5fbcc1b805bc..dee105ba32dd 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -49,6 +49,7 @@ DECLARE_PER_CPU(long, kasan_page_alloc_skip); static inline bool kasan_vmalloc_enabled(void) { + /* Static branch is never enabled with CONFIG_KASAN_VMALLOC disabled. */ return static_branch_likely(&kasan_flag_vmalloc); } -- cgit v1.2.3 From 14c99b990cccd924ae6101bd36b85f8456eabb10 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:50 +0100 Subject: kasan: check kasan_vmalloc_enabled in vmalloc tests Check that vmalloc poisoning is not disabled via command line when running the vmalloc-related KASAN tests. Skip the tests otherwise. 
Link: https://lkml.kernel.org/r/954456e50ac98519910c3e24a479a18eae62f8dd.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/hw_tags.c | 1 + mm/kasan/kasan.h | 5 +++++ mm/kasan/kasan_test.c | 11 ++++++++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 80f11a3eccd5..2b994092a2d4 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -62,6 +62,7 @@ DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc); #else DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc); #endif +EXPORT_SYMBOL_GPL(kasan_flag_vmalloc); #define PAGE_ALLOC_SAMPLE_DEFAULT 1 #define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3 diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index dee105ba32dd..acc1a9410f0d 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -83,6 +83,11 @@ static inline bool kasan_sample_page_alloc(unsigned int order) #else /* CONFIG_KASAN_HW_TAGS */ +static inline bool kasan_vmalloc_enabled(void) +{ + return IS_ENABLED(CONFIG_KASAN_VMALLOC); +} + static inline bool kasan_async_fault_possible(void) { return false; diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index aa994b62378b..9b1024a6e580 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -1552,6 +1552,9 @@ static void vmalloc_helpers_tags(struct kunit *test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + ptr = vmalloc(PAGE_SIZE); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -1586,6 +1589,9 @@ static void vmalloc_oob(struct kunit *test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + v_ptr = vmalloc(size); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr); @@ -1639,6 +1645,9 @@ static void vmap_tags(struct kunit 
*test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + p_page = alloc_pages(GFP_KERNEL, 1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page); p_ptr = page_address(p_page); @@ -1757,7 +1766,7 @@ static void match_all_not_assigned(struct kunit *test) free_pages((unsigned long)ptr, order); } - if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) + if (!kasan_vmalloc_enabled()) return; for (i = 0; i < 256; i++) { -- cgit v1.2.3 From 04afc540e58e8f66fd85b0b88af3e8ce286be17c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:38 +0100 Subject: kasan: reuse kasan_track in kasan_stack_ring_entry Avoid duplicating fields of kasan_track in kasan_stack_ring_entry: reuse the structure. Link: https://lkml.kernel.org/r/20231221183540.168428-2-andrey.konovalov@linux.dev Fixes: 5d4c6ac94694 ("kasan: record and report more information") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 7 +------ mm/kasan/report_tags.c | 12 ++++++------ mm/kasan/tags.c | 12 ++++++------ 3 files changed, 13 insertions(+), 18 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index acc1a9410f0d..a280bb04c0e6 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -293,13 +293,8 @@ struct kasan_free_meta { struct kasan_stack_ring_entry { void *ptr; size_t size; - u32 pid; - depot_stack_handle_t stack; + struct kasan_track track; bool is_free; -#ifdef CONFIG_KASAN_EXTRA_INFO - u64 cpu:20; - u64 timestamp:44; -#endif /* CONFIG_KASAN_EXTRA_INFO */ }; struct kasan_stack_ring { diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 979f284c2497..688b9d70b04a 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -31,8 +31,8 @@ static const char *get_common_bug_type(struct kasan_report_info *info) static 
void kasan_complete_extra_report_info(struct kasan_track *track, struct kasan_stack_ring_entry *entry) { - track->cpu = entry->cpu; - track->timestamp = entry->timestamp; + track->cpu = entry->track.cpu; + track->timestamp = entry->track.timestamp; } #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -80,8 +80,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (free_found) break; - info->free_track.pid = entry->pid; - info->free_track.stack = entry->stack; + info->free_track.pid = entry->track.pid; + info->free_track.stack = entry->track.stack; #ifdef CONFIG_KASAN_EXTRA_INFO kasan_complete_extra_report_info(&info->free_track, entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -98,8 +98,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (alloc_found) break; - info->alloc_track.pid = entry->pid; - info->alloc_track.stack = entry->stack; + info->alloc_track.pid = entry->track.pid; + info->alloc_track.stack = entry->track.stack; #ifdef CONFIG_KASAN_EXTRA_INFO kasan_complete_extra_report_info(&info->alloc_track, entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index c13b198b8302..c4d14dbf27c0 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -100,8 +100,8 @@ static void save_extra_info(struct kasan_stack_ring_entry *entry) u32 cpu = raw_smp_processor_id(); u64 ts_nsec = local_clock(); - entry->cpu = cpu; - entry->timestamp = ts_nsec >> 3; + entry->track.cpu = cpu; + entry->track.timestamp = ts_nsec >> 3; } #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -134,15 +134,15 @@ next: if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR)) goto next; /* Busy slot. 
*/ - old_stack = entry->stack; + old_stack = entry->track.stack; entry->size = cache->object_size; - entry->pid = current->pid; - entry->stack = stack; - entry->is_free = is_free; + entry->track.pid = current->pid; + entry->track.stack = stack; #ifdef CONFIG_KASAN_EXTRA_INFO save_extra_info(entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ + entry->is_free = is_free; entry->ptr = object; -- cgit v1.2.3 From fd4064f69708bc84d960b666896715fe86882c85 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:39 +0100 Subject: kasan: simplify saving extra info into tracks Avoid duplicating code for saving extra info into tracks: reuse the common function for this. Link: https://lkml.kernel.org/r/20231221183540.168428-3-andrey.konovalov@linux.dev Fixes: 5d4c6ac94694 ("kasan: record and report more information") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/common.c | 12 ++++++++++-- mm/kasan/generic.c | 4 ++-- mm/kasan/kasan.h | 3 ++- mm/kasan/tags.c | 17 +---------------- 4 files changed, 15 insertions(+), 21 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 86adf80cc11a..a486e9b1ac68 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -48,7 +48,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) return stack_depot_save_flags(entries, nr_entries, flags, depot_flags); } -void kasan_set_track(struct kasan_track *track, gfp_t flags) +void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack) { #ifdef CONFIG_KASAN_EXTRA_INFO u32 cpu = raw_smp_processor_id(); @@ -58,8 +58,16 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags) track->timestamp = ts_nsec >> 3; #endif /* CONFIG_KASAN_EXTRA_INFO */ track->pid = current->pid; - track->stack = kasan_save_stack(flags, + track->stack = stack; +} + +void 
kasan_save_track(struct kasan_track *track, gfp_t flags) +{ + depot_stack_handle_t stack; + + stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); + kasan_set_track(track, stack); } #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 5b3308127ce0..0e77c43c559e 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -579,7 +579,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) stack_depot_put(alloc_meta->aux_stack[1]); __memset(alloc_meta, 0, sizeof(*alloc_meta)); - kasan_set_track(&alloc_meta->alloc_track, flags); + kasan_save_track(&alloc_meta->alloc_track, flags); } void kasan_save_free_info(struct kmem_cache *cache, void *object) @@ -590,7 +590,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; - kasan_set_track(&free_meta->free_track, 0); + kasan_save_track(&free_meta->free_track, 0); /* The object was freed and has free track set. 
*/ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index a280bb04c0e6..814e89523c64 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -392,7 +392,8 @@ static inline void kasan_init_object_meta(struct kmem_cache *cache, const void * #endif depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); -void kasan_set_track(struct kasan_track *track, gfp_t flags); +void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack); +void kasan_save_track(struct kasan_track *track, gfp_t flags); void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags); void kasan_save_free_info(struct kmem_cache *cache, void *object); diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index c4d14dbf27c0..d65d48b85f90 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -94,17 +94,6 @@ void __init kasan_init_tags(void) } } -#ifdef CONFIG_KASAN_EXTRA_INFO -static void save_extra_info(struct kasan_stack_ring_entry *entry) -{ - u32 cpu = raw_smp_processor_id(); - u64 ts_nsec = local_clock(); - - entry->track.cpu = cpu; - entry->track.timestamp = ts_nsec >> 3; -} -#endif /* CONFIG_KASAN_EXTRA_INFO */ - static void save_stack_info(struct kmem_cache *cache, void *object, gfp_t gfp_flags, bool is_free) { @@ -137,11 +126,7 @@ next: old_stack = entry->track.stack; entry->size = cache->object_size; - entry->track.pid = current->pid; - entry->track.stack = stack; -#ifdef CONFIG_KASAN_EXTRA_INFO - save_extra_info(entry); -#endif /* CONFIG_KASAN_EXTRA_INFO */ + kasan_set_track(&entry->track, stack); entry->is_free = is_free; entry->ptr = object; -- cgit v1.2.3 From 63b85ac56a6498476fb34402c10a3f431f62f35c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 26 Dec 2023 23:51:21 +0100 Subject: kasan: stop leaking stack trace handles Commit 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") added support for stack trace eviction for Generic KASAN. 
However, that commit didn't evict stack traces when the object is not put into quarantine. As a result, some stack traces are never evicted from the stack depot. In addition, with the "kasan: save mempool stack traces" series, the free stack traces for mempool objects are also not properly evicted from the stack depot. Fix both issues by: 1. Evicting all stack traces when an object is freed if it was not put into quarantine; 2. Always evicting an existing free stack trace when a new one is saved. Also do a few related clean-ups: - Do not zero out free track when initializing/invalidating free meta: set a value in shadow memory instead; - Rename KASAN_SLAB_FREETRACK to KASAN_SLAB_FREE_META; - Drop the kasan_init_cache_meta function as it's not used by KASAN; - Add comments for the kasan_alloc_meta and kasan_free_meta structs. [akpm@linux-foundation.org: make release_free_meta() and release_alloc_meta() static] Link: https://lkml.kernel.org/r/20231226225121.235865-1-andrey.konovalov@linux.dev Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 27 +++++++++++++++++---- mm/kasan/generic.c | 60 ++++++++++++++++++++++++++++++++++++++++------- mm/kasan/kasan.h | 25 +++++++++++++++----- mm/kasan/quarantine.c | 20 +--------------- mm/kasan/report_generic.c | 6 ++--- 5 files changed, 97 insertions(+), 41 deletions(-) (limited to 'mm/kasan/kasan.h') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index a486e9b1ac68..223af53d4338 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -255,14 +255,33 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) { - bool buggy_object; - if (is_kfence_address(object)) return false; - buggy_object = poison_slab_object(cache, object, ip, 
init); + /* + * If the object is buggy, do not let slab put the object onto the + * freelist. The object will thus never be allocated again and its + * metadata will never get released. + */ + if (poison_slab_object(cache, object, ip, init)) + return true; + + /* + * If the object is put into quarantine, do not let slab put the object + * onto the freelist for now. The object's metadata is kept until the + * object gets evicted from quarantine. + */ + if (kasan_quarantine_put(cache, object)) + return true; + + /* + * If the object is not put into quarantine, it will likely be quickly + * reallocated. Thus, release its metadata now. + */ + kasan_release_object_meta(cache, object); - return buggy_object ? true : kasan_quarantine_put(cache, object); + /* Let slab put the object onto the freelist. */ + return false; } static inline bool check_page_allocation(void *ptr, unsigned long ip) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 0e77c43c559e..24c13dfb1e94 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -480,10 +480,10 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { struct kasan_alloc_meta *alloc_meta; - struct kasan_free_meta *free_meta; alloc_meta = kasan_get_alloc_meta(cache, object); if (alloc_meta) { + /* Zero out alloc meta to mark it as invalid. */ __memset(alloc_meta, 0, sizeof(*alloc_meta)); /* @@ -495,9 +495,50 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object) raw_spin_lock_init(&alloc_meta->aux_lock); kasan_enable_current(); } + + /* + * Explicitly marking free meta as invalid is not required: the shadow + * value for the first 8 bytes of a newly allocated object is not + * KASAN_SLAB_FREE_META. + */ +} + +static void release_alloc_meta(struct kasan_alloc_meta *meta) +{ + /* Evict the stack traces from stack depot. 
*/ + stack_depot_put(meta->alloc_track.stack); + stack_depot_put(meta->aux_stack[0]); + stack_depot_put(meta->aux_stack[1]); + + /* Zero out alloc meta to mark it as invalid. */ + __memset(meta, 0, sizeof(*meta)); +} + +static void release_free_meta(const void *object, struct kasan_free_meta *meta) +{ + /* Check if free meta is valid. */ + if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META) + return; + + /* Evict the stack trace from the stack depot. */ + stack_depot_put(meta->free_track.stack); + + /* Mark free meta as invalid. */ + *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; +} + +void kasan_release_object_meta(struct kmem_cache *cache, const void *object) +{ + struct kasan_alloc_meta *alloc_meta; + struct kasan_free_meta *free_meta; + + alloc_meta = kasan_get_alloc_meta(cache, object); + if (alloc_meta) + release_alloc_meta(alloc_meta); + free_meta = kasan_get_free_meta(cache, object); if (free_meta) - __memset(free_meta, 0, sizeof(*free_meta)); + release_free_meta(object, free_meta); } size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) @@ -573,11 +614,8 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) if (!alloc_meta) return; - /* Evict previous stack traces (might exist for krealloc). */ - stack_depot_put(alloc_meta->alloc_track.stack); - stack_depot_put(alloc_meta->aux_stack[0]); - stack_depot_put(alloc_meta->aux_stack[1]); - __memset(alloc_meta, 0, sizeof(*alloc_meta)); + /* Evict previous stack traces (might exist for krealloc or mempool). */ + release_alloc_meta(alloc_meta); kasan_save_track(&alloc_meta->alloc_track, flags); } @@ -590,7 +628,11 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; + /* Evict previous stack trace (might exist for mempool). */ + release_free_meta(object, free_meta); + kasan_save_track(&free_meta->free_track, 0); - /* The object was freed and has free track set. 
*/ - *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; + + /* Mark free meta as valid. */ + *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META; } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 814e89523c64..645ae04539c9 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -156,7 +156,7 @@ static inline bool kasan_requires_meta(void) #ifdef CONFIG_KASAN_GENERIC -#define KASAN_SLAB_FREETRACK 0xFA /* freed slab object with free track */ +#define KASAN_SLAB_FREE_META 0xFA /* freed slab object with free meta */ #define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */ /* Stack redzone shadow values. Compiler ABI, do not change. */ @@ -253,6 +253,15 @@ struct kasan_global { #ifdef CONFIG_KASAN_GENERIC +/* + * Alloc meta contains the allocation-related information about a slab object. + * Alloc meta is saved when an object is allocated and is kept until either the + * object returns to the slab freelist (leaves quarantine for quarantined + * objects or gets freed for the non-quarantined ones) or reallocated via + * krealloc or through a mempool. + * Alloc meta is stored inside of the object's redzone. + * Alloc meta is considered valid whenever it contains non-zero data. + */ struct kasan_alloc_meta { struct kasan_track alloc_track; /* Free track is stored in kasan_free_meta. */ @@ -278,8 +287,12 @@ struct qlist_node { #define KASAN_NO_FREE_META INT_MAX /* - * Free meta is only used by Generic mode while the object is in quarantine. - * After that, slab allocator stores the freelist pointer in the object. + * Free meta contains the freeing-related information about a slab object. + * Free meta is only kept for quarantined objects and for mempool objects until + * the object gets allocated again. + * Free meta is stored within the object's memory. + * Free meta is considered valid whenever the value of the shadow byte that + * corresponds to the first 8 bytes of the object is KASAN_SLAB_FREE_META. 
*/ struct kasan_free_meta { struct qlist_node quarantine_link; @@ -380,15 +393,15 @@ void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report struct slab *kasan_addr_to_slab(const void *addr); #ifdef CONFIG_KASAN_GENERIC -void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size); -void kasan_init_object_meta(struct kmem_cache *cache, const void *object); struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache, const void *object); struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, const void *object); +void kasan_init_object_meta(struct kmem_cache *cache, const void *object); +void kasan_release_object_meta(struct kmem_cache *cache, const void *object); #else -static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { } static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { } +static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { } #endif depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 782e045da911..8afa77bc5d3b 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -143,22 +143,10 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) { void *object = qlink_to_object(qlink, cache); - struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object); struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object); unsigned long flags; - if (alloc_meta) { - stack_depot_put(alloc_meta->alloc_track.stack); - stack_depot_put(alloc_meta->aux_stack[0]); - stack_depot_put(alloc_meta->aux_stack[1]); - __memset(alloc_meta, 0, sizeof(*alloc_meta)); - } - - if (free_meta && - *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) { - 
stack_depot_put(free_meta->free_track.stack); - __memset(&free_meta->free_track, 0, sizeof(free_meta->free_track)); - } + kasan_release_object_meta(cache, object); /* * If init_on_free is enabled and KASAN's free metadata is stored in @@ -170,12 +158,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) cache->kasan_info.free_meta_offset == 0) memzero_explicit(free_meta, sizeof(*free_meta)); - /* - * As the object now gets freed from the quarantine, - * take note that its free track is no longer exists. - */ - *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; - if (IS_ENABLED(CONFIG_SLAB)) local_irq_save(flags); diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c index 99cbcd73cff7..f5b8e37b3805 100644 --- a/mm/kasan/report_generic.c +++ b/mm/kasan/report_generic.c @@ -110,7 +110,7 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info) bug_type = "use-after-free"; break; case KASAN_SLAB_FREE: - case KASAN_SLAB_FREETRACK: + case KASAN_SLAB_FREE_META: bug_type = "slab-use-after-free"; break; case KASAN_ALLOCA_LEFT: @@ -173,8 +173,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) memcpy(&info->alloc_track, &alloc_meta->alloc_track, sizeof(info->alloc_track)); - if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREETRACK) { - /* Free meta must be present with KASAN_SLAB_FREETRACK. */ + if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREE_META) { + /* Free meta must be present with KASAN_SLAB_FREE_META. */ free_meta = kasan_get_free_meta(info->cache, info->object); memcpy(&info->free_track, &free_meta->free_track, sizeof(info->free_track)); -- cgit v1.2.3