From dce44566192ec0b38597fdfd435013c2d54653ff Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 29 Apr 2021 22:55:15 -0700 Subject: mm/memtest: add ARCH_USE_MEMTEST early_memtest() does not get called from all architectures. Hence enabling CONFIG_MEMTEST and providing a valid memtest=[1..N] kernel command line option might not trigger the memory pattern tests as would be expected in normal circumstances. This situation is misleading. The change here prevents the above mentioned problem after introducing a new config option ARCH_USE_MEMTEST that should be subscribed on platforms that call early_memtest(), in order to enable the config CONFIG_MEMTEST. Conversely CONFIG_MEMTEST cannot be enabled on platforms where it would not be tested anyway. Link: https://lkml.kernel.org/r/1617269193-22294-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Catalin Marinas (arm64) Reviewed-by: Max Filippov Cc: Russell King Cc: Will Deacon Cc: Thomas Bogendoerfer Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Chris Zankel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b38bbb5bb00e..678c13967580 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2573,11 +2573,18 @@ config TEST_FPU endif # RUNTIME_TESTING_MENU +config ARCH_USE_MEMTEST + bool + help + An architecture should select this when it uses early_memtest() + during boot process. + config MEMTEST bool "Memtest" + depends on ARCH_USE_MEMTEST help This option adds a kernel parameter 'memtest', which allows memtest - to be set. + to be set and executed. memtest=0, mean disabled; -- default memtest=1, mean do 1 test pattern; ... -- cgit v1.2.3 From a803315858bf8c6863f719f9fb251576fdf68a8c Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Thu, 29 Apr 2021 22:59:10 -0700 Subject: lib/test_vmalloc.c: remove two kvfree_rcu() tests Remove two test cases related to kvfree_rcu() and SLAB. Those are considered as redundant now, because similar test functionality has recently been introduced in the "rcuscale" RCU test-suite. Link: https://lkml.kernel.org/r/20210402202237.20334-1-urezki@gmail.com Signed-off-by: Uladzislau Rezki (Sony) Cc: Hillf Danton Cc: Michal Hocko Cc: Matthew Wilcox Cc: Oleksiy Avramchenko Cc: Steven Rostedt Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_vmalloc.c | 40 ---------------------------------------- 1 file changed, 40 deletions(-) (limited to 'lib') diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 5cf2fe9aab9e..4eb6abdaa74e 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -47,8 +47,6 @@ __param(int, run_test_mask, INT_MAX, "\t\tid: 128, name: pcpu_alloc_test\n" "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" - "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n" - "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n" /* Add a new test case description here. 
*/ ); @@ -363,42 +361,6 @@ kvfree_rcu_2_arg_vmalloc_test(void) return 0; } -static int -kvfree_rcu_1_arg_slab_test(void) -{ - struct test_kvfree_rcu *p; - int i; - - for (i = 0; i < test_loop_count; i++) { - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -1; - - p->array[0] = 'a'; - kvfree_rcu(p); - } - - return 0; -} - -static int -kvfree_rcu_2_arg_slab_test(void) -{ - struct test_kvfree_rcu *p; - int i; - - for (i = 0; i < test_loop_count; i++) { - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -1; - - p->array[0] = 'a'; - kvfree_rcu(p, rcu); - } - - return 0; -} - struct test_case_desc { const char *test_name; int (*test_func)(void); @@ -415,8 +377,6 @@ static struct test_case_desc test_case_array[] = { { "pcpu_alloc_test", pcpu_alloc_test }, { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, - { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test }, - { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test }, /* Add a new test case here. */ }; -- cgit v1.2.3 From 80f4759964cc70ca8e3c793afbecbdc235ce7272 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Thu, 29 Apr 2021 22:59:13 -0700 Subject: lib/test_vmalloc.c: add a new 'nr_threads' parameter By using this parameter we can specify how many workers are created to perform vmalloc tests. By default it is one CPU. The maximum value is set to 1024. As a result of this change a 'single_cpu_test' one becomes obsolete, therefore it is no longer needed. [urezki@gmail.com: extend max value of nr_threads parameter] Link: https://lkml.kernel.org/r/20210406124536.19658-1-urezki@gmail.com Link: https://lkml.kernel.org/r/20210402202237.20334-2-urezki@gmail.com Signed-off-by: Uladzislau Rezki (Sony) Cc: Hillf Danton Cc: Matthew Wilcox Cc: Michal Hocko Cc: Oleksiy Avramchenko Cc: Shuah Khan Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_vmalloc.c | 88 +++++++++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 48 deletions(-) (limited to 'lib') diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 4eb6abdaa74e..01e9543de566 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -23,8 +23,8 @@ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg) \ -__param(bool, single_cpu_test, false, - "Use single first online CPU to run tests"); +__param(int, nr_threads, 0, + "Number of workers to perform tests(min: 1 max: USHRT_MAX)"); __param(bool, sequential_test_order, false, "Use sequential stress tests order"); @@ -50,13 +50,6 @@ __param(int, run_test_mask, INT_MAX, /* Add a new test case description here. */ ); -/* - * Depends on single_cpu_test parameter. If it is true, then - * use first online CPU to trigger a test on, otherwise go with - * all online CPUs. - */ -static cpumask_t cpus_run_test_mask = CPU_MASK_NONE; - /* * Read write semaphore for synchronization of setup * phase that is done in main thread and workers. 
@@ -386,16 +379,13 @@ struct test_case_data { u64 time; }; -/* Split it to get rid of: WARNING: line over 80 characters */ -static struct test_case_data - per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)]; - static struct test_driver { struct task_struct *task; + struct test_case_data data[ARRAY_SIZE(test_case_array)]; + unsigned long start; unsigned long stop; - int cpu; -} per_cpu_test_driver[NR_CPUS]; +} *tdriver; static void shuffle_array(int *arr, int n) { @@ -423,9 +413,6 @@ static int test_func(void *private) ktime_t kt; u64 delta; - if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0) - pr_err("Failed to set affinity to %d CPU\n", t->cpu); - for (i = 0; i < ARRAY_SIZE(test_case_array); i++) random_array[i] = i; @@ -450,9 +437,9 @@ static int test_func(void *private) kt = ktime_get(); for (j = 0; j < test_repeat_count; j++) { if (!test_case_array[index].test_func()) - per_cpu_test_data[t->cpu][index].test_passed++; + t->data[index].test_passed++; else - per_cpu_test_data[t->cpu][index].test_failed++; + t->data[index].test_failed++; } /* @@ -461,7 +448,7 @@ static int test_func(void *private) delta = (u64) ktime_us_delta(ktime_get(), kt); do_div(delta, (u32) test_repeat_count); - per_cpu_test_data[t->cpu][index].time = delta; + t->data[index].time = delta; } t->stop = get_cycles(); @@ -477,53 +464,56 @@ static int test_func(void *private) return 0; } -static void +static int init_test_configurtion(void) { /* - * Reset all data of all CPUs. + * A maximum number of workers is defined as hard-coded + * value and set to USHRT_MAX. We add such gap just in + * case and for potential heavy stressing. */ - memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data)); + nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX); - if (single_cpu_test) - cpumask_set_cpu(cpumask_first(cpu_online_mask), - &cpus_run_test_mask); - else - cpumask_and(&cpus_run_test_mask, cpu_online_mask, - cpu_online_mask); + /* Allocate the space for test instances. */ + tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL); + if (tdriver == NULL) + return -1; if (test_repeat_count <= 0) test_repeat_count = 1; if (test_loop_count <= 0) test_loop_count = 1; + + return 0; } static void do_concurrent_test(void) { - int cpu, ret; + int i, ret; /* * Set some basic configurations plus sanity check. */ - init_test_configurtion(); + ret = init_test_configurtion(); + if (ret < 0) + return; /* * Put on hold all workers. */ down_write(&prepare_for_test_rwsem); - for_each_cpu(cpu, &cpus_run_test_mask) { - struct test_driver *t = &per_cpu_test_driver[cpu]; + for (i = 0; i < nr_threads; i++) { + struct test_driver *t = &tdriver[i]; - t->cpu = cpu; - t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu); + t->task = kthread_run(test_func, t, "vmalloc_test/%d", i); if (!IS_ERR(t->task)) /* Success. 
*/ atomic_inc(&test_n_undone); else - pr_err("Failed to start kthread for %d CPU\n", cpu); + pr_err("Failed to start %d kthread\n", i); } /* @@ -541,29 +531,31 @@ static void do_concurrent_test(void) ret = wait_for_completion_timeout(&test_all_done_comp, HZ); } while (!ret); - for_each_cpu(cpu, &cpus_run_test_mask) { - struct test_driver *t = &per_cpu_test_driver[cpu]; - int i; + for (i = 0; i < nr_threads; i++) { + struct test_driver *t = &tdriver[i]; + int j; if (!IS_ERR(t->task)) kthread_stop(t->task); - for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { - if (!((run_test_mask & (1 << i)) >> i)) + for (j = 0; j < ARRAY_SIZE(test_case_array); j++) { + if (!((run_test_mask & (1 << j)) >> j)) continue; pr_info( "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n", - test_case_array[i].test_name, - per_cpu_test_data[cpu][i].test_passed, - per_cpu_test_data[cpu][i].test_failed, + test_case_array[j].test_name, + t->data[j].test_passed, + t->data[j].test_failed, test_repeat_count, test_loop_count, - per_cpu_test_data[cpu][i].time); + t->data[j].time); } - pr_info("All test took CPU%d=%lu cycles\n", - cpu, t->stop - t->start); + pr_info("All test took worker%d=%lu cycles\n", + i, t->stop - t->start); } + + kvfree(tdriver); } static int vmalloc_test_init(void) -- cgit v1.2.3 From aa5c219c60ccb75b50c16329885b65c275172e4a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 22:59:59 -0700 Subject: kasan: init memory in kasan_(un)poison for HW_TAGS This change adds an argument to kasan_poison() and kasan_unpoison() that allows initializing memory along with setting the tags for HW_TAGS. Combining setting allocation tags with memory initialization will improve HW_TAGS KASAN performance when init_on_alloc/free is enabled. This change doesn't integrate memory initialization with KASAN, this is done is subsequent patches in this series. Link: https://lkml.kernel.org/r/3054314039fa64510947e674180d675cab1b4c41.1615296150.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Christoph Lameter Cc: David Rientjes Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Joonsoo Kim Cc: Kevin Brodsky Cc: Pekka Enberg Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 4 ++-- mm/kasan/common.c | 28 ++++++++++++++-------------- mm/kasan/generic.c | 12 ++++++------ mm/kasan/kasan.h | 14 ++++++++------ mm/kasan/shadow.c | 10 +++++----- mm/kasan/sw_tags.c | 2 +- 6 files changed, 36 insertions(+), 34 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 785e724ce0d8..0882d6c17e62 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -1049,14 +1049,14 @@ static void match_all_mem_tag(struct kunit *test) continue; /* Mark the first memory granule with the chosen memory tag. */ - kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag); + kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false); /* This access must cause a KASAN report. */ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0); } /* Recover the memory tag and free. 
*/ - kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr)); + kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false); kfree(ptr); } diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 7b53291dafa1..b3d8eb154f59 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -60,7 +60,7 @@ void kasan_disable_current(void) void __kasan_unpoison_range(const void *address, size_t size) { - kasan_unpoison(address, size); + kasan_unpoison(address, size, false); } #ifdef CONFIG_KASAN_STACK @@ -69,7 +69,7 @@ void kasan_unpoison_task_stack(struct task_struct *task) { void *base = task_stack_page(task); - kasan_unpoison(base, THREAD_SIZE); + kasan_unpoison(base, THREAD_SIZE, false); } /* Unpoison the stack for the current task beyond a watermark sp value. */ @@ -82,7 +82,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) */ void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); - kasan_unpoison(base, watermark - base); + kasan_unpoison(base, watermark - base, false); } #endif /* CONFIG_KASAN_STACK */ @@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order) tag = kasan_random_tag(); for (i = 0; i < (1 << order); i++) page_kasan_tag_set(page + i, tag); - kasan_unpoison(page_address(page), PAGE_SIZE << order); + kasan_unpoison(page_address(page), PAGE_SIZE << order, false); } void __kasan_free_pages(struct page *page, unsigned int order) { if (likely(!PageHighMem(page))) kasan_poison(page_address(page), PAGE_SIZE << order, - KASAN_FREE_PAGE); + KASAN_FREE_PAGE, false); } /* @@ -251,18 +251,18 @@ void __kasan_poison_slab(struct page *page) for (i = 0; i < compound_nr(page); i++) page_kasan_tag_reset(page + i); kasan_poison(page_address(page), page_size(page), - KASAN_KMALLOC_REDZONE); + KASAN_KMALLOC_REDZONE, false); } void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object) { - kasan_unpoison(object, cache->object_size); + kasan_unpoison(object, cache->object_size, false); } void __kasan_poison_object_data(struct kmem_cache *cache, void *object) { kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), - KASAN_KMALLOC_REDZONE); + KASAN_KMALLOC_REDZONE, false); } /* @@ -351,7 +351,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, } kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), - KASAN_KMALLOC_FREE); + KASAN_KMALLOC_FREE, false); if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)) return false; @@ -407,7 +407,7 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip) if (unlikely(!PageSlab(page))) { if (____kasan_kfree_large(ptr, ip)) return; - kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE); + kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false); } else { ____kasan_slab_free(page->slab_cache, ptr, ip, false); } @@ -453,7 +453,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, * Unpoison the whole object. * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning. */ - kasan_unpoison(tagged_object, cache->object_size); + kasan_unpoison(tagged_object, cache->object_size, false); /* Save alloc info (if possible) for non-kmalloc() allocations. 
*/ if (kasan_stack_collection_enabled()) @@ -496,7 +496,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache, redzone_end = round_up((unsigned long)(object + cache->object_size), KASAN_GRANULE_SIZE); kasan_poison((void *)redzone_start, redzone_end - redzone_start, - KASAN_KMALLOC_REDZONE); + KASAN_KMALLOC_REDZONE, false); /* * Save alloc info (if possible) for kmalloc() allocations. @@ -546,7 +546,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, KASAN_GRANULE_SIZE); redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr)); kasan_poison((void *)redzone_start, redzone_end - redzone_start, - KASAN_PAGE_REDZONE); + KASAN_PAGE_REDZONE, false); return (void *)ptr; } @@ -563,7 +563,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag * Part of it might already have been unpoisoned, but it's unknown * how big that part is. */ - kasan_unpoison(object, size); + kasan_unpoison(object, size, false); page = virt_to_head_page(object); diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 2e55e0f82f39..53cbf28859b5 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -208,11 +208,11 @@ static void register_global(struct kasan_global *global) { size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE); - kasan_unpoison(global->beg, global->size); + kasan_unpoison(global->beg, global->size, false); kasan_poison(global->beg + aligned_size, global->size_with_redzone - aligned_size, - KASAN_GLOBAL_REDZONE); + KASAN_GLOBAL_REDZONE, false); } void __asan_register_globals(struct kasan_global *globals, size_t size) @@ -292,11 +292,11 @@ void __asan_alloca_poison(unsigned long addr, size_t size) WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); kasan_unpoison((const void *)(addr + rounded_down_size), - size - rounded_down_size); + size - rounded_down_size, false); kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, - KASAN_ALLOCA_LEFT); + KASAN_ALLOCA_LEFT, false); kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE, - KASAN_ALLOCA_RIGHT); + KASAN_ALLOCA_RIGHT, false); } EXPORT_SYMBOL(__asan_alloca_poison); @@ -306,7 +306,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) if (unlikely(!stack_top || stack_top > stack_bottom)) return; - kasan_unpoison(stack_top, stack_bottom - stack_top); + kasan_unpoison(stack_top, stack_bottom - stack_top, false); } EXPORT_SYMBOL(__asan_allocas_unpoison); diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 0583033a7df6..071c456a3579 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -359,7 +359,7 @@ static inline u8 kasan_random_tag(void) { return 0; } #ifdef CONFIG_KASAN_HW_TAGS -static inline void kasan_poison(const void *addr, size_t size, u8 value) +static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init) { addr = kasan_reset_tag(addr); @@ -372,10 +372,10 @@ static inline void kasan_poison(const void *addr, size_t size, u8 value) if (WARN_ON(size & KASAN_GRANULE_MASK)) return; - hw_set_mem_tag_range((void *)addr, size, value, false); + hw_set_mem_tag_range((void *)addr, size, value, init); } -static inline void kasan_unpoison(const void *addr, size_t size) +static inline void kasan_unpoison(const void *addr, size_t size, bool init) { u8 tag = get_tag(addr); @@ -389,7 +389,7 @@ static inline void kasan_unpoison(const void *addr, size_t size) return; size = round_up(size, KASAN_GRANULE_SIZE); - hw_set_mem_tag_range((void *)addr, size, tag, false); + hw_set_mem_tag_range((void *)addr, size, 
tag, init); } static inline bool kasan_byte_accessible(const void *addr) @@ -407,22 +407,24 @@ static inline bool kasan_byte_accessible(const void *addr) * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range + * @init - whether to initialize the memory range (only for hardware tag-based) * * The size gets aligned to KASAN_GRANULE_SIZE before marking the range. */ -void kasan_poison(const void *addr, size_t size, u8 value); +void kasan_poison(const void *addr, size_t size, u8 value, bool init); /** * kasan_unpoison - mark the memory range as accessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size, can be unaligned + * @init - whether to initialize the memory range (only for hardware tag-based) * * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before * marking the range. * For the generic mode, the last granule of the memory range gets partially * unpoisoned based on the @size. */ -void kasan_unpoison(const void *addr, size_t size); +void kasan_unpoison(const void *addr, size_t size, bool init); bool kasan_byte_accessible(const void *addr); diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 63f43443f5d7..727ad4629173 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -69,7 +69,7 @@ void *memcpy(void *dest, const void *src, size_t len) return __memcpy(dest, src, len); } -void kasan_poison(const void *addr, size_t size, u8 value) +void kasan_poison(const void *addr, size_t size, u8 value, bool init) { void *shadow_start, *shadow_end; @@ -106,7 +106,7 @@ void kasan_poison_last_granule(const void *addr, size_t size) } #endif -void kasan_unpoison(const void *addr, size_t size) +void kasan_unpoison(const void *addr, size_t size, bool init) { u8 tag = get_tag(addr); @@ -129,7 +129,7 @@ void kasan_unpoison(const void *addr, size_t size) return; /* Unpoison all granules that cover the object. */ - kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag); + kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false); /* Partially poison the last granule for the generic mode. 
*/ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) @@ -344,7 +344,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size) return; size = round_up(size, KASAN_GRANULE_SIZE); - kasan_poison(start, size, KASAN_VMALLOC_INVALID); + kasan_poison(start, size, KASAN_VMALLOC_INVALID, false); } void kasan_unpoison_vmalloc(const void *start, unsigned long size) @@ -352,7 +352,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size) if (!is_vmalloc_or_module_addr(start)) return; - kasan_unpoison(start, size); + kasan_unpoison(start, size, false); } static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 00ae8913fc74..9df8e7f69e87 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -163,7 +163,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort); void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) { - kasan_poison((void *)addr, size, tag); + kasan_poison((void *)addr, size, tag, false); } EXPORT_SYMBOL(__hwasan_tag_memory); -- cgit v1.2.3 From 99734b535d9bf8d5826be8f8f3719dfc586c3452 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 23:00:49 -0700 Subject: kasan: detect false-positives in tests Currently, KASAN-KUnit tests can check that a particular annotated part of code causes a KASAN report. However, they do not check that no unwanted reports happen between the annotated parts. This patch implements these checks. It is done by setting report_data.report_found to false in kasan_test_init() and at the end of KUNIT_EXPECT_KASAN_FAIL() and then checking that it remains false at the beginning of KUNIT_EXPECT_KASAN_FAIL() and in kasan_test_exit(). kunit_add_named_resource() call is moved to kasan_test_init(), and the value of fail_data.report_expected is kept as false in between KUNIT_EXPECT_KASAN_FAIL() annotations for consistency. Link: https://lkml.kernel.org/r/48079c52cc329fbc52f4386996598d58022fb872.1617207873.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 55 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 26 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 0882d6c17e62..dc05cfc2d12f 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test) multishot = kasan_save_enable_multi_shot(); kasan_set_tagging_report_once(false); + fail_data.report_found = false; + fail_data.report_expected = false; + kunit_add_named_resource(test, NULL, NULL, &resource, + "kasan_data", &fail_data); return 0; } @@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test) { kasan_set_tagging_report_once(true); kasan_restore_multi_shot(multishot); + KUNIT_EXPECT_FALSE(test, fail_data.report_found); } /** @@ -78,33 +83,31 @@ static void kasan_test_exit(struct kunit *test) * fields, it can reorder or optimize away the accesses to those fields. * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the * expression to prevent that. + * + * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as + * false. This allows detecting KASAN reports that happen outside of the checks + * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL + * and in kasan_test_exit. 
*/ -#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ - !kasan_async_mode_enabled()) \ - migrate_disable(); \ - WRITE_ONCE(fail_data.report_expected, true); \ - WRITE_ONCE(fail_data.report_found, false); \ - kunit_add_named_resource(test, \ - NULL, \ - NULL, \ - &resource, \ - "kasan_data", &fail_data); \ - barrier(); \ - expression; \ - barrier(); \ - if (kasan_async_mode_enabled()) \ - kasan_force_async_fault(); \ - barrier(); \ - KUNIT_EXPECT_EQ(test, \ - READ_ONCE(fail_data.report_expected), \ - READ_ONCE(fail_data.report_found)); \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ - !kasan_async_mode_enabled()) { \ - if (READ_ONCE(fail_data.report_found)) \ - kasan_enable_tagging_sync(); \ - migrate_enable(); \ - } \ +#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ + !kasan_async_mode_enabled()) \ + migrate_disable(); \ + KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \ + WRITE_ONCE(fail_data.report_expected, true); \ + barrier(); \ + expression; \ + barrier(); \ + KUNIT_EXPECT_EQ(test, \ + READ_ONCE(fail_data.report_expected), \ + READ_ONCE(fail_data.report_found)); \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \ + if (READ_ONCE(fail_data.report_found)) \ + kasan_enable_tagging_sync(); \ + migrate_enable(); \ + } \ + WRITE_ONCE(fail_data.report_found, false); \ + WRITE_ONCE(fail_data.report_expected, false); \ } while (0) #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \ -- cgit v1.2.3
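Editor's note: a minimal sketch of how a KASAN KUnit test case is written against the KUNIT_EXPECT_KASAN_FAIL() macro reworked in the last patch above. The test name, allocation size, and the particular out-of-bounds write are hypothetical examples for illustration only; they are not code from the series.

/*
 * Illustrative sketch, not part of the series above. The bad access is
 * placed inside KUNIT_EXPECT_KASAN_FAIL(), which expects exactly one
 * report there; with the patch applied, any report that fires outside
 * the macro is now caught by the report_found check in
 * KUNIT_EXPECT_KASAN_FAIL() and kasan_test_exit().
 */
static void oob_write_example(struct kunit *test)
{
	char *ptr;
	size_t size = 128;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* One-byte write past the object; a KASAN report is expected here. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/* Must not produce a report: report_found stays false until exit. */
	kfree(ptr);
}

The key design point the macro relies on is that the faulting expression sits between the two barrier()s, inside the migrate_disable()/migrate_enable() bracket for synchronous HW_TAGS mode, and that report_found is reset to false again at the end of the macro so stray reports between annotated accesses are detected.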