author		Will Deacon <will@kernel.org>	2021-06-24 16:05:25 +0300
committer	Will Deacon <will@kernel.org>	2021-06-24 16:05:25 +0300
commit		fdceddb06a5ff5ad3894cf9e8124d5af38ac5793 (patch)
tree		1c0e225ad623ec5d6d02c42f400b2202f4567dc0 /mm/kasan
parent		81ad4bb1fe91d28d793d801e462a284c7f82cc40 (diff)
parent		c275c5c6d50a0518cdb0584e85905d10e7cefc6e (diff)
download	linux-fdceddb06a5ff5ad3894cf9e8124d5af38ac5793.tar.xz
Merge branch 'for-next/mte' into for-next/core
KASAN optimisations for the hardware tagging (MTE) implementation.

* for-next/mte:
  kasan: disable freed user page poisoning with HW tags
  arm64: mte: handle tags zeroing at page allocation time
  kasan: use separate (un)poison implementation for integrated init
  mm: arch: remove indirection level in alloc_zeroed_user_highpage_movable()
  kasan: speed up mte_set_mem_tag_range
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/common.c	4
-rw-r--r--	mm/kasan/hw_tags.c	32
2 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6bb87f2acd4e..0ecd293af344 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
return 0;
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
@@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
if (likely(!PageHighMem(page)))
kasan_poison(page_address(page), PAGE_SIZE << order,
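
Note: the renamed helpers keep the existing double-underscore convention, where __kasan_unpoison_pages()/__kasan_poison_pages() are the out-of-line implementations and thin inline wrappers in include/linux/kasan.h gate them on the KASAN static key. The wrapper bodies are not part of this diff; the following is only a sketch of that assumed pattern:

/*
 * Sketch of the assumed include/linux/kasan.h wrapper pattern
 * (not part of this diff): call the out-of-line helpers above
 * only when KASAN is enabled.
 */
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}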
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4004388b4e4b..ed5e5b833d61 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -238,6 +238,38 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
return &alloc_meta->free_track[0];
}
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+ /*
+ * This condition should match the one in post_alloc_hook() in
+ * page_alloc.c.
+ */
+ bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+ if (flags & __GFP_SKIP_KASAN_POISON)
+ SetPageSkipKASanPoison(page);
+
+ if (flags & __GFP_ZEROTAGS) {
+ int i;
+
+ for (i = 0; i != 1 << order; ++i)
+ tag_clear_highpage(page + i);
+ } else {
+ kasan_unpoison_pages(page, order, init);
+ }
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+ /*
+ * This condition should match the one in free_pages_prepare() in
+ * page_alloc.c.
+ */
+ bool init = want_init_on_free();
+
+ kasan_poison_pages(page, order, init);
+}
+
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_set_tagging_report_once(bool state)
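
For context, the new kasan_alloc_pages()/kasan_free_pages() entry points are intended to be called from the page allocator when memory initialisation is integrated with MTE tagging, as the comments referencing post_alloc_hook() and free_pages_prepare() indicate. The page_alloc.c side is outside this mm/kasan diff; the sketch below is illustrative only, and post_alloc_hook_kasan() is a hypothetical name used to show how a caller might select between the integrated and split paths:

/*
 * Illustrative caller-side sketch (hypothetical helper; page_alloc.c
 * changes are not in this diff). Assumes kasan_has_integrated_init()
 * reports whether tagging and zeroing happen in one pass.
 */
static void post_alloc_hook_kasan(struct page *page, unsigned int order,
				  gfp_t gfp_flags)
{
	if (kasan_has_integrated_init()) {
		/* HW tags: kasan_alloc_pages() tags and zeroes together. */
		kasan_alloc_pages(page, order, gfp_flags);
	} else {
		bool init = want_init_on_alloc(gfp_flags);

		/* Generic/SW tags: unpoison, then zero separately if needed. */
		kasan_unpoison_pages(page, order, init);
		if (init)
			kernel_init_free_pages(page, 1 << order);
	}
}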