summaryrefslogtreecommitdiff
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorAndrey Konovalov <andreyknvl@google.com>2022-03-25 04:10:31 +0300
committerLinus Torvalds <torvalds@linux-foundation.org>2022-03-25 05:06:46 +0300
commitb42090ae6f3aa07b0a39403545d688489548a6a8 (patch)
treef1404502b91d3df67ade0a4a83a5a7074a0d5ded /mm/page_alloc.c
parentb8491b9052fef036aac0ca3afc18ef223aef6f61 (diff)
downloadlinux-b42090ae6f3aa07b0a39403545d688489548a6a8.tar.xz
kasan, page_alloc: merge kasan_alloc_pages into post_alloc_hook
Currently, the code responsible for initializing and poisoning memory in post_alloc_hook() is scattered across two locations: the kasan_alloc_pages() hook for HW_TAGS KASAN and post_alloc_hook() itself. This is confusing. This and a few following patches combine the code from these two locations. Along the way, these patches restructure the many performed checks step by step to make them easier to follow. Replace the only caller of kasan_alloc_pages() with its implementation. As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS is enabled, moving the code causes no functional changes. Also move the init and init_tags variable definitions out of the kasan_has_integrated_init() clause in post_alloc_hook(), as they have the same values regardless of what the if condition evaluates to. This patch is not useful by itself but makes the simplifications in the following patches easier to follow. Link: https://lkml.kernel.org/r/5ac7e0b30f5cbb177ec363ddd7878a3141289592.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Acked-by: Marco Elver <elver@google.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Evgenii Stepanov <eugenis@google.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Peter Collingbourne <pcc@google.com> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c20
1 file changed, 15 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2d6e48d2d83..db82facf14e7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2346,6 +2346,9 @@ static inline bool check_new_pcp(struct page *page, unsigned int order)
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
+ bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+ bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+
set_page_private(page, 0);
set_page_refcounted(page);
@@ -2361,15 +2364,22 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
/*
* As memory initialization might be integrated into KASAN,
- * kasan_alloc_pages and kernel_init_free_pages must be
+ * KASAN unpoisoning and memory initializion code must be
* kept together to avoid discrepancies in behavior.
*/
if (kasan_has_integrated_init()) {
- kasan_alloc_pages(page, order, gfp_flags);
- } else {
- bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
- bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+ if (gfp_flags & __GFP_SKIP_KASAN_POISON)
+ SetPageSkipKASanPoison(page);
+
+ if (init_tags) {
+ int i;
+ for (i = 0; i != 1 << order; ++i)
+ tag_clear_highpage(page + i);
+ } else {
+ kasan_unpoison_pages(page, order, init);
+ }
+ } else {
kasan_unpoison_pages(page, order, init);
if (init_tags) {