author     Nikhil V <quic_nprakash@quicinc.com>    2023-07-13 10:07:57 +0300
committer  Will Deacon <will@kernel.org>           2023-07-20 13:44:50 +0300
commit     a8bd38dbc57c2fe074df2c9e549b9c2ad3183c83
tree       919b95d8a8e5f94a8a38297b78de5a4796a37c0f /arch/arm64
parent     55b87b74996383230586f4f9f801ae304c70e649
arm64: mm: Make hibernation aware of KFENCE
In the restore path, swsusp_arch_suspend_exit() uses copy_page() to overwrite memory. However, with features like KFENCE enabled, some pages may have been marked as not valid in the resume kernel, and touching them during the copy is then reported as an invalid access.

Consider a page 'P' that was part of the hibernation image. When the resume kernel tries to restore the pages, the same page 'P' may already be in use by the resume kernel and be KFENCE-protected, in which case its mapping has been removed from the linear map. Since restoring pages happens with the resume kernel's page tables, we end up accessing 'P' during the copy, which results in a kernel page fault.

Fix this by marking the PTE as valid for such KFENCE-protected pages.

Co-developed-by: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
Signed-off-by: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
Signed-off-by: Nikhil V <quic_nprakash@quicinc.com>
Link: https://lore.kernel.org/r/20230713070757.4093-1-quic_nprakash@quicinc.com
Signed-off-by: Will Deacon <will@kernel.org>
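For context, the sketch below shows how the patched branch of _copy_pte() reads with this change applied. It is a simplified reconstruction: only the condition and the first set_pte() are taken from the hunk below; the rest of the function body, including the pte_mkpresent() re-validation step, is paraphrased from the surrounding kernel code and may not match the tree verbatim.

/*
 * Simplified sketch of _copy_pte() in arch/arm64/mm/trans_pgd.c after this
 * patch. The else-if condition comes from the diff below; the re-validation
 * of the PTE is paraphrased and should be treated as illustrative only.
 */
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/* Clear RDONLY so the restore can overwrite code/rodata. */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if ((debug_pagealloc_enabled() ||
		    is_kfence_address((void *)addr)) && !pte_none(pte)) {
		/*
		 * The page was mapped in the hibernated kernel but has had
		 * PTE_VALID cleared in the resume kernel, either by
		 * debug_pagealloc or (with this patch) by KFENCE protection.
		 * Make the temporary mapping valid again so the restore copy
		 * does not fault on it.
		 */
		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}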
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/mm/trans_pgd.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 4ea2eefbc053..e9ad391fc8ea 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -24,6 +24,7 @@
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
+#include <linux/kfence.h>
static void *trans_alloc(struct trans_pgd_info *info)
{
@@ -41,7 +42,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
* the temporary mappings we use during restore.
*/
set_pte(dst_ptep, pte_mkwrite(pte));
- } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+ } else if ((debug_pagealloc_enabled() ||
+ is_kfence_address((void *)addr)) && !pte_none(pte)) {
/*
* debug_pagealloc will removed the PTE_VALID bit if
* the page isn't in use by the resume kernel. It may have