author     James Morse <james.morse@arm.com>    2021-01-25 22:19:13 +0300
committer  Will Deacon <will@kernel.org>        2021-01-27 18:41:12 +0300
commit     7018d467ff2d6a6d3c820a7ad4e897fe2430b040 (patch)
tree       173ce4cf4d47c3f783be5ff33d7e7a1febddcecc /arch/arm64/mm/trans_pgd.c
parent     1401bef703a48cf79c674215f702a1435362beae (diff)
download   linux-7018d467ff2d6a6d3c820a7ad4e897fe2430b040.tar.xz
arm64: trans_pgd: hibernate: idmap the single page that holds the copy page routines
To resume from hibernate, the contents of memory are restored from the swap
image. This may overwrite any page, including the running kernel and its page
tables.

Hibernate copies the code it uses to do the restore into a single page that it
knows won't be overwritten, and maps it with page tables built from pages that
won't be overwritten. Today the address it uses for this mapping is arbitrary,
but to allow kexec to reuse this code, it needs to be idmapped. To idmap the
page we must avoid the kernel helpers that have VA_BITS baked in.

Convert create_single_mapping() to take a single PA, and idmap it. The page
tables are built in the reverse order to normal using pfn_pte() to stir in any
bits between 52:48. T0SZ is always increased to cover 48 bits, or 52 if the
copy code has bits 52:48 in its PA.

Signed-off-by: James Morse <james.morse@arm.com>
[Adopted the original patch from James to the trans_pgd interface, so it can
be commonly used by both Kexec and Hibernate. Some minor clean-ups.]
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/linux-arm-kernel/20200115143322.214247-4-james.morse@arm.com/
Link: https://lore.kernel.org/r/20210125191923.1060122-9-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
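
To see how the bottom-up walk carves up the address, the per-level arithmetic
can be reproduced outside the kernel. The sketch below is illustrative only
and is not part of the patch: it assumes the 4K-page configuration
(PAGE_SHIFT = 12), hard-codes max_msb = 47, and re-derives the level shifts
with the same formula as the kernel's ARM64_HW_PGTABLE_LEVEL_SHIFT().

/*
 * Illustrative user-space sketch, not part of the patch: mimics the level
 * arithmetic of trans_pgd_idmap_page() for 4K pages and a 48-bit PA.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
/* Same formula as the kernel's ARM64_HW_PGTABLE_LEVEL_SHIFT() */
#define LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)

int main(void)
{
	int max_msb = 47;			/* no PA bits above bit 47 */
	int bits_mapped = PAGE_SHIFT - 4;	/* msb/lsb are inclusive: 9 index bits per level */
	int this_level;

	/* Walk bottom up, exactly as the kernel loop does. */
	for (this_level = 3; this_level >= 0; this_level--) {
		int level_lsb = LEVEL_SHIFT(this_level);
		int level_msb = level_lsb + bits_mapped;

		if (level_msb > max_msb)
			level_msb = max_msb;

		printf("level %d indexes PA bits [%d:%d]\n",
		       this_level, level_msb, level_lsb);

		if (level_msb == max_msb)
			break;	/* top level reached */
	}
	return 0;
}

With 4K pages each level indexes 9 bits of the address, so the walk stops once
level 0 has consumed bits 47:39 and T0SZ is set to cover max_msb + 1 = 48 bits.
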
Diffstat (limited to 'arch/arm64/mm/trans_pgd.c')
-rw-r--r--  arch/arm64/mm/trans_pgd.c  49
1 file changed, 49 insertions, 0 deletions
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index ded8e2ba0308..527f0a39c3da 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -273,3 +273,52 @@ int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
return 0;
}
+
+/*
+ * The page we want to idmap may be outside the range covered by VA_BITS that
+ * can be built using the kernel's p?d_populate() helpers. As a one off, for a
+ * single page, we build these page tables bottom up and just assume that will
+ * need the maximum T0SZ.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ * On success trans_ttbr0 contains page table with idmapped page, t0sz is set to
+ * maximum T0SZ for this page.
+ */
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+ unsigned long *t0sz, void *page)
+{
+ phys_addr_t dst_addr = virt_to_phys(page);
+ unsigned long pfn = __phys_to_pfn(dst_addr);
+ int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
+ int bits_mapped = PAGE_SHIFT - 4;
+ unsigned long level_mask, prev_level_entry, *levels[4];
+ int this_level, index, level_lsb, level_msb;
+
+ dst_addr &= PAGE_MASK;
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
+
+ for (this_level = 3; this_level >= 0; this_level--) {
+ levels[this_level] = trans_alloc(info);
+ if (!levels[this_level])
+ return -ENOMEM;
+
+ level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
+ level_msb = min(level_lsb + bits_mapped, max_msb);
+ level_mask = GENMASK_ULL(level_msb, level_lsb);
+
+ index = (dst_addr & level_mask) >> level_lsb;
+ *(levels[this_level] + index) = prev_level_entry;
+
+ pfn = virt_to_pfn(levels[this_level]);
+ prev_level_entry = pte_val(pfn_pte(pfn,
+ __pgprot(PMD_TYPE_TABLE)));
+
+ if (level_msb == max_msb)
+ break;
+ }
+
+ *trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
+ *t0sz = TCR_T0SZ(max_msb + 1);
+
+ return 0;
+}
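
A caller plugs its own page allocator into struct trans_pgd_info and passes
the page it wants idmapped. The sketch below is only an outline of that usage,
not the matching hibernate.c change: the names idmap_page_alloc() and
setup_restore_idmap() are made up here, and hibernate itself allocates from
its safe-page pool rather than with get_zeroed_page().

/* Illustrative caller sketch, not the actual hibernate.c code. */
#include <linux/gfp.h>
#include <linux/types.h>
#include <asm/trans_pgd.h>

static void *idmap_page_alloc(void *arg)
{
	/* Hypothetical allocator; hibernate would use get_safe_page() here. */
	return (void *)get_zeroed_page((gfp_t)(unsigned long)arg);
}

static int setup_restore_idmap(void *copy_page_routine,
			       phys_addr_t *ttbr0, unsigned long *t0sz)
{
	struct trans_pgd_info info = {
		.trans_alloc_page	= idmap_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

	/*
	 * On success *ttbr0 points at the new table and *t0sz is the T0SZ
	 * needed to cover the idmapped page; the caller programs TTBR0_EL1
	 * and TCR_EL1.T0SZ with these values before branching to the copied
	 * routine.
	 */
	return trans_pgd_idmap_page(&info, ttbr0, t0sz, copy_page_routine);
}
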