author		Mark Rutland <mark.rutland@arm.com>	2023-04-06 18:27:58 +0300
committer	Will Deacon <will@kernel.org>	2023-04-11 20:55:28 +0300
commit		b97547761b02cc95e0e6be827dc9ca9da8142761 (patch)
tree		efc00269ee2243872d1683a58f2b31e104635ecd /arch/arm64/mm/fixmap.c
parent		32f5b6995f790ac8fc048d3afd6b83c82074aedf (diff)
download	linux-b97547761b02cc95e0e6be827dc9ca9da8142761.tar.xz
arm64: mm: move fixmap code to its own file
Over time, arm64's mm/mmu.c has become increasingly large and painful to navigate. Move the fixmap code to its own file where it can be understood in isolation.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Link: https://lore.kernel.org/r/20230406152759.4164229-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/mm/fixmap.c')
-rw-r--r--	arch/arm64/mm/fixmap.c	215
1 file changed, 215 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
new file mode 100644
index 000000000000..7fbdd6faf500
--- /dev/null
+++ b/arch/arm64/mm/fixmap.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fixmap manipulation code
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
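+/*
+ * Statically-allocated page tables for the fixmap: one page per level.
+ * This relies on the slots mapped via bm_pte falling within a single PTE
+ * table; the FDT slots are mapped separately via fixmap_remap_fdt().
+ */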
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
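+/*
+ * The *_offset_kimg() helpers return pointers via the kernel image
+ * mapping, so these walkers are safe to use before the linear map is
+ * available.
+ */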
+static inline pud_t *fixmap_pud(unsigned long addr)
+{
+ pgd_t *pgdp = pgd_offset_k(addr);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t p4d = READ_ONCE(*p4dp);
+
+ BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
+
+ return pud_offset_kimg(p4dp, addr);
+}
+
+static inline pmd_t *fixmap_pmd(unsigned long addr)
+{
+ pud_t *pudp = fixmap_pud(addr);
+ pud_t pud = READ_ONCE(*pudp);
+
+ BUG_ON(pud_none(pud) || pud_bad(pud));
+
+ return pmd_offset_kimg(pudp, addr);
+}
+
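+/*
+ * No table walk is needed: every fixmap address handled here maps through
+ * bm_pte, so the PTE can be located by indexing that table directly.
+ */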
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+ return &bm_pte[pte_index(addr)];
+}
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly, so they can't be
+ * used directly on kernel symbols (bm_p*d). This function is called too early
+ * to use lm_alias, so the __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+void __init early_fixmap_init(void)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ unsigned long addr = FIXADDR_TOT_START;
+
+ pgdp = pgd_offset_k(addr);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
+ if (CONFIG_PGTABLE_LEVELS > 3 &&
+ !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
+ /*
+ * We only end up here if the kernel mapping and the fixmap
+ * share the top-level pgd entry, which should only happen with
+ * 16k pages and 4 levels of translation.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ pudp = pud_offset_kimg(p4dp, addr);
+ } else {
+ if (p4d_none(p4d))
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
+ pudp = fixmap_pud(addr);
+ }
+ if (pud_none(READ_ONCE(*pudp)))
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
+ pmdp = fixmap_pmd(addr);
+ __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if ((pmdp != fixmap_pmd(__fix_to_virt(FIX_BTMAP_BEGIN)))
+ || pmdp != fixmap_pmd(__fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmdp %p != %p, %p\n",
+ pmdp, fixmap_pmd(__fix_to_virt(FIX_BTMAP_BEGIN)),
+ fixmap_pmd(__fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ __fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ __fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ }
+}
+
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq), so if
+ * we ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *ptep;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ ptep = fixmap_pte(addr);
+
+ if (pgprot_val(flags)) {
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ }
+}
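+
+/*
+ * Illustrative only: callers normally use the generic wrappers from
+ * <asm-generic/fixmap.h> rather than calling __set_fixmap() directly,
+ * e.g.:
+ *
+ *	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
+ *	...
+ *	clear_fixmap(FIX_TEXT_POKE0);
+ */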
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ int offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. We rely on MIN_FDT_ALIGN being at least 8 bytes
+ * so that we can always access the magic and size fields of the FDT
+ * header after mapping the first chunk; double-check here that this is
+ * indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ /*
+ * Make sure that the FDT region can be mapped without the need to
+ * allocate additional translation table pages, so that it is safe
+ * to call create_mapping_noalloc() this early.
+ *
+ * On 64k pages, the FDT will be mapped using PTEs, so we need to
+ * be in the same PMD as the rest of the fixmap.
+ * On 4k pages, we'll use section mappings for the FDT so we only
+ * have to be in the same PUD.
+ */
+ BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
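+
+ /*
+ * Note: SWAPPER_BLOCK_SIZE is the granule that can be mapped without
+ * allocating new tables: PMD_SIZE when the early tables use section
+ * maps, PAGE_SIZE otherwise.
+ */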
+
+ offset = dt_phys % SWAPPER_BLOCK_SIZE;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+ dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
+ return NULL;
+
+ *size = fdt_totalsize(dt_virt);
+ if (*size > MAX_FDT_SIZE)
+ return NULL;
+
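+ /* the FDT extends past the first chunk: remap to cover it in full */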
+ if (offset + *size > SWAPPER_BLOCK_SIZE)
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
+
+ return dt_virt;
+}
+
+/*
+ * Copy the fixmap region into a new pgdir.
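+ *
+ * The new pgdir is typically the final swapper_pg_dir, populated while
+ * the kernel is still running on the initial page tables.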
+ */
+void __init fixmap_copy(pgd_t *pgdir)
+{
+ if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
+ /*
+ * The fixmap falls in a separate pgd to the kernel, and doesn't
+ * live in the carveout for the swapper_pg_dir. We can simply
+ * re-use the existing dir for the fixmap.
+ */
+ set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
+ READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
+ } else if (CONFIG_PGTABLE_LEVELS > 3) {
+ pgd_t *bm_pgdp;
+ p4d_t *bm_p4dp;
+ pud_t *bm_pudp;
+ /*
+ * The fixmap shares its top level pgd entry with the kernel
+ * mapping. This can really only occur when we are running
+ * with 16k pages and 4 levels of translation, so we can simply
+ * reuse the pud level entry instead.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
+ bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
+ bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
+ pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
+ pud_clear_fixmap();
+ } else {
+ BUG();
+ }
+}