From 7e04cc918954f9090952e8d17cb2c3c4a5ad055e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 10 May 2021 17:52:06 +0530 Subject: arm64/mm: Validate CONFIG_PGTABLE_LEVELS CONFIG_PGTABLE_LEVELS is statically defined in arch/arm64/Kconfig depending on the page size and requested virtual address range. In order to validate this page table level selection, add a BUILD_BUG_ON() based on the existing formula ARM64_HW_PGTABLE_LEVELS(). This would help guard against any inadvertent change to the CONFIG_PGTABLE_LEVELS selection. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/1620649326-24115-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/init.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e55409caaee3..6e1ca044ca90 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -499,6 +499,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); #endif + /* + * Selected page table levels should match when derived from + * scratch using the virtual address range and page size. + */ + BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) != + CONFIG_PGTABLE_LEVELS); + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.2.3 From e377ab82311af95c99648c6424a6b888a0ccb102 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 10 May 2021 16:37:51 +0530 Subject: arm64/mm: Remove [PUD|PMD]_TABLE_BIT from [pud|pmd]_bad() Semantics-wise, [pud|pmd]_bad() have always implied that a given [PUD|PMD] entry does not have a pointer to the next level page table. This was made clear in commit a1c76574f345 ("arm64: mm: use *_sect to check for section maps"). Hence explicitly check for a table entry rather than just testing a single bit. This basically redefines [pud|pmd]_bad() in terms of [pud|pmd]_table(), making the semantics clear. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Acked-by: Mark Rutland Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/1620644871-26280-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0b10204e72fc..11e60d0cd9b6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -511,13 +511,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT)) - #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_TABLE) #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) #define pmd_leaf(pmd) pmd_sect(pmd) +#define pmd_bad(pmd) (!pmd_table(pmd)) #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) #define pte_leaf_size(pte) (pte_cont(pte) ? 
CONT_PTE_SIZE : PAGE_SIZE) @@ -604,7 +603,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) #define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) +#define pud_bad(pud) (!pud_table(pud)) #define pud_present(pud) pte_present(pud_pte(pud)) #define pud_leaf(pud) pud_sect(pud) #define pud_valid(pud) pte_valid(pud_pte(pud)) -- cgit v1.2.3 From 40221c737608cf324870c58ef063159c3a6a4c81 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 24 May 2021 13:10:30 +0530 Subject: arm64/mm: Make vmemmap_free() available only with CONFIG_MEMORY_HOTPLUG vmemmap_free() callsites (mm/sparse.c) and declaration (include/linux/mm.h) are protected with CONFIG_MEMORY_HOTPLUG. This function is not required if CONFIG_MEMORY_HOTPLUG is not enabled. Hence move the config wrapper outside the function definition. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/1621842030-23256-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/mmu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6dd9369e3ea0..3d34cd127f6b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1166,16 +1166,17 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, return 0; } #endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ + +#ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { -#ifdef CONFIG_MEMORY_HOTPLUG WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); unmap_hotplug_range(start, end, true, altmap); free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); -#endif } +#endif /* CONFIG_MEMORY_HOTPLUG */ static inline pud_t *fixmap_pud(unsigned long addr) { -- cgit v1.2.3 From 5ae632ed356c2f2e42a3e7ea447e98a9e684539c Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 29 May 2021 19:15:10 +0800 Subject: arm64: mm: Use better bitmap_zalloc() Use the dedicated bitmap_zalloc() helper to allocate the bitmap, rather than open-coding the allocation with kcalloc(). 
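For context, bitmap_zalloc() wraps the same allocation that the kcalloc() call spelled out by hand. A minimal before/after sketch (illustrative only; the wrapper function name is hypothetical):

	#include <linux/bitmap.h>
	#include <linux/slab.h>

	static unsigned long *alloc_asid_map(unsigned int nbits)
	{
		/* Open-coded form: the caller must size the allocation itself. */
		/* return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL); */

		/* bitmap_zalloc() form: intent is explicit and the sizing cannot go wrong. */
		return bitmap_zalloc(nbits, GFP_KERNEL);
	}

A bitmap allocated this way is later released with bitmap_free() rather than kfree(), keeping the API symmetric.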
Cc: Catalin Marinas Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kefeng Wang Link: https://lore.kernel.org/r/20210529111510.186355-1-wangkefeng.wang@huawei.com Signed-off-by: Will Deacon --- arch/arm64/mm/context.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 001737a8f309..cd72576ae2b7 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -402,14 +402,12 @@ static int asids_init(void) { asid_bits = get_cpu_asid_bits(); atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), - GFP_KERNEL); + asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); if (!asid_map) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), - sizeof(*pinned_asid_map), GFP_KERNEL); + pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); nr_pinned_asids = 0; /* -- cgit v1.2.3 From 58cc6b72a2127475296502fcb4d2b5006b7f4742 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 12:03:17 +0100 Subject: arm64: mm: Remove unused support for Device-GRE memory type The Device-GRE memory type is unused, so remove it and reclaim a MAIR. Cc: Christoph Hellwig Acked-by: Catalin Marinas Suggested-by: Catalin Marinas Link: https://lore.kernel.org/r/20210505180228.GA3874@arm.com Link: https://lore.kernel.org/r/20210527110319.22157-2-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 1 - arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/mm/proc.S | 1 - arch/arm64/mm/ptdump.c | 4 ---- 4 files changed, 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 87b90dc27a43..1e025e3b655e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -138,7 +138,6 @@ #define MT_NORMAL_WT 3 #define MT_DEVICE_nGnRnE 4 #define MT_DEVICE_nGnRE 5 -#define MT_DEVICE_GRE 6 /* * Memory types for Stage-2 translation diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 65d15700a168..baeb33cd7685 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -703,7 +703,6 @@ /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRE UL(0x04) -#define MAIR_ATTR_DEVICE_GRE UL(0x0c) #define MAIR_ATTR_NORMAL_NC UL(0x44) #define MAIR_ATTR_NORMAL_WT UL(0xbb) #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 97d7bcd8d4f2..add026fcc88c 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -58,7 +58,6 @@ #define MAIR_EL1_SET \ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ - MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index a1937dfff31c..1c403536c9bb 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -157,10 +157,6 @@ static const struct prot_bits pte_bits[] = { .mask = PTE_ATTRINDX_MASK, .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), .set = "DEVICE/nGnRE", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_DEVICE_GRE), - .set = "DEVICE/GRE", }, { .mask = 
PTE_ATTRINDX_MASK, .val = PTE_ATTRINDX(MT_NORMAL_NC), -- cgit v1.2.3 From ee67c1103a1b50467969cf2cdb182c096c144459 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 12:03:18 +0100 Subject: arm64: acpi: Map EFI_MEMORY_WT memory as Normal-NC The only user we have of Normal Write-Through memory is in the ACPI code when mapping memory regions advertised as EFI_MEMORY_WT. Since most (all?) CPUs treat write-through as non-cacheable under the hood, don't bother with the extra memory type here and just treat EFI_MEMORY_WT the same way as EFI_MEMORY_WC by mapping it to the Normal-NC memory type instead and emitting a warning if we have failed to find an alternative EFI memory type. Cc: Ard Biesheuvel Cc: Lorenzo Pieralisi Cc: Christoph Hellwig Acked-by: Catalin Marinas Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210527110319.22157-3-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/acpi.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index cada0b816c8a..f3851724fe35 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -239,6 +239,18 @@ done: } } +static pgprot_t __acpi_get_writethrough_mem_attribute(void) +{ + /* + * Although UEFI specifies the use of Normal Write-through for + * EFI_MEMORY_WT, it is seldom used in practice and not implemented + * by most (all?) CPUs. Rather than allocate a MAIR just for this + * purpose, emit a warning and use Normal Non-cacheable instead. + */ + pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n"); + return __pgprot(PROT_NORMAL_NC); +} + pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) { /* @@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is * mapped to a corresponding MAIR attribute encoding. * The EFI memory attribute advises all possible capabilities - * of a memory region. We use the most efficient capability. + * of a memory region. */ u64 attr; @@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) attr = efi_mem_attributes(addr); if (attr & EFI_MEMORY_WB) return PAGE_KERNEL; - if (attr & EFI_MEMORY_WT) - return __pgprot(PROT_NORMAL_WT); if (attr & EFI_MEMORY_WC) return __pgprot(PROT_NORMAL_NC); + if (attr & EFI_MEMORY_WT) + return __acpi_get_writethrough_mem_attribute(); return __pgprot(PROT_DEVICE_nGnRnE); } @@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) default: if (region->attribute & EFI_MEMORY_WB) prot = PAGE_KERNEL; - else if (region->attribute & EFI_MEMORY_WT) - prot = __pgprot(PROT_NORMAL_WT); else if (region->attribute & EFI_MEMORY_WC) prot = __pgprot(PROT_NORMAL_NC); + else if (region->attribute & EFI_MEMORY_WT) + prot = __acpi_get_writethrough_mem_attribute(); } } return __ioremap(phys, size, prot); -- cgit v1.2.3 From 21cfe6edbadb703b674ae2ddf78862d00d24bfc5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 12:03:19 +0100 Subject: arm64: mm: Remove unused support for Normal-WT memory type The Normal-WT memory type is unused, so remove it and reclaim a MAIR. 
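For orientation, each MT_* index selects an eight-bit attribute field in MAIR_EL1, so there are only eight slots in total and every unused memory type is worth reclaiming. A rough sketch of the packing, assuming the MAIR_ATTRIDX() definition from arch/arm64/include/asm/sysreg.h:

	/* MAIR_ATTRIDX() places an 8-bit attribute at byte 'idx' of MAIR_EL1. */
	#define MAIR_ATTRIDX(attr, idx)		((attr) << ((idx) * 8))

	/*
	 * Worked example: MT_NORMAL_NC is index 2 and MAIR_ATTR_NORMAL_NC is
	 * 0x44, so MAIR_ATTRIDX(0x44, 2) == 0x440000, putting the Normal-NC
	 * attribute in MAIR_EL1 bits [23:16].
	 */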
Cc: Christoph Hellwig Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210527110319.22157-4-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 5 ++--- arch/arm64/include/asm/pgtable-prot.h | 1 - arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/mm/proc.S | 1 - 4 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1e025e3b655e..7b360960cc35 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -135,9 +135,8 @@ #define MT_NORMAL 0 #define MT_NORMAL_TAGGED 1 #define MT_NORMAL_NC 2 -#define MT_NORMAL_WT 3 -#define MT_DEVICE_nGnRnE 4 -#define MT_DEVICE_nGnRE 5 +#define MT_DEVICE_nGnRnE 3 +#define MT_DEVICE_nGnRE 4 /* * Memory types for Stage-2 translation diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 938092df76cf..7032f04c8ac6 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings; #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED)) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index baeb33cd7685..9ea84bcddf85 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -704,7 +704,6 @@ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRE UL(0x04) #define MAIR_ATTR_NORMAL_NC UL(0x44) -#define MAIR_ATTR_NORMAL_WT UL(0xbb) #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) #define MAIR_ATTR_NORMAL UL(0xff) #define MAIR_ATTR_MASK UL(0xff) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index add026fcc88c..6e640fa9788e 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -60,7 +60,6 @@ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ - MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) #ifdef CONFIG_CPU_PM -- cgit v1.2.3 From 65688d2a05deb9f0671a7e2301eadbfe7e27c9e9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 13:43:56 +0100 Subject: arm64: cache: Lower ARCH_DMA_MINALIGN to 64 (L1_CACHE_BYTES) Back in 97303480753e ("arm64: Increase the max granular size"), ARCH_DMA_MINALIGN was effectively increased to 128 bytes thanks to an increase in L1_CACHE_BYTES due to an unsubstantiated performance claim on the now obsolete ThunderX-1. Although this was reverted in d93277b9839b, ARCH_DMA_MINALIGN was kept at 128 bytes by ebc7e21e0fa2 ("arm64: Increase ARCH_DMA_MINALIGN to 128"). During discussion of the original patch, it was reported that the change also prevented a warning during boot on (again, now obsolete) Qualcomm server hardware where the cache writeback granule was larger than 64 bytes. 
The reason for this warning was that non-coherent DMA could lead to data corruption due to unexpected writeback from the CPU where a cacheline is shared with other allocations. Since then, systems have appeared with larger cachelines still, and so commit 8f5c9037a55b ("arm64/mm: Correct the cache line size warning with non coherent device") reworked the warning so that it only appears on systems where non-coherent DMA is actually required and taints the kernel with TAINT_CPU_OUT_OF_SPEC. We are not aware of any systems, even including the aforementioned obsolete machines, which have a CWG larger than 64 bytes and require non-coherent DMA. More recently, it has been reported that an ARCH_DMA_MINALIGN of 128 bytes wastes considerable memory (~6% immediately after boot on one system). Reduce ARCH_DMA_MINALIGN to 64 bytes and allow the warning/taint to indicate if there are machines that unknowingly rely on this. Cc: Catalin Marinas Cc: Mark Rutland Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Vincent Whitchurch Link: https://lore.kernel.org/linux-arm-kernel/1442944788-17254-1-git-send-email-rric@kernel.org/ Link: https://lore.kernel.org/linux-arm-kernel/CAOZdJXUiRMAguDV+HEJqPg57MyBNqEcTyaH+ya=U93NHb-pdJA@mail.gmail.com/ Link: https://lore.kernel.org/linux-arm-kernel/20190614131141.4428-1-msys.mizuma@gmail.com/ Link: https://lore.kernel.org/r/20210517074332.28280-1-vincent.whitchurch@axis.com Acked-by: Catalin Marinas Acked-by: Mark Rutland Acked-by: Arnd Bergmann Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210527124356.22367-1-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/cache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index a074459f8f2f..a9c0716e7440 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -47,7 +47,7 @@ * cache before the transfer is done, causing old data to be seen by * the CPU. */ -#define ARCH_DMA_MINALIGN (128) +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES #ifdef CONFIG_KASAN_SW_TAGS #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) -- cgit v1.2.3 From fcf9dc02f83949b3261eefe03e7bb81c59bfaa9c Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 3 Jun 2021 20:02:39 +0800 Subject: arm64: mm: Add is_el1_data_abort() helper We already have is_el1_instruction_abort(); add an is_el1_data_abort() helper and use it. 
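For background, the exception class (EC) that these helpers compare lives in ESR_ELx bits [31:26]. A hedged sketch of the decode, using constants from asm/esr.h (function name illustrative, not the patch's):

	#include <asm/esr.h>

	/*
	 * ESR_ELx_EC() extracts bits [31:26]. A data abort taken from the
	 * current EL has EC == ESR_ELx_EC_DABT_CUR == 0x25, e.g.:
	 *
	 *   esr = 0x96000045  ->  EC = 0x96000045 >> 26 = 0x25
	 */
	static bool esr_is_el1_dabt(unsigned int esr)
	{
		return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
	}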
Signed-off-by: Kefeng Wang Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210603120239.169018-1-wangkefeng.wang@huawei.com Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 871c82ab0a30..5c855b2ab93b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -232,13 +232,17 @@ static bool is_el1_instruction_abort(unsigned int esr) return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } +static bool is_el1_data_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; +} + static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; - if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) + if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) return false; if (fsc_type == ESR_ELx_FSC_PERM) @@ -258,7 +262,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, unsigned long flags; u64 par, dfsc; - if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + if (!is_el1_data_abort(esr) || (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) return false; @@ -346,10 +350,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) { - unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc = esr & ESR_ELx_FSC; - if (ec != ESR_ELx_EC_DABT_CUR) + if (!is_el1_data_abort(esr)) return false; if (fsc == ESR_ELx_FSC_MTE) -- cgit v1.2.3 From e0e3903f83d5e41ab7e7737ebe41ef36f578dc0a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 8 Jun 2021 13:37:42 +0100 Subject: arm64: mm: decode xFSC in mem_abort_decode() It would be helpful if mem_abort_decode() could decode the DFSC/IFSC, as this can make it easier to identify common bugs (e.g. accesses which trigger alignment faults) without having to manually decode the xFSC value. Decode the xFSC in mem_abort_decode(). Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Robin Murphy Cc: Will Deacon Link: https://lore.kernel.org/r/20210608123742.11921-1-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5c855b2ab93b..6786cf152666 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); + pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), + esr_to_fault_info(esr)->name); if (esr_is_data_abort(esr)) data_abort_decode(esr); -- cgit v1.2.3 From 0f473ac746a992b3afd994ccd1ac73052ea256f2 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 14 Jun 2021 15:40:11 +0530 Subject: arm64/mm: Drop SWAPPER_INIT_MAP_SIZE Commit cdef5f6e9e0e ("arm64: mm: allocate pagetables anywhere") dropped the last reference to SWAPPER_INIT_MAP_SIZE. Hence just remove the now-unused definition. 
Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/1623665411-20055-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kernel-pgtable.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index d44df9d62fc9..e2f103cce7c1 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -100,9 +100,6 @@ #define SWAPPER_TABLE_SHIFT PMD_SHIFT #endif -/* The size of the initial kernel direct mapping */ -#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT) - /* * Initial memory map attributes. */ -- cgit v1.2.3 From ca6ece6a76a8b5d8b428429c2803df48a69ee88b Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 14 Jun 2021 15:12:35 +0530 Subject: arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT ARM64_MEMSTART_SIZE needs to be aligned with CONT_PMD_SIZE on the 16K page size config. Hence just directly use CONT_PMD_SHIFT. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/1623663755-8949-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kernel-pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index e2f103cce7c1..c5f18f2408b5 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -122,7 +122,7 @@ #if defined(CONFIG_ARM64_4K_PAGES) #define ARM64_MEMSTART_SHIFT PUD_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) -#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5) +#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT #else #define ARM64_MEMSTART_SHIFT PMD_SHIFT #endif -- cgit v1.2.3 From 4aaa87ab3d2de485d8aae7a88cc9cb02dcd2c450 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 14 Jun 2021 13:48:26 +0530 Subject: arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK] SECTION_[SHIFT|SIZE|MASK] are essentially PMD_[SHIFT|SIZE|MASK]. But these create confusion, being similar to generic sparsemem memory sections, which are derived from SECTION_SIZE_BITS. Section references have always implied PMD level block mapping. Instead, just use the PMD level macros directly, which makes the intent explicit and also removes the confusion with sparsemem memory sections. 
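To make the replacement concrete: with 4K pages, PMD_SHIFT is 21, so a PMD block covers 2MiB and the old SECTION_* macros described exactly the same thing. A sketch of the alignment test that init_pmd() now expresses with PMD_MASK (the helper name is hypothetical, for illustration):

	#include <linux/types.h>
	#include <asm/pgtable-hwdef.h>	/* PMD_MASK */

	/* All of addr, next and phys must be PMD-aligned (2MiB with 4K pages). */
	static inline bool can_use_pmd_block(unsigned long addr,
					     unsigned long next,
					     phys_addr_t phys)
	{
		return ((addr | next | phys) & ~PMD_MASK) == 0;
	}

OR-ing the three values together lets a single mask test catch a misaligned bit in any of them.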
Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/1623658706-7182-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kernel-pgtable.h | 4 ++-- arch/arm64/include/asm/pgtable-hwdef.h | 7 ------- arch/arm64/mm/mmu.c | 2 +- 3 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index c5f18f2408b5..1260187adb31 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -91,8 +91,8 @@ /* Initial memory map size */ #if ARM64_SWAPPER_USES_SECTION_MAPS -#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT -#define SWAPPER_BLOCK_SIZE SECTION_SIZE +#define SWAPPER_BLOCK_SHIFT PMD_SHIFT +#define SWAPPER_BLOCK_SIZE PMD_SIZE #define SWAPPER_TABLE_SHIFT PUD_SHIFT #else #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index b82575a33f8b..40085e53f573 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -71,13 +71,6 @@ #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) -/* - * Section address mask and size definitions. - */ -#define SECTION_SHIFT PMD_SHIFT -#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - /* * Contiguous page definitions. */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3d34cd127f6b..5b75f7eefb72 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0 && + if (((addr | next | phys) & ~PMD_MASK) == 0 && (flags & NO_BLOCK_MAPPINGS) == 0) { pmd_set_huge(pmdp, phys, prot); -- cgit v1.2.3 From 84c5e23edecd7013ceaed8460deed5c33842cb8d Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 14 Jun 2021 20:27:01 +0800 Subject: arm64: mm: Pass original fault address to handle_mm_fault() Currently, the lower bits of the fault address are cleared before it is passed to handle_mm_fault(). This is unnecessary, since the generic code has done the same thing since commit 1a29d85eb0f19 ("mm: use vmf->address instead of of vmf->virtual_address"). Pass the original fault address to handle_mm_fault() instead, in case the generic code needs to know the exact fault address. 
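As a worked example of what the masking discarded (addresses hypothetical): with 4K pages, PAGE_MASK is ~0xfffUL, so

	addr              = 0xffff800010123456
	addr & PAGE_MASK  = 0xffff800010123000

i.e. the offset-in-page (0x456) was stripped before the generic fault path ever saw the address; passing addr through unmodified preserves it.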
Signed-off-by: Gavin Shan Acked-by: Catalin Marinas Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20210614122701.100515-1-gshan@redhat.com Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6786cf152666..bd9a0bb5fb56 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -509,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, */ if (!(vma->vm_flags & vm_flags)) return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs); + return handle_mm_fault(vma, addr, mm_flags, regs); } static bool is_el0_instruction_abort(unsigned int esr) -- cgit v1.2.3 From 9163f01130304fab1f74683d7d44632da7bda637 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 15 Jun 2021 15:02:58 +0530 Subject: arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan When using CONFIG_ARM64_SW_TTBR0_PAN, a task's thread_info::ttbr0 must be the TTBR0_EL1 value used to run userspace. With 52-bit PAs, the PA must be packed into the TTBR using phys_to_ttbr(), but we forget to do this in some of the SW PAN code. Thus, if the value is installed into TTBR0_EL1 (as may happen in the uaccess routines), this could result in UNPREDICTABLE behaviour. Since hardware with 52-bit PA support almost certainly has HW PAN, which will be used in preference, this shouldn't be a practical issue, but let's fix this for consistency. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: James Morse Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Fixes: 529c4b05a3cb ("arm64: handle 52-bit addresses in TTBR") Signed-off-by: Anshuman Khandual Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/1623749578-11231-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu_context.h | 4 ++-- arch/arm64/kernel/setup.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index d3cef9133539..eeb210997149 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, return; if (mm == &init_mm) - ttbr = __pa_symbol(reserved_pg_dir); + ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); else - ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 61845c0821d9..68b30e8c22db 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -381,7 +381,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) * faults in case uaccess_enable() is inadvertently called by the init * thread. */ - init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir); + init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); #endif if (boot_args[1] || boot_args[2] || boot_args[3]) { -- cgit v1.2.3 From c70fe14f83ae0793a1119fa5741b19ab9ba411b2 Mon Sep 17 00:00:00 2001 From: Dong Aisheng Date: Tue, 18 May 2021 18:14:03 +0800 Subject: arm64: mm: fix the count comments in compute_indices 'count - 1' is confusing and does not match what the code actually does. 
'count' actually represents the number of extra entries required, so there is no need to subtract 1. Signed-off-by: Dong Aisheng Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210518101405.1048860-3-aisheng.dong@nxp.com Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 96873dfa67fd..b70db34458ec 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -195,7 +195,7 @@ SYM_CODE_END(preserve_boot_args) and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1) mov \istart, \ptrs mul \istart, \istart, \count - add \iend, \iend, \istart // iend += (count - 1) * ptrs + add \iend, \iend, \istart // iend += count * ptrs // our entries span multiple tables lsr \istart, \vstart, \shift -- cgit v1.2.3 From f91671b5418bde81a7ce6bb2e9f3f4d41184b77c Mon Sep 17 00:00:00 2001 From: Dong Aisheng Date: Tue, 18 May 2021 18:14:04 +0800 Subject: arm64: mm: drop unused __pa(__idmap_text_start) x5 is not used by the following map_memory call. Instead, __pa(__idmap_text_start) is stored in x3, which is used later. Signed-off-by: Dong Aisheng Acked-by: Catalin Marinas Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20210518101405.1048860-4-aisheng.dong@nxp.com Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index b70db34458ec..d266b4c6287d 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables) #endif 1: ldr_l x4, idmap_ptrs_per_pgd - mov x5, x3 // __pa(__idmap_text_start) adr_l x6, __idmap_text_end // __pa(__idmap_text_end) map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14 -- cgit v1.2.3 From 7957a3db01bf533a235a9ae9333150abbe6bde32 Mon Sep 17 00:00:00 2001 From: Dong Aisheng Date: Tue, 18 May 2021 18:14:05 +0800 Subject: arm64: head: fix code comments in set_cpu_boot_mode_flag Up to here, the CPU boot mode can either be EL1 or EL2. Correct the code comments a bit. Signed-off-by: Dong Aisheng Acked-by: Catalin Marinas Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20210518101405.1048860-5-aisheng.dong@nxp.com Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d266b4c6287d..3b88000841d9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -550,7 +550,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 -1: str w0, [x1] // This CPU has booted in EL1 +1: str w0, [x1] // Save CPU boot mode dmb sy dc ivac, x1 // Invalidate potentially stale cache line ret -- cgit v1.2.3 From 2062d44da3499eed3c7d005df8f0b54d300ac0b5 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 18 Jun 2021 10:17:02 +0530 Subject: arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS ARM64_SWAPPER_USES_SECTION_MAPS implies that PMD level huge page mappings are used for swapper, idmap and vmemmap. Let's make the name explicitly PMD, removing any possible confusion with generic memory sections, and also a bit more generic, since it applies to the idmap and vmemmap mappings as well. Hence rename it to ARM64_KERNEL_USES_PMD_MAPS instead. 
Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/1623991622-24294-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kernel-pgtable.h | 10 +++++----- arch/arm64/mm/mmu.c | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 1260187adb31..3512184cfec1 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -18,9 +18,9 @@ * 64K (section size = 512M). */ #ifdef CONFIG_ARM64_4K_PAGES -#define ARM64_SWAPPER_USES_SECTION_MAPS 1 +#define ARM64_KERNEL_USES_PMD_MAPS 1 #else -#define ARM64_SWAPPER_USES_SECTION_MAPS 0 +#define ARM64_KERNEL_USES_PMD_MAPS 0 #endif /* @@ -33,7 +33,7 @@ * VA range, so pages required to map highest possible PA are reserved in all * cases. */ -#if ARM64_SWAPPER_USES_SECTION_MAPS +#if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) #define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1) #else @@ -90,7 +90,7 @@ #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) /* Initial memory map size */ -#if ARM64_SWAPPER_USES_SECTION_MAPS +#if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_BLOCK_SHIFT PMD_SHIFT #define SWAPPER_BLOCK_SIZE PMD_SIZE #define SWAPPER_TABLE_SHIFT PUD_SHIFT @@ -106,7 +106,7 @@ #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#if ARM64_SWAPPER_USES_SECTION_MAPS +#if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) #else #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 5b75f7eefb72..e04e4b6bdf16 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1113,14 +1113,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end, } #endif -#if !ARM64_SWAPPER_USES_SECTION_MAPS +#if !ARM64_KERNEL_USES_PMD_MAPS int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); return vmemmap_populate_basepages(start, end, node, altmap); } -#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ +#else /* !ARM64_KERNEL_USES_PMD_MAPS */ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { @@ -1165,7 +1165,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, return 0; } -#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ +#endif /* !ARM64_KERNEL_USES_PMD_MAPS */ #ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_free(unsigned long start, unsigned long end, -- cgit v1.2.3 From 52218fcd61cb42bde0d301db4acb3ffdf3463cc7 Mon Sep 17 00:00:00 2001 From: Zhenyu Ye Date: Wed, 23 Jun 2021 15:05:22 +0800 Subject: arm64: tlb: fix the TTL value of tlb_get_level The TTL field indicates the level of page table walk holding the *leaf* entry for the address being invalidated. 
But currently, the TTL field may be set to an incorrect value in the following call stack: pte_free_tlb -> __pte_free_tlb -> tlb_remove_table -> tlb_table_invalidate -> tlb_flush_mmu_tlbonly -> tlb_flush. In this case, we just want to flush a PTE page, but tlb->cleared_pmds is set and we get tlb_level = 2 in the tlb_get_level() function. This may cause some unexpected problems. This patch sets the TTL field to 0 if tlb->freed_tables is set; tlb->freed_tables indicates that page table pages themselves are being freed, not leaf entries. Cc: # 5.9.x Fixes: c4ab2cbc1d87 ("arm64: tlb: Set the TTL field in flush_tlb_range") Acked-by: Catalin Marinas Reported-by: ZhuRui Signed-off-by: Zhenyu Ye Link: https://lore.kernel.org/r/b80ead47-1f88-3a00-18e1-cacc22f54cc4@huawei.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/tlb.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 61c97d3b58c7..c995d1f4594f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb); */ static inline int tlb_get_level(struct mmu_gather *tlb) { + /* The TTL field is only valid for the leaf entry. */ + if (tlb->freed_tables) + return 0; + if (tlb->cleared_ptes && !(tlb->cleared_pmds || tlb->cleared_puds || tlb->cleared_p4ds) -- cgit v1.2.3
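For reference, with this hunk applied tlb_get_level() reads roughly as below (the later branches are unchanged by the patch and are reproduced here for context; 3 = PTE, 2 = PMD, 1 = PUD, 0 = no level hint). The new freed_tables check runs first, so a table-freeing flush never advertises a leaf level:

	static inline int tlb_get_level(struct mmu_gather *tlb)
	{
		/* The TTL field is only valid for the leaf entry. */
		if (tlb->freed_tables)
			return 0;

		if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
					   tlb->cleared_puds ||
					   tlb->cleared_p4ds))
			return 3;

		if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
					   tlb->cleared_puds ||
					   tlb->cleared_p4ds))
			return 2;

		if (tlb->cleared_puds && !(tlb->cleared_ptes ||
					   tlb->cleared_pmds ||
					   tlb->cleared_p4ds))
			return 1;

		return 0;
	}

So in the pte_free_tlb case from the commit message, freed_tables now short-circuits the cleared_pmds branch and the TLBI is issued without a misleading level hint.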