author    Linus Torvalds <torvalds@linux-foundation.org>  2021-08-27 19:00:43 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-08-27 19:00:43 +0300
commit    94606b893f4503a834f066bc9caa9659bd9ef810 (patch)
tree      f6c53d240d316d2d088787d5ba5f862468e79f00
parent    77dd11439b86e3f7990e4c0c9e0b67dca82750ba (diff)
parent    463dbba4d189750c2f576449d0bbb11c5413712e (diff)
download  linux-94606b893f4503a834f066bc9caa9659bd9ef810.tar.xz
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM fix from Russell King:

 "Resolve a Keystone 2 kernel mapping regression"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9104/2: Fix Keystone 2 kernel mapping regression
-rw-r--r--  arch/arm/include/asm/memory.h |  7
-rw-r--r--  arch/arm/kernel/head.S        | 17
-rw-r--r--  arch/arm/mm/mmu.c             |  9
-rw-r--r--  arch/arm/mm/pv-fixup-asm.S    |  2
4 files changed, 27 insertions, 8 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cfc9dfd70aad..f673e13e0f94 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
/*
* Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
*/
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
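The widening from u32 to u64 above is the core of the fix: under LPAE, Keystone 2 remaps its RAM above the 32-bit boundary, so the kernel section addresses no longer fit in 32 bits. A minimal sketch of the truncation problem, assuming the documented Keystone 2 high RAM alias at 0x8_00000000 (plain C for illustration, not kernel code; the variable names are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Keystone 2 moves RAM from its 32-bit alias at 0x80000000 to the
	 * LPAE alias at 0x8_00000000, so section addresses exceed 32 bits. */
	uint64_t sec_start = 0x800000000ULL + 0x208000;	/* example address */
	uint32_t as_u32    = (uint32_t)sec_start;	/* silently truncated */

	printf("u64: %#llx\n", (unsigned long long)sec_start);
	printf("u32: %#x  (bit 35 lost, mapping would break)\n", as_u32);
	return 0;
}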
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9eb0b4dbcc12..29070eb8df7d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -49,7 +49,8 @@
/*
* This needs to be assigned at runtime when the linker symbols are
- * resolved.
+ * resolved. These are unsigned 64bit really, but in this assembly code
+ * We store them as 32bit.
*/
.pushsection .data
.align 2
@@ -57,8 +58,10 @@
.globl kernel_sec_end
kernel_sec_start:
.long 0
+ .long 0
kernel_sec_end:
.long 0
+ .long 0
.popsection
.macro pgtbl, rd, phys
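The extra ".long 0" directives above reserve a second 32-bit word for each symbol, so the assembly-side storage matches the new u64 declarations in memory.h. A rough C equivalent of that layout (the union is only for illustration; the kernel simply declares the symbols as u64):

#include <stdint.h>
#include <stdio.h>

/* Word-pair view of the storage reserved above; the kernel itself just
 * declares these as u64 in memory.h, and head.S writes one 32-bit half. */
union sec_addr {
	uint64_t full;		/* what C code in mmu.c reads        */
	uint32_t word[2];	/* what head.S writes, word by word  */
};

union sec_addr kernel_sec_start = { .full = 0 };
union sec_addr kernel_sec_end   = { .full = 0 };

int main(void)
{
	/* 8 bytes each, matching the two ".long 0" directives per symbol. */
	printf("%zu %zu\n", sizeof(kernel_sec_start), sizeof(kernel_sec_end));
	return 0;
}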
@@ -250,7 +253,11 @@ __create_page_tables:
add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
- str r8, [r5] @ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ str r8, [r5, #4] @ Save physical start of kernel (BE)
+#else
+ str r8, [r5] @ Save physical start of kernel (LE)
+#endif
orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
bls 1b
eor r3, r3, r7 @ Remove the MMU flags
adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
- str r3, [r5] @ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ str r3, [r5, #4] @ Save physical end of kernel (BE)
+#else
+ str r3, [r5] @ Save physical end of kernel (LE)
+#endif
#ifdef CONFIG_XIP_KERNEL
/*
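Both hunks above deal with the same detail: head.S still computes only a 32-bit physical address, so it must store into the low half of the 64-bit slot, and which half sits at byte offset 0 depends on endianness. On a BE8 kernel the low word is at offset 4, hence the extra "#4". A small host-side C demo of that word ordering (illustrative only, example address made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 64-bit slot holding a 32-bit physical address, viewed as the
	 * word pair head.S reserves with the ".long 0" directives. */
	uint64_t slot = 0x80208000u;	/* made-up physical start address */
	uint32_t word[2];

	memcpy(word, &slot, sizeof(word));

	/* On a little-endian build the value lands at offset 0, matching
	 * the plain "str r8, [r5]"; on a BE8 build it would land at
	 * offset 4, matching "str r8, [r5, #4]". */
	printf("offset 0: %#x, offset 4: %#x\n", word[0], word[1]);
	return 0;
}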
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7583bda5ea7d..a4e006005107 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1609,6 +1609,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
return;
/*
+ * Offset the kernel section physical offsets so that the kernel
+ * mapping will work out later on.
+ */
+ kernel_sec_start += offset;
+ kernel_sec_end += offset;
+
+ /*
* Get the address of the remap function in the 1:1 identity
* mapping setup by the early page table assembly code. We
* must get this prior to the pv update. The following barrier
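The two added "+= offset" lines keep the recorded kernel section addresses in step with the physical-address shift the PV fixup applies to the rest of the kernel. A hedged worked example using the Keystone 2 aliases (the concrete section addresses are invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t low_alias  = 0x080000000ULL;	/* alias head.S booted from   */
	uint64_t high_alias = 0x800000000ULL;	/* LPAE alias used afterwards */
	uint64_t offset     = high_alias - low_alias;

	/* Invented section addresses standing in for the values head.S saved. */
	uint64_t kernel_sec_start = 0x080208000ULL;
	uint64_t kernel_sec_end   = 0x081000000ULL;

	/* Mirrors the "+= offset" lines added to early_paging_init(). */
	kernel_sec_start += offset;
	kernel_sec_end   += offset;

	printf("offset=%#llx start=%#llx end=%#llx\n",
	       (unsigned long long)offset,
	       (unsigned long long)kernel_sec_start,
	       (unsigned long long)kernel_sec_end);
	return 0;
}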
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
- pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+ pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
kernel_sec_start, kernel_sec_end);
prepare_page_table();
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index 5c5e1952000a..f8e11f7c7880 100644
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
ldr r6, =(_end - 1)
add r7, r2, #0x1000
add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
- add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+ add r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1: ldrd r4, r5, [r7]
adds r4, r4, r0
adc r5, r5, r1
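For context on this last change: with LPAE each L2 entry describes a 2 MiB section (SECTION_SHIFT = 21) and is 8 bytes wide (L2_ORDER = 3), so the loop's starting offset into the table is the kernel's virtual base shifted right by (SECTION_SHIFT - L2_ORDER). KERNEL_OFFSET equals PAGE_OFFSET unless the kernel is mapped higher (as with KASAN), which is presumably why starting from PAGE_OFFSET could patch the wrong entries. A sketch of the arithmetic with assumed values (0xC0000000 is the common 3G/1G PAGE_OFFSET, 0xC8000000 a purely illustrative KERNEL_OFFSET):

#include <stdio.h>

#define SECTION_SHIFT	21		/* 2 MiB sections under LPAE     */
#define L2_ORDER	3		/* 8-byte (2^3) L2 descriptors   */

int main(void)
{
	unsigned long page_offset   = 0xC0000000UL;	/* common 3G/1G split */
	unsigned long kernel_offset = 0xC8000000UL;	/* illustrative only  */

	/* Byte offset into the L2 table of the first entry to patch, the
	 * same "base >> (SECTION_SHIFT - L2_ORDER)" arithmetic used by
	 * lpae_pgtables_remap_asm. */
	printf("start at PAGE_OFFSET:   %#lx bytes into the table\n",
	       page_offset   >> (SECTION_SHIFT - L2_ORDER));
	printf("start at KERNEL_OFFSET: %#lx bytes into the table\n",
	       kernel_offset >> (SECTION_SHIFT - L2_ORDER));
	return 0;
}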