Diffstat (limited to 'arch/arm64')
-rw-r--r-- arch/arm64/Kconfig | 76
-rw-r--r-- arch/arm64/Makefile | 4
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts | 5
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts | 5
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts | 19
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi | 7
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm63146.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm6813.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm6856.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi | 14
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm963146.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm96813.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm96856.dts | 10
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts | 10
-rw-r--r-- arch/arm64/boot/dts/qcom/pm6150.dtsi | 46
-rw-r--r-- arch/arm64/configs/defconfig | 1
-rw-r--r-- arch/arm64/crypto/Kconfig | 1
-rw-r--r-- arch/arm64/crypto/aes-ce-ccm-core.S | 265
-rw-r--r-- arch/arm64/crypto/aes-ce-ccm-glue.c | 154
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 1
-rw-r--r-- arch/arm64/hyperv/hv_core.c | 14
-rw-r--r-- arch/arm64/hyperv/mshyperv.c | 22
-rw-r--r-- arch/arm64/include/asm/archrandom.h | 2
-rw-r--r-- arch/arm64/include/asm/assembler.h | 59
-rw-r--r-- arch/arm64/include/asm/brk-imm.h | 2
-rw-r--r-- arch/arm64/include/asm/cpu.h | 4
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 114
-rw-r--r-- arch/arm64/include/asm/crash_reserve.h (renamed from arch/arm64/include/asm/crash_core.h) | 4
-rw-r--r-- arch/arm64/include/asm/elf.h | 10
-rw-r--r-- arch/arm64/include/asm/esr.h | 13
-rw-r--r-- arch/arm64/include/asm/exception.h | 2
-rw-r--r-- arch/arm64/include/asm/fixmap.h | 2
-rw-r--r-- arch/arm64/include/asm/fpsimd.h | 4
-rw-r--r-- arch/arm64/include/asm/hw_breakpoint.h | 1
-rw-r--r-- arch/arm64/include/asm/hwcap.h | 15
-rw-r--r-- arch/arm64/include/asm/hyperv-tlfs.h | 45
-rw-r--r-- arch/arm64/include/asm/io.h | 12
-rw-r--r-- arch/arm64/include/asm/kasan.h | 2
-rw-r--r-- arch/arm64/include/asm/kernel-pgtable.h | 103
-rw-r--r-- arch/arm64/include/asm/kexec.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 6
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 13
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 100
-rw-r--r-- arch/arm64/include/asm/kvm_hyp.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 46
-rw-r--r-- arch/arm64/include/asm/kvm_nested.h | 1
-rw-r--r-- arch/arm64/include/asm/kvm_pgtable.h | 2
-rw-r--r-- arch/arm64/include/asm/memory.h | 33
-rw-r--r-- arch/arm64/include/asm/mmu.h | 40
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 53
-rw-r--r-- arch/arm64/include/asm/mshyperv.h | 4
-rw-r--r-- arch/arm64/include/asm/pgalloc.h | 52
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 33
-rw-r--r-- arch/arm64/include/asm/pgtable-prot.h | 20
-rw-r--r-- arch/arm64/include/asm/pgtable-types.h | 6
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 666
-rw-r--r-- arch/arm64/include/asm/processor.h | 4
-rw-r--r-- arch/arm64/include/asm/ptdump.h | 7
-rw-r--r-- arch/arm64/include/asm/scs.h | 36
-rw-r--r-- arch/arm64/include/asm/setup.h | 3
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 5
-rw-r--r-- arch/arm64/include/asm/tlb.h | 3
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 13
-rw-r--r-- arch/arm64/include/uapi/asm/hwcap.h | 15
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 15
-rw-r--r-- arch/arm64/include/uapi/asm/sigcontext.h | 8
-rw-r--r-- arch/arm64/include/uapi/asm/sve_context.h | 11
-rw-r--r-- arch/arm64/kernel/Makefile | 15
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 2
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 287
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 19
-rw-r--r-- arch/arm64/kernel/efi.c | 4
-rw-r--r-- arch/arm64/kernel/entry-common.c | 36
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 18
-rw-r--r-- arch/arm64/kernel/head.S | 486
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 3
-rw-r--r-- arch/arm64/kernel/image-vars.h | 35
-rw-r--r-- arch/arm64/kernel/kaslr.c | 4
-rw-r--r-- arch/arm64/kernel/machine_kexec.c | 2
-rw-r--r-- arch/arm64/kernel/machine_kexec_file.c | 10
-rw-r--r-- arch/arm64/kernel/module.c | 2
-rw-r--r-- arch/arm64/kernel/mte.c | 2
-rw-r--r-- arch/arm64/kernel/pi/.gitignore | 3
-rw-r--r-- arch/arm64/kernel/pi/Makefile | 27
-rw-r--r-- arch/arm64/kernel/pi/idreg-override.c (renamed from arch/arm64/kernel/idreg-override.c) | 78
-rw-r--r-- arch/arm64/kernel/pi/kaslr_early.c | 78
-rw-r--r-- arch/arm64/kernel/pi/map_kernel.c | 253
-rw-r--r-- arch/arm64/kernel/pi/map_range.c | 105
-rw-r--r-- arch/arm64/kernel/pi/patch-scs.c (renamed from arch/arm64/kernel/patch-scs.c) | 36
-rw-r--r-- arch/arm64/kernel/pi/pi.h | 36
-rw-r--r-- arch/arm64/kernel/pi/relacheck.c | 130
-rw-r--r-- arch/arm64/kernel/pi/relocate.c | 64
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 21
-rw-r--r-- arch/arm64/kernel/probes/kprobes_trampoline.S | 78
-rw-r--r-- arch/arm64/kernel/process.c | 3
-rw-r--r-- arch/arm64/kernel/ptrace.c | 50
-rw-r--r-- arch/arm64/kernel/setup.c | 27
-rw-r--r-- arch/arm64/kernel/signal.c | 98
-rw-r--r-- arch/arm64/kernel/sleep.S | 3
-rw-r--r-- arch/arm64/kernel/syscall.c | 5
-rw-r--r-- arch/arm64/kernel/vmcore_info.c (renamed from arch/arm64/kernel/crash_core.c) | 3
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 17
-rw-r--r-- arch/arm64/kvm/Kconfig | 4
-rw-r--r-- arch/arm64/kvm/arch_timer.c | 2
-rw-r--r-- arch/arm64/kvm/arm.c | 14
-rw-r--r-- arch/arm64/kvm/debug.c | 3
-rw-r--r-- arch/arm64/kvm/emulate-nested.c | 231
-rw-r--r-- arch/arm64/kvm/fpsimd.c | 3
-rw-r--r-- arch/arm64/kvm/guest.c | 9
-rw-r--r-- arch/arm64/kvm/hyp/aarch32.c | 4
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/switch.h | 130
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 24
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 12
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/host.S | 2
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/mm.c | 4
-rw-r--r-- arch/arm64/kvm/hyp/pgtable.c | 24
-rw-r--r-- arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 2
-rw-r--r-- arch/arm64/kvm/inject_fault.c | 2
-rw-r--r-- arch/arm64/kvm/mmu.c | 33
-rw-r--r-- arch/arm64/kvm/nested.c | 274
-rw-r--r-- arch/arm64/kvm/pmu-emul.c | 15
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 263
-rw-r--r-- arch/arm64/kvm/sys_regs.h | 2
-rw-r--r-- arch/arm64/kvm/vgic/vgic-debug.c | 2
-rw-r--r-- arch/arm64/kvm/vgic/vgic-init.c | 10
-rw-r--r-- arch/arm64/kvm/vgic/vgic-its.c | 63
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v3.c | 3
-rw-r--r-- arch/arm64/kvm/vgic/vgic.c | 62
-rw-r--r-- arch/arm64/kvm/vgic/vgic.h | 15
-rw-r--r-- arch/arm64/mm/Makefile | 1
-rw-r--r-- arch/arm64/mm/contpte.c | 408
-rw-r--r-- arch/arm64/mm/fault.c | 42
-rw-r--r-- arch/arm64/mm/fixmap.c | 43
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 47
-rw-r--r-- arch/arm64/mm/init.c | 4
-rw-r--r-- arch/arm64/mm/kasan_init.c | 165
-rw-r--r-- arch/arm64/mm/mmap.c | 4
-rw-r--r-- arch/arm64/mm/mmu.c | 275
-rw-r--r-- arch/arm64/mm/pageattr.c | 6
-rw-r--r-- arch/arm64/mm/pgd.c | 17
-rw-r--r-- arch/arm64/mm/proc.S | 116
-rw-r--r-- arch/arm64/mm/ptdump.c | 88
-rw-r--r-- arch/arm64/mm/trans_pgd.c | 6
-rw-r--r-- arch/arm64/tools/cpucaps | 3
-rw-r--r-- arch/arm64/tools/sysreg | 88
150 files changed, 4840 insertions, 2092 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ae0db7c97435..77e05d4959f2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -164,7 +164,7 @@ config ARM64
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
- select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
@@ -198,7 +198,7 @@ config ARM64
if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
- !CC_OPTIMIZE_FOR_SIZE)
+ (CC_IS_CLANG || !CC_OPTIMIZE_FOR_SIZE))
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_SAMPLE_FTRACE_DIRECT
@@ -216,7 +216,6 @@ config ARM64
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IOREMAP_PROT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KVM
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PERF_EVENTS
@@ -229,6 +228,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
+ select HAVE_RUST if CPU_LITTLE_ENDIAN
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -362,7 +362,9 @@ config PGTABLE_LEVELS
default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
+ default 4 if ARM64_16K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+ default 5 if ARM64_4K_PAGES && ARM64_VA_BITS_52
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -376,8 +378,8 @@ config BROKEN_GAS_INST
config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
bool
# Clang's __builtin_return_address() strips the PAC since 12.0.0
- # https://reviews.llvm.org/D75044
- default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
+ # https://github.com/llvm/llvm-project/commit/2a96f47c5ffca84cd774ad402cacd137f4bf45e2
+ default y if CC_IS_CLANG
# GCC's __builtin_return_address() strips the PAC since 11.1.0,
# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
@@ -390,13 +392,13 @@ config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
config KASAN_SHADOW_OFFSET
hex
depends on KASAN_GENERIC || KASAN_SW_TAGS
- default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
- default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+ default 0xdfff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && !KASAN_SW_TAGS
+ default 0xdfffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && !KASAN_SW_TAGS
default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
- default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
- default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+ default 0xefff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && KASAN_SW_TAGS
+ default 0xefffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && KASAN_SW_TAGS
default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
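These offsets follow from the generic KASAN address transformation: each group of 2^KASAN_SHADOW_SCALE_SHIFT bytes of address space maps to one shadow byte, with a scale shift of 3 for KASAN_GENERIC and 4 for KASAN_SW_TAGS, which is why every VA size above carries two candidate offsets. A minimal sketch of the mapping, assuming the generic 1:8 scaling (not kernel code):

#define KASAN_SHADOW_SCALE_SHIFT        3                       /* assuming KASAN_GENERIC */
#define KASAN_SHADOW_OFFSET             0xdfff800000000000UL    /* 48-bit VA value from above */

static inline unsigned long kasan_mem_to_shadow(unsigned long addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}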
@@ -541,9 +543,8 @@ config ARM64_ERRATUM_832075
If unsure, say Y.
config ARM64_ERRATUM_834220
- bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+ bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault (rare)"
depends on KVM
- default y
help
This option adds an alternative code sequence to work around ARM
erratum 834220 on Cortex-A57 parts up to r1p2.
@@ -559,7 +560,7 @@ config ARM64_ERRATUM_834220
as it depends on the alternative framework, which will only patch
the kernel if an affected CPU is detected.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1742098
bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
@@ -686,8 +687,7 @@ config ARM64_WORKAROUND_REPEAT_TLBI
bool
config ARM64_ERRATUM_2441007
- bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
- default y
+ bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A55 erratum #2441007.
@@ -700,11 +700,10 @@ config ARM64_ERRATUM_2441007
Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1286807
- bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
- default y
+ bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A76 erratum 1286807.
@@ -718,6 +717,8 @@ config ARM64_ERRATUM_1286807
invalidated has been observed by other observers. The
workaround repeats the TLBI+DSB operation.
+ If unsure, say N.
+
config ARM64_ERRATUM_1463225
bool "Cortex-A76: Software Step might prevent interrupt recognition"
default y
@@ -737,8 +738,7 @@ config ARM64_ERRATUM_1463225
If unsure, say Y.
config ARM64_ERRATUM_1542419
- bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
- default y
+ bool "Neoverse-N1: workaround mis-ordering of instruction fetches (rare)"
help
This option adds a workaround for ARM Neoverse-N1 erratum
1542419.
@@ -750,7 +750,7 @@ config ARM64_ERRATUM_1542419
Workaround the issue by hiding the DIC feature from EL0. This
forces user-space to perform cache maintenance.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1508412
bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
@@ -925,8 +925,7 @@ config ARM64_ERRATUM_2224489
If unsure, say Y.
config ARM64_ERRATUM_2441009
- bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
- default y
+ bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A510 erratum #2441009.
@@ -939,7 +938,7 @@ config ARM64_ERRATUM_2441009
Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_2064142
bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
@@ -1278,9 +1277,7 @@ endchoice
choice
prompt "Virtual address space size"
- default ARM64_VA_BITS_39 if ARM64_4K_PAGES
- default ARM64_VA_BITS_47 if ARM64_16K_PAGES
- default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+ default ARM64_VA_BITS_52
help
Allows choosing one of multiple possible virtual address
space sizes. The level of translation table is determined by
@@ -1307,7 +1304,7 @@ config ARM64_VA_BITS_48
config ARM64_VA_BITS_52
bool "52-bit"
- depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+ depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
help
Enable 52-bit virtual addressing for userspace when explicitly
requested via a hint to mmap(). The kernel will also use 52-bit
@@ -1354,10 +1351,11 @@ choice
config ARM64_PA_BITS_48
bool "48-bit"
+ depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
config ARM64_PA_BITS_52
- bool "52-bit (ARMv8.2)"
- depends on ARM64_64K_PAGES
+ bool "52-bit"
+ depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
help
Enable support for a 52-bit physical address space, introduced as
@@ -1374,6 +1372,10 @@ config ARM64_PA_BITS
default 48 if ARM64_PA_BITS_48
default 52 if ARM64_PA_BITS_52
+config ARM64_LPA2
+ def_bool y
+ depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+
choice
prompt "Endianness"
default CPU_LITTLE_ENDIAN
@@ -1384,7 +1386,6 @@ choice
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
- depends on !LD_IS_LLD || LLD_VERSION >= 130000
# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
depends on AS_IS_GNU || AS_VERSION >= 150000
help
@@ -1516,7 +1517,7 @@ config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
- def_bool CRASH_CORE
+ def_bool CRASH_RESERVE
config TRANS_TABLE
def_bool y
@@ -2015,8 +2016,6 @@ config ARM64_BTI_KERNEL
depends on !CC_IS_GCC || GCC_VERSION >= 100100
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
depends on !CC_IS_GCC
- # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
- depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
help
Build the kernel with Branch Target Identification annotations
@@ -2219,13 +2218,22 @@ config STACKPROTECTOR_PER_TASK
config UNWIND_PATCH_PAC_INTO_SCS
bool "Enable shadow call stack dynamically using code patching"
- # needs Clang with https://reviews.llvm.org/D111780 incorporated
+ # needs Clang with https://github.com/llvm/llvm-project/commit/de07cde67b5d205d58690be012106022aea6d2b3 incorporated
depends on CC_IS_CLANG && CLANG_VERSION >= 150000
depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET
depends on SHADOW_CALL_STACK
select UNWIND_TABLES
select DYNAMIC_SCS
+config ARM64_CONTPTE
+ bool "Contiguous PTE mappings for user memory" if EXPERT
+ depends on TRANSPARENT_HUGEPAGE
+ default y
+ help
+ When enabled, user mappings are configured using the PTE contiguous
+ bit, for any mappings that meet the size and alignment requirements.
+ This reduces TLB pressure and improves performance.
+
endmenu # "Kernel Features"
menu "Boot options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index a88cdf910687..0e075d3c546b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,6 +41,8 @@ KBUILD_CFLAGS += -mgeneral-regs-only \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
+KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
+
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
@@ -65,7 +67,9 @@ endif
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti
+ KBUILD_RUSTFLAGS += -Zbranch-protection=bti,pac-ret
else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
+ KBUILD_RUSTFLAGS += -Zbranch-protection=pac-ret
ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y)
KBUILD_CFLAGS += -mbranch-protection=pac-ret
else
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
index 78204d71ecd2..999d93730240 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
@@ -125,6 +125,11 @@
};
};
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
&nandcs {
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts
index fcf092c81b59..19fc03ef47a0 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts
@@ -155,6 +155,11 @@
};
};
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
&nandcs {
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
index d94a53d68320..2a0d4ee3bd79 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
@@ -166,11 +166,15 @@
};
};
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
&nandcs {
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
nand-on-flash-bbt;
- brcm,nand-has-wp;
#address-cells = <1>;
#size-cells = <0>;
@@ -181,16 +185,17 @@
#size-cells = <1>;
partition@0 {
- compatible = "nvmem-cells";
label = "cferom";
reg = <0x0 0x100000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x0 0x100000>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
- base_mac_addr: mac@106a0 {
- reg = <0x106a0 0x6>;
+ base_mac_addr: mac@106a0 {
+ reg = <0x106a0 0x6>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index 2f124b027bbf..e01cf4f54077 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -227,9 +227,6 @@
brcm,num-gphy = <5>;
brcm,num-rgmii-ports = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
ports: ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -589,7 +586,7 @@
status = "disabled";
};
- nand-controller@1800 {
+ nand_controller: nand-controller@1800 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
@@ -597,7 +594,7 @@
reg-names = "nand", "nand-int-base";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nand_ctlrdy";
- status = "okay";
+ status = "disabled";
nandcs: nand@0 {
compatible = "brcm,nandcs";
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi
index d658c81f7285..14b2adfb817c 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi
@@ -138,6 +138,20 @@
status = "disabled";
};
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
+
uart0: serial@12000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x12000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm63146.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63146.dtsi
index 4f474d47022e..589b8a1efc72 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm63146.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63146.dtsi
@@ -119,6 +119,20 @@
status = "disabled";
};
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
+
uart0: serial@12000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x12000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi
index 909f254dc47d..48d618e75866 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi
@@ -137,6 +137,20 @@
status = "disabled";
};
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
+
uart0: serial@12000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x12000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6813.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6813.dtsi
index 685ae32951c9..1d1303cf90f3 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6813.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6813.dtsi
@@ -138,6 +138,20 @@
status = "disabled";
};
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
+
uart0: serial@12000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x12000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6856.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6856.dtsi
index 820553ce541b..00c62c1e5df0 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6856.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6856.dtsi
@@ -119,5 +119,19 @@
num-cs = <8>;
status = "disabled";
};
+
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi
index 0eb93c298297..caeaf428dc15 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi
@@ -156,5 +156,19 @@
num-cs = <8>;
status = "disabled";
};
+
+ nand_controller: nand-controller@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ status = "disabled";
+
+ nandcs: nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
index c4e6e71f6310..030ffa5364fb 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts
index e69cd683211a..4b779e6c22e1 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm963146.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963146.dts
index db2c82d6dfd8..2851e8e41bf4 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm963146.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963146.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts
index 25c12bc63545..17dc594fe83f 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96813.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96813.dts
index faba21f03120..34832a734734 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96813.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96813.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96856.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96856.dts
index 9808331eede2..e1396b5544b7 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96856.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96856.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts
index 1f561c8e13b0..30bbf6f2917e 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts
@@ -32,3 +32,13 @@
&hsspi {
status = "okay";
};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+};
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
index ddbaf7280b03..11158c2bd524 100644
--- a/arch/arm64/boot/dts/qcom/pm6150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -63,6 +63,52 @@
};
};
+ pm6150_vbus: usb-vbus-regulator@1100 {
+ compatible = "qcom,pm6150-vbus-reg,
+ qcom,pm8150b-vbus-reg";
+ reg = <0x1100>;
+ status = "disabled";
+ };
+
+ pm6150_typec: typec@1500 {
+ compatible = "qcom,pm6150-typec,
+ qcom,pm8150b-typec";
+ reg = <0x1500>, <0x1700>;
+ interrupts = <0x0 0x15 0x00 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x15 0x01 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x15 0x02 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x15 0x03 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x15 0x04 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x15 0x05 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x15 0x06 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x15 0x07 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x00 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x01 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x02 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x03 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x04 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x05 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x06 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x17 0x07 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "or-rid-detect-change",
+ "vpd-detect",
+ "cc-state-change",
+ "vconn-oc",
+ "vbus-change",
+ "attach-detach",
+ "legacy-cable-detect",
+ "try-snk-src-detect",
+ "sig-tx",
+ "sig-rx",
+ "msg-tx",
+ "msg-rx",
+ "msg-tx-failed",
+ "msg-tx-discarded",
+ "msg-rx-discarded",
+ "fr-swap";
+ status = "disabled";
+ };
+
pm6150_temp: temp-alarm@2400 {
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ed69b4636d0f..2c30d617e180 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -76,7 +76,6 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VISCONTI=y
CONFIG_ARCH_XGENE=y
CONFIG_ARCH_ZYNQMP=y
-CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_SMT=y
CONFIG_NUMA=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index eb7b423ba463..e7d9bd8e4709 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -268,6 +268,7 @@ config CRYPTO_AES_ARM64_CE_CCM
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AES_ARM64_CE_BLK
select CRYPTO_AEAD
select CRYPTO_LIB_AES
help
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index b03f7f71f893..f2624238fd95 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -1,8 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
+ * aes-ce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
*
- * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd.
+ * Copyright (C) 2024 Google LLC
+ *
+ * Author: Ard Biesheuvel <ardb@kernel.org>
*/
#include <linux/linkage.h>
@@ -11,211 +14,129 @@
.text
.arch armv8-a+crypto
- /*
- * u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
- * u32 macp, u8 const rk[], u32 rounds);
- */
-SYM_FUNC_START(ce_aes_ccm_auth_data)
- ld1 {v0.16b}, [x0] /* load mac */
- cbz w3, 1f
- sub w3, w3, #16
- eor v1.16b, v1.16b, v1.16b
-0: ldrb w7, [x1], #1 /* get 1 byte of input */
- subs w2, w2, #1
- add w3, w3, #1
- ins v1.b[0], w7
- ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
- beq 8f /* out of input? */
- cbnz w3, 0b
- eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.4s}, [x4] /* load first round key */
- prfm pldl1strm, [x1]
- cmp w5, #12 /* which key size? */
- add x6, x4, #16
- sub w7, w5, #2 /* modified # of rounds */
- bmi 2f
- bne 5f
- mov v5.16b, v3.16b
- b 4f
-2: mov v4.16b, v3.16b
- ld1 {v5.4s}, [x6], #16 /* load 2nd round key */
-3: aese v0.16b, v4.16b
- aesmc v0.16b, v0.16b
-4: ld1 {v3.4s}, [x6], #16 /* load next round key */
- aese v0.16b, v5.16b
- aesmc v0.16b, v0.16b
-5: ld1 {v4.4s}, [x6], #16 /* load next round key */
- subs w7, w7, #3
- aese v0.16b, v3.16b
- aesmc v0.16b, v0.16b
- ld1 {v5.4s}, [x6], #16 /* load next round key */
- bpl 3b
- aese v0.16b, v4.16b
- subs w2, w2, #16 /* last data? */
- eor v0.16b, v0.16b, v5.16b /* final round */
- bmi 6f
- ld1 {v1.16b}, [x1], #16 /* load next input block */
- eor v0.16b, v0.16b, v1.16b /* xor with mac */
- bne 1b
-6: st1 {v0.16b}, [x0] /* store mac */
- beq 10f
- adds w2, w2, #16
- beq 10f
- mov w3, w2
-7: ldrb w7, [x1], #1
- umov w6, v0.b[0]
- eor w6, w6, w7
- strb w6, [x0], #1
- subs w2, w2, #1
- beq 10f
- ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
- b 7b
-8: cbz w3, 91f
- mov w7, w3
- add w3, w3, #16
-9: ext v1.16b, v1.16b, v1.16b, #1
- adds w7, w7, #1
- bne 9b
-91: eor v0.16b, v0.16b, v1.16b
- st1 {v0.16b}, [x0]
-10: mov w0, w3
- ret
-SYM_FUNC_END(ce_aes_ccm_auth_data)
+ .macro load_round_keys, rk, nr, tmp
+ sub w\tmp, \nr, #10
+ add \tmp, \rk, w\tmp, sxtw #4
+ ld1 {v10.4s-v13.4s}, [\rk]
+ ld1 {v14.4s-v17.4s}, [\tmp], #64
+ ld1 {v18.4s-v21.4s}, [\tmp], #64
+ ld1 {v3.4s-v5.4s}, [\tmp]
+ .endm
- /*
- * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
- * u32 rounds);
- */
-SYM_FUNC_START(ce_aes_ccm_final)
- ld1 {v3.4s}, [x2], #16 /* load first round key */
- ld1 {v0.16b}, [x0] /* load mac */
- cmp w3, #12 /* which key size? */
- sub w3, w3, #2 /* modified # of rounds */
- ld1 {v1.16b}, [x1] /* load 1st ctriv */
- bmi 0f
- bne 3f
- mov v5.16b, v3.16b
- b 2f
-0: mov v4.16b, v3.16b
-1: ld1 {v5.4s}, [x2], #16 /* load next round key */
- aese v0.16b, v4.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v4.16b
- aesmc v1.16b, v1.16b
-2: ld1 {v3.4s}, [x2], #16 /* load next round key */
- aese v0.16b, v5.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v5.16b
- aesmc v1.16b, v1.16b
-3: ld1 {v4.4s}, [x2], #16 /* load next round key */
- subs w3, w3, #3
- aese v0.16b, v3.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v3.16b
- aesmc v1.16b, v1.16b
- bpl 1b
- aese v0.16b, v4.16b
- aese v1.16b, v4.16b
- /* final round key cancels out */
- eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
- st1 {v0.16b}, [x0] /* store result */
- ret
-SYM_FUNC_END(ce_aes_ccm_final)
+ .macro dround, va, vb, vk
+ aese \va\().16b, \vk\().16b
+ aesmc \va\().16b, \va\().16b
+ aese \vb\().16b, \vk\().16b
+ aesmc \vb\().16b, \vb\().16b
+ .endm
+
+ .macro aes_encrypt, va, vb, nr
+ tbz \nr, #2, .L\@
+ dround \va, \vb, v10
+ dround \va, \vb, v11
+ tbz \nr, #1, .L\@
+ dround \va, \vb, v12
+ dround \va, \vb, v13
+.L\@: .irp v, v14, v15, v16, v17, v18, v19, v20, v21, v3
+ dround \va, \vb, \v
+ .endr
+ aese \va\().16b, v4.16b
+ aese \vb\().16b, v4.16b
+ .endm
.macro aes_ccm_do_crypt,enc
- cbz x2, 5f
- ldr x8, [x6, #8] /* load lower ctr */
+ load_round_keys x3, w4, x10
+
ld1 {v0.16b}, [x5] /* load mac */
+ cbz x2, ce_aes_ccm_final
+ ldr x8, [x6, #8] /* load lower ctr */
CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
0: /* outer loop */
ld1 {v1.8b}, [x6] /* load upper ctr */
prfm pldl1strm, [x1]
add x8, x8, #1
rev x9, x8
- cmp w4, #12 /* which key size? */
- sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
- ld1 {v3.4s}, [x3] /* load first round key */
- add x10, x3, #16
- bmi 1f
- bne 4f
- mov v5.16b, v3.16b
- b 3f
-1: mov v4.16b, v3.16b
- ld1 {v5.4s}, [x10], #16 /* load 2nd round key */
-2: /* inner loop: 3 rounds, 2x interleaved */
- aese v0.16b, v4.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v4.16b
- aesmc v1.16b, v1.16b
-3: ld1 {v3.4s}, [x10], #16 /* load next round key */
- aese v0.16b, v5.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v5.16b
- aesmc v1.16b, v1.16b
-4: ld1 {v4.4s}, [x10], #16 /* load next round key */
- subs w7, w7, #3
- aese v0.16b, v3.16b
- aesmc v0.16b, v0.16b
- aese v1.16b, v3.16b
- aesmc v1.16b, v1.16b
- ld1 {v5.4s}, [x10], #16 /* load next round key */
- bpl 2b
- aese v0.16b, v4.16b
- aese v1.16b, v4.16b
+
+ aes_encrypt v0, v1, w4
+
subs w2, w2, #16
- bmi 6f /* partial block? */
+ bmi ce_aes_ccm_crypt_tail
ld1 {v2.16b}, [x1], #16 /* load next input block */
.if \enc == 1
eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
- eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
+ eor v6.16b, v1.16b, v2.16b /* xor with crypted ctr */
.else
eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
- eor v1.16b, v2.16b, v5.16b /* final round enc */
+ eor v6.16b, v2.16b, v5.16b /* final round enc */
.endif
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
- st1 {v1.16b}, [x0], #16 /* write output block */
+ st1 {v6.16b}, [x0], #16 /* write output block */
bne 0b
CPU_LE( rev x8, x8 )
- st1 {v0.16b}, [x5] /* store mac */
str x8, [x6, #8] /* store lsb end of ctr (BE) */
-5: ret
-
-6: eor v0.16b, v0.16b, v5.16b /* final round mac */
- eor v1.16b, v1.16b, v5.16b /* final round enc */
+ cbnz x7, ce_aes_ccm_final
st1 {v0.16b}, [x5] /* store mac */
- add w2, w2, #16 /* process partial tail block */
-7: ldrb w9, [x1], #1 /* get 1 byte of input */
- umov w6, v1.b[0] /* get top crypted ctr byte */
- umov w7, v0.b[0] /* get top mac byte */
- .if \enc == 1
- eor w7, w7, w9
- eor w9, w9, w6
- .else
- eor w9, w9, w6
- eor w7, w7, w9
- .endif
- strb w9, [x0], #1 /* store out byte */
- strb w7, [x5], #1 /* store mac byte */
- subs w2, w2, #1
- beq 5b
- ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
- ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
- b 7b
+ ret
.endm
+SYM_FUNC_START_LOCAL(ce_aes_ccm_crypt_tail)
+ eor v0.16b, v0.16b, v5.16b /* final round mac */
+ eor v1.16b, v1.16b, v5.16b /* final round enc */
+
+ add x1, x1, w2, sxtw /* rewind the input pointer (w2 < 0) */
+ add x0, x0, w2, sxtw /* rewind the output pointer */
+
+ adr_l x8, .Lpermute /* load permute vectors */
+ add x9, x8, w2, sxtw
+ sub x8, x8, w2, sxtw
+ ld1 {v7.16b-v8.16b}, [x9]
+ ld1 {v9.16b}, [x8]
+
+ ld1 {v2.16b}, [x1] /* load a full block of input */
+ tbl v1.16b, {v1.16b}, v7.16b /* move keystream to end of register */
+ eor v7.16b, v2.16b, v1.16b /* encrypt partial input block */
+ bif v2.16b, v7.16b, v22.16b /* select plaintext */
+ tbx v7.16b, {v6.16b}, v8.16b /* insert output from previous iteration */
+ tbl v2.16b, {v2.16b}, v9.16b /* copy plaintext to start of v2 */
+ eor v0.16b, v0.16b, v2.16b /* fold plaintext into mac */
+
+ st1 {v7.16b}, [x0] /* store output block */
+ cbz x7, 0f
+
+SYM_INNER_LABEL(ce_aes_ccm_final, SYM_L_LOCAL)
+ ld1 {v1.16b}, [x7] /* load 1st ctriv */
+
+ aes_encrypt v0, v1, w4
+
+ /* final round key cancels out */
+ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
+0: st1 {v0.16b}, [x5] /* store result */
+ ret
+SYM_FUNC_END(ce_aes_ccm_crypt_tail)
+
/*
* void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
* u8 const rk[], u32 rounds, u8 mac[],
- * u8 ctr[]);
+ * u8 ctr[], u8 const final_iv[]);
* void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
* u8 const rk[], u32 rounds, u8 mac[],
- * u8 ctr[]);
+ * u8 ctr[], u8 const final_iv[]);
*/
SYM_FUNC_START(ce_aes_ccm_encrypt)
+ movi v22.16b, #255
aes_ccm_do_crypt 1
SYM_FUNC_END(ce_aes_ccm_encrypt)
SYM_FUNC_START(ce_aes_ccm_decrypt)
+ movi v22.16b, #0
aes_ccm_do_crypt 0
SYM_FUNC_END(ce_aes_ccm_decrypt)
+
+ .section ".rodata", "a"
+ .align 6
+ .fill 15, 1, 0xff
+.Lpermute:
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .fill 15, 1, 0xff
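The permute-table tail handling above replaces the old byte-at-a-time loop; functionally it reduces to the following hedged C model of one final partial block (names illustrative), where the MAC is always folded over the plaintext regardless of direction:

#include <stdint.h>

static void ccm_tail_model(uint8_t *out, const uint8_t *in, int n,
                           const uint8_t keystream[16], uint8_t mac[16],
                           int enc)
{
        for (int i = 0; i < n; i++) {
                uint8_t x = in[i] ^ keystream[i];       /* CTR en/decryption */

                out[i] = x;
                mac[i] ^= enc ? in[i] : x;              /* CBC-MAC over the plaintext */
        }
}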
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 25cd3808ecbe..ce9b28e3c7d6 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
+ * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
*
- * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd.
+ * Copyright (C) 2024 Google LLC
+ *
+ * Author: Ard Biesheuvel <ardb@kernel.org>
*/
#include <asm/neon.h>
@@ -15,6 +18,8 @@
#include "aes-ce-setkey.h"
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
@@ -27,19 +32,17 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
return 6 + ctx->key_length / 4;
}
-asmlinkage u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
- u32 macp, u32 const rk[], u32 rounds);
+asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ int blocks, u8 dg[], int enc_before,
+ int enc_after);
asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
u32 const rk[], u32 rounds, u8 mac[],
- u8 ctr[]);
+ u8 ctr[], u8 const final_iv[]);
asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
u32 const rk[], u32 rounds, u8 mac[],
- u8 ctr[]);
-
-asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
- u32 rounds);
+ u8 ctr[], u8 const final_iv[]);
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
@@ -94,6 +97,41 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
return 0;
}
+static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+ u32 macp, u32 const rk[], u32 rounds)
+{
+ int enc_after = (macp + abytes) % AES_BLOCK_SIZE;
+
+ do {
+ u32 blocks = abytes / AES_BLOCK_SIZE;
+
+ if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
+ u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
+ macp, enc_after);
+ u32 adv = (blocks - rem) * AES_BLOCK_SIZE;
+
+ macp = enc_after ? 0 : AES_BLOCK_SIZE;
+ in += adv;
+ abytes -= adv;
+
+ if (unlikely(rem)) {
+ kernel_neon_end();
+ kernel_neon_begin();
+ macp = 0;
+ }
+ } else {
+ u32 l = min(AES_BLOCK_SIZE - macp, abytes);
+
+ crypto_xor(&mac[macp], in, l);
+ in += l;
+ macp += l;
+ abytes -= l;
+ }
+ } while (abytes > 0);
+
+ return macp;
+}
+
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -101,7 +139,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
struct __packed { __be16 l; __be32 h; u16 len; } ltag;
struct scatter_walk walk;
u32 len = req->assoclen;
- u32 macp = 0;
+ u32 macp = AES_BLOCK_SIZE;
/* prepend the AAD with a length tag */
if (len < 0xff00) {
@@ -125,16 +163,11 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
- n = min_t(u32, n, SZ_4K); /* yield NEON at least every 4k */
p = scatterwalk_map(&walk);
macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
num_rounds(ctx));
- if (len / SZ_4K > (len - n) / SZ_4K) {
- kernel_neon_end();
- kernel_neon_begin();
- }
len -= n;
scatterwalk_unmap(p);
@@ -149,7 +182,7 @@ static int ccm_encrypt(struct aead_request *req)
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
struct skcipher_walk walk;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
- u8 buf[AES_BLOCK_SIZE];
+ u8 orig_iv[AES_BLOCK_SIZE];
u32 len = req->cryptlen;
int err;
@@ -158,42 +191,55 @@ static int ccm_encrypt(struct aead_request *req)
return err;
/* preserve the original iv for the final round */
- memcpy(buf, req->iv, AES_BLOCK_SIZE);
+ memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, false);
+ if (unlikely(err))
+ return err;
kernel_neon_begin();
if (req->assoclen)
ccm_calculate_auth_mac(req, mac);
- while (walk.nbytes) {
+ do {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- bool final = walk.nbytes == walk.total;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *final_iv = NULL;
- if (final)
+ if (walk.nbytes == walk.total) {
tail = 0;
+ final_iv = orig_iv;
+ }
- ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes - tail, ctx->key_enc,
- num_rounds(ctx), mac, walk.iv);
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+ src, walk.nbytes);
- if (!final)
- kernel_neon_end();
- err = skcipher_walk_done(&walk, tail);
- if (!final)
- kernel_neon_begin();
- }
+ ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
+ ctx->key_enc, num_rounds(ctx),
+ mac, walk.iv, final_iv);
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr, dst, walk.nbytes);
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ }
+ } while (walk.nbytes);
kernel_neon_end();
+ if (unlikely(err))
+ return err;
+
/* copy authtag to end of dst */
scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
- return err;
+ return 0;
}
static int ccm_decrypt(struct aead_request *req)
@@ -203,7 +249,7 @@ static int ccm_decrypt(struct aead_request *req)
unsigned int authsize = crypto_aead_authsize(aead);
struct skcipher_walk walk;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
- u8 buf[AES_BLOCK_SIZE];
+ u8 orig_iv[AES_BLOCK_SIZE];
u32 len = req->cryptlen - authsize;
int err;
@@ -212,34 +258,44 @@ static int ccm_decrypt(struct aead_request *req)
return err;
/* preserve the original iv for the final round */
- memcpy(buf, req->iv, AES_BLOCK_SIZE);
+ memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (unlikely(err))
+ return err;
kernel_neon_begin();
if (req->assoclen)
ccm_calculate_auth_mac(req, mac);
- while (walk.nbytes) {
+ do {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- bool final = walk.nbytes == walk.total;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *final_iv = NULL;
- if (final)
+ if (walk.nbytes == walk.total) {
tail = 0;
+ final_iv = orig_iv;
+ }
- ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes - tail, ctx->key_enc,
- num_rounds(ctx), mac, walk.iv);
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+ src, walk.nbytes);
- if (!final)
- kernel_neon_end();
- err = skcipher_walk_done(&walk, tail);
- if (!final)
- kernel_neon_begin();
- }
+ ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
+ ctx->key_enc, num_rounds(ctx),
+ mac, walk.iv, final_iv);
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr, dst, walk.nbytes);
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ }
+ } while (walk.nbytes);
kernel_neon_end();
@@ -247,11 +303,11 @@ static int ccm_decrypt(struct aead_request *req)
return err;
/* compare calculated auth tag with the stored one */
- scatterwalk_map_and_copy(buf, req->src,
+ scatterwalk_map_and_copy(orig_iv, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
- if (crypto_memneq(mac, buf, authsize))
+ if (crypto_memneq(mac, orig_iv, authsize))
return -EBADMSG;
return 0;
}
@@ -290,6 +346,6 @@ module_init(aes_mod_init);
module_exit(aes_mod_exit);
MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");
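For reference, the resulting "ccm(aes)" AEAD is driven through the standard kernel crypto API; a condensed sketch with most error handling elided:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ccm_aes_demo(const u8 *key, u8 *iv, struct scatterlist *src,
                        struct scatterlist *dst, unsigned int assoclen,
                        unsigned int ptlen)
{
        struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
        struct aead_request *req;
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        crypto_aead_setkey(tfm, key, 16);       /* AES-128 */
        crypto_aead_setauthsize(tfm, 8);        /* 8-byte tag */

        req = aead_request_alloc(tfm, GFP_KERNEL);
        aead_request_set_ad(req, assoclen);     /* AAD leads src */
        aead_request_set_crypt(req, src, dst, ptlen, iv);
        err = crypto_aead_encrypt(req);         /* dst: ciphertext || tag */

        aead_request_free(req);
        crypto_free_aead(tfm);
        return err;
}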
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 162787c7aa86..a147e847a5a1 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -1048,6 +1048,7 @@ unregister_ciphers:
#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
+EXPORT_SYMBOL_NS(ce_aes_mac_update, CRYPTO_INTERNAL);
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
diff --git a/arch/arm64/hyperv/hv_core.c b/arch/arm64/hyperv/hv_core.c
index b54c34793701..f1ebc025e1df 100644
--- a/arch/arm64/hyperv/hv_core.c
+++ b/arch/arm64/hyperv/hv_core.c
@@ -160,22 +160,22 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
return;
panic_reported = true;
- guest_id = hv_get_vpreg(HV_REGISTER_GUEST_OSID);
+ guest_id = hv_get_vpreg(HV_REGISTER_GUEST_OS_ID);
/*
* Hyper-V provides the ability to store only 5 values.
* Pick the passed in error value, the guest_id, the PC,
* and the SP.
*/
- hv_set_vpreg(HV_REGISTER_CRASH_P0, err);
- hv_set_vpreg(HV_REGISTER_CRASH_P1, guest_id);
- hv_set_vpreg(HV_REGISTER_CRASH_P2, regs->pc);
- hv_set_vpreg(HV_REGISTER_CRASH_P3, regs->sp);
- hv_set_vpreg(HV_REGISTER_CRASH_P4, 0);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P0, err);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P1, guest_id);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P2, regs->pc);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P3, regs->sp);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P4, 0);
/*
* Let Hyper-V know there is crash data available
*/
- hv_set_vpreg(HV_REGISTER_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+ hv_set_vpreg(HV_REGISTER_GUEST_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c
index f1b8a04ee9f2..b1a4de4eee29 100644
--- a/arch/arm64/hyperv/mshyperv.c
+++ b/arch/arm64/hyperv/mshyperv.c
@@ -19,10 +19,17 @@
static bool hyperv_initialized;
+int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
+{
+ hv_get_vpreg_128(HV_REGISTER_HYPERVISOR_VERSION,
+ (struct hv_get_vp_registers_output *)info);
+
+ return 0;
+}
+
static int __init hyperv_init(void)
{
struct hv_get_vp_registers_output result;
- u32 a, b, c, d;
u64 guest_id;
int ret;
@@ -39,7 +46,7 @@ static int __init hyperv_init(void)
/* Setup the guest ID */
guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
- hv_set_vpreg(HV_REGISTER_GUEST_OSID, guest_id);
+ hv_set_vpreg(HV_REGISTER_GUEST_OS_ID, guest_id);
/* Get the features and hints from Hyper-V */
hv_get_vpreg_128(HV_REGISTER_FEATURES, &result);
@@ -54,15 +61,6 @@ static int __init hyperv_init(void)
ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
ms_hyperv.misc_features);
- /* Get information about the Hyper-V host version */
- hv_get_vpreg_128(HV_REGISTER_HYPERVISOR_VERSION, &result);
- a = result.as32.a;
- b = result.as32.b;
- c = result.as32.c;
- d = result.as32.d;
- pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
- b >> 16, b & 0xFFFF, a, d & 0xFFFFFF, c, d >> 24);
-
ret = hv_common_init();
if (ret)
return ret;
@@ -74,6 +72,8 @@ static int __init hyperv_init(void)
return ret;
}
+ ms_hyperv_late_init();
+
hyperv_initialized = true;
return 0;
}
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index ecdb3cfcd0f8..8babfbe31f95 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -129,6 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
-u64 kaslr_early_init(void *fdt);
-
#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 513787e43329..ab8b396428da 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -38,10 +38,6 @@
msr daifset, #0xf
.endm
- .macro enable_daif
- msr daifclr, #0xf
- .endm
-
/*
* Save/restore interrupts.
*/
@@ -346,20 +342,6 @@ alternative_cb_end
.endm
/*
- * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
- *
- * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
- * this number conveniently equals the number of leading zeroes in
- * the physical address of _end.
- */
- .macro idmap_get_t0sz, reg
- adrp \reg, _end
- orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
- clz \reg, \reg
- .endm
-
-/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
*
@@ -590,18 +572,27 @@ alternative_endif
.endm
/*
- * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
+ * If the kernel is built for 52-bit virtual addressing but the hardware only
+ * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
+ * but we have to add an offset so that the TTBR1 address corresponds with the
+ * pgdir entry that covers the lowest 48-bit addressable VA.
+ *
+ * Note that this trick is only used for LVA/64k pages - LPA2/4k pages use an
+ * additional paging level, and on LPA2/16k pages, we would end up with a root
+ * level table with only 2 entries, which is suboptimal in terms of TLB
+ * utilization, so there we fall back to 47 bits of translation if LPA2 is not
+ * supported.
+ *
* orr is used as it can cover the immediate value (and is idempotent).
- * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
* ttbr: Value of ttbr to set, modified.
*/
.macro offset_ttbr1, ttbr, tmp
-#ifdef CONFIG_ARM64_VA_BITS_52
- mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
- and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
- cbnz \tmp, .Lskipoffs_\@
- orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-.Lskipoffs_\@ :
+#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
+ mrs \tmp, tcr_el1
+ and \tmp, \tmp, #TCR_T1SZ_MASK
+ cmp \tmp, #TCR_T1SZ(VA_BITS_MIN)
+ orr \tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
+ csel \ttbr, \tmp, \ttbr, eq
#endif
.endm
@@ -623,25 +614,13 @@ alternative_endif
.macro phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
- /*
- * We assume \phys is 64K aligned and this is guaranteed by only
- * supporting this configuration with 64K pages.
- */
- orr \pte, \phys, \phys, lsr #36
- and \pte, \pte, #PTE_ADDR_MASK
+ orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
+ and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
mov \pte, \phys
#endif
.endm
- .macro pte_to_phys, phys, pte
- and \phys, \pte, #PTE_ADDR_MASK
-#ifdef CONFIG_ARM64_PA_BITS_52
- orr \phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
- and \phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
-#endif
- .endm
-
/*
* tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
*/
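The phys_to_pte change above generalizes how PA bits beyond bit 47 are folded into spare low descriptor bits; a hedged C model, with constants shown for the pre-LPA2 64K-page layout (LPA2 instead keeps PA[51:50] in PTE bits [9:8], i.e. a shift of 42):

#include <stdint.h>

#define PTE_ADDR_HIGH_SHIFT     36                      /* 48 - 12: PA[51:48] -> PTE[15:12] */
#define PTE_ADDR_LOW            (0xffffffffUL << 16)    /* PA[47:16] kept in place */
#define PTE_ADDR_HIGH           (0xfUL << 12)           /* PA[51:48] relocated */

static uint64_t model_phys_to_pte(uint64_t phys)
{
        /* mirrors: orr pte, phys, phys, lsr #PTE_ADDR_HIGH_SHIFT; and pte, ... */
        return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) &
               (PTE_ADDR_LOW | PTE_ADDR_HIGH);
}

static uint64_t model_pte_to_phys(uint64_t pte)
{
        return (pte & PTE_ADDR_LOW) |
               ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}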
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index 1abdcd508a11..beb42c62b6ac 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -11,6 +11,7 @@
* 0x004: for installing kprobes
* 0x005: for installing uprobes
* 0x006: for kprobe software single-step
+ * 0x007: for kretprobe return
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
@@ -23,6 +24,7 @@
#define KPROBES_BRK_IMM 0x004
#define UPROBES_BRK_IMM 0x005
#define KPROBES_BRK_SS_IMM 0x006
+#define KRETPROBES_BRK_IMM 0x007
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
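A reserved immediate such as the new KRETPROBES_BRK_IMM is consumed by registering a break hook whose handler fires when a "brk #0x007" traps; a hedged sketch using arm64's debug-monitors API (handler body elided):

#include <asm/debug-monitors.h>         /* struct break_hook, DBG_HOOK_HANDLED */

static int kretprobes_brk_handler(struct pt_regs *regs, unsigned long esr)
{
        /* divert execution to the return address saved by the kretprobe */
        return DBG_HOOK_HANDLED;
}

static struct break_hook kretprobes_break_hook = {
        .imm = KRETPROBES_BRK_IMM,      /* matches brk #0x007 */
        .fn  = kretprobes_brk_handler,
};

/* at init: register_kernel_break_hook(&kretprobes_break_hook); */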
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index b1e43f56ee46..9b73fd0cd721 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -52,14 +52,18 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64isar0;
u64 reg_id_aa64isar1;
u64 reg_id_aa64isar2;
+ u64 reg_id_aa64isar3;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
u64 reg_id_aa64mmfr2;
u64 reg_id_aa64mmfr3;
+ u64 reg_id_aa64mmfr4;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64pfr2;
u64 reg_id_aa64zfr0;
u64 reg_id_aa64smfr0;
+ u64 reg_id_aa64fpfr0;
struct cpuinfo_32bit aarch32;
};
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index bd8d4ca81a48..8b904a757bd3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -17,6 +17,7 @@
#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4
+#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF 8
#ifndef __ASSEMBLY__
@@ -363,6 +364,7 @@ struct arm64_cpu_capabilities {
u8 field_pos;
u8 field_width;
u8 min_field_value;
+ u8 max_field_value;
u8 hwcap_type;
bool sign;
unsigned long hwcap;
@@ -768,6 +770,11 @@ static __always_inline bool system_supports_tpidr2(void)
return system_supports_sme();
}
+static __always_inline bool system_supports_fpmr(void)
+{
+ return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
+}
+
static __always_inline bool system_supports_cnp(void)
{
return alternative_has_cap_unlikely(ARM64_HAS_CNP);
@@ -905,7 +912,9 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
+extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
@@ -915,9 +924,114 @@ extern struct arm64_ftr_override id_aa64isar2_override;
extern struct arm64_ftr_override arm64_sw_feature_override;
+static inline
+u64 arm64_apply_feature_override(u64 val, int feat, int width,
+ const struct arm64_ftr_override *override)
+{
+ u64 oval = override->val;
+
+ /*
+ * When it encounters an invalid override (e.g., an override that
+ * cannot be honoured due to a missing CPU feature), the early idreg
+ * override code will set the mask to 0x0 and the value to non-zero for
+ * the field in question. In order to determine whether the override is
+ * valid or not for the field we are interested in, we first need to
+ * disregard bits belonging to other fields.
+ */
+ oval &= GENMASK_ULL(feat + width - 1, feat);
+
+ /*
+ * The override is valid if all value bits are accounted for in the
+ * mask. If so, replace the masked bits with the override value.
+ */
+ if (oval == (oval & override->mask)) {
+ val &= ~override->mask;
+ val |= oval;
+ }
+
+ /* Extract the field from the updated value */
+ return cpuid_feature_extract_unsigned_field(val, feat);
+}
+
+static inline bool arm64_test_sw_feature_override(int feat)
+{
+ /*
+ * Software features are pseudo CPU features that have no underlying
+ * CPUID system register value to apply the override to.
+ */
+ return arm64_apply_feature_override(0, feat, 4,
+ &arm64_sw_feature_override);
+}
+
+static inline bool kaslr_disabled_cmdline(void)
+{
+ return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
+}
+
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
+static inline bool cpu_has_bti(void)
+{
+ if (!IS_ENABLED(CONFIG_ARM64_BTI))
+ return false;
+
+ return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
+ ID_AA64PFR1_EL1_BT_SHIFT, 4,
+ &id_aa64pfr1_override);
+}
+
+static inline bool cpu_has_pac(void)
+{
+ u64 isar1, isar2;
+
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ return false;
+
+ isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+
+ if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
+ &id_aa64isar1_override))
+ return true;
+
+ if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
+ &id_aa64isar1_override))
+ return true;
+
+ return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
+ &id_aa64isar2_override);
+}
+
+static inline bool cpu_has_lva(void)
+{
+ u64 mmfr2;
+
+ mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ mmfr2 &= ~id_aa64mmfr2_override.mask;
+ mmfr2 |= id_aa64mmfr2_override.val;
+ return cpuid_feature_extract_unsigned_field(mmfr2,
+ ID_AA64MMFR2_EL1_VARange_SHIFT);
+}
+
+static inline bool cpu_has_lpa2(void)
+{
+#ifdef CONFIG_ARM64_LPA2
+ u64 mmfr0;
+ int feat;
+
+ mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+ mmfr0 &= ~id_aa64mmfr0_override.mask;
+ mmfr0 |= id_aa64mmfr0_override.val;
+ feat = cpuid_feature_extract_signed_field(mmfr0,
+ ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+
+ return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
+#else
+ return false;
+#endif
+}
+
#endif /* __ASSEMBLY__ */
#endif
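
As a worked illustration of the validity check in arm64_apply_feature_override() above (a standalone sketch with made-up register values, not kernel code): an override whose value bits are all covered by the mask is applied, while one with a zeroed mask — the invalid-override case described in the comment — is ignored.

#include <assert.h>
#include <stdint.h>

struct ftr_override { uint64_t val, mask; };

static uint64_t apply_override(uint64_t reg, int feat, int width,
			       const struct ftr_override *o)
{
	uint64_t field = ((1ULL << width) - 1) << feat;
	uint64_t oval = o->val & field;

	if (oval == (oval & o->mask)) {	/* all value bits in the mask? */
		reg &= ~o->mask;
		reg |= oval;
	}
	return (reg & field) >> feat;
}

int main(void)
{
	/* Field at bits [7:4]; the register holds 2 in that field. */
	struct ftr_override valid   = { .val = 0x10, .mask = 0xf0 };
	struct ftr_override invalid = { .val = 0x10, .mask = 0x00 };

	assert(apply_override(0x20, 4, 4, &valid) == 1);   /* honoured */
	assert(apply_override(0x20, 4, 4, &invalid) == 2); /* rejected */
	return 0;
}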
diff --git a/arch/arm64/include/asm/crash_core.h b/arch/arm64/include/asm/crash_reserve.h
index 9f5c8d339f44..4afe027a4e7b 100644
--- a/arch/arm64/include/asm/crash_core.h
+++ b/arch/arm64/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _ARM64_CRASH_CORE_H
-#define _ARM64_CRASH_CORE_H
+#ifndef _ARM64_CRASH_RESERVE_H
+#define _ARM64_CRASH_RESERVE_H
/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN SZ_2M
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 97932fbf973d..3f93f4eef953 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -201,16 +201,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
-#ifdef CONFIG_COMPAT
-
-/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
-
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
typedef unsigned int compat_elf_greg_t;
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
+#ifdef CONFIG_COMPAT
+
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
+
/* AArch32 EABI. */
#define EF_ARM_EABI_MASK 0xff000000
int compat_elf_check_arch(const struct elf32_hdr *);
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 353fe08546cf..81606bf7d5ac 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -117,15 +117,9 @@
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
-#define ESR_ELx_FSC_SEA_TTW0 (0x14)
-#define ESR_ELx_FSC_SEA_TTW1 (0x15)
-#define ESR_ELx_FSC_SEA_TTW2 (0x16)
-#define ESR_ELx_FSC_SEA_TTW3 (0x17)
+#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
-#define ESR_ELx_FSC_SECC_TTW0 (0x1c)
-#define ESR_ELx_FSC_SECC_TTW1 (0x1d)
-#define ESR_ELx_FSC_SECC_TTW2 (0x1e)
-#define ESR_ELx_FSC_SECC_TTW3 (0x1f)
+#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
@@ -394,6 +388,9 @@ static inline bool esr_is_data_abort(unsigned long esr)
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
+ /* Translation fault, level -1 */
+ if ((esr & ESR_ELx_FSC) == 0b101011)
+ return true;
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}
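
The parameterised macros keep the old TTW0..TTW3 encodings and additionally admit n = -1 for the level -1 walk introduced by LPA2, which the case ranges in kvm_emulate.h (further down) rely on. A quick standalone check of the arithmetic:

#include <assert.h>

#define ESR_ELx_FSC_SEA_TTW(n)	(0x14 + (n))
#define ESR_ELx_FSC_SECC_TTW(n)	(0x1c + (n))

int main(void)
{
	assert(ESR_ELx_FSC_SEA_TTW(0)   == 0x14);	/* old ESR_ELx_FSC_SEA_TTW0 */
	assert(ESR_ELx_FSC_SEA_TTW(3)   == 0x17);	/* old ESR_ELx_FSC_SEA_TTW3 */
	assert(ESR_ELx_FSC_SEA_TTW(-1)  == 0x13);	/* level -1 walk (LPA2) */
	assert(ESR_ELx_FSC_SECC_TTW(-1) == 0x1b);
	return 0;
}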
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ad688e157c9b..f296662590c7 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -74,7 +74,7 @@ void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
+void do_signal(struct pt_regs *regs);
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 58c294a96676..87e307804b99 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -87,6 +87,7 @@ enum fixed_addresses {
FIX_PTE,
FIX_PMD,
FIX_PUD,
+ FIX_P4D,
FIX_PGD,
__end_of_fixed_addresses
@@ -100,7 +101,6 @@ enum fixed_addresses {
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
void __init early_fixmap_init(void);
-void __init fixmap_copy(pgd_t *pgdir);
#define __early_set_fixmap __set_fixmap
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b67b89c54e1c..bc69ac368d73 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,6 @@
#include <linux/stddef.h>
#include <linux/types.h>
-#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
#define VFP_FPSCR_CTRL_MASK 0x07f79f00
@@ -30,7 +29,6 @@
* control/status register.
*/
#define VFP_STATE_SIZE ((32 * 8) + 4)
-#endif
static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
@@ -89,6 +87,7 @@ struct cpu_fp_state {
void *sve_state;
void *sme_state;
u64 *svcr;
+ u64 *fpmr;
unsigned int sve_vl;
unsigned int sme_vl;
enum fp_type *fp_type;
@@ -154,6 +153,7 @@ extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
extern u64 read_smcr_features(void);
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 84055329cd8b..bd81cf17744a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -59,7 +59,6 @@ static inline void decode_ctrl_reg(u32 reg,
/* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2
-#define AARCH64_ESR_ACCESS_MASK (1 << 6)
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index cd71e09ea14d..4edd3b61df11 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -142,6 +142,21 @@
#define KERNEL_HWCAP_SVE_B16B16 __khwcap2_feature(SVE_B16B16)
#define KERNEL_HWCAP_LRCPC3 __khwcap2_feature(LRCPC3)
#define KERNEL_HWCAP_LSE128 __khwcap2_feature(LSE128)
+#define KERNEL_HWCAP_FPMR __khwcap2_feature(FPMR)
+#define KERNEL_HWCAP_LUT __khwcap2_feature(LUT)
+#define KERNEL_HWCAP_FAMINMAX __khwcap2_feature(FAMINMAX)
+#define KERNEL_HWCAP_F8CVT __khwcap2_feature(F8CVT)
+#define KERNEL_HWCAP_F8FMA __khwcap2_feature(F8FMA)
+#define KERNEL_HWCAP_F8DP4 __khwcap2_feature(F8DP4)
+#define KERNEL_HWCAP_F8DP2 __khwcap2_feature(F8DP2)
+#define KERNEL_HWCAP_F8E4M3 __khwcap2_feature(F8E4M3)
+#define KERNEL_HWCAP_F8E5M2 __khwcap2_feature(F8E5M2)
+#define KERNEL_HWCAP_SME_LUTV2 __khwcap2_feature(SME_LUTV2)
+#define KERNEL_HWCAP_SME_F8F16 __khwcap2_feature(SME_F8F16)
+#define KERNEL_HWCAP_SME_F8F32 __khwcap2_feature(SME_F8F32)
+#define KERNEL_HWCAP_SME_SF8FMA __khwcap2_feature(SME_SF8FMA)
+#define KERNEL_HWCAP_SME_SF8DP4 __khwcap2_feature(SME_SF8DP4)
+#define KERNEL_HWCAP_SME_SF8DP2 __khwcap2_feature(SME_SF8DP2)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/hyperv-tlfs.h b/arch/arm64/include/asm/hyperv-tlfs.h
index bc6c7ac934a1..bc30aadedfe9 100644
--- a/arch/arm64/include/asm/hyperv-tlfs.h
+++ b/arch/arm64/include/asm/hyperv-tlfs.h
@@ -22,14 +22,6 @@
*/
/*
- * These Hyper-V registers provide information equivalent to the CPUID
- * instruction on x86/x64.
- */
-#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
-#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
-#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
-
-/*
* Group C Features. See the asm-generic version of hyperv-tlfs.h
* for a description of Feature Groups.
*/
@@ -41,28 +33,29 @@
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
/*
- * Synthetic register definitions equivalent to MSRs on x86/x64
+ * To support arch-generic code calling hv_set/get_register:
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
+ * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
*/
-#define HV_REGISTER_CRASH_P0 0x00000210
-#define HV_REGISTER_CRASH_P1 0x00000211
-#define HV_REGISTER_CRASH_P2 0x00000212
-#define HV_REGISTER_CRASH_P3 0x00000213
-#define HV_REGISTER_CRASH_P4 0x00000214
-#define HV_REGISTER_CRASH_CTL 0x00000215
+#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
-#define HV_REGISTER_GUEST_OSID 0x00090002
-#define HV_REGISTER_VP_INDEX 0x00090003
-#define HV_REGISTER_TIME_REF_COUNT 0x00090004
-#define HV_REGISTER_REFERENCE_TSC 0x00090017
+#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
-#define HV_REGISTER_SINT0 0x000A0000
-#define HV_REGISTER_SCONTROL 0x000A0010
-#define HV_REGISTER_SIEFP 0x000A0012
-#define HV_REGISTER_SIMP 0x000A0013
-#define HV_REGISTER_EOM 0x000A0014
+#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
+#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
+#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
+#define HV_MSR_SIMP (HV_REGISTER_SIMP)
+#define HV_MSR_EOM (HV_REGISTER_EOM)
-#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
-#define HV_REGISTER_STIMER0_COUNT 0x000B0001
+#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
union hv_msi_entry {
u64 as_uint64[2];
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 3b694511b98f..8d825522c55c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -24,25 +24,29 @@
#define __raw_writeb __raw_writeb
static __always_inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
- asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u8 __iomem *ptr = addr;
+ asm volatile("strb %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writew __raw_writew
static __always_inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
- asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u16 __iomem *ptr = addr;
+ asm volatile("strh %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writel __raw_writel
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
- asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u32 __iomem *ptr = addr;
+ asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writeq __raw_writeq
static __always_inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
- asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u64 __iomem *ptr = addr;
+ asm volatile("str %x0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_readb __raw_readb
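
The switch from a plain "r" address operand to a "Qo" memory operand on the dereferenced pointer lets the compiler choose the addressing mode (for instance, folding a constant offset into the str) instead of always materialising the full address in a register first. A userspace arm64 sketch of the same pattern (illustrative only, compiles with GCC/Clang on arm64):

#include <assert.h>
#include <stdint.h>

static inline void raw_write32(uint32_t val, volatile uint32_t *ptr)
{
	/* "Qo": memory operand, base-register or base+offset addressing */
	asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}

int main(void)
{
	volatile uint32_t word = 0;

	raw_write32(0xdeadbeef, &word);
	assert(word == 0xdeadbeef);
	return 0;
}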
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 7eefc525a9df..e1b57c13f8a4 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -17,11 +17,9 @@
asmlinkage void kasan_early_init(void);
void kasan_init(void);
-void kasan_copy_shadow(pgd_t *pgdir);
#else
static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif
#endif
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 83ddb14b95a5..bf05a77873a4 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -13,28 +13,27 @@
#include <asm/sparsemem.h>
/*
- * The linear mapping and the start of memory are both 2M aligned (per
- * the arm64 booting.txt requirements). Hence we can use section mapping
- * with 4K (section size = 2M) but not with 16K (section size = 32M) or
- * 64K (section size = 512M).
+ * The physical and virtual addresses of the start of the kernel image are
+ * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
+ * use section mapping with 4K (section size = 2M) but not with 16K (section
+ * size = 32M) or 64K (section size = 512M).
*/
-
-/*
- * The idmap and swapper page tables need some space reserved in the kernel
- * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
- * map the kernel. With the 64K page configuration, swapper and idmap need to
- * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information). Note that the number of ID map translation levels
- * could be increased on the fly if system RAM is out of reach for the default
- * VA range, so pages required to map highest possible PA are reserved in all
- * cases.
- */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
+#if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
+#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
+#define SWAPPER_SKIP_LEVEL 1
#else
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
+#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
+#define SWAPPER_SKIP_LEVEL 0
#endif
+#define SWAPPER_BLOCK_SIZE (UL(1) << SWAPPER_BLOCK_SHIFT)
+#define SWAPPER_TABLE_SHIFT (SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
+#define INIT_IDMAP_PGTABLE_LEVELS (IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)
+
+#define IDMAP_VA_BITS 48
+#define IDMAP_LEVELS ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
+#define IDMAP_ROOT_LEVEL (4 - IDMAP_LEVELS)
/*
* A relocatable kernel may execute from an address that differs from the one at
@@ -50,57 +49,39 @@
#define EARLY_ENTRIES(vstart, vend, shift, add) \
(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
-#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
-
-#if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
-#else
-#define EARLY_PUDS(vstart, vend, add) (0)
-#endif
+#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
+ (lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
-#if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
-#else
-#define EARLY_PMDS(vstart, vend, add) (0)
-#endif
+#define EARLY_PAGES(lvls, vstart, vend, add) (1 /* PGDIR page */ \
+ + EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
+ + EARLY_LEVEL(2, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
+ + EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
+ + EARLY_SEGMENT_EXTRA_PAGES))
-#define EARLY_PAGES(vstart, vend, add) ( 1 /* PGDIR page */ \
- + EARLY_PGDS((vstart), (vend), add) /* each PGDIR needs a next level page table */ \
- + EARLY_PUDS((vstart), (vend), add) /* each PUD needs a next level page table */ \
- + EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE))
+#define INIT_IDMAP_DIR_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
+#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
-/* the initial ID map may need two extra pages if it needs to be extended */
-#if VA_BITS < 48
-#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
-#else
-#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
-#endif
-#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
+#define INIT_IDMAP_FDT_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
+#define INIT_IDMAP_FDT_SIZE ((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
-/* Initial memory map size */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
-#define SWAPPER_BLOCK_SIZE PMD_SIZE
-#define SWAPPER_TABLE_SHIFT PUD_SHIFT
-#else
-#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
-#define SWAPPER_BLOCK_SIZE PAGE_SIZE
-#define SWAPPER_TABLE_SHIFT PMD_SHIFT
-#endif
+/* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
+#define KERNEL_SEGMENT_COUNT 5
+#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
+#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
/*
- * Initial memory map attributes.
+ * The initial ID map consists of the kernel image, mapped as two separate
+ * segments, and may appear misaligned wrt the swapper block size. This means
+ * we need 3 additional pages. The DT could straddle a swapper block boundary,
+ * so it may need 2.
*/
-#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_UXN)
-#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | PTE_UXN)
-
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_RW_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS | PTE_WRITE)
-#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
+#define EARLY_IDMAP_EXTRA_PAGES 3
+#define EARLY_IDMAP_EXTRA_FDT_PAGES 2
#else
-#define SWAPPER_RW_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)
-#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
+#define EARLY_SEGMENT_EXTRA_PAGES 0
+#define EARLY_IDMAP_EXTRA_PAGES 0
+#define EARLY_IDMAP_EXTRA_FDT_PAGES 0
#endif
#endif /* __ASM_KERNEL_PGTABLE_H */
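
A standalone sketch of the page-count arithmetic above, for an assumed 4K-page configuration (PAGE_SHIFT = 12, SWAPPER_BLOCK_SHIFT = 21, three swapper levels) and a hypothetical 64 MiB image; SPAN_NR_ENTRIES is reproduced here from earlier in this header:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define SWAPPER_BLOCK_SHIFT	21

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
#define EARLY_ENTRIES(vstart, vend, shift, add) \
	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
	((lvls) > (lvl) ? EARLY_ENTRIES(vstart, vend, \
			SWAPPER_BLOCK_SHIFT + (lvl) * (PAGE_SHIFT - 3), add) : 0)
#define EARLY_PAGES(lvls, vstart, vend, add) \
	(1 + EARLY_LEVEL(3, lvls, vstart, vend, add) \
	   + EARLY_LEVEL(2, lvls, vstart, vend, add) \
	   + EARLY_LEVEL(1, lvls, vstart, vend, add))

int main(void)
{
	uint64_t vstart = 0xffff800008000000ULL;	/* hypothetical KIMAGE_VADDR */
	uint64_t vend   = vstart + (64ULL << 20);	/* + 64 MiB image */

	/* 3 swapper levels: 1 PGD page, plus 2 table pages at each of 2 levels. */
	assert(EARLY_PAGES(3, vstart, vend, 1) == 5);
	return 0;
}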
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 9ac9572a3bbe..4d9cc7a76d9c 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -80,7 +80,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
}
-#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
+#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_HIBERNATION)
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3c6f8ba1e479..e01bb5ca13b7 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,10 +102,8 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
-#define HCRX_GUEST_FLAGS \
- (HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
- (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* TCR_EL2 Registers bits */
#define TCR_EL2_DS (1UL << 32)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b804fe832184..975af30af31f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -209,7 +209,8 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
- return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
+ return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
+ (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}
static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
@@ -425,15 +426,9 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
case ESR_ELx_FSC_EXTABT:
- case ESR_ELx_FSC_SEA_TTW0:
- case ESR_ELx_FSC_SEA_TTW1:
- case ESR_ELx_FSC_SEA_TTW2:
- case ESR_ELx_FSC_SEA_TTW3:
+ case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
case ESR_ELx_FSC_SECC:
- case ESR_ELx_FSC_SECC_TTW0:
- case ESR_ELx_FSC_SECC_TTW1:
- case ESR_ELx_FSC_SECC_TTW2:
- case ESR_ELx_FSC_SECC_TTW3:
+ case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
return true;
default:
return false;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 21c57b812569..9e8a496fb284 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
return index;
}
+struct kvm_sysreg_masks;
+
+enum fgt_group_id {
+ __NO_FGT_GROUP__,
+ HFGxTR_GROUP,
+ HDFGRTR_GROUP,
+ HDFGWTR_GROUP = HDFGRTR_GROUP,
+ HFGITR_GROUP,
+ HAFGRTR_GROUP,
+
+ /* Must be last */
+ __NR_FGT_GROUP_IDS__
+};
+
struct kvm_arch {
struct kvm_s2_mmu mmu;
+ /*
+ * Fine-Grained UNDEF, mimicking the FGT layout defined by the
+ * architecture. We track them globally, as we present the
+ * same feature-set to all vcpus.
+ *
+ * Index 0 is currently spare.
+ */
+ u64 fgu[__NR_FGT_GROUP_IDS__];
+
/* Interrupt controller */
struct vgic_dist vgic;
@@ -274,6 +297,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
+ /* Fine-Grained UNDEF initialised */
+#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -294,6 +319,9 @@ struct kvm_arch {
/* PMCR_EL0.N value for the guest */
u8 pmcr_n;
+ /* Iterator for idreg debugfs */
+ u8 idreg_debugfs_iter;
+
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
struct maple_tree smccc_filter;
@@ -312,6 +340,9 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
+ /* Masks for VNCR-baked sysregs */
+ struct kvm_sysreg_masks *sysreg_masks;
+
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
@@ -474,6 +505,13 @@ enum vcpu_sysreg {
NR_SYS_REGS /* Nothing after this line! */
};
+struct kvm_sysreg_masks {
+ struct {
+ u64 res0;
+ u64 res1;
+ } mask[NR_SYS_REGS - __VNCR_START__];
+};
+
struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -543,12 +581,14 @@ struct kvm_vcpu_arch {
enum fp_type fp_type;
unsigned int sve_max_vl;
u64 svcr;
+ u64 fpmr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
/* Values of trap registers for the guest. */
u64 hcr_el2;
+ u64 hcrx_el2;
u64 mdcr_el2;
u64 cptr_el2;
@@ -868,7 +908,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
-#define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r) \
+ (*({ \
+ const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+ u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
+ if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
+ *__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
+ __r; \
+ }))
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
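
The macro relies on a GNU statement expression that yields a pointer and is then dereferenced, so it remains usable as an lvalue while the read path can first sanitise VNCR-backed registers. A userspace sketch of that pattern (names and the sanitising rule are made up for illustration; requires GNU C):

#include <assert.h>
#include <stdint.h>

static uint64_t regs[4];

static uint64_t sanitise(int r)
{
	return regs[r] & ~0xfULL;	/* pretend the low nibble is RES0 */
}

#define VCPU_REG(r)				\
	(*({					\
		uint64_t *__r = &regs[(r)];	\
		if ((r) >= 2)			\
			*__r = sanitise(r);	\
		__r;				\
	}))

int main(void)
{
	VCPU_REG(2) = 0x1235;		/* writable: the macro expands to an lvalue */
	assert(VCPU_REG(2) == 0x1230);	/* reads see the sanitised value */
	return 0;
}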
@@ -1055,14 +1103,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
+void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
int __init kvm_sys_reg_table_init(void);
+struct sys_reg_desc;
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+ unsigned int idx);
int __init populate_nv_trap_config(void);
bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);
+void kvm_init_sysreg(struct kvm_vcpu *);
+
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1233,4 +1287,48 @@ static inline void kvm_hyp_reserve(void) { }
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
+#define __expand_field_sign_unsigned(id, fld, val) \
+ ((u64)SYS_FIELD_VALUE(id, fld, val))
+
+#define __expand_field_sign_signed(id, fld, val) \
+ ({ \
+ u64 __val = SYS_FIELD_VALUE(id, fld, val); \
+ sign_extend64(__val, id##_##fld##_WIDTH - 1); \
+ })
+
+#define expand_field_sign(id, fld, val) \
+ (id##_##fld##_SIGNED ? \
+ __expand_field_sign_signed(id, fld, val) : \
+ __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld) \
+ ({ \
+ u64 __val = IDREG((kvm), SYS_##id); \
+ FIELD_GET(id##_##fld##_MASK, __val); \
+ })
+
+#define get_idreg_field_signed(kvm, id, fld) \
+ ({ \
+ u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
+ sign_extend64(__val, id##_##fld##_WIDTH - 1); \
+ })
+
+#define get_idreg_field_enum(kvm, id, fld) \
+ get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld) \
+ (id##_##fld##_SIGNED ? \
+ get_idreg_field_signed(kvm, id, fld) : \
+ get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit) \
+ (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val) \
+ (get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+
+#define kvm_has_feat_range(kvm, id, fld, min, max) \
+ (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
+ get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
#endif /* __ARM64_KVM_HOST_H__ */
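
The signed/unsigned split above matters because some ID register fields are signed: a raw 0xf in a 4-bit field means -1 ("not implemented"), so kvm_has_feat() must sign-extend before the >= comparison. A standalone check (sign_extend64() reproduced with the semantics of include/linux/bitops.h):

#include <assert.h>
#include <stdint.h>

static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* 4-bit signed field: the sign bit index is WIDTH - 1 = 3 */
	assert(sign_extend64(0xf, 3) == -1);	/* feature absent */
	assert(sign_extend64(0x1, 3) == 1);	/* feature level 1 */

	/* Signed compare gets it right; raw unsigned would say 0x1 < 0xf. */
	assert(sign_extend64(0x1, 3) >= sign_extend64(0xf, 3));
	return 0;
}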
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 145ce73fc16c..3e2a1ac0c9bb 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
/*
* Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
* static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
*/
#define __kvm_swab32(x) ___constant_swab32(x)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3e793d0ec30..d5e48d870461 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -54,27 +54,6 @@
#include <asm/alternative.h>
/*
- * Convert a kernel VA into a HYP VA.
- * reg: VA to be converted.
- *
- * The actual code generation takes place in kvm_update_va_mask, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_va_mask uses the
- * specific registers encoded in the instructions).
- */
-.macro kern_hyp_va reg
-#ifndef __KVM_VHE_HYPERVISOR__
-alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
- and \reg, \reg, #1 /* mask with va_mask */
- ror \reg, \reg, #1 /* rotate to the first tag bit */
- add \reg, \reg, #0 /* insert the low 12 bits of the tag */
- add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */
- ror \reg, \reg, #63 /* rotate back */
-alternative_cb_end
-#endif
-.endm
-
-/*
* Convert a hypervisor VA to a PA
* reg: hypervisor address to be converted in place
* tmp: temporary register
@@ -127,14 +106,29 @@ void kvm_apply_hyp_relocations(void);
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
+/*
+ * Convert a kernel VA into a HYP VA.
+ *
+ * Can be called from hyp or non-hyp context.
+ *
+ * The actual code generation takes place in kvm_update_va_mask(), and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_update_va_mask() uses the
+ * specific registers encoded in the instructions).
+ */
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
+/*
+ * This #ifndef is an optimisation for when this is called from VHE hyp
+ * context. When called from a VHE non-hyp context, kvm_update_va_mask() will
+ * replace the instructions with `nop`s.
+ */
#ifndef __KVM_VHE_HYPERVISOR__
- asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
- "ror %0, %0, #1\n"
- "add %0, %0, #0\n"
- "add %0, %0, #0, lsl 12\n"
- "ror %0, %0, #63\n",
+ asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" /* mask with va_mask */
+ "ror %0, %0, #1\n" /* rotate to the first tag bit */
+ "add %0, %0, #0\n" /* insert the low 12 bits of the tag */
+ "add %0, %0, #0, lsl 12\n" /* insert the top 12 bits of the tag */
+ "ror %0, %0, #63\n", /* rotate back */
ARM64_ALWAYS_SYSTEM,
kvm_update_va_mask)
: "+r" (v));
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 4882905357f4..c77d795556e1 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48);
}
-extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index cfdf40f734b1..19278dfe7978 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -197,6 +197,7 @@ enum kvm_pgtable_stage2_flags {
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
+ * @KVM_PGTABLE_PROT_NORMAL_NC: Normal noncacheable attributes.
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
@@ -208,6 +209,7 @@ enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_R = BIT(2),
KVM_PGTABLE_PROT_DEVICE = BIT(3),
+ KVM_PGTABLE_PROT_NORMAL_NC = BIT(4),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d82305ab420f..54fb014eba05 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
* of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SHIFT (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
+#define VMEMMAP_RANGE (_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
+#define VMEMMAP_SIZE ((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))
/*
* PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -47,14 +47,18 @@
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE (SZ_2G)
-#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
-#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
-#define PCI_IO_END (VMEMMAP_START - SZ_8M)
-#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
-#define FIXADDR_TOP (VMEMMAP_START - SZ_32M)
+#define VMEMMAP_START (VMEMMAP_END - VMEMMAP_SIZE)
+#define VMEMMAP_END (-UL(SZ_1G))
+#define PCI_IO_START (VMEMMAP_END + SZ_8M)
+#define PCI_IO_END (PCI_IO_START + PCI_IO_SIZE)
+#define FIXADDR_TOP (-UL(SZ_8M))
#if VA_BITS > 48
+#ifdef CONFIG_ARM64_16K_PAGES
+#define VA_BITS_MIN (47)
+#else
#define VA_BITS_MIN (48)
+#endif
#else
#define VA_BITS_MIN (VA_BITS)
#endif
@@ -173,6 +177,7 @@
* Memory types for Stage-2 translation
*/
#define MT_S2_NORMAL 0xf
+#define MT_S2_NORMAL_NC 0x5
#define MT_S2_DEVICE_nGnRE 0x1
/*
@@ -180,6 +185,7 @@
* Stage-2 enforces Normal-WB and Device-nGnRE
*/
#define MT_S2_FWB_NORMAL 6
+#define MT_S2_FWB_NORMAL_NC 5
#define MT_S2_FWB_DEVICE_nGnRE 1
#ifdef CONFIG_ARM64_4K_PAGES
@@ -209,9 +215,20 @@
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/sections.h>
+#include <asm/sysreg.h>
+
+static inline u64 __pure read_tcr(void)
+{
+ u64 tcr;
+
+ // read_sysreg() uses asm volatile, so avoid it here
+ asm("mrs %0, tcr_el1" : "=r"(tcr));
+ return tcr;
+}
#if VA_BITS > 48
-extern u64 vabits_actual;
+// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
+#define vabits_actual (64 - ((read_tcr() >> 16) & 63))
#else
#define vabits_actual ((u64)VA_BITS)
#endif
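
The open-coded field extraction corresponds to TCR_EL1.T1SZ at bits [21:16], with the kernel VA size being 64 - T1SZ. A quick standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tcr_48bit = 16ULL << 16;	/* T1SZ = 64 - 48 */
	uint64_t tcr_52bit = 12ULL << 16;	/* T1SZ = 64 - 52 */

	assert(64 - ((tcr_48bit >> 16) & 63) == 48);
	assert(64 - ((tcr_52bit >> 16) & 63) == 52);
	return 0;
}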
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 2fcf51231d6e..65977c7783c5 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -71,10 +71,46 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
-extern bool kaslr_requires_kpti(void);
+
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * framework is initialised. Checking the status on the local CPU allows
+ * the boot CPU to detect the need for non-global mappings and thus avoid
+ * a pagetable rewrite once all the CPUs have been booted. The check is
+ * also run on each individual CPU, so that a consistent state is reached
+ * once the SMP CPUs are up, at which point the switch to non-global
+ * mappings is made if required.
+ */
+static inline bool kaslr_requires_kpti(void)
+{
+ /*
+ * E0PD does a similar job to KPTI so can be used instead
+ * where available.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+ u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ if (cpuid_feature_extract_unsigned_field(mmfr2,
+ ID_AA64MMFR2_EL1_E0PD_SHIFT))
+ return false;
+ }
+
+ /*
+	 * Systems affected by Cavium erratum 27456 are incompatible
+ * with KPTI.
+ */
+ if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ if (is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus))
+ return false;
+ }
+
+ return true;
+}
#define INIT_MM_CONTEXT(name) \
- .pgd = init_pg_dir,
+ .pgd = swapper_pg_dir,
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9ce4200508b1..c768d16b81a4 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -61,11 +61,9 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
}
/*
- * TCR.T0SZ value to use when the ID map is active. Usually equals
- * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
- * physical memory, in which case it will be smaller.
+ * TCR.T0SZ value to use when the ID map is active.
*/
-extern int idmap_t0sz;
+#define idmap_t0sz TCR_T0SZ(IDMAP_VA_BITS)
/*
* Ensure TCR.T0SZ is set to the provided value.
@@ -110,18 +108,13 @@ static inline void cpu_uninstall_idmap(void)
cpu_switch_mm(mm->pgd, mm);
}
-static inline void __cpu_install_idmap(pgd_t *idmap)
+static inline void cpu_install_idmap(void)
{
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(lm_alias(idmap), &init_mm);
-}
-
-static inline void cpu_install_idmap(void)
-{
- __cpu_install_idmap(idmap_pg_dir);
+ cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
/*
@@ -148,51 +141,21 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
isb();
}
-/*
- * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
- * avoiding the possibility of conflicting TLB entries being allocated.
- */
-static inline void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
-{
- typedef void (ttbr_replace_func)(phys_addr_t);
- extern ttbr_replace_func idmap_cpu_replace_ttbr1;
- ttbr_replace_func *replace_phys;
- unsigned long daif;
-
- /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
- phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
-
- if (cnp)
- ttbr1 |= TTBR_CNP_BIT;
-
- replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
-
- __cpu_install_idmap(idmap);
-
- /*
- * We really don't want to take *any* exceptions while TTBR1 is
- * in the process of being replaced so mask everything.
- */
- daif = local_daif_save();
- replace_phys(ttbr1);
- local_daif_restore(daif);
-
- cpu_uninstall_idmap();
-}
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp);
static inline void cpu_enable_swapper_cnp(void)
{
- __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir, true);
+ __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), true);
}
-static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
/*
* Only for early TTBR1 replacement before cpucaps are finalized and
* before we've decided whether to use CNP.
*/
WARN_ON(system_capabilities_finalized());
- __cpu_replace_ttbr1(pgdp, idmap, false);
+ __cpu_replace_ttbr1(pgdp, false);
}
/*
diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
index 20070a847304..a975e1a689dd 100644
--- a/arch/arm64/include/asm/mshyperv.h
+++ b/arch/arm64/include/asm/mshyperv.h
@@ -31,12 +31,12 @@ void hv_set_vpreg(u32 reg, u64 value);
u64 hv_get_vpreg(u32 reg);
void hv_get_vpreg_128(u32 reg, struct hv_get_vp_registers_output *result);
-static inline void hv_set_register(unsigned int reg, u64 value)
+static inline void hv_set_msr(unsigned int reg, u64 value)
{
hv_set_vpreg(reg, value);
}
-static inline u64 hv_get_register(unsigned int reg)
+static inline u64 hv_get_msr(unsigned int reg)
{
return hv_get_vpreg(reg);
}
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 237224484d0f..8ff5f2a2579e 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -14,6 +14,7 @@
#include <asm/tlbflush.h>
#define __HAVE_ARCH_PGD_FREE
+#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
@@ -43,7 +44,8 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
- set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
+ if (pgtable_l4_enabled())
+ set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
}
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
@@ -53,6 +55,13 @@ static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN;
__p4d_populate(p4dp, __pa(pudp), p4dval);
}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (!pgtable_l4_enabled())
+ return;
+ __pud_free(mm, pud);
+}
#else
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
@@ -60,6 +69,47 @@ static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#if CONFIG_PGTABLE_LEVELS > 4
+
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot)
+{
+ if (pgtable_l5_enabled())
+ set_pgd(pgdp, __pgd(__phys_to_pgd_val(p4dp) | prot));
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp)
+{
+ pgdval_t pgdval = PGD_TYPE_TABLE;
+
+ pgdval |= (mm == &init_mm) ? PGD_TABLE_UXN : PGD_TABLE_PXN;
+ __pgd_populate(pgdp, __pa(p4dp), pgdval);
+}
+
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (p4d_t *)get_zeroed_page(gfp);
+}
+
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!pgtable_l5_enabled())
+ return;
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ free_page((unsigned long)p4d);
+}
+
+#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_PGTABLE_LEVELS > 4 */
+
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e4944d517c99..ef207a0d4f0d 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -26,10 +26,10 @@
#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
/*
- * Size mapped by an entry at level n ( 0 <= n <= 3)
+ * Size mapped by an entry at level n ( -1 <= n <= 3)
* We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
* in the final page. The maximum number of translation levels supported by
- * the architecture is 4. Hence, starting at level n, we have further
+ * the architecture is 5. Hence, starting at level n, we have further
* ((4 - n) - 1) levels of translation excluding the offset within the page.
* So, the total number of bits mapped by an entry at level n is :
*
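
With ARM64_HW_PGTABLE_LEVEL_SHIFT(n) defined just below in this header as ((PAGE_SHIFT - 3) * (4 - (n)) + 3), the extension to level -1 works out as follows for 4K pages (a standalone check; the 16K/64K cases follow the same formula):

#include <assert.h>

#define PAGE_SHIFT	12	/* 4K pages assumed for this sketch */
#define LEVEL_SHIFT(n)	(((PAGE_SHIFT - 3) * (4 - (n))) + 3)

int main(void)
{
	assert(LEVEL_SHIFT(3)  == 12);	/* PTE:  4 KiB   */
	assert(LEVEL_SHIFT(2)  == 21);	/* PMD:  2 MiB   */
	assert(LEVEL_SHIFT(1)  == 30);	/* PUD:  1 GiB   */
	assert(LEVEL_SHIFT(0)  == 39);	/* P4D:  512 GiB */
	assert(LEVEL_SHIFT(-1) == 48);	/* PGD:  256 TiB, the new level -1 */
	return 0;
}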
@@ -62,9 +62,16 @@
#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3))
#endif
+#if CONFIG_PGTABLE_LEVELS > 4
+#define P4D_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
+#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
+#define PTRS_PER_P4D (1 << (PAGE_SHIFT - 3))
+#endif
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map
- * (depending on the configuration, this level can be 0, 1 or 2).
+ * (depending on the configuration, this level can be -1, 0, 1 or 2).
*/
#define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
@@ -87,6 +94,15 @@
/*
* Hardware page table definitions.
*
+ * Level -1 descriptor (PGD).
+ */
+#define PGD_TYPE_TABLE (_AT(pgdval_t, 3) << 0)
+#define PGD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
+#define PGD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
+#define PGD_TABLE_PXN (_AT(pgdval_t, 1) << 59)
+#define PGD_TABLE_UXN (_AT(pgdval_t, 1) << 60)
+
+/*
* Level 0 descriptor (P4D).
*/
#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0)
@@ -155,13 +171,17 @@
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
-#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
+#ifdef CONFIG_ARM64_64K_PAGES
#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
-#define PTE_ADDR_MASK (PTE_ADDR_LOW | PTE_ADDR_HIGH)
#define PTE_ADDR_HIGH_SHIFT 36
+#define PHYS_TO_PTE_ADDR_MASK (PTE_ADDR_LOW | PTE_ADDR_HIGH)
#else
-#define PTE_ADDR_MASK PTE_ADDR_LOW
+#define PTE_ADDR_HIGH (_AT(pteval_t, 0x3) << 8)
+#define PTE_ADDR_HIGH_SHIFT 42
+#define PHYS_TO_PTE_ADDR_MASK GENMASK_ULL(49, 8)
+#endif
#endif
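
The same fold/unfold round-trip as in the 64K case, now with PA[51:50] carried in PTE bits [9:8] (freed up under LPA2 by dropping the shareability field) and a shift of 42. A standalone sketch assuming 4K pages:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t low  = (((1ULL << (50 - 12)) - 1) << 12);   /* PTE_ADDR_LOW: [49:12] */
	uint64_t high = 0x3ULL << 8;                         /* PTE_ADDR_HIGH: [9:8]  */
	uint64_t mask = ((1ULL << 50) - 1) & ~0xffULL;       /* GENMASK_ULL(49, 8)    */

	uint64_t pa   = (0x3ULL << 50) | 0x123456000ULL;     /* PA[51:50] = 0x3 */
	uint64_t pte  = (pa | (pa >> 42)) & mask;            /* __phys_to_pte_val */
	uint64_t back = (pte & low) | ((pte & high) << 42);  /* __pte_to_phys */

	assert(back == pa);
	return 0;
}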
/*
@@ -284,6 +304,7 @@
#define TCR_E0PD1 (UL(1) << 56)
#define TCR_TCMA0 (UL(1) << 57)
#define TCR_TCMA1 (UL(1) << 58)
+#define TCR_DS (UL(1) << 59)
/*
* TTBR.
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 483dbfa39c4c..dd9ee67d1d87 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -30,8 +30,8 @@
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
-#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_MAYBE_NG | PTE_MAYBE_SHARED | PTE_AF)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_MAYBE_NG | PMD_MAYBE_SHARED | PMD_SECT_AF)
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -57,10 +57,6 @@
#define _PAGE_READONLY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define _PAGE_EXECONLY (_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
-#ifdef __ASSEMBLY__
-#define PTE_MAYBE_NG 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/cpufeature.h>
@@ -71,7 +67,19 @@ extern bool arm64_use_ng_mappings;
#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
+#ifndef CONFIG_ARM64_LPA2
#define lpa2_is_enabled() false
+#define PTE_MAYBE_SHARED PTE_SHARED
+#define PMD_MAYBE_SHARED PMD_SECT_S
+#else
+static inline bool __pure lpa2_is_enabled(void)
+{
+ return read_tcr() & TCR_DS;
+}
+
+#define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
+#define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#endif
/*
* If we have userspace only BTI we don't want to mark kernel pages
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index b8f158ae2527..6d6d4065b0cb 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -36,6 +36,12 @@ typedef struct { pudval_t pud; } pud_t;
#define __pud(x) ((pud_t) { (x) } )
#endif
+#if CONFIG_PGTABLE_LEVELS > 4
+typedef struct { p4dval_t p4d; } p4d_t;
+#define p4d_val(x) ((x).p4d)
+#define __p4d(x) ((p4d_t) { (x) } )
+#endif
+
typedef struct { pgdval_t pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) } )
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 79ce70fbb751..afdd56d26ad7 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -18,11 +18,15 @@
* VMALLOC range.
*
* VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
- * and fixed mappings
+ * VMALLOC_END: extends to the available space below vmemmap
*/
#define VMALLOC_START (MODULES_END)
-#define VMALLOC_END (VMEMMAP_START - SZ_256M)
+#if VA_BITS == VA_BITS_MIN
+#define VMALLOC_END (VMEMMAP_START - SZ_8M)
+#else
+#define VMEMMAP_UNUSED_NPAGES ((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
+#endif
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
@@ -76,15 +80,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
+ pte_val(pte) &= ~PTE_MAYBE_SHARED;
return (pte_val(pte) & PTE_ADDR_LOW) |
((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
- return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
+ return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
-#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
+#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys) (phys)
#endif
@@ -93,7 +98,8 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_none(pte) (!pte_val(pte))
-#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
+#define __pte_clear(mm, addr, ptep) \
+ __set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
/*
@@ -133,11 +139,15 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
+ * Returns true if the pte is valid and has the contiguous bit set.
+ */
+#define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte))
+/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
* Note that we can't make any assumptions based on the state of the access
- * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+ * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
* TLB.
*/
#define pte_accessible(mm, pte) \
@@ -261,7 +271,7 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}
-static inline void set_pte(pte_t *ptep, pte_t pte)
+static inline void __set_pte(pte_t *ptep, pte_t pte)
{
WRITE_ONCE(*ptep, pte);
@@ -275,6 +285,11 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
}
+static inline pte_t __ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+
extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(u64 old, u64 new);
@@ -302,7 +317,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
if (!IS_ENABLED(CONFIG_DEBUG_VM))
return;
- old_pte = READ_ONCE(*ptep);
+ old_pte = __ptep_get(ptep);
if (!pte_valid(old_pte) || !pte_valid(pte))
return;
@@ -311,7 +326,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
/*
* Check for potential race with hardware updates of the pte
- * (ptep_set_access_flags safely changes valid ptes without going
+ * (__ptep_set_access_flags safely changes valid ptes without going
* through an invalid entry).
*/
VM_WARN_ONCE(!pte_young(pte),
@@ -341,23 +356,38 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
mte_sync_tags(pte, nr_pages);
}
-static inline void set_ptes(struct mm_struct *mm,
- unsigned long __always_unused addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
+#define pte_advance_pfn pte_advance_pfn
+static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
+{
+ return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
+}
+
+static inline void __set_ptes(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
page_table_check_ptes_set(mm, ptep, pte, nr);
__sync_cache_and_tags(pte, nr);
for (;;) {
__check_safe_pte_update(mm, ptep, pte);
- set_pte(ptep, pte);
+ __set_pte(ptep, pte);
if (--nr == 0)
break;
ptep++;
- pte_val(pte) += PAGE_SIZE;
+ pte = pte_advance_pfn(pte, 1);
}
}
-#define set_ptes set_ptes
/*
* Huge pte definitions.
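
Replacing the raw pte_val(pte) += PAGE_SIZE with pte_advance_pfn() keeps the loop correct even when the address bits are not stored contiguously (as with the 52-bit PA layouts above). A simplified standalone model of the pfn-stepping contract (flat address layout assumed, so this only illustrates the prot-preserving behaviour):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define ADDR_MASK	(((1ULL << 38) - 1) << PAGE_SHIFT)

typedef uint64_t pte_t;

static uint64_t pte_pfn(pte_t pte)    { return (pte & ADDR_MASK) >> PAGE_SHIFT; }
static uint64_t pte_pgprot(pte_t pte) { return pte & ~ADDR_MASK; }
static pte_t pfn_pte(uint64_t pfn, uint64_t prot)
{
	return (pfn << PAGE_SHIFT) | prot;
}

/* Step the pfn while preserving every non-address bit. */
static pte_t pte_advance_pfn(pte_t pte, uint64_t nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

int main(void)
{
	pte_t pte = pfn_pte(0x1000, 0x3);

	pte = pte_advance_pfn(pte, 4);
	assert(pte_pfn(pte) == 0x1004);		/* pfn advanced */
	assert(pte_pgprot(pte) == 0x3);		/* prot bits preserved */
	return 0;
}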
@@ -433,16 +463,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- unsigned long pfn = pte_pfn(pte);
-
- return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/linux/pgtable.h
@@ -534,7 +554,7 @@ static inline void __set_pte_at(struct mm_struct *mm,
{
__sync_cache_and_tags(pte, nr);
__check_safe_pte_update(mm, ptep, pte);
- set_pte(ptep, pte);
+ __set_pte(ptep, pte);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -616,12 +636,12 @@ static inline bool pud_table(pud_t pud) { return true; }
PUD_TYPE_TABLE)
#endif
-extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
-extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
+extern pgd_t swapper_pg_dir[];
+extern pgd_t idmap_pg_dir[];
+extern pgd_t tramp_pg_dir[];
+extern pgd_t reserved_pg_dir[];
extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
@@ -694,14 +714,14 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_user(pud) pte_user(pud_pte(pud))
#define pud_user_exec(pud) pte_user_exec(pud_pte(pud))
+static inline bool pgtable_l4_enabled(void);
+
static inline void set_pud(pud_t *pudp, pud_t pud)
{
-#ifdef __PAGETABLE_PUD_FOLDED
- if (in_swapper_pgdir(pudp)) {
+ if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
return;
}
-#endif /* __PAGETABLE_PUD_FOLDED */
WRITE_ONCE(*pudp, pud);
@@ -754,12 +774,27 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#if CONFIG_PGTABLE_LEVELS > 3
+static __always_inline bool pgtable_l4_enabled(void)
+{
+ if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
+ return true;
+ if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
+ return vabits_actual == VA_BITS;
+ return alternative_has_cap_unlikely(ARM64_HAS_VA52);
+}
+
+static inline bool mm_pud_folded(const struct mm_struct *mm)
+{
+ return !pgtable_l4_enabled();
+}
+#define mm_pud_folded mm_pud_folded
+
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
-#define p4d_none(p4d) (!p4d_val(p4d))
-#define p4d_bad(p4d) (!(p4d_val(p4d) & 2))
-#define p4d_present(p4d) (p4d_val(p4d))
+#define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d))
+#define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
+#define p4d_present(p4d) (!p4d_none(p4d))
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
@@ -775,7 +810,8 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline void p4d_clear(p4d_t *p4dp)
{
- set_p4d(p4dp, __p4d(0));
+ if (pgtable_l4_enabled())
+ set_p4d(p4dp, __p4d(0));
}
static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
@@ -783,27 +819,75 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
return __p4d_to_phys(p4d);
}
+#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
+{
+ return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
+}
+
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)__va(p4d_page_paddr(p4d));
}
-/* Find an entry in the first-level page table. */
-#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
+static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
+{
+ BUG_ON(!pgtable_l4_enabled());
-#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
-#define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr))
-#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
+ return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
+}
-#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
+static inline
+pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
+{
+ if (!pgtable_l4_enabled())
+ return p4d_to_folded_pud(p4dp, addr);
+ return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
+{
+ return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
+}
+#define pud_offset pud_offset
+
+static inline pud_t *pud_set_fixmap(unsigned long addr)
+{
+ if (!pgtable_l4_enabled())
+ return NULL;
+ return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
+}
+
+static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
+{
+ if (!pgtable_l4_enabled())
+ return p4d_to_folded_pud(p4dp, addr);
+ return pud_set_fixmap(pud_offset_phys(p4dp, addr));
+}
+
+static inline void pud_clear_fixmap(void)
+{
+ if (pgtable_l4_enabled())
+ clear_fixmap(FIX_PUD);
+}
/* use ONLY for statically allocated translation tables */
-#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
+{
+ if (!pgtable_l4_enabled())
+ return p4d_to_folded_pud(p4dp, addr);
+ return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
+}
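
For illustration, the folded-PUD fallback above is pure pointer arithmetic: with level 4 disabled, the p4d and pud levels share one table page, so the "pud" entry is recovered from the p4d pointer itself. A standalone userspace model (hedged: the 4K granule, 512-entry tables and the sample addresses are assumptions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed 4K granule */
#define PUD_SHIFT	30		/* assumed: 4K pages */
#define PTRS_PER_PUD	512UL

/* Mirrors p4d_to_folded_pud(): align the entry pointer down to the
 * containing table page, then index it with pud_index(addr). */
static uint64_t folded_pud(uint64_t p4dp, uint64_t addr)
{
	uint64_t base = p4dp & ~(PAGE_SIZE - 1);	/* PTR_ALIGN_DOWN */
	uint64_t idx  = (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);

	return base + idx * 8;				/* 8-byte entries */
}

int main(void)
{
	/* entry 3 of a table at ...f3c000 maps a VA whose pud_index is 3 */
	printf("%#llx\n", (unsigned long long)
	       folded_pud(0xffff000011f3c018ULL, 0xffff0000c0000000ULL));
	return 0;
}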
+
+#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
#else
+static inline bool pgtable_l4_enabled(void) { return false; }
+
#define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
-#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr) NULL
@@ -814,6 +898,122 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#if CONFIG_PGTABLE_LEVELS > 4
+
+static __always_inline bool pgtable_l5_enabled(void)
+{
+ if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
+ return vabits_actual == VA_BITS;
+ return alternative_has_cap_unlikely(ARM64_HAS_VA52);
+}
+
+static inline bool mm_p4d_folded(const struct mm_struct *mm)
+{
+ return !pgtable_l5_enabled();
+}
+#define mm_p4d_folded mm_p4d_folded
+
+#define p4d_ERROR(e) \
+ pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))
+
+#define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd))
+#define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
+#define pgd_present(pgd) (!pgd_none(pgd))
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ if (in_swapper_pgdir(pgdp)) {
+ set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
+ return;
+ }
+
+ WRITE_ONCE(*pgdp, pgd);
+ dsb(ishst);
+ isb();
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ if (pgtable_l5_enabled())
+ set_pgd(pgdp, __pgd(0));
+}
+
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+{
+ return __pgd_to_phys(pgd);
+}
+
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
+{
+ return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
+}
+
+static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
+{
+ BUG_ON(!pgtable_l5_enabled());
+
+ return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
+}
+
+static inline
+p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
+{
+ if (!pgtable_l5_enabled())
+ return pgd_to_folded_p4d(pgdp, addr);
+ return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
+{
+ return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
+}
+
+static inline p4d_t *p4d_set_fixmap(unsigned long addr)
+{
+ if (!pgtable_l5_enabled())
+ return NULL;
+ return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
+}
+
+static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
+{
+ if (!pgtable_l5_enabled())
+ return pgd_to_folded_p4d(pgdp, addr);
+ return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
+}
+
+static inline void p4d_clear_fixmap(void)
+{
+ if (pgtable_l5_enabled())
+ clear_fixmap(FIX_P4D);
+}
+
+/* use ONLY for statically allocated translation tables */
+static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
+{
+ if (!pgtable_l5_enabled())
+ return pgd_to_folded_p4d(pgdp, addr);
+ return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
+}
+
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
+
+#else
+
+static inline bool pgtable_l5_enabled(void) { return false; }
+
+/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
+#define p4d_set_fixmap(addr) NULL
+#define p4d_set_fixmap_offset(p4dp, addr) ((p4d_t *)p4dp)
+#define p4d_clear_fixmap()
+
+#define p4d_offset_kimg(dir,addr) ((p4d_t *)dir)
+
+#endif /* CONFIG_PGTABLE_LEVELS > 4 */
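
Taken together with pgtable_l4_enabled() earlier, the runtime folding reduces to a small matrix; a hedged summary derived from the checks above:

/*
 * CONFIG_PGTABLE_LEVELS == 4, LPA2 off: pgtable_l4_enabled() is always
 *     true.
 * CONFIG_PGTABLE_LEVELS == 4, LPA2 on:  level 4 only if the 52-bit VA
 *     space was attained (vabits_actual / ARM64_HAS_VA52); otherwise
 *     the pud level is folded.
 * CONFIG_PGTABLE_LEVELS == 5:           level 4 always enabled; level 5
 *     only with 52-bit VA, otherwise the p4d level is folded.
 */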
+
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
@@ -848,8 +1048,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma,
+extern int __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
@@ -859,7 +1058,8 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
- return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+ return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
+ pmd_pte(entry), dirty);
}
static inline int pud_devmap(pud_t pud)
@@ -893,12 +1093,13 @@ static inline bool pud_user_accessible_page(pud_t pud)
/*
* Atomic pte/pmd modifications.
*/
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
{
pte_t old_pte, pte;
- pte = READ_ONCE(*ptep);
+ pte = __ptep_get(ptep);
do {
old_pte = pte;
pte = pte_mkold(pte);
@@ -909,18 +1110,10 @@ static inline int __ptep_test_and_clear_young(pte_t *ptep)
return pte_young(pte);
}
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- return __ptep_test_and_clear_young(ptep);
-}
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- int young = ptep_test_and_clear_young(vma, address, ptep);
+ int young = __ptep_test_and_clear_young(vma, address, ptep);
if (young) {
/*
@@ -943,12 +1136,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
- return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+ return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
@@ -958,6 +1150,37 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
+static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full)
+{
+ for (;;) {
+ __ptep_get_and_clear(mm, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+
+static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, int full)
+{
+ pte_t pte, tmp_pte;
+
+ pte = __ptep_get_and_clear(mm, addr, ptep);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
+ return pte;
+}
+
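A hedged usage sketch of the batch variant above (PTL held; mm, addr and ptep are assumed to be set up by the caller): the single returned pte carries the union of the dirty/young state seen anywhere in the range.

	pte_t pte = __get_and_clear_full_ptes(mm, addr, ptep, 4, 0);

	/* true if *any* of the 4 cleared ptes was dirty/young */
	bool dirty = pte_dirty(pte);
	bool young = pte_young(pte);
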
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
@@ -971,16 +1194,12 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-/*
- * ptep_set_wrprotect - mark read-only while trasferring potential hardware
- * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
- */
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ pte_t pte)
{
- pte_t old_pte, pte;
+ pte_t old_pte;
- pte = READ_ONCE(*ptep);
do {
old_pte = pte;
pte = pte_wrprotect(pte);
@@ -989,12 +1208,31 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
} while (pte_val(pte) != pte_val(old_pte));
}
+/*
+ * __ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+static inline void __ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
+}
+
+static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
+ __ptep_set_wrprotect(mm, address, ptep);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+ __ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#define pmdp_establish pmdp_establish
@@ -1072,7 +1310,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
#endif /* CONFIG_ARM64_MTE */
/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ * On AArch64, the cache coherency is handled via the __set_ptes() function.
*/
static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
@@ -1124,6 +1362,282 @@ extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t new_pte);
+
+#ifdef CONFIG_ARM64_CONTPTE
+
+/*
+ * The contpte APIs are used to transparently manage the contiguous bit in ptes
+ * where it is possible and makes sense to do so. The PTE_CONT bit is considered
+ * a private implementation detail of the public ptep API (see below).
+ */
+extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
+extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
+extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr);
+extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full);
+extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, int full);
+extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr);
+extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t entry, int dirty);
+
+static __always_inline void contpte_try_fold(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ /*
+ * Only bother trying if both the virtual and physical addresses are
+ * aligned and correspond to the last entry in a contig range. The core
+ * code mostly modifies ranges from low to high, so this is likely the
+ * last modification in the contig range, and thus a good time to fold.
+ * We can't fold special mappings, because there is no associated folio.
+ */
+
+ const unsigned long contmask = CONT_PTES - 1;
+ bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;
+
+ if (unlikely(valign)) {
+ bool palign = (pte_pfn(pte) & contmask) == contmask;
+
+ if (unlikely(palign &&
+ pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
+ __contpte_try_fold(mm, addr, ptep, pte);
+ }
+}
+
+static __always_inline void contpte_try_unfold(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ if (unlikely(pte_valid_cont(pte)))
+ __contpte_try_unfold(mm, addr, ptep, pte);
+}
+
+#define pte_batch_hint pte_batch_hint
+static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
+{
+ if (!pte_valid_cont(pte))
+ return 1;
+
+ return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
+}
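
The hint arithmetic is easier to see in isolation: pte entries are 8 bytes, so ptep >> 3 is an entry index, and masking with CONT_PTES - 1 gives the position inside the contig block. A minimal userspace model (CONT_PTES of 16, the 4K-granule value, is an assumption):

#include <stdio.h>

#define CONT_PTES 16	/* assumed: 4K granule */

/* Entries remaining in the current contig block, counting this one. */
static unsigned int batch_hint(unsigned long ptep)
{
	return CONT_PTES - ((ptep >> 3) & (CONT_PTES - 1));
}

int main(void)
{
	printf("%u\n", batch_hint(0x1000));	/* block start -> 16 */
	printf("%u\n", batch_hint(0x1028));	/* entry 5     -> 11 */
	return 0;
}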
+
+/*
+ * The below functions constitute the public API that arm64 presents to the
+ * core-mm to manipulate PTE entries within their page tables (or at least this
+ * is the subset of the API that arm64 needs to implement). These public
+ * versions will automatically and transparently apply the contiguous bit where
+ * it makes sense to do so. Therefore any users that are contig-aware (e.g.
+ * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
+ * private versions, which are prefixed with double underscore. All of these
+ * APIs except for ptep_get_lockless() are expected to be called with the PTL
+ * held. Although the contiguous bit is considered private to the
+ * implementation, it is deliberately allowed to leak through the getters (e.g.
+ * ptep_get()), back to core code. This is required so that pte_leaf_size() can
+ * provide an accurate size for perf_get_pgtable_size(). But this leakage means
+ * it's possible a pte will be passed to a setter with the contiguous bit set, so
+ * we explicitly clear the contiguous bit in those cases to prevent accidentally
+ * setting it in the pgtable.
+ */
+
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ pte_t pte = __ptep_get(ptep);
+
+ if (likely(!pte_valid_cont(pte)))
+ return pte;
+
+ return contpte_ptep_get(ptep, pte);
+}
+
+#define ptep_get_lockless ptep_get_lockless
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ pte_t pte = __ptep_get(ptep);
+
+ if (likely(!pte_valid_cont(pte)))
+ return pte;
+
+ return contpte_ptep_get_lockless(ptep);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ /*
+ * We don't have the mm or vaddr so cannot unfold contig entries (since
+ * it requires tlb maintenance). set_pte() is not used in core code, so
+ * this should never even be called. Regardless, do our best to service
+ * any call and emit a warning if there is any attempt to set a pte on
+ * top of an existing contig range.
+ */
+ pte_t orig_pte = __ptep_get(ptep);
+
+ WARN_ON_ONCE(pte_valid_cont(orig_pte));
+ __set_pte(ptep, pte_mknoncont(pte));
+}
+
+#define set_ptes set_ptes
+static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ pte = pte_mknoncont(pte);
+
+ if (likely(nr == 1)) {
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+ __set_ptes(mm, addr, ptep, pte, 1);
+ contpte_try_fold(mm, addr, ptep, pte);
+ } else {
+ contpte_set_ptes(mm, addr, ptep, pte, nr);
+ }
+}
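
A hedged sketch of what this buys core code (mm, addr, ptep and pte are assumed to be set up by the caller; the 64K alignment is an assumption): a naturally aligned 16-entry batch can be folded into one contiguous mapping without the caller ever seeing PTE_CONT.

	/* 16 x 4K ptes, virtually and physically 64K-aligned:
	 * contpte_set_ptes() may set PTE_CONT across the whole batch. */
	set_ptes(mm, addr, ptep, pte, 16);

	/* ptep_get() folds access/dirty state of the whole block into
	 * the value it returns, so core-mm sees a coherent pte. */
	pte_t val = ptep_get(ptep);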
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+ __pte_clear(mm, addr, ptep);
+}
+
+#define clear_full_ptes clear_full_ptes
+static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full)
+{
+ if (likely(nr == 1)) {
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+ __clear_full_ptes(mm, addr, ptep, nr, full);
+ } else {
+ contpte_clear_full_ptes(mm, addr, ptep, nr, full);
+ }
+}
+
+#define get_and_clear_full_ptes get_and_clear_full_ptes
+static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, int full)
+{
+ pte_t pte;
+
+ if (likely(nr == 1)) {
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+ pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+ } else {
+ pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+ }
+
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+ return __ptep_get_and_clear(mm, addr, ptep);
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t orig_pte = __ptep_get(ptep);
+
+ if (likely(!pte_valid_cont(orig_pte)))
+ return __ptep_test_and_clear_young(vma, addr, ptep);
+
+ return contpte_ptep_test_and_clear_young(vma, addr, ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t orig_pte = __ptep_get(ptep);
+
+ if (likely(!pte_valid_cont(orig_pte)))
+ return __ptep_clear_flush_young(vma, addr, ptep);
+
+ return contpte_ptep_clear_flush_young(vma, addr, ptep);
+}
+
+#define wrprotect_ptes wrprotect_ptes
+static __always_inline void wrprotect_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ if (likely(nr == 1)) {
+ /*
+ * Optimization: wrprotect_ptes() can only be called for present
+ * ptes, so we only need to check the contig bit as the condition
+ * for unfold, and we can remove the contig bit from the pte we
+ * read to avoid re-reading. This speeds up fork(), which is
+ * performance-sensitive for order-0 folios. Equivalent to
+ * contpte_try_unfold().
+ */
+ pte_t orig_pte = __ptep_get(ptep);
+
+ if (unlikely(pte_cont(orig_pte))) {
+ __contpte_try_unfold(mm, addr, ptep, orig_pte);
+ orig_pte = pte_mknoncont(orig_pte);
+ }
+ ___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
+ } else {
+ contpte_wrprotect_ptes(mm, addr, ptep, nr);
+ }
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ wrprotect_ptes(mm, addr, ptep, 1);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ pte_t orig_pte = __ptep_get(ptep);
+
+ entry = pte_mknoncont(entry);
+
+ if (likely(!pte_valid_cont(orig_pte)))
+ return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);
+
+ return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
+}
+
+#else /* CONFIG_ARM64_CONTPTE */
+
+#define ptep_get __ptep_get
+#define set_pte __set_pte
+#define set_ptes __set_ptes
+#define pte_clear __pte_clear
+#define clear_full_ptes __clear_full_ptes
+#define get_and_clear_full_ptes __get_and_clear_full_ptes
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear __ptep_get_and_clear
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young __ptep_test_and_clear_young
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young __ptep_clear_flush_young
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define ptep_set_wrprotect __ptep_set_wrprotect
+#define wrprotect_ptes __wrprotect_ptes
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags __ptep_set_access_flags
+
+#endif /* CONFIG_ARM64_CONTPTE */
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5b0a04810b23..f77371232d8c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -155,6 +155,8 @@ struct thread_struct {
struct {
unsigned long tp_value; /* TLS register */
unsigned long tp2_value;
+ u64 fpmr;
+ unsigned long pad;
struct user_fpsimd_state fpsimd_state;
} uw;
@@ -253,6 +255,8 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
sizeof_field(struct thread_struct, uw.tp_value) +
sizeof_field(struct thread_struct, uw.tp2_value) +
+ sizeof_field(struct thread_struct, uw.fpmr) +
+ sizeof_field(struct thread_struct, uw.pad) +
sizeof_field(struct thread_struct, uw.fpsimd_state));
*offset = offsetof(struct thread_struct, uw);
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 581caac525b0..5b1701c76d1c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -29,13 +29,6 @@ void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
static inline void ptdump_debugfs_register(struct ptdump_info *info,
const char *name) { }
#endif
-void ptdump_check_wx(void);
#endif /* CONFIG_PTDUMP_CORE */
-#ifdef CONFIG_DEBUG_WX
-#define debug_checkwx() ptdump_check_wx()
-#else
-#define debug_checkwx() do { } while (0)
-#endif
-
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 3fdae5fe3142..2e010ea76be2 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -33,37 +33,11 @@
#include <asm/cpufeature.h>
#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
-static inline bool should_patch_pac_into_scs(void)
-{
- u64 reg;
-
- /*
- * We only enable the shadow call stack dynamically if we are running
- * on a system that does not implement PAC or BTI. PAC and SCS provide
- * roughly the same level of protection, and BTI relies on the PACIASP
- * instructions serving as landing pads, preventing us from patching
- * those instructions into something else.
- */
- reg = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);
- if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, reg) |
- SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, reg))
- return false;
-
- reg = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
- if (SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, reg))
- return false;
-
- if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
- reg = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
- if (reg & (0xf << ID_AA64PFR1_EL1_BT_SHIFT))
- return false;
- }
- return true;
-}
-
static inline void dynamic_scs_init(void)
{
- if (should_patch_pac_into_scs()) {
+ extern bool __pi_dynamic_scs_is_enabled;
+
+ if (__pi_dynamic_scs_is_enabled) {
pr_info("Enabling dynamic shadow call stack\n");
static_branch_enable(&dynamic_scs_enabled);
}
@@ -72,8 +46,8 @@ static inline void dynamic_scs_init(void)
static inline void dynamic_scs_init(void) {}
#endif
-int scs_patch(const u8 eh_frame[], int size);
-asmlinkage void scs_patch_vmlinux(void);
+int __pi_scs_patch(const u8 eh_frame[], int size);
+asmlinkage void __pi_scs_patch_vmlinux(void);
#endif /* __ASSEMBLY __ */
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
index 2e4d7da74fb8..ba269a7a3201 100644
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -7,9 +7,6 @@
#include <uapi/asm/setup.h>
-void *get_early_fdt_ptr(void);
-void early_fdt_map(u64 dt_phys);
-
/*
* These two variables are used in the head.S file.
*/
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index c3b19b376c86..9e8999592f3a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1181,6 +1181,8 @@
par; \
})
+#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val
+
#define SYS_FIELD_GET(reg, field, val) \
FIELD_GET(reg##_##field##_MASK, val)
@@ -1188,7 +1190,8 @@
FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
- FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
+ FIELD_PREP(reg##_##field##_MASK, \
+ SYS_FIELD_VALUE(reg, field, val))
#endif
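
As a concrete expansion (hedged; the register/field pair is one used later in this patch):

/*
 * SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, VARange, 52)
 *  -> FIELD_PREP(ID_AA64MMFR2_EL1_VARange_MASK,
 *                SYS_FIELD_VALUE(ID_AA64MMFR2_EL1, VARange, 52))
 *  -> FIELD_PREP(ID_AA64MMFR2_EL1_VARange_MASK,
 *                ID_AA64MMFR2_EL1_VARange_52)
 */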
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 0150deb332af..a947c6e784ed 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -103,6 +103,9 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
{
struct ptdesc *ptdesc = virt_to_ptdesc(pudp);
+ if (!pgtable_l4_enabled())
+ return;
+
pagetable_pud_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 1deb5d789c2e..3b0e8248e1a4 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -422,7 +422,7 @@ do { \
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
+static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long stride, bool last_level,
int tlb_level)
@@ -456,10 +456,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb_range_op(vae1is, start, pages, stride, asid,
tlb_level, true, lpa2_is_enabled());
- dsb(ish);
mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long stride, bool last_level,
+ int tlb_level)
+{
+ __flush_tlb_range_nosync(vma, start, end, stride,
+ last_level, tlb_level);
+ dsb(ish);
+}
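
The split lets a caller batch several invalidations behind one barrier; a hedged sketch (the caller, its ranges and the level-3 hint are hypothetical):

	__flush_tlb_range_nosync(vma, start1, end1, PAGE_SIZE, true, 3);
	__flush_tlb_range_nosync(vma, start2, end2, PAGE_SIZE, true, 3);
	dsb(ish);	/* one barrier completes both invalidations */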
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 5023599fa278..285610e626f5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -107,5 +107,20 @@
#define HWCAP2_SVE_B16B16 (1UL << 45)
#define HWCAP2_LRCPC3 (1UL << 46)
#define HWCAP2_LSE128 (1UL << 47)
+#define HWCAP2_FPMR (1UL << 48)
+#define HWCAP2_LUT (1UL << 49)
+#define HWCAP2_FAMINMAX (1UL << 50)
+#define HWCAP2_F8CVT (1UL << 51)
+#define HWCAP2_F8FMA (1UL << 52)
+#define HWCAP2_F8DP4 (1UL << 53)
+#define HWCAP2_F8DP2 (1UL << 54)
+#define HWCAP2_F8E4M3 (1UL << 55)
+#define HWCAP2_F8E5M2 (1UL << 56)
+#define HWCAP2_SME_LUTV2 (1UL << 57)
+#define HWCAP2_SME_F8F16 (1UL << 58)
+#define HWCAP2_SME_F8F32 (1UL << 59)
+#define HWCAP2_SME_SF8FMA (1UL << 60)
+#define HWCAP2_SME_SF8DP4 (1UL << 61)
+#define HWCAP2_SME_SF8DP2 (1UL << 62)
#endif /* _UAPI__ASM_HWCAP_H */
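
These bits are reported to userspace through the auxiliary vector; a minimal detection sketch for an arm64 system (userspace C, assuming a libc that provides getauxval()):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>	/* HWCAP2_* from this header */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & HWCAP2_FPMR)
		printf("fpmr\n");
	if (hwcap2 & HWCAP2_FAMINMAX)
		printf("faminmax\n");
	return 0;
}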
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 89d2fc872d9f..964df31da975 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -37,9 +37,7 @@
#include <asm/ptrace.h>
#include <asm/sve_context.h>
-#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -76,11 +74,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
- KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+ KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
- KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+ KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
@@ -162,6 +160,11 @@ struct kvm_sync_regs {
__u64 device_irq_level;
};
+/* Bits for run->s.regs.device_irq_level */
+#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
+#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
+#define KVM_ARM_DEV_PMU (1 << 2)
+
/*
* PMU filter structure. Describe a range of events with a particular
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index f23c1dc3f002..8a45b7a411e0 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -152,6 +152,14 @@ struct tpidr2_context {
__u64 tpidr2;
};
+/* FPMR context */
+#define FPMR_MAGIC 0x46504d52
+
+struct fpmr_context {
+ struct _aarch64_ctx head;
+ __u64 fpmr;
+};
+
#define ZA_MAGIC 0x54366345
struct za_context {
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h
index 754ab751b523..72aefc081061 100644
--- a/arch/arm64/include/uapi/asm/sve_context.h
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -13,6 +13,17 @@
#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */
+/*
+ * Yes, __SVE_VQ_MAX is 512 QUADWORDS.
+ *
+ * To help ensure forward portability, this is much larger than the
+ * current maximum value defined by the SVE architecture. While arrays
+ * or static allocations can be sized based on this value, watch out!
+ * It will waste a surprisingly large amount of memory.
+ *
+ * Dynamic sizing based on the actual runtime vector length is likely to
+ * be preferable for most purposes.
+ */
#define __SVE_VQ_MIN 1
#define __SVE_VQ_MAX 512
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 467cb7117273..763824963ed1 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,8 +33,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idreg-override.o idle.o \
- patching.o
+ syscall.o proton-pack.o idle.o patching.o pi/
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
@@ -57,7 +56,7 @@ obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
obj-$(CONFIG_PARAVIRT) += paravirt.o
-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o pi/
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
@@ -66,20 +65,12 @@ obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_CRASH_CORE) += crash_core.o
+obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
-obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
-
-# We need to prevent the SCS patching code from patching itself. Using
-# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
-# generated triggers an issue with full LTO on Clang, which stops emitting PAC
-# instructions altogether. So disable LTO as well for the compilation unit.
-CFLAGS_patch-scs.o += -mbranch-protection=none
-CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5a7dbbe0ce63..81496083c041 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,8 +75,8 @@ int main(void)
DEFINE(S_FP, offsetof(struct pt_regs, regs[29]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
- DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc));
+ DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8d1a634a403e..56583677c1f2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -140,12 +140,42 @@ void dump_cpu_features(void)
pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps);
}
+#define __ARM64_MAX_POSITIVE(reg, field) \
+ ((reg##_##field##_SIGNED ? \
+ BIT(reg##_##field##_WIDTH - 1) : \
+ BIT(reg##_##field##_WIDTH)) - 1)
+
+#define __ARM64_MIN_NEGATIVE(reg, field) BIT(reg##_##field##_WIDTH - 1)
+
+#define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value) \
+ .sys_reg = SYS_##reg, \
+ .field_pos = reg##_##field##_SHIFT, \
+ .field_width = reg##_##field##_WIDTH, \
+ .sign = reg##_##field##_SIGNED, \
+ .min_field_value = min_value, \
+ .max_field_value = max_value,
+
+/*
+ * ARM64_CPUID_FIELDS() encodes a field with a range from min_value to
+ * an implicit maximum that depends on the signedness of the field.
+ *
+ * An unsigned field will be capped at all ones, while a signed field
+ * will be limited to the positive half only.
+ */
#define ARM64_CPUID_FIELDS(reg, field, min_value) \
- .sys_reg = SYS_##reg, \
- .field_pos = reg##_##field##_SHIFT, \
- .field_width = reg##_##field##_WIDTH, \
- .sign = reg##_##field##_SIGNED, \
- .min_field_value = reg##_##field##_##min_value,
+ __ARM64_CPUID_FIELDS(reg, field, \
+ SYS_FIELD_VALUE(reg, field, min_value), \
+ __ARM64_MAX_POSITIVE(reg, field))
+
+/*
+ * ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an
+ * implicit minimum value to max_value. This should be used when
+ * matching a non-implemented property.
+ */
+#define ARM64_CPUID_FIELDS_NEG(reg, field, max_value) \
+ __ARM64_CPUID_FIELDS(reg, field, \
+ __ARM64_MIN_NEGATIVE(reg, field), \
+ SYS_FIELD_VALUE(reg, field, max_value))
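
The negative-range variant relies on ID fields being sign-extended before comparison; a hedged userspace model of the signed extraction (mirroring what cpuid_feature_extract_field_width() does for a 4-bit signed field; the sample values are made up):

#include <stdio.h>
#include <stdint.h>

/* Extract a 4-bit field at 'shift' and sign-extend it (arithmetic
 * right shift of a negative value is what GCC/Clang implement). */
static int extract_signed4(uint64_t reg, int shift)
{
	return (int64_t)(reg << (64 - shift - 4)) >> 60;
}

int main(void)
{
	printf("%d\n", extract_signed4(0xf0, 4));	/* 0xf -> -1 (e.g. NI) */
	printf("%d\n", extract_signed4(0x20, 4));	/* 0x2 ->  2 */
	return 0;
}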
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
{ \
@@ -220,6 +250,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_LUT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
@@ -234,6 +265,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64isar3[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
@@ -267,6 +303,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
@@ -295,6 +336,8 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_LUTv2_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
@@ -307,6 +350,10 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F16_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
@@ -316,6 +363,22 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8FMA_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP4_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP2_SHIFT, 1, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8CVT_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8FMA_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP4_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP2_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E4M3_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E5M2_SHIFT, 1, 0),
ARM64_FTR_END,
};
@@ -407,6 +470,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
@@ -655,13 +723,15 @@ static const struct arm64_ftr_bits ftr_raz[] = {
#define ARM64_FTR_REG(id, table) \
__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
-struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
-struct arm64_ftr_override __ro_after_init id_aa64pfr0_override;
-struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
-struct arm64_ftr_override __ro_after_init id_aa64zfr0_override;
-struct arm64_ftr_override __ro_after_init id_aa64smfr0_override;
-struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
-struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
+struct arm64_ftr_override id_aa64mmfr0_override;
+struct arm64_ftr_override id_aa64mmfr1_override;
+struct arm64_ftr_override id_aa64mmfr2_override;
+struct arm64_ftr_override id_aa64pfr0_override;
+struct arm64_ftr_override id_aa64pfr1_override;
+struct arm64_ftr_override id_aa64zfr0_override;
+struct arm64_ftr_override id_aa64smfr0_override;
+struct arm64_ftr_override id_aa64isar1_override;
+struct arm64_ftr_override id_aa64isar2_override;
struct arm64_ftr_override arm64_sw_feature_override;
@@ -702,10 +772,12 @@ static const struct __ftr_reg_entry {
&id_aa64pfr0_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
&id_aa64pfr1_override),
+ ARM64_FTR_REG(SYS_ID_AA64PFR2_EL1, ftr_id_aa64pfr2),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
&id_aa64zfr0_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
&id_aa64smfr0_override),
+ ARM64_FTR_REG(SYS_ID_AA64FPFR0_EL1, ftr_id_aa64fpfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -717,13 +789,17 @@ static const struct __ftr_reg_entry {
&id_aa64isar1_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
&id_aa64isar2_override),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3),
/* Op1 = 0, CRn = 0, CRm = 7 */
- ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
+ &id_aa64mmfr0_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
&id_aa64mmfr1_override),
- ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
+ &id_aa64mmfr2_override),
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
+ ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
/* Op1 = 1, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
@@ -919,7 +995,8 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
pr_warn("%s[%d:%d]: %s to %llx\n",
reg->name,
ftrp->shift + ftrp->width - 1,
- ftrp->shift, str, tmp);
+ ftrp->shift, str,
+ tmp & (BIT(ftrp->width) - 1));
} else if ((ftr_mask & reg->override->val) == ftr_mask) {
reg->override->val &= ~ftr_mask;
pr_warn("%s[%d:%d]: impossible override, ignored\n",
@@ -1043,14 +1120,18 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR3_EL1, info->reg_id_aa64isar3);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64FPFR0_EL1, info->reg_id_aa64fpfr0);
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
init_32bit_cpu_features(&info->aarch32);
@@ -1272,6 +1353,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu,
+ info->reg_id_aa64isar3, boot->reg_id_aa64isar3);
/*
* Differing PARange support is fine as long as all peripherals and
@@ -1291,6 +1374,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu,
+ info->reg_id_aa64pfr2, boot->reg_id_aa64pfr2);
taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
@@ -1298,6 +1383,9 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu,
+ info->reg_id_aa64fpfr0, boot->reg_id_aa64fpfr0);
+
/* Probe vector lengths */
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
@@ -1410,17 +1498,21 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64PFR2_EL1);
read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
read_sysreg_case(SYS_ID_AA64SMFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64FPFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
read_sysreg_case(SYS_ID_AA64MMFR3_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR4_EL1);
read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR3_EL1);
read_sysreg_case(SYS_CNTFRQ_EL0);
read_sysreg_case(SYS_CTR_EL0);
@@ -1451,11 +1543,28 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope)
static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
- int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
- entry->field_width,
- entry->sign);
+ int val, min, max;
+ u64 tmp;
+
+ val = cpuid_feature_extract_field_width(reg, entry->field_pos,
+ entry->field_width,
+ entry->sign);
+
+ tmp = entry->min_field_value;
+ tmp <<= entry->field_pos;
+
+ min = cpuid_feature_extract_field_width(tmp, entry->field_pos,
+ entry->field_width,
+ entry->sign);
- return val >= entry->min_field_value;
+ tmp = entry->max_field_value;
+ tmp <<= entry->field_pos;
+
+ max = cpuid_feature_extract_field_width(tmp, entry->field_pos,
+ entry->field_width,
+ entry->sign);
+
+ return val >= min && val <= max;
}
static u64
@@ -1620,46 +1729,6 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}
-/*
- * This check is triggered during the early boot before the cpufeature
- * is initialised. Checking the status on the local CPU allows the boot
- * CPU to detect the need for non-global mappings and thus avoiding a
- * pagetable re-write after all the CPUs are booted. This check will be
- * anyway run on individual CPUs, allowing us to get the consistent
- * state once the SMP CPUs are up and thus make the switch to non-global
- * mappings if required.
- */
-bool kaslr_requires_kpti(void)
-{
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return false;
-
- /*
- * E0PD does a similar job to KPTI so can be used instead
- * where available.
- */
- if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
- u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
- if (cpuid_feature_extract_unsigned_field(mmfr2,
- ID_AA64MMFR2_EL1_E0PD_SHIFT))
- return false;
- }
-
- /*
- * Systems affected by Cavium erratum 24756 are incompatible
- * with KPTI.
- */
- if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
- extern const struct midr_range cavium_erratum_27456_cpus[];
-
- if (is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus))
- return false;
- }
-
- return kaslr_enabled();
-}
-
static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
@@ -1712,7 +1781,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
}
/* Useful for KASLR robustness */
- if (kaslr_requires_kpti()) {
+ if (kaslr_enabled() && kaslr_requires_kpti()) {
if (!__kpti_forced) {
str = "KASLR";
__kpti_forced = 1;
@@ -1739,6 +1808,28 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return !meltdown_safe;
}
+static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /*
+ * Although the Apple M2 family appears to support NV1, the
+ * PTW barfs on the nVHE EL2 S1 page table format. Pretend
+ * that it doesn't support NV1 at all.
+ */
+ static const struct midr_range nv1_ni_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
+ {}
+ };
+
+ return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
+ !(has_cpuid_feature(entry, scope) ||
+ is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
+}
+
#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
static bool has_lpa2_at_stage1(u64 mmfr0)
{
@@ -1801,6 +1892,11 @@ static int __init __kpti_install_ng_mappings(void *__unused)
pgd_t *kpti_ng_temp_pgd;
u64 alloc = 0;
+ if (levels == 5 && !pgtable_l5_enabled())
+ levels = 4;
+ else if (levels == 4 && !pgtable_l4_enabled())
+ levels = 3;
+
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
if (!cpu) {
@@ -1814,9 +1910,9 @@ static int __init __kpti_install_ng_mappings(void *__unused)
//
// The physical pages are laid out as follows:
//
- // +--------+-/-------+-/------ +-\\--------+
- // : PTE[] : | PMD[] : | PUD[] : || PGD[] :
- // +--------+-\-------+-\------ +-//--------+
+ // +--------+-/-------+-/------ +-/------ +-\\\--------+
+ // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] :
+ // +--------+-\-------+-\------ +-\------ +-///--------+
// ^
// The first page is mapped into this hierarchy at a PMD_SHIFT
// aligned virtual address, so that we can manipulate the PTE
@@ -2042,14 +2138,7 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
int __unused)
{
- u64 val;
-
- val = read_sysreg(id_aa64mmfr1_el1);
- if (!cpuid_feature_extract_unsigned_field(val, ID_AA64MMFR1_EL1_VH_SHIFT))
- return false;
-
- val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask;
- return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE);
+ return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
}
#ifdef CONFIG_ARM64_PAN
@@ -2739,6 +2828,39 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_lpa2,
},
+ {
+ .desc = "FPMR",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_FPMR,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_fpmr,
+ ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP)
+ },
+#ifdef CONFIG_ARM64_VA_BITS_52
+ {
+ .capability = ARM64_HAS_VA52,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+#ifdef CONFIG_ARM64_64K_PAGES
+ .desc = "52-bit Virtual Addressing (LVA)",
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52)
+#else
+ .desc = "52-bit Virtual Addressing (LPA2)",
+#ifdef CONFIG_ARM64_4K_PAGES
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT)
+#else
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN16, 52_BIT)
+#endif
+#endif
+ },
+#endif
+ {
+ .desc = "NV1",
+ .capability = ARM64_HAS_HCR_NV1,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_nv1,
+ ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
+ },
{},
};
@@ -2822,6 +2944,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(ID_AA64PFR2_EL1, FPMR, IMP, CAP_HWCAP, KERNEL_HWCAP_FPMR),
HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@@ -2835,6 +2958,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT),
+ HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX),
HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
@@ -2875,6 +3000,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
@@ -2882,12 +3008,23 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
#endif /* CONFIG_ARM64_SME */
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4),
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2),
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3),
+ HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2),
{},
};
@@ -3052,13 +3189,9 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
for (i = 0; i < ARM64_NCAPS; i++) {
- unsigned int num;
-
caps = cpucap_ptrs[i];
- if (!caps || !(caps->type & scope_mask))
- continue;
- num = caps->capability;
- if (!cpus_have_cap(num))
+ if (!caps || !(caps->type & scope_mask) ||
+ !cpus_have_cap(caps->capability))
continue;
if (boot_scope && caps->cpu_enable)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 47043c0d95ec..09eeaa24d456 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -128,6 +128,21 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SVE_B16B16] = "sveb16b16",
[KERNEL_HWCAP_LRCPC3] = "lrcpc3",
[KERNEL_HWCAP_LSE128] = "lse128",
+ [KERNEL_HWCAP_FPMR] = "fpmr",
+ [KERNEL_HWCAP_LUT] = "lut",
+ [KERNEL_HWCAP_FAMINMAX] = "faminmax",
+ [KERNEL_HWCAP_F8CVT] = "f8cvt",
+ [KERNEL_HWCAP_F8FMA] = "f8fma",
+ [KERNEL_HWCAP_F8DP4] = "f8dp4",
+ [KERNEL_HWCAP_F8DP2] = "f8dp2",
+ [KERNEL_HWCAP_F8E4M3] = "f8e4m3",
+ [KERNEL_HWCAP_F8E5M2] = "f8e5m2",
+ [KERNEL_HWCAP_SME_LUTV2] = "smelutv2",
+ [KERNEL_HWCAP_SME_F8F16] = "smef8f16",
+ [KERNEL_HWCAP_SME_F8F32] = "smef8f32",
+ [KERNEL_HWCAP_SME_SF8FMA] = "smesf8fma",
+ [KERNEL_HWCAP_SME_SF8DP4] = "smesf8dp4",
+ [KERNEL_HWCAP_SME_SF8DP2] = "smesf8dp2",
};
#ifdef CONFIG_COMPAT
@@ -443,14 +458,18 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+ info->reg_id_aa64isar3 = read_cpuid(ID_AA64ISAR3_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1);
+ info->reg_id_aa64mmfr4 = read_cpuid(ID_AA64MMFR4_EL1);
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_aa64pfr2 = read_cpuid(ID_AA64PFR2_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1);
+ info->reg_id_aa64fpfr0 = read_cpuid(ID_AA64FPFR0_EL1);
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
info->reg_gmid = read_cpuid(GMID_EL1);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 0228001347be..9afcc690fe73 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -103,7 +103,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
struct set_perm_data *spd = data;
const efi_memory_desc_t *md = spd->md;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = __ptep_get(ptep);
if (md->attribute & EFI_MEMORY_RO)
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -111,7 +111,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
pte = set_pte_bit(pte, __pgprot(PTE_PXN));
else if (system_supports_bti_kernel() && spd->has_bti)
pte = set_pte_bit(pte, __pgprot(PTE_GP));
- set_pte(ptep, pte);
+ __set_pte(ptep, pte);
return 0;
}
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 0fc94207e69a..b77a15955f28 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
+#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>
@@ -126,16 +127,49 @@ static __always_inline void __exit_to_user_mode(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
+static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
+{
+ do {
+ local_irq_enable();
+
+ if (thread_flags & _TIF_NEED_RESCHED)
+ schedule();
+
+ if (thread_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR,
+ (void __user *)NULL, current);
+ }
+
+ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+
+ if (thread_flags & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+
+ local_irq_disable();
+ thread_flags = read_thread_flags();
+ } while (thread_flags & _TIF_WORK_MASK);
+}
+
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long flags;
- local_daif_mask();
+ local_irq_disable();
flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
+ local_daif_mask();
+
lockdep_sys_exit();
}
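
The loop added above follows the generic exit-to-user pattern: the pending-work
flags are sampled with interrupts disabled, each work item is handled with
interrupts enabled (handlers may sleep or raise further work), and the flags
are re-read before leaving so newly arrived work is not missed. A stand-alone
sketch of that pattern, not part of the patch, with invented flag names:

/* Hypothetical work bits standing in for the kernel's _TIF_* flags. */
#define WORK_RESCHED    (1UL << 0)
#define WORK_SIGNAL     (1UL << 1)
#define WORK_MASK       (WORK_RESCHED | WORK_SIGNAL)

static volatile unsigned long pending;  /* set from "interrupt" context */

static void irq_disable(void) { /* mask interrupts */ }
static void irq_enable(void)  { /* unmask interrupts */ }

static void exit_to_user_work(void)
{
    unsigned long flags = pending;

    while (flags & WORK_MASK) {
        irq_enable();               /* work handlers may sleep */

        if (flags & WORK_RESCHED) {
            /* schedule() */
        }
        if (flags & WORK_SIGNAL) {
            /* deliver a signal; may set WORK_RESCHED again */
        }

        irq_disable();              /* re-sample atomically wrt new work */
        flags = pending;
    }
}

int main(void)
{
    exit_to_user_work();            /* nothing pending: returns at once */
    return 0;
}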
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f27acca550d5..ebb0158997ca 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -359,6 +359,9 @@ static void task_fpsimd_load(void)
WARN_ON(preemptible());
WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));
+ if (system_supports_fpmr())
+ write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
+
if (system_supports_sve() || system_supports_sme()) {
switch (current->thread.fp_type) {
case FP_STATE_FPSIMD:
@@ -446,6 +449,9 @@ static void fpsimd_save_user_state(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
+ if (system_supports_fpmr())
+ *(last->fpmr) = read_sysreg_s(SYS_FPMR);
+
/*
* If a task is in a syscall the ABI allows us to only
* preserve the state shared with FPSIMD so don't bother
@@ -688,6 +694,12 @@ static void sve_to_fpsimd(struct task_struct *task)
}
}
+void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
+ SYS_SCTLR_EL1);
+}
+
#ifdef CONFIG_ARM64_SVE
/*
* Call __sve_free() directly only if you know task can't be scheduled
@@ -1134,6 +1146,8 @@ void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb();
+
+ write_sysreg_s(0, SYS_ZCR_EL1);
}
void __init sve_setup(void)
@@ -1245,6 +1259,9 @@ void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
isb();
+ /* Ensure all bits in SMCR are set to known values */
+ write_sysreg_s(0, SYS_SMCR_EL1);
+
/* Allow EL0 to access TPIDR2 */
write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
isb();
@@ -1696,6 +1713,7 @@ static void fpsimd_bind_task_to_cpu(void)
last->sve_vl = task_get_sve_vl(current);
last->sme_vl = task_get_sme_vl(current);
last->svcr = &current->thread.svcr;
+ last->fpmr = &current->thread.uw.fpmr;
last->fp_type = &current->thread.fp_type;
last->to_save = FP_STATE_CURRENT;
current->thread.fpsimd_cpu = smp_processor_id();
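
These hunks wire FPMR into the existing lazy FP-state machinery: the register
is captured into the task's saved state in fpsimd_save_user_state() and written
back in task_fpsimd_load(), with cpu_enable_fpmr() setting SCTLR_EL1.EnFPM so
the accesses do not trap. A reduced stand-alone sketch of the save/restore
pairing, with a plain variable standing in for the FPMR system register (not
kernel code):

#include <stdbool.h>
#include <stdint.h>

static uint64_t hw_fpmr;            /* stands in for the FPMR register */

struct task_fp {
    uint64_t fpmr;                  /* per-task saved copy */
    bool foreign;                   /* CPU no longer holds this task's state */
};

/* Switch-out: capture the live register into the task struct. */
static void fp_save(struct task_fp *t)
{
    if (!t->foreign)
        t->fpmr = hw_fpmr;          /* read_sysreg_s(SYS_FPMR) in the patch */
}

/* Return to user: reload the register and reclaim ownership. */
static void fp_restore(struct task_fp *t)
{
    hw_fpmr = t->fpmr;              /* write_sysreg_s(..., SYS_FPMR) */
    t->foreign = false;
}

int main(void)
{
    struct task_fp a = { .fpmr = 1 }, b = { .fpmr = 2 };

    fp_restore(&a);                 /* a runs */
    fp_save(&a);                    /* switch away from a */
    fp_restore(&b);                 /* b runs with its own value */
    return (int)hw_fpmr;            /* 2 */
}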
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index cab7f91949d8..ce08b744aaab 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -80,28 +80,42 @@
* x19 primary_entry() .. start_kernel() whether we entered with the MMU on
* x20 primary_entry() .. __primary_switch() CPU boot mode
* x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0
- * x22 create_idmap() .. start_kernel() ID map VA of the DT blob
- * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset
- * x24 __primary_switch() linear map KASLR seed
- * x25 primary_entry() .. start_kernel() supported VA size
- * x28 create_idmap() callee preserved temp register
*/
SYM_CODE_START(primary_entry)
bl record_mmu_state
bl preserve_boot_args
- bl create_idmap
+
+ adrp x1, early_init_stack
+ mov sp, x1
+ mov x29, xzr
+ adrp x0, init_idmap_pg_dir
+ mov x1, xzr
+ bl __pi_create_init_idmap
+
+ /*
+ * If the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate those tables again to
+ * remove any speculatively loaded cache lines.
+ */
+ cbnz x19, 0f
+ dmb sy
+ mov x1, x0 // end of used region
+ adrp x0, init_idmap_pg_dir
+ adr_l x2, dcache_inval_poc
+ blr x2
+ b 1f
/*
* If we entered with the MMU and caches on, clean the ID mapped part
* of the primary boot code to the PoC so we can safely execute it with
* the MMU off.
*/
- cbz x19, 0f
- adrp x0, __idmap_text_start
+0: adrp x0, __idmap_text_start
adr_l x1, __idmap_text_end
adr_l x2, dcache_clean_poc
blr x2
-0: mov x0, x19
+
+1: mov x0, x19
bl init_kernel_el // w0=cpu_boot_mode
mov x20, x0
@@ -111,14 +125,6 @@ SYM_CODE_START(primary_entry)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
-#if VA_BITS > 48
- mrs_s x0, SYS_ID_AA64MMFR2_EL1
- tst x0, ID_AA64MMFR2_EL1_VARange_MASK
- mov x0, #VA_BITS
- mov x25, #VA_BITS_MIN
- csel x25, x25, x0, eq
- mov x0, x25
-#endif
bl __cpu_setup // initialise processor
b __primary_switch
SYM_CODE_END(primary_entry)
@@ -177,267 +183,6 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
ret
SYM_CODE_END(preserve_boot_args)
-SYM_FUNC_START_LOCAL(clear_page_tables)
- /*
- * Clear the init page tables.
- */
- adrp x0, init_pg_dir
- adrp x1, init_pg_end
- sub x2, x1, x0
- mov x1, xzr
- b __pi_memset // tail call
-SYM_FUNC_END(clear_page_tables)
-
-/*
- * Macro to populate page table entries, these entries can be pointers to the next level
- * or last level entries pointing to physical memory.
- *
- * tbl: page table address
- * rtbl: pointer to page table or physical memory
- * index: start index to write
- * eindex: end index to write - [index, eindex] written to
- * flags: flags for pagetable entry to or in
- * inc: increment to rtbl between each entry
- * tmp1: temporary variable
- *
- * Preserves: tbl, eindex, flags, inc
- * Corrupts: index, tmp1
- * Returns: rtbl
- */
- .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@: phys_to_pte \tmp1, \rtbl
- orr \tmp1, \tmp1, \flags // tmp1 = table entry
- str \tmp1, [\tbl, \index, lsl #3]
- add \rtbl, \rtbl, \inc // rtbl = pa next level
- add \index, \index, #1
- cmp \index, \eindex
- b.ls .Lpe\@
- .endm
-
-/*
- * Compute indices of table entries from virtual address range. If multiple entries
- * were needed in the previous page table level then the next page table level is assumed
- * to be composed of multiple pages. (This effectively scales the end index).
- *
- * vstart: virtual address of start of range
- * vend: virtual address of end of range - we map [vstart, vend]
- * shift: shift used to transform virtual address into index
- * order: #imm 2log(number of entries in page table)
- * istart: index in table corresponding to vstart
- * iend: index in table corresponding to vend
- * count: On entry: how many extra entries were required in previous level, scales
- * our end index.
- * On exit: returns how many extra entries required for next page table level
- *
- * Preserves: vstart, vend
- * Returns: istart, iend, count
- */
- .macro compute_indices, vstart, vend, shift, order, istart, iend, count
- ubfx \istart, \vstart, \shift, \order
- ubfx \iend, \vend, \shift, \order
- add \iend, \iend, \count, lsl \order
- sub \count, \iend, \istart
- .endm
-
-/*
- * Map memory for specified virtual address range. Each level of page table needed supports
- * multiple entries. If a level requires n entries the next page table level is assumed to be
- * formed from n pages.
- *
- * tbl: location of page table
- * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- * vstart: virtual address of start of range
- * vend: virtual address of end of range - we map [vstart, vend - 1]
- * flags: flags to use to map last level entries
- * phys: physical address corresponding to vstart - physical memory is contiguous
- * order: #imm 2log(number of entries in PGD table)
- *
- * If extra_shift is set, an extra level will be populated if the end address does
- * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
- *
- * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
- * Preserves: vstart, flags
- * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
- */
- .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
- sub \vend, \vend, #1
- add \rtbl, \tbl, #PAGE_SIZE
- mov \count, #0
-
- .ifnb \extra_shift
- tst \vend, #~((1 << (\extra_shift)) - 1)
- b.eq .L_\@
- compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
- mov \sv, \rtbl
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- .endif
-.L_\@:
- compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
- mov \sv, \rtbl
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
-
-#if SWAPPER_PGTABLE_LEVELS > 3
- compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
- mov \sv, \rtbl
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
-#endif
-
-#if SWAPPER_PGTABLE_LEVELS > 2
- compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
- mov \sv, \rtbl
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
-#endif
-
- compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
- bic \rtbl, \phys, #SWAPPER_BLOCK_SIZE - 1
- populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
- .endm
-
-/*
- * Remap a subregion created with the map_memory macro with modified attributes
- * or output address. The entire remapped region must have been covered in the
- * invocation of map_memory.
- *
- * x0: last level table address (returned in first argument to map_memory)
- * x1: start VA of the existing mapping
- * x2: start VA of the region to update
- * x3: end VA of the region to update (exclusive)
- * x4: start PA associated with the region to update
- * x5: attributes to set on the updated region
- * x6: order of the last level mappings
- */
-SYM_FUNC_START_LOCAL(remap_region)
- sub x3, x3, #1 // make end inclusive
-
- // Get the index offset for the start of the last level table
- lsr x1, x1, x6
- bfi x1, xzr, #0, #PAGE_SHIFT - 3
-
- // Derive the start and end indexes into the last level table
- // associated with the provided region
- lsr x2, x2, x6
- lsr x3, x3, x6
- sub x2, x2, x1
- sub x3, x3, x1
-
- mov x1, #1
- lsl x6, x1, x6 // block size at this level
-
- populate_entries x0, x4, x2, x3, x5, x6, x7
- ret
-SYM_FUNC_END(remap_region)
-
-SYM_FUNC_START_LOCAL(create_idmap)
- mov x28, lr
- /*
- * The ID map carries a 1:1 mapping of the physical address range
- * covered by the loaded image, which could be anywhere in DRAM. This
- * means that the required size of the VA (== PA) space is decided at
- * boot time, and could be more than the configured size of the VA
- * space for ordinary kernel and user space mappings.
- *
- * There are three cases to consider here:
- * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
- * the placement of the image. In this case, we configure one extra
- * level of translation on the fly for the ID map only. (This case
- * also covers 42-bit VA/52-bit PA on 64k pages).
- *
- * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
- * only happen when using 64k pages, in which case we need to extend
- * the root level table rather than add a level. Note that we can
- * treat this case as 'always extended' as long as we take care not
- * to program an unsupported T0SZ value into the TCR register.
- *
- * - Combinations that would require two additional levels of
- * translation are not supported, e.g., VA_BITS==36 on 16k pages, or
- * VA_BITS==39/4k pages with 5-level paging, where the input address
- * requires more than 47 or 48 bits, respectively.
- */
-#if (VA_BITS < 48)
-#define IDMAP_PGD_ORDER (VA_BITS - PGDIR_SHIFT)
-#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
-
- /*
- * If VA_BITS < 48, we have to configure an additional table level.
- * First, we have to verify our assumption that the current value of
- * VA_BITS was chosen such that all translation levels are fully
- * utilised, and that lowering T0SZ will always result in an additional
- * translation level to be configured.
- */
-#if VA_BITS != EXTRA_SHIFT
-#error "Mismatch between VA_BITS and page size/number of translation levels"
-#endif
-#else
-#define IDMAP_PGD_ORDER (PHYS_MASK_SHIFT - PGDIR_SHIFT)
-#define EXTRA_SHIFT
- /*
- * If VA_BITS == 48, we don't have to configure an additional
- * translation level, but the top-level table has more entries.
- */
-#endif
- adrp x0, init_idmap_pg_dir
- adrp x3, _text
- adrp x6, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
- mov_q x7, SWAPPER_RX_MMUFLAGS
-
- map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
-
- /* Remap the kernel page tables r/w in the ID map */
- adrp x1, _text
- adrp x2, init_pg_dir
- adrp x3, init_pg_end
- bic x4, x2, #SWAPPER_BLOCK_SIZE - 1
- mov_q x5, SWAPPER_RW_MMUFLAGS
- mov x6, #SWAPPER_BLOCK_SHIFT
- bl remap_region
-
- /* Remap the FDT after the kernel image */
- adrp x1, _text
- adrp x22, _end + SWAPPER_BLOCK_SIZE
- bic x2, x22, #SWAPPER_BLOCK_SIZE - 1
- bfi x22, x21, #0, #SWAPPER_BLOCK_SHIFT // remapped FDT address
- add x3, x2, #MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
- bic x4, x21, #SWAPPER_BLOCK_SIZE - 1
- mov_q x5, SWAPPER_RW_MMUFLAGS
- mov x6, #SWAPPER_BLOCK_SHIFT
- bl remap_region
-
- /*
- * Since the page tables have been populated with non-cacheable
- * accesses (MMU disabled), invalidate those tables again to
- * remove any speculatively loaded cache lines.
- */
- cbnz x19, 0f // skip cache invalidation if MMU is on
- dmb sy
-
- adrp x0, init_idmap_pg_dir
- adrp x1, init_idmap_pg_end
- bl dcache_inval_poc
-0: ret x28
-SYM_FUNC_END(create_idmap)
-
-SYM_FUNC_START_LOCAL(create_kernel_mapping)
- adrp x0, init_pg_dir
- mov_q x5, KIMAGE_VADDR // compile time __va(_text)
-#ifdef CONFIG_RELOCATABLE
- add x5, x5, x23 // add KASLR displacement
-#endif
- adrp x6, _end // runtime __pa(_end)
- adrp x3, _text // runtime __pa(_text)
- sub x6, x6, x3 // _end - _text
- add x6, x6, x5 // runtime __va(_end)
- mov_q x7, SWAPPER_RW_MMUFLAGS
-
- map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14
-
- dsb ishst // sync with page table walker
- ret
-SYM_FUNC_END(create_kernel_mapping)
-
/*
* Initialize CPU registers with task-specific and cpu-specific context.
*
@@ -489,34 +234,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
mov x0, x20
bl set_cpu_boot_mode_flag
- // Clear BSS
- adr_l x0, __bss_start
- mov x1, xzr
- adr_l x2, __bss_stop
- sub x2, x2, x0
- bl __pi_memset
- dsb ishst // Make zero page visible to PTW
-
-#if VA_BITS > 48
- adr_l x8, vabits_actual // Set this early so KASAN early init
- str x25, [x8] // ... observes the correct value
- dc civac, x8 // Make visible to booting secondaries
-#endif
-
-#ifdef CONFIG_RANDOMIZE_BASE
- adrp x5, memstart_offset_seed // Save KASLR linear map seed
- strh w24, [x5, :lo12:memstart_offset_seed]
-#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init
#endif
- mov x0, x21 // pass FDT address in x0
- bl early_fdt_map // Try mapping the FDT early
- mov x0, x20 // pass the full boot status
- bl init_feature_override // Parse cpu feature overrides
-#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
- bl scs_patch_vmlinux
-#endif
mov x0, x20
bl finalise_el2 // Prefer VHE if possible
ldp x29, x30, [sp], #16
@@ -584,25 +304,32 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
mov_q x1, INIT_SCTLR_EL1_MMU_OFF
/*
- * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
- * making it impossible to start in nVHE mode. Is that
- * compliant with the architecture? Absolutely not!
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+ * RES1 in that case.
+ *
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
+ * don't advertise it (they predate this relaxation).
*/
+ mrs_s x0, SYS_ID_AA64MMFR4_EL1
+ ubfx x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+	tbnz	x0, #(ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f	// sign bit of the ubfx'ed field
+
mrs x0, hcr_el2
and x0, x0, #HCR_E2H
- cbz x0, 1f
-
+ cbz x0, 2f
+1:
/* Set a sane SCTLR_EL1, the VHE way */
pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
- b 2f
+ b 3f
-1:
+2:
pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
-2:
+3:
__init_el2_nvhe_prepare_eret
mov w0, #BOOT_CPU_MODE_EL2
@@ -643,10 +370,13 @@ SYM_FUNC_START_LOCAL(secondary_startup)
* Common entry point for secondary CPUs.
*/
mov x20, x0 // preserve boot mode
+
+#ifdef CONFIG_ARM64_VA_BITS_52
+alternative_if ARM64_HAS_VA52
bl __cpu_secondary_check52bitva
-#if VA_BITS > 48
- ldr_l x0, vabits_actual
+alternative_else_nop_endif
#endif
+
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
adrp x2, idmap_pg_dir
@@ -749,15 +479,18 @@ SYM_FUNC_START(__enable_mmu)
ret
SYM_FUNC_END(__enable_mmu)
+#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
-#if VA_BITS > 48
- ldr_l x0, vabits_actual
- cmp x0, #52
- b.ne 2f
-
+#ifndef CONFIG_ARM64_LPA2
mrs_s x0, SYS_ID_AA64MMFR2_EL1
and x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
cbnz x0, 2f
+#else
+ mrs x0, id_aa64mmfr0_el1
+ sbfx x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
+ cmp x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
+ b.ge 2f
+#endif
update_early_cpu_boot_status \
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
@@ -765,9 +498,9 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
wfi
b 1b
-#endif
2: ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
+#endif
SYM_FUNC_START_LOCAL(__no_granule_support)
/* Indicate that this CPU can't boot and is stuck in the kernel */
@@ -779,123 +512,18 @@ SYM_FUNC_START_LOCAL(__no_granule_support)
b 1b
SYM_FUNC_END(__no_granule_support)
-#ifdef CONFIG_RELOCATABLE
-SYM_FUNC_START_LOCAL(__relocate_kernel)
- /*
- * Iterate over each entry in the relocation table, and apply the
- * relocations in place.
- */
- adr_l x9, __rela_start
- adr_l x10, __rela_end
- mov_q x11, KIMAGE_VADDR // default virtual offset
- add x11, x11, x23 // actual virtual offset
-
-0: cmp x9, x10
- b.hs 1f
- ldp x12, x13, [x9], #24
- ldr x14, [x9, #-8]
- cmp w13, #R_AARCH64_RELATIVE
- b.ne 0b
- add x14, x14, x23 // relocate
- str x14, [x12, x23]
- b 0b
-
-1:
-#ifdef CONFIG_RELR
- /*
- * Apply RELR relocations.
- *
- * RELR is a compressed format for storing relative relocations. The
- * encoded sequence of entries looks like:
- * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
- *
- * i.e. start with an address, followed by any number of bitmaps. The
- * address entry encodes 1 relocation. The subsequent bitmap entries
- * encode up to 63 relocations each, at subsequent offsets following
- * the last address entry.
- *
- * The bitmap entries must have 1 in the least significant bit. The
- * assumption here is that an address cannot have 1 in lsb. Odd
- * addresses are not supported. Any odd addresses are stored in the RELA
- * section, which is handled above.
- *
- * Excluding the least significant bit in the bitmap, each non-zero
- * bit in the bitmap represents a relocation to be applied to
- * a corresponding machine word that follows the base address
- * word. The second least significant bit represents the machine
- * word immediately following the initial address, and each bit
- * that follows represents the next word, in linear order. As such,
- * a single bitmap can encode up to 63 relocations in a 64-bit object.
- *
- * In this implementation we store the address of the next RELR table
- * entry in x9, the address being relocated by the current address or
- * bitmap entry in x13 and the address being relocated by the current
- * bit in x14.
- */
- adr_l x9, __relr_start
- adr_l x10, __relr_end
-
-2: cmp x9, x10
- b.hs 7f
- ldr x11, [x9], #8
- tbnz x11, #0, 3f // branch to handle bitmaps
- add x13, x11, x23
- ldr x12, [x13] // relocate address entry
- add x12, x12, x23
- str x12, [x13], #8 // adjust to start of bitmap
- b 2b
-
-3: mov x14, x13
-4: lsr x11, x11, #1
- cbz x11, 6f
- tbz x11, #0, 5f // skip bit if not set
- ldr x12, [x14] // relocate bit
- add x12, x12, x23
- str x12, [x14]
-
-5: add x14, x14, #8 // move to next bit's address
- b 4b
-
-6: /*
- * Move to the next bitmap's address. 8 is the word size, and 63 is the
- * number of significant bits in a bitmap entry.
- */
- add x13, x13, #(8 * 63)
- b 2b
-
-7:
-#endif
- ret
-
-SYM_FUNC_END(__relocate_kernel)
-#endif
-
SYM_FUNC_START_LOCAL(__primary_switch)
adrp x1, reserved_pg_dir
adrp x2, init_idmap_pg_dir
bl __enable_mmu
-#ifdef CONFIG_RELOCATABLE
- adrp x23, KERNEL_START
- and x23, x23, MIN_KIMG_ALIGN - 1
-#ifdef CONFIG_RANDOMIZE_BASE
- mov x0, x22
- adrp x1, init_pg_end
+
+ adrp x1, early_init_stack
mov sp, x1
mov x29, xzr
- bl __pi_kaslr_early_init
- and x24, x0, #SZ_2M - 1 // capture memstart offset seed
- bic x0, x0, #SZ_2M - 1
- orr x23, x23, x0 // record kernel offset
-#endif
-#endif
- bl clear_page_tables
- bl create_kernel_mapping
+ mov x0, x20 // pass the full boot status
+ mov x1, x21 // pass the FDT
+ bl __pi_early_map_kernel // Map and relocate the kernel
- adrp x1, init_pg_dir
- load_ttbr1 x1, x1, x2
-#ifdef CONFIG_RELOCATABLE
- bl __relocate_kernel
-#endif
ldr x8, =__primary_switched
adrp x0, KERNEL_START // __pa(KERNEL_START)
br x8
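
The init_el2 change turns the VHE probe into a signed-field test: a negative
ID_AA64MMFR4_EL1.E2H0 means the CPU is VHE-only and HCR_EL2.E2H may be RES1,
so the sign bit of the extracted field decides which setup path to take. A
stand-alone C sketch of the same extraction; the field position used below is
an assumption for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field placement, for illustration only. */
#define E2H0_SHIFT  24
#define E2H0_WIDTH  4

/* Sign-extend a WIDTH-bit field at SHIFT, like sbfx or the kernel's
 * cpuid_feature_extract_signed_field() helper. */
static bool e2h_is_res1(uint64_t mmfr4)
{
    int64_t e2h0 = (int64_t)(mmfr4 << (64 - E2H0_SHIFT - E2H0_WIDTH))
                   >> (64 - E2H0_WIDTH);

    return e2h0 < 0;                /* negative => take the VHE setup path */
}

int main(void)
{
    printf("%d\n", e2h_is_res1(0xfULL << E2H0_SHIFT));  /* 1: field = -1 */
    printf("%d\n", e2h_is_res1(0));                     /* 0: field = 0 */
    return 0;
}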
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 35225632d70a..2f5755192c2b 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -21,6 +21,7 @@
#include <asm/current.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
@@ -779,7 +780,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
* Check that the access type matches.
* 0 => load, otherwise => store
*/
- access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+ access = (esr & ESR_ELx_WNR) ? HW_BREAKPOINT_W :
HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
continue;
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 5e4dc72ab1bd..ba4f8f7d6a91 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -36,6 +36,41 @@ PROVIDE(__pi___memcpy = __pi_memcpy);
PROVIDE(__pi___memmove = __pi_memmove);
PROVIDE(__pi___memset = __pi_memset);
+PROVIDE(__pi_id_aa64isar1_override = id_aa64isar1_override);
+PROVIDE(__pi_id_aa64isar2_override = id_aa64isar2_override);
+PROVIDE(__pi_id_aa64mmfr0_override = id_aa64mmfr0_override);
+PROVIDE(__pi_id_aa64mmfr1_override = id_aa64mmfr1_override);
+PROVIDE(__pi_id_aa64mmfr2_override = id_aa64mmfr2_override);
+PROVIDE(__pi_id_aa64pfr0_override = id_aa64pfr0_override);
+PROVIDE(__pi_id_aa64pfr1_override = id_aa64pfr1_override);
+PROVIDE(__pi_id_aa64smfr0_override = id_aa64smfr0_override);
+PROVIDE(__pi_id_aa64zfr0_override = id_aa64zfr0_override);
+PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
+PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
+#endif
+PROVIDE(__pi__ctype = _ctype);
+PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
+
+PROVIDE(__pi_init_idmap_pg_dir = init_idmap_pg_dir);
+PROVIDE(__pi_init_idmap_pg_end = init_idmap_pg_end);
+PROVIDE(__pi_init_pg_dir = init_pg_dir);
+PROVIDE(__pi_init_pg_end = init_pg_end);
+PROVIDE(__pi_swapper_pg_dir = swapper_pg_dir);
+
+PROVIDE(__pi__text = _text);
+PROVIDE(__pi__stext = _stext);
+PROVIDE(__pi__etext = _etext);
+PROVIDE(__pi___start_rodata = __start_rodata);
+PROVIDE(__pi___inittext_begin = __inittext_begin);
+PROVIDE(__pi___inittext_end = __inittext_end);
+PROVIDE(__pi___initdata_begin = __initdata_begin);
+PROVIDE(__pi___initdata_end = __initdata_end);
+PROVIDE(__pi__data = _data);
+PROVIDE(__pi___bss_start = __bss_start);
+PROVIDE(__pi__end = _end);
+
#ifdef CONFIG_KVM
/*
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 12c7f3c8ba76..1da3e25f9d9e 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -16,9 +16,7 @@ bool __ro_after_init __kaslr_is_enabled = false;
void __init kaslr_init(void)
{
- if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
- arm64_sw_feature_override.mask,
- ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) {
+ if (kaslr_disabled_cmdline()) {
pr_info("KASLR disabled on command line\n");
return;
}
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index b38aae5b488d..82e2203d86a3 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -255,7 +255,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-#ifdef CONFIG_HIBERNATION
+#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_HIBERNATION)
/*
* To preserve the crash dump kernel image, the relevant memory segments
* should be mapped again around the hibernation.
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 0e017358f4ba..af1ca875c52c 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -39,6 +39,7 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
return kexec_image_post_load_cleanup_default(image);
}
+#ifdef CONFIG_CRASH_DUMP
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
struct crash_mem *cmem;
@@ -80,6 +81,7 @@ out:
kfree(cmem);
return ret;
}
+#endif
/*
* Tries to add the initrd and DTB to the image. If it is not possible to find
@@ -93,8 +95,8 @@ int load_other_segments(struct kimage *image,
char *cmdline)
{
struct kexec_buf kbuf;
- void *headers, *dtb = NULL;
- unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+ void *dtb = NULL;
+ unsigned long initrd_load_addr = 0, dtb_len,
orig_segments = image->nr_segments;
int ret = 0;
@@ -102,7 +104,10 @@ int load_other_segments(struct kimage *image,
/* do not allocate anything below the kernel */
kbuf.buf_min = kernel_load_addr + kernel_size;
+#ifdef CONFIG_CRASH_DUMP
/* load elf core header */
+ void *headers;
+ unsigned long headers_sz;
if (image->type == KEXEC_TYPE_CRASH) {
ret = prepare_elf_headers(&headers, &headers_sz);
if (ret) {
@@ -130,6 +135,7 @@ int load_other_segments(struct kimage *image,
kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
}
+#endif
/* load initrd */
if (initrd) {
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index dd851297596e..47e0be610bb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -595,7 +595,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if (scs_is_dynamic()) {
s = find_section(hdr, sechdrs, ".init.eh_frame");
if (s)
- scs_patch((void *)s->sh_addr, s->sh_size);
+ __pi_scs_patch((void *)s->sh_addr, s->sh_size);
}
return module_init_ftrace_plt(hdr, sechdrs, me);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index a41ef3213e1e..dcdcccd40891 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
/*
* If the page content is identical but at least one of the pages is
* tagged, return non-zero to avoid KSM merging. If only one of the
- * pages is tagged, set_pte_at() may zero or change the tags of the
+ * pages is tagged, __set_ptes() may zero or change the tags of the
* other page via mte_sync_tags().
*/
if (page_mte_tagged(page1) || page_mte_tagged(page2))
diff --git a/arch/arm64/kernel/pi/.gitignore b/arch/arm64/kernel/pi/.gitignore
new file mode 100644
index 000000000000..efb29b663e85
--- /dev/null
+++ b/arch/arm64/kernel/pi/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+relacheck
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
index c844a0546d7f..4393b41f0b71 100644
--- a/arch/arm64/kernel/pi/Makefile
+++ b/arch/arm64/kernel/pi/Makefile
@@ -11,6 +11,9 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
+# this code may run with the MMU off so disable unaligned accesses
+CFLAGS_map_range.o += -mstrict-align
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# disable LTO
@@ -22,14 +25,26 @@ KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
+hostprogs := relacheck
+
+quiet_cmd_piobjcopy = $(quiet_cmd_objcopy)
+ cmd_piobjcopy = $(cmd_objcopy) && $(obj)/relacheck $(@) $(<)
+
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
- --remove-section=.note.gnu.property \
- --prefix-alloc-sections=.init
-$(obj)/%.pi.o: $(obj)/%.o FORCE
- $(call if_changed,objcopy)
+ --remove-section=.note.gnu.property
+$(obj)/%.pi.o: $(obj)/%.o $(obj)/relacheck FORCE
+ $(call if_changed,piobjcopy)
+
+# ensure that all the lib- code ends up as __init code and data
+$(obj)/lib-%.pi.o: OBJCOPYFLAGS += --prefix-alloc-sections=.init
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := kaslr_early.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
-extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
+obj-y := idreg-override.pi.o \
+ map_kernel.pi.o map_range.pi.o \
+ lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-$(CONFIG_RELOCATABLE) += relocate.pi.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_early.pi.o
+obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.pi.o
+extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index e30fd9e32ef3..aad399796e81 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -14,6 +14,8 @@
#include <asm/cpufeature.h>
#include <asm/setup.h>
+#include "pi.h"
+
#define FTR_DESC_NAME_LEN 20
#define FTR_DESC_FIELD_LEN 10
#define FTR_ALIAS_NAME_LEN 30
@@ -21,15 +23,6 @@
static u64 __boot_status __initdata;
-// temporary __prel64 related definitions
-// to be removed when this code is moved under pi/
-
-#define __prel64_initconst __initconst
-
-#define PREL64(type, name) union { type *name; }
-
-#define prel64_pointer(__d) (__d)
-
typedef bool filter_t(u64 val);
struct ftr_set_desc {
@@ -66,6 +59,35 @@ static const struct ftr_set_desc mmfr1 __prel64_initconst = {
},
};
+
+static bool __init mmfr2_varange_filter(u64 val)
+{
+ int __maybe_unused feat;
+
+ if (val)
+ return false;
+
+#ifdef CONFIG_ARM64_LPA2
+ feat = cpuid_feature_extract_signed_field(read_sysreg(id_aa64mmfr0_el1),
+ ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+ if (feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2) {
+ id_aa64mmfr0_override.val |=
+ (ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+ id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+ }
+#endif
+ return true;
+}
+
+static const struct ftr_set_desc mmfr2 __prel64_initconst = {
+ .name = "id_aa64mmfr2",
+ .override = &id_aa64mmfr2_override,
+ .fields = {
+ FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter),
+ {}
+ },
+};
+
static bool __init pfr0_sve_filter(u64 val)
{
/*
@@ -166,6 +188,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
.fields = {
FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
+ FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
{}
},
};
@@ -173,6 +196,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
static const
PREL64(const struct ftr_set_desc, reg) regs[] __prel64_initconst = {
{ &mmfr1 },
+ { &mmfr2 },
{ &pfr0 },
{ &pfr1 },
{ &isar1 },
@@ -197,6 +221,8 @@ static const struct {
{ "arm64.nomops", "id_aa64isar2.mops=0" },
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
{ "nokaslr", "arm64_sw.nokaslr=1" },
+ { "rodata=off", "arm64_sw.rodataoff=1" },
+ { "arm64.nolva", "id_aa64mmfr2.varange=0" },
};
static int __init parse_hexdigit(const char *p, u64 *v)
@@ -313,42 +339,35 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}
-static __init const u8 *get_bootargs_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(const void *fdt, int node)
{
+ static char const bootargs[] __initconst = "bootargs";
const u8 *prop;
- void *fdt;
- int node;
- fdt = get_early_fdt_ptr();
- if (!fdt)
- return NULL;
-
- node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return NULL;
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ prop = fdt_getprop(fdt, node, bootargs, NULL);
if (!prop)
return NULL;
return strlen(prop) ? prop : NULL;
}
-static __init void parse_cmdline(void)
+static __init void parse_cmdline(const void *fdt, int chosen)
{
- const u8 *prop = get_bootargs_cmdline();
+ static char const cmdline[] __initconst = CONFIG_CMDLINE;
+ const u8 *prop = get_bootargs_cmdline(fdt, chosen);
if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
- __parse_cmdline(CONFIG_CMDLINE, true);
+ __parse_cmdline(cmdline, true);
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
__parse_cmdline(prop, true);
}
-/* Keep checkers quiet */
-void init_feature_override(u64 boot_status);
-
-asmlinkage void __init init_feature_override(u64 boot_status)
+void __init init_feature_override(u64 boot_status, const void *fdt,
+ int chosen)
{
struct arm64_ftr_override *override;
const struct ftr_set_desc *reg;
@@ -364,7 +383,7 @@ asmlinkage void __init init_feature_override(u64 boot_status)
__boot_status = boot_status;
- parse_cmdline();
+ parse_cmdline(fdt, chosen);
for (i = 0; i < ARRAY_SIZE(regs); i++) {
reg = prel64_pointer(regs[i].reg);
@@ -373,3 +392,10 @@ asmlinkage void __init init_feature_override(u64 boot_status)
(unsigned long)(override + 1));
}
}
+
+char * __init skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
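
The new command line aliases map convenience options onto the generic
"register.field=value" syntax before field parsing takes place. A stand-alone
sketch of that expansion step; the table entries mirror the two added by this
patch, everything else is illustrative:

#include <stdio.h>
#include <string.h>

static const struct {
    const char *alias, *feature;
} aliases[] = {
    { "rodata=off",  "arm64_sw.rodataoff=1" },
    { "arm64.nolva", "id_aa64mmfr2.varange=0" },
};

/* Return the expanded form of a recognised shorthand, or the input
 * unchanged, like the alias pass in __parse_cmdline(). */
static const char *expand_alias(const char *opt)
{
    for (size_t i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++)
        if (!strcmp(opt, aliases[i].alias))
            return aliases[i].feature;
    return opt;
}

int main(void)
{
    printf("%s\n", expand_alias("arm64.nolva"));  /* id_aa64mmfr2.varange=0 */
    return 0;
}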
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
index 17bff6e399e4..0257b43819db 100644
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -14,69 +14,23 @@
#include <asm/archrandom.h>
#include <asm/memory.h>
+#include <asm/pgtable.h>
-/* taken from lib/string.c */
-static char *__strstr(const char *s1, const char *s2)
-{
- size_t l1, l2;
-
- l2 = strlen(s2);
- if (!l2)
- return (char *)s1;
- l1 = strlen(s1);
- while (l1 >= l2) {
- l1--;
- if (!memcmp(s1, s2, l2))
- return (char *)s1;
- s1++;
- }
- return NULL;
-}
-static bool cmdline_contains_nokaslr(const u8 *cmdline)
-{
- const u8 *str;
-
- str = __strstr(cmdline, "nokaslr");
- return str == cmdline || (str > cmdline && *(str - 1) == ' ');
-}
-
-static bool is_kaslr_disabled_cmdline(void *fdt)
-{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- int node;
- const u8 *prop;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
-
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
-
- if (cmdline_contains_nokaslr(prop))
- return true;
+#include "pi.h"
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- goto out;
+extern u16 memstart_offset_seed;
- return false;
- }
-out:
- return cmdline_contains_nokaslr(CONFIG_CMDLINE);
-}
-
-static u64 get_kaslr_seed(void *fdt)
+static u64 __init get_kaslr_seed(void *fdt, int node)
{
- int node, len;
+ static char const seed_str[] __initconst = "kaslr-seed";
fdt64_t *prop;
u64 ret;
+ int len;
- node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return 0;
- prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+ prop = fdt_getprop_w(fdt, node, seed_str, &len);
if (!prop || len != sizeof(u64))
return 0;
@@ -85,26 +39,28 @@ static u64 get_kaslr_seed(void *fdt)
return ret;
}
-asmlinkage u64 kaslr_early_init(void *fdt)
+u64 __init kaslr_early_init(void *fdt, int chosen)
{
- u64 seed;
+ u64 seed, range;
- if (is_kaslr_disabled_cmdline(fdt))
+ if (kaslr_disabled_cmdline())
return 0;
- seed = get_kaslr_seed(fdt);
+ seed = get_kaslr_seed(fdt, chosen);
if (!seed) {
if (!__early_cpu_has_rndr() ||
!__arm64_rndr((unsigned long *)&seed))
return 0;
}
+ memstart_offset_seed = seed & U16_MAX;
+
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
- * the lower and upper quarters to avoid colliding with other
- * allocations.
+ * 'middle' half of the VMALLOC area, and stay clear of the lower and
+ * upper quarters to avoid colliding with other allocations.
*/
- return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0));
+ range = (VMALLOC_END - KIMAGE_VADDR) / 2;
+ return range / 2 + (((__uint128_t)range * seed) >> 64);
}
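
The new return expression is a multiply-shift: ((__uint128_t)range * seed) >> 64
maps a uniform 64-bit seed onto [0, range) without division or modulo bias, and
the added range / 2 confines the result to the middle half of the window. A
stand-alone demonstration (requires the GCC/Clang 128-bit extension; the range
value is made up):

#include <stdint.h>
#include <stdio.h>

static uint64_t kaslr_offset_from_seed(uint64_t range, uint64_t seed)
{
    /* seed/2^64 scaled into [0, range), then biased by range/2 */
    return range / 2 + (uint64_t)(((unsigned __int128)range * seed) >> 64);
}

int main(void)
{
    uint64_t range = 1ULL << 40;    /* example mapping window */

    /* seed 0 lands at range/2; seed ~0 lands just below range */
    printf("%#llx\n", (unsigned long long)kaslr_offset_from_seed(range, 0));
    printf("%#llx\n", (unsigned long long)kaslr_offset_from_seed(range, ~0ULL));
    return 0;
}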
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
new file mode 100644
index 000000000000..5fa08e13e17e
--- /dev/null
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2023 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+
+#include <asm/memory.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "pi.h"
+
+extern const u8 __eh_frame_start[], __eh_frame_end[];
+
+extern void idmap_cpu_replace_ttbr1(void *pgdir);
+
+static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
+ void *start, void *end, pgprot_t prot,
+ bool may_use_cont, int root_level)
+{
+ map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET,
+ ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
+ prot, root_level, (pte_t *)pg_dir, may_use_cont, 0);
+}
+
+static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
+ void *end, int root_level)
+{
+ map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
+ false, root_level);
+}
+
+static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
+{
+ bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
+ bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
+ u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
+ pgprot_t text_prot = PAGE_KERNEL_ROX;
+ pgprot_t data_prot = PAGE_KERNEL;
+ pgprot_t prot;
+
+ /*
+ * External debuggers may need to write directly to the text mapping to
+ * install SW breakpoints. Allow this (only) when explicitly requested
+ * with rodata=off.
+ */
+ if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
+ text_prot = PAGE_KERNEL_EXEC;
+
+ /*
+ * We only enable the shadow call stack dynamically if we are running
+ * on a system that does not implement PAC or BTI. PAC and SCS provide
+ * roughly the same level of protection, and BTI relies on the PACIASP
+ * instructions serving as landing pads, preventing us from patching
+ * those instructions into something else.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
+ enable_scs = false;
+
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
+ enable_scs = false;
+
+ /*
+ * If we have a CPU that supports BTI and a kernel built for
+ * BTI then mark the kernel executable text as guarded pages
+ * now so we don't have to rewrite the page tables later.
+ */
+ text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
+ }
+
+ /* Map all code read-write on the first pass if needed */
+ twopass |= enable_scs;
+ prot = twopass ? data_prot : text_prot;
+
+ map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
+ !twopass, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
+ __inittext_begin, data_prot, false, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin,
+ __inittext_end, prot, false, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
+ __initdata_end, data_prot, false, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot,
+ true, root_level);
+ dsb(ishst);
+
+ idmap_cpu_replace_ttbr1(init_pg_dir);
+
+ if (twopass) {
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ relocate_kernel(kaslr_offset);
+
+ if (enable_scs) {
+ scs_patch(__eh_frame_start + va_offset,
+ __eh_frame_end - __eh_frame_start);
+ asm("ic ialluis");
+
+ dynamic_scs_is_enabled = true;
+ }
+
+ /*
+ * Unmap the text region before remapping it, to avoid
+ * potential TLB conflicts when creating the contiguous
+ * descriptors.
+ */
+ unmap_segment(init_pg_dir, va_offset, _stext, _etext,
+ root_level);
+ dsb(ishst);
+ isb();
+ __tlbi(vmalle1);
+ isb();
+
+ /*
+ * Remap these segments with different permissions.
+ * No new page table allocations should be needed.
+ */
+ map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
+ text_prot, true, root_level);
+ map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
+ __inittext_end, text_prot, false, root_level);
+ }
+
+ /* Copy the root page table to its final location */
+ memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
+ dsb(ishst);
+ idmap_cpu_replace_ttbr1(swapper_pg_dir);
+}
+
+static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
+{
+ u64 sctlr = read_sysreg(sctlr_el1);
+ u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
+
+ asm(" msr sctlr_el1, %0 ;"
+ " isb ;"
+ " msr ttbr0_el1, %1 ;"
+ " msr tcr_el1, %2 ;"
+ " isb ;"
+ " tlbi vmalle1 ;"
+ " dsb nsh ;"
+ " isb ;"
+ " msr sctlr_el1, %3 ;"
+ " isb ;"
+ :: "r"(sctlr & ~SCTLR_ELx_M), "r"(ttbr), "r"(tcr), "r"(sctlr));
+}
+
+static void __init remap_idmap_for_lpa2(void)
+{
+ /* clear the bits that change meaning once LPA2 is turned on */
+ pteval_t mask = PTE_SHARED;
+
+ /*
+ * We have to clear bits [9:8] in all block or page descriptors in the
+ * initial ID map, as otherwise they will be (mis)interpreted as
+ * physical address bits once we flick the LPA2 switch (TCR.DS). Since
+ * we cannot manipulate live descriptors in that way without creating
+ * potential TLB conflicts, let's create another temporary ID map in a
+ * LPA2 compatible fashion, and update the initial ID map while running
+ * from that.
+ */
+ create_init_idmap(init_pg_dir, mask);
+ dsb(ishst);
+ set_ttbr0_for_lpa2((u64)init_pg_dir);
+
+ /*
+ * Recreate the initial ID map with the same granularity as before.
+ * Don't bother with the FDT, we no longer need it after this.
+ */
+ memset(init_idmap_pg_dir, 0,
+       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
+
+ create_init_idmap(init_idmap_pg_dir, mask);
+ dsb(ishst);
+
+ /* switch back to the updated initial ID map */
+ set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);
+
+ /* wipe the temporary ID map from memory */
+ memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir);
+}
+
+static void __init map_fdt(u64 fdt)
+{
+ static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
+ u64 efdt = fdt + MAX_FDT_SIZE;
+ u64 ptep = (u64)ptes;
+
+ /*
+ * Map up to MAX_FDT_SIZE bytes, but avoid overlap with
+ * the kernel image.
+ */
+ map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt,
+ fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
+ (pte_t *)init_idmap_pg_dir, false, 0);
+ dsb(ishst);
+}
+
+asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
+{
+ static char const chosen_str[] __initconst = "/chosen";
+ u64 va_base, pa_base = (u64)&_text;
+ u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
+ int root_level = 4 - CONFIG_PGTABLE_LEVELS;
+ int va_bits = VA_BITS;
+ int chosen;
+
+ map_fdt((u64)fdt);
+
+ /* Clear BSS and the initial page tables */
+ memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);
+
+ /* Parse the command line for CPU feature overrides */
+ chosen = fdt_path_offset(fdt, chosen_str);
+ init_feature_override(boot_status, fdt, chosen);
+
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) {
+ va_bits = VA_BITS_MIN;
+ } else if (IS_ENABLED(CONFIG_ARM64_LPA2) && !cpu_has_lpa2()) {
+ va_bits = VA_BITS_MIN;
+ root_level++;
+ }
+
+ if (va_bits > VA_BITS_MIN)
+ sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));
+
+ /*
+ * The virtual KASLR displacement modulo 2MiB is decided by the
+ * physical placement of the image, as otherwise, we might not be able
+ * to create the early kernel mapping using 2 MiB block descriptors. So
+ * take the low bits of the KASLR offset from the physical address, and
+ * fill in the high bits from the seed.
+ */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ u64 kaslr_seed = kaslr_early_init(fdt, chosen);
+
+ if (kaslr_seed && kaslr_requires_kpti())
+ arm64_use_ng_mappings = true;
+
+ kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_LPA2) && va_bits > VA_BITS_MIN)
+ remap_idmap_for_lpa2();
+
+ va_base = KIMAGE_VADDR + kaslr_offset;
+ map_kernel(kaslr_offset, va_base - pa_base, root_level);
+}
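
As the comment in early_map_kernel() explains, the KASLR offset is stitched
together from two sources: bits below MIN_KIMG_ALIGN come from the physical
load address, so the 2 MiB block mappings still line up, and the bits above
come from the random seed. A stand-alone sketch with invented inputs:

#include <stdint.h>
#include <stdio.h>

#define MIN_KIMG_ALIGN  (2UL * 1024 * 1024)     /* 2 MiB, as on arm64 */

static uint64_t compose_kaslr_offset(uint64_t pa_base, uint64_t seed)
{
    uint64_t offset = pa_base % MIN_KIMG_ALIGN;     /* low 21 bits from PA */

    offset |= seed & ~(MIN_KIMG_ALIGN - 1);         /* high bits from seed */
    return offset;
}

int main(void)
{
    /* hypothetical load address and seed, for illustration only */
    uint64_t off = compose_kaslr_offset(0x40210000, 0x123456789a800000);

    printf("%#llx\n", (unsigned long long)off);     /* 0x123456789a810000 */
    return 0;
}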
diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
new file mode 100644
index 000000000000..5410b2cac590
--- /dev/null
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2023 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <asm/memory.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+#include "pi.h"
+
+/**
+ * map_range - Map a contiguous range of physical pages into virtual memory
+ *
+ * @pte: Address of physical pointer to array of pages to
+ * allocate page tables from
+ * @start: Virtual address of the start of the range
+ * @end: Virtual address of the end of the range (exclusive)
+ * @pa: Physical address of the start of the range
+ * @prot: Access permissions of the range
+ * @level: Translation level for the mapping
+ * @tbl: The level @level page table to create the mappings in
+ * @may_use_cont: Whether the use of the contiguous attribute is allowed
+ * @va_offset: Offset between a physical page and its current mapping
+ * in the VA space
+ */
+void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
+ int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
+{
+ u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
+ u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
+ int lshift = (3 - level) * (PAGE_SHIFT - 3);
+ u64 lmask = (PAGE_SIZE << lshift) - 1;
+
+ start &= PAGE_MASK;
+ pa &= PAGE_MASK;
+
+ /* Advance tbl to the entry that covers start */
+ tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;
+
+ /*
+ * Set the right block/page bits for this level unless we are
+ * clearing the mapping
+ */
+ if (protval)
+ protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;
+
+ while (start < end) {
+ u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));
+
+ if (level < 3 && (start | next | pa) & lmask) {
+ /*
+ * This chunk needs a finer grained mapping. Create a
+ * table mapping if necessary and recurse.
+ */
+ if (pte_none(*tbl)) {
+ *tbl = __pte(__phys_to_pte_val(*pte) |
+ PMD_TYPE_TABLE | PMD_TABLE_UXN);
+ *pte += PTRS_PER_PTE * sizeof(pte_t);
+ }
+ map_range(pte, start, next, pa, prot, level + 1,
+ (pte_t *)(__pte_to_phys(*tbl) + va_offset),
+ may_use_cont, va_offset);
+ } else {
+ /*
+ * Start a contiguous range if start and pa are
+ * suitably aligned
+ */
+ if (((start | pa) & cmask) == 0 && may_use_cont)
+ protval |= PTE_CONT;
+
+ /*
+ * Clear the contiguous attribute if the remaining
+ * range does not cover a contiguous block
+ */
+ if ((end & ~cmask) <= start)
+ protval &= ~PTE_CONT;
+
+ /* Put down a block or page mapping */
+ *tbl = __pte(__phys_to_pte_val(pa) | protval);
+ }
+ pa += next - start;
+ start = next;
+ tbl++;
+ }
+}
+
+asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, pteval_t clrmask)
+{
+ u64 ptep = (u64)pg_dir + PAGE_SIZE;
+ pgprot_t text_prot = PAGE_KERNEL_ROX;
+ pgprot_t data_prot = PAGE_KERNEL;
+
+ pgprot_val(text_prot) &= ~clrmask;
+ pgprot_val(data_prot) &= ~clrmask;
+
+ map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext,
+ text_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
+ map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin,
+ data_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
+
+ return ptep;
+}
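
map_range() derives its per-level geometry from two expressions: lshift =
(3 - level) * (PAGE_SHIFT - 3) counts the index bits resolved below the current
level, and lmask = (PAGE_SIZE << lshift) - 1 is one less than the block size
mapped by a single entry at that level. A stand-alone check of those values,
assuming a 4 KiB granule (9 index bits per level):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)

int main(void)
{
    for (int level = 0; level <= 3; level++) {
        int lshift = (3 - level) * (PAGE_SHIFT - 3);
        uint64_t block = (uint64_t)PAGE_SIZE << lshift;    /* lmask + 1 */

        /* level 0: 512 GiB, 1: 1 GiB, 2: 2 MiB, 3: 4 KiB */
        printf("level %d: block size %#llx\n",
               level, (unsigned long long)block);
    }
    return 0;
}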
diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/pi/patch-scs.c
index a1fe4b4ff591..49d8b40e61bc 100644
--- a/arch/arm64/kernel/patch-scs.c
+++ b/arch/arm64/kernel/pi/patch-scs.c
@@ -4,16 +4,17 @@
* Author: Ard Biesheuvel <ardb@google.com>
*/
-#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/linkage.h>
-#include <linux/printk.h>
#include <linux/types.h>
-#include <asm/cacheflush.h>
#include <asm/scs.h>
+#include "pi.h"
+
+bool dynamic_scs_is_enabled;
+
//
// This minimal DWARF CFI parser is partially based on the code in
// arch/arc/kernel/unwind.c, and on the document below:
@@ -49,8 +50,6 @@
#define DW_CFA_GNU_negative_offset_extended 0x2f
#define DW_CFA_hi_user 0x3f
-extern const u8 __eh_frame_start[], __eh_frame_end[];
-
enum {
PACIASP = 0xd503233f,
AUTIASP = 0xd50323bf,
@@ -81,7 +80,11 @@ static void __always_inline scs_patch_loc(u64 loc)
*/
return;
}
- dcache_clean_pou(loc, loc + sizeof(u32));
+ if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_CLEAN_CACHE))
+ asm("dc civac, %0" :: "r"(loc));
+ else
+ asm(ALTERNATIVE("dc cvau, %0", "nop", ARM64_HAS_CACHE_IDC)
+ :: "r"(loc));
}
/*
@@ -128,10 +131,10 @@ struct eh_frame {
};
};
-static int noinstr scs_handle_fde_frame(const struct eh_frame *frame,
- bool fde_has_augmentation_data,
- int code_alignment_factor,
- bool dry_run)
+static int scs_handle_fde_frame(const struct eh_frame *frame,
+ bool fde_has_augmentation_data,
+ int code_alignment_factor,
+ bool dry_run)
{
int size = frame->size - offsetof(struct eh_frame, opcodes) + 4;
u64 loc = (u64)offset_to_ptr(&frame->initial_loc);
@@ -198,14 +201,13 @@ static int noinstr scs_handle_fde_frame(const struct eh_frame *frame,
break;
default:
- pr_err("unhandled opcode: %02x in FDE frame %lx\n", opcode[-1], (uintptr_t)frame);
return -ENOEXEC;
}
}
return 0;
}
-int noinstr scs_patch(const u8 eh_frame[], int size)
+int scs_patch(const u8 eh_frame[], int size)
{
const u8 *p = eh_frame;
@@ -250,13 +252,3 @@ int noinstr scs_patch(const u8 eh_frame[], int size)
}
return 0;
}
-
-asmlinkage void __init scs_patch_vmlinux(void)
-{
- if (!should_patch_pac_into_scs())
- return;
-
- WARN_ON(scs_patch(__eh_frame_start, __eh_frame_end - __eh_frame_start));
- icache_inval_all_pou();
- isb();
-}
diff --git a/arch/arm64/kernel/pi/pi.h b/arch/arm64/kernel/pi/pi.h
new file mode 100644
index 000000000000..c91e5e965cd3
--- /dev/null
+++ b/arch/arm64/kernel/pi/pi.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2023 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/types.h>
+
+#define __prel64_initconst __section(".init.rodata.prel64")
+
+#define PREL64(type, name) union { type *name; prel64_t name ## _prel; }
+
+#define prel64_pointer(__d) (typeof(__d))prel64_to_pointer(&__d##_prel)
+
+typedef volatile signed long prel64_t;
+
+static inline void *prel64_to_pointer(const prel64_t *offset)
+{
+ if (!*offset)
+ return NULL;
+ return (void *)offset + *offset;
+}
+
+extern bool dynamic_scs_is_enabled;
+
+extern pgd_t init_idmap_pg_dir[], init_idmap_pg_end[];
+
+void init_feature_override(u64 boot_status, const void *fdt, int chosen);
+u64 kaslr_early_init(void *fdt, int chosen);
+void relocate_kernel(u64 offset);
+int scs_patch(const u8 eh_frame[], int size);
+
+void map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
+ int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
+
+asmlinkage void early_map_kernel(u64 boot_status, void *fdt);
+
+asmlinkage u64 create_init_idmap(pgd_t *pgd, pteval_t clrmask);
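
The PREL64 machinery above exists because this code runs before relocation
processing: a statically initialized absolute pointer would need an ABS64
relocation at load time, while a place-relative offset is a link-time constant.
A stand-alone illustration of the conversion; the offset is computed at run
time here only to keep the example self-contained (the linker emits it in the
kernel):

#include <stdint.h>
#include <stdio.h>

typedef long prel64_t;

/* Reconstruct a pointer by adding the field's own address to the stored
 * offset; a zero offset encodes NULL, as in pi.h. */
static void *prel64_to_pointer(const prel64_t *offset)
{
    if (!*offset)
        return NULL;
    return (char *)(uintptr_t)offset + *offset;
}

static int target = 42;

int main(void)
{
    prel64_t rel;

    rel = (char *)&target - (char *)&rel;   /* distance from field to target */

    printf("%d\n", *(int *)prel64_to_pointer(&rel));    /* 42 */
    return 0;
}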
diff --git a/arch/arm64/kernel/pi/relacheck.c b/arch/arm64/kernel/pi/relacheck.c
new file mode 100644
index 000000000000..b0cd4d0d275b
--- /dev/null
+++ b/arch/arm64/kernel/pi/relacheck.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 - Google LLC
+ * Author: Ard Biesheuvel <ardb@google.com>
+ */
+
+#include <elf.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define HOST_ORDER ELFDATA2LSB
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define HOST_ORDER ELFDATA2MSB
+#endif
+
+static Elf64_Ehdr *ehdr;
+static Elf64_Shdr *shdr;
+static const char *strtab;
+static bool swap;
+
+static uint64_t swab_elfxword(uint64_t val)
+{
+ return swap ? __builtin_bswap64(val) : val;
+}
+
+static uint32_t swab_elfword(uint32_t val)
+{
+ return swap ? __builtin_bswap32(val) : val;
+}
+
+static uint16_t swab_elfhword(uint16_t val)
+{
+ return swap ? __builtin_bswap16(val) : val;
+}
+
+int main(int argc, char *argv[])
+{
+ struct stat stat;
+ int fd, ret;
+
+ if (argc < 3) {
+ fprintf(stderr, "file arguments missing\n");
+ exit(EXIT_FAILURE);
+ }
+
+ fd = open(argv[1], O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "failed to open %s\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+
+ ret = fstat(fd, &stat);
+ if (ret < 0) {
+ fprintf(stderr, "failed to stat() %s\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+
+ ehdr = mmap(0, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (ehdr == MAP_FAILED) {
+ fprintf(stderr, "failed to mmap() %s\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+
+ swap = ehdr->e_ident[EI_DATA] != HOST_ORDER;
+ shdr = (void *)ehdr + swab_elfxword(ehdr->e_shoff);
+ strtab = (void *)ehdr +
+ swab_elfxword(shdr[swab_elfhword(ehdr->e_shstrndx)].sh_offset);
+
+ for (int i = 0; i < swab_elfhword(ehdr->e_shnum); i++) {
+ unsigned long info, flags;
+ bool prel64 = false;
+ Elf64_Rela *rela;
+ int numrela;
+
+ if (swab_elfword(shdr[i].sh_type) != SHT_RELA)
+ continue;
+
+ /* only consider RELA sections operating on data */
+ info = swab_elfword(shdr[i].sh_info);
+ flags = swab_elfxword(shdr[info].sh_flags);
+ if ((flags & (SHF_ALLOC | SHF_EXECINSTR)) != SHF_ALLOC)
+ continue;
+
+ /*
+ * We generally don't permit ABS64 relocations in the code that
+ * runs before relocation processing occurs. If statically
+ * initialized absolute symbol references are unavoidable, they
+ * may be emitted into a *.rodata.prel64 section and they will
+ * be converted to place-relative 64-bit references. This
+ * requires special handling in the referring code.
+ */
+ if (strstr(strtab + swab_elfword(shdr[info].sh_name),
+ ".rodata.prel64")) {
+ prel64 = true;
+ }
+
+ rela = (void *)ehdr + swab_elfxword(shdr[i].sh_offset);
+ numrela = swab_elfxword(shdr[i].sh_size) / sizeof(*rela);
+
+ for (int j = 0; j < numrela; j++) {
+ uint64_t info = swab_elfxword(rela[j].r_info);
+
+ if (ELF64_R_TYPE(info) != R_AARCH64_ABS64)
+ continue;
+
+ if (prel64) {
+ /* convert ABS64 into PREL64 */
+ info ^= R_AARCH64_ABS64 ^ R_AARCH64_PREL64;
+ rela[j].r_info = swab_elfxword(info);
+ } else {
+ fprintf(stderr,
+ "Unexpected absolute relocations detected in %s\n",
+ argv[2]);
+ close(fd);
+ unlink(argv[1]);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+ close(fd);
+ return 0;
+}
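
The r_info rewrite above relies on the ELF64 encoding: the symbol index lives
in the high 32 bits and the relocation type in the low 32, so XOR-ing with
(R_AARCH64_ABS64 ^ R_AARCH64_PREL64) flips only the type bits and leaves the
symbol index intact. A stand-alone check; the type numbers follow the AArch64
ELF ABI:

#include <stdint.h>
#include <stdio.h>

#define R_AARCH64_ABS64     257
#define R_AARCH64_PREL64    260
#define ELF64_R_SYM(i)      ((i) >> 32)
#define ELF64_R_TYPE(i)     ((i) & 0xffffffff)

int main(void)
{
    uint64_t info = ((uint64_t)7 << 32) | R_AARCH64_ABS64;  /* sym 7, ABS64 */

    info ^= R_AARCH64_ABS64 ^ R_AARCH64_PREL64;             /* flip the type */

    printf("sym %llu type %llu\n",
           (unsigned long long)ELF64_R_SYM(info),
           (unsigned long long)ELF64_R_TYPE(info));         /* sym 7 type 260 */
    return 0;
}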
diff --git a/arch/arm64/kernel/pi/relocate.c b/arch/arm64/kernel/pi/relocate.c
new file mode 100644
index 000000000000..2407d2696398
--- /dev/null
+++ b/arch/arm64/kernel/pi/relocate.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2023 Google LLC
+// Authors: Ard Biesheuvel <ardb@google.com>
+// Peter Collingbourne <pcc@google.com>
+
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pi.h"
+
+extern const Elf64_Rela rela_start[], rela_end[];
+extern const u64 relr_start[], relr_end[];
+
+void __init relocate_kernel(u64 offset)
+{
+ u64 *place = NULL;
+
+ for (const Elf64_Rela *rela = rela_start; rela < rela_end; rela++) {
+ if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
+ continue;
+ *(u64 *)(rela->r_offset + offset) = rela->r_addend + offset;
+ }
+
+ if (!IS_ENABLED(CONFIG_RELR) || !offset)
+ return;
+
+ /*
+ * Apply RELR relocations.
+ *
+ * RELR is a compressed format for storing relative relocations. The
+ * encoded sequence of entries looks like:
+ * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
+ *
+ * i.e. start with an address, followed by any number of bitmaps. The
+ * address entry encodes 1 relocation. The subsequent bitmap entries
+ * encode up to 63 relocations each, at subsequent offsets following
+ * the last address entry.
+ *
+ * The bitmap entries must have 1 in the least significant bit. The
+ * assumption here is that an address cannot have 1 in lsb. Odd
+ * addresses are not supported. Any odd addresses are stored in the
+ * RELA section, which is handled above.
+ *
+ * With the exception of the least significant bit, each bit in the
+ * bitmap corresponds with a machine word that follows the base address
+ * word, and the bit value indicates whether or not a relocation needs
+ * to be applied to it. The second least significant bit represents the
+ * machine word immediately following the initial address, and each bit
+ * that follows represents the next word, in linear order. As such, a
+ * single bitmap can encode up to 63 relocations in a 64-bit object.
+ */
+ for (const u64 *relr = relr_start; relr < relr_end; relr++) {
+ if ((*relr & 1) == 0) {
+ place = (u64 *)(*relr + offset);
+ *place++ += offset;
+ } else {
+ for (u64 *p = place, r = *relr >> 1; r; p++, r >>= 1)
+ if (r & 1)
+ *p += offset;
+ place += 63;
+ }
+ }
+}
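
To make the RELR walk above concrete, here is a stand-alone sketch of the same decode loop that prints the addresses it would patch instead of writing to them. The two sample entries are invented for illustration: a base address of 0x1000, followed by a bitmap entry whose bits 1 and 3 select the first and third 64-bit words after that base.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one address entry, then one bitmap entry (lsb set to mark it) */
	const uint64_t relr[] = { 0x1000, (1ULL << 1) | (1ULL << 3) | 1 };
	uint64_t next = 0;	/* word the running bitmap starts at */

	for (unsigned int i = 0; i < sizeof(relr) / sizeof(relr[0]); i++) {
		if ((relr[i] & 1) == 0) {
			/* even entry: an address, exactly one relocation */
			printf("relocate word at 0x%llx\n",
			       (unsigned long long)relr[i]);
			next = relr[i] + 8;
		} else {
			/* odd entry: bitmap covering the next 63 words */
			for (uint64_t r = relr[i] >> 1, j = 0; r; r >>= 1, j++)
				if (r & 1)
					printf("relocate word at 0x%llx\n",
					       (unsigned long long)(next + 8 * j));
			next += 63 * 8;
		}
	}
	return 0;
}

Running it prints 0x1000, 0x1008 and 0x1018, the same three words relocate_kernel() would adjust for those entries.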
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 70b91a8c6bb3..327855a11df2 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -371,6 +371,21 @@ static struct break_hook kprobes_break_ss_hook = {
.fn = kprobe_breakpoint_ss_handler,
};
+static int __kprobes
+kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+{
+ if (regs->pc != (unsigned long)__kretprobe_trampoline)
+ return DBG_HOOK_ERROR;
+
+ regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook kretprobes_break_hook = {
+ .imm = KRETPROBES_BRK_IMM,
+ .fn = kretprobe_breakpoint_handler,
+};
+
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -396,11 +411,6 @@ int __init arch_populate_kprobe_blacklist(void)
return ret;
}
-void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
-{
- return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
-}
-
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
@@ -420,6 +430,7 @@ int __init arch_init_kprobes(void)
{
register_kernel_break_hook(&kprobes_break_hook);
register_kernel_break_hook(&kprobes_break_ss_hook);
+ register_kernel_break_hook(&kretprobes_break_hook);
return 0;
}
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 9a6499bed58b..a362f3dbb3d1 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -4,83 +4,17 @@
*/
#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
+#include <asm/asm-bug.h>
#include <asm/assembler.h>
.text
- .macro save_all_base_regs
- stp x0, x1, [sp, #S_X0]
- stp x2, x3, [sp, #S_X2]
- stp x4, x5, [sp, #S_X4]
- stp x6, x7, [sp, #S_X6]
- stp x8, x9, [sp, #S_X8]
- stp x10, x11, [sp, #S_X10]
- stp x12, x13, [sp, #S_X12]
- stp x14, x15, [sp, #S_X14]
- stp x16, x17, [sp, #S_X16]
- stp x18, x19, [sp, #S_X18]
- stp x20, x21, [sp, #S_X20]
- stp x22, x23, [sp, #S_X22]
- stp x24, x25, [sp, #S_X24]
- stp x26, x27, [sp, #S_X26]
- stp x28, x29, [sp, #S_X28]
- add x0, sp, #PT_REGS_SIZE
- stp lr, x0, [sp, #S_LR]
- /*
- * Construct a useful saved PSTATE
- */
- mrs x0, nzcv
- mrs x1, daif
- orr x0, x0, x1
- mrs x1, CurrentEL
- orr x0, x0, x1
- mrs x1, SPSel
- orr x0, x0, x1
- stp xzr, x0, [sp, #S_PC]
- .endm
-
- .macro restore_all_base_regs
- ldr x0, [sp, #S_PSTATE]
- and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
- msr nzcv, x0
- ldp x0, x1, [sp, #S_X0]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldp x8, x9, [sp, #S_X8]
- ldp x10, x11, [sp, #S_X10]
- ldp x12, x13, [sp, #S_X12]
- ldp x14, x15, [sp, #S_X14]
- ldp x16, x17, [sp, #S_X16]
- ldp x18, x19, [sp, #S_X18]
- ldp x20, x21, [sp, #S_X20]
- ldp x22, x23, [sp, #S_X22]
- ldp x24, x25, [sp, #S_X24]
- ldp x26, x27, [sp, #S_X26]
- ldp x28, x29, [sp, #S_X28]
- .endm
-
SYM_CODE_START(__kretprobe_trampoline)
- sub sp, sp, #PT_REGS_SIZE
-
- save_all_base_regs
-
- /* Setup a frame pointer. */
- add x29, sp, #S_FP
-
- mov x0, sp
- bl trampoline_probe_handler
/*
- * Replace trampoline address in lr with actual orig_ret_addr return
- * address.
+ * Trigger a breakpoint exception. The PC will be adjusted by
+ * kretprobe_breakpoint_handler(), and no subsequent instructions will
+ * be executed from the trampoline.
*/
- mov lr, x0
-
- /* The frame pointer (x29) is restored with other registers. */
- restore_all_base_regs
-
- add sp, sp, #PT_REGS_SIZE
- ret
-
+ brk #KRETPROBES_BRK_IMM
+ ASM_BUG()
SYM_CODE_END(__kretprobe_trampoline)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7387b68c745b..4ae31b7af6c3 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -290,9 +290,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
fpsimd_preserve_current_state();
*dst = *src;
- /* We rely on the above assignment to initialize dst's thread_flags: */
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
-
/*
* Detach src's sve_state (if any) from dst so that it does not
* get erroneously used or freed prematurely. dst's copies
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index e3bef38fc2e2..162b030ab9da 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -174,7 +174,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
const char *desc = "Hardware breakpoint trap (ptrace)";
-#ifdef CONFIG_COMPAT
if (is_compat_task()) {
int si_errno = 0;
int i;
@@ -196,7 +195,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
desc);
return;
}
-#endif
+
arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
@@ -698,6 +697,39 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
return ret;
}
+static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_fpmr())
+ return -EINVAL;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ return membuf_store(&to, target->thread.uw.fpmr);
+}
+
+static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ unsigned long fpmr;
+
+ if (!system_supports_fpmr())
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
+ if (ret)
+ return ret;
+
+ target->thread.uw.fpmr = fpmr;
+
+ fpsimd_flush_task_state(target);
+
+ return 0;
+}
+
static int system_call_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
@@ -1419,6 +1451,7 @@ enum aarch64_regset {
REGSET_HW_BREAK,
REGSET_HW_WATCH,
#endif
+ REGSET_FPMR,
REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
@@ -1497,6 +1530,14 @@ static const struct user_regset aarch64_regsets[] = {
.regset_get = system_call_get,
.set = system_call_set,
},
+ [REGSET_FPMR] = {
+ .core_note_type = NT_ARM_FPMR,
+ .n = 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = fpmr_get,
+ .set = fpmr_set,
+ },
#ifdef CONFIG_ARM64_SVE
[REGSET_SVE] = { /* Scalable Vector Extension */
.core_note_type = NT_ARM_SVE,
@@ -1597,7 +1638,6 @@ static const struct user_regset_view user_aarch64_view = {
.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
-#ifdef CONFIG_COMPAT
enum compat_regset {
REGSET_COMPAT_GPR,
REGSET_COMPAT_VFP,
@@ -1854,6 +1894,7 @@ static const struct user_regset_view user_aarch32_ptrace_view = {
.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
+#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -2115,7 +2156,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
-#ifdef CONFIG_COMPAT
/*
* Core dumping of 32-bit tasks or compat ptrace requests must use the
* user_aarch32_view compatible with arm32. Native ptrace requests on
@@ -2126,7 +2166,7 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_aarch32_view;
else if (is_compat_thread(task_thread_info(task)))
return &user_aarch32_ptrace_view;
-#endif
+
return &user_aarch64_view;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 42c690bb2d60..65a052bf741f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -166,21 +166,6 @@ static void __init smp_build_mpidr_hash(void)
pr_warn("Large number of MPIDR hash buckets detected\n");
}
-static void *early_fdt_ptr __initdata;
-
-void __init *get_early_fdt_ptr(void)
-{
- return early_fdt_ptr;
-}
-
-asmlinkage void __init early_fdt_map(u64 dt_phys)
-{
- int fdt_size;
-
- early_fixmap_init();
- early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
-}
-
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
int size;
@@ -298,13 +283,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
kaslr_init();
- /*
- * If know now we are going to need KPTI then use non-global
- * mappings from the start, avoiding the cost of rewriting
- * everything later.
- */
- arm64_use_ng_mappings = kaslr_requires_kpti();
-
early_fixmap_init();
early_ioremap_init();
@@ -320,9 +298,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
dynamic_scs_init();
/*
- * Unmask asynchronous aborts and fiq after bringing up possible
- * earlycon. (Report possible System Errors once we can report this
- * occurred).
+ * Unmask SError as soon as possible after initializing earlycon so
+ * that we can report any SErrors immediately.
*/
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 425b1bc17a3f..4a77f4976e11 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -16,8 +16,8 @@
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
-#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
+#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <asm/daifflags.h>
@@ -60,6 +60,7 @@ struct rt_sigframe_user_layout {
unsigned long tpidr2_offset;
unsigned long za_offset;
unsigned long zt_offset;
+ unsigned long fpmr_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -182,6 +183,8 @@ struct user_ctxs {
u32 za_size;
struct zt_context __user *zt;
u32 zt_size;
+ struct fpmr_context __user *fpmr;
+ u32 fpmr_size;
};
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
@@ -227,6 +230,33 @@ static int restore_fpsimd_context(struct user_ctxs *user)
return err ? -EFAULT : 0;
}
+static int preserve_fpmr_context(struct fpmr_context __user *ctx)
+{
+ int err = 0;
+
+ current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);
+
+ __put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
+ __put_user_error(sizeof(*ctx), &ctx->head.size, err);
+ __put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);
+
+ return err;
+}
+
+static int restore_fpmr_context(struct user_ctxs *user)
+{
+ u64 fpmr;
+ int err = 0;
+
+ if (user->fpmr_size != sizeof(*user->fpmr))
+ return -EINVAL;
+
+ __get_user_error(fpmr, &user->fpmr->fpmr, err);
+ if (!err)
+ write_sysreg_s(fpmr, SYS_FPMR);
+
+ return err;
+}
#ifdef CONFIG_ARM64_SVE
@@ -590,6 +620,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->tpidr2 = NULL;
user->za = NULL;
user->zt = NULL;
+ user->fpmr = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -684,6 +715,17 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->zt_size = size;
break;
+ case FPMR_MAGIC:
+ if (!system_supports_fpmr())
+ goto invalid;
+
+ if (user->fpmr)
+ goto invalid;
+
+ user->fpmr = (struct fpmr_context __user *)head;
+ user->fpmr_size = size;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -806,6 +848,9 @@ static int restore_sigframe(struct pt_regs *regs,
if (err == 0 && system_supports_tpidr2() && user.tpidr2)
err = restore_tpidr2_context(&user);
+ if (err == 0 && system_supports_fpmr() && user.fpmr)
+ err = restore_fpmr_context(&user);
+
if (err == 0 && system_supports_sme() && user.za)
err = restore_za_context(&user);
@@ -928,6 +973,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
}
}
+ if (system_supports_fpmr()) {
+ err = sigframe_alloc(user, &user->fpmr_offset,
+ sizeof(struct fpmr_context));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -983,6 +1035,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
err |= preserve_tpidr2_context(tpidr2_ctx);
}
+ /* FPMR if supported */
+ if (system_supports_fpmr() && err == 0) {
+ struct fpmr_context __user *fpmr_ctx =
+ apply_user_offset(user, user->fpmr_offset);
+ err |= preserve_fpmr_context(fpmr_ctx);
+ }
+
/* ZA state if present */
if (system_supports_sme() && err == 0 && user->za_offset) {
struct za_context __user *za_ctx =
@@ -1207,7 +1266,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-static void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
{
unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0;
@@ -1278,41 +1337,6 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
-{
- do {
- if (thread_flags & _TIF_NEED_RESCHED) {
- /* Unmask Debug and SError for the next task */
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
- schedule();
- } else {
- local_daif_restore(DAIF_PROCCTX);
-
- if (thread_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
- if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
- clear_thread_flag(TIF_MTE_ASYNC_FAULT);
- send_sig_fault(SIGSEGV, SEGV_MTEAERR,
- (void __user *)NULL, current);
- }
-
- if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- do_signal(regs);
-
- if (thread_flags & _TIF_NOTIFY_RESUME)
- resume_user_mode_work(regs);
-
- if (thread_flags & _TIF_FOREIGN_FPSTATE)
- fpsimd_restore_current_state();
- }
-
- local_daif_mask();
- thread_flags = read_thread_flags();
- } while (thread_flags & _TIF_WORK_MASK);
-}
-
unsigned long __ro_after_init signal_minsigstksz;
/*
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 2aa5129d8253..f093cdf71be1 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -102,9 +102,6 @@ SYM_CODE_START(cpu_resume)
mov x0, xzr
bl init_kernel_el
mov x19, x0 // preserve boot mode
-#if VA_BITS > 48
- ldr_l x0, vabits_actual
-#endif
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 9a70d9746b66..ad198262b981 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -20,14 +20,11 @@ long sys_ni_syscall(void);
static long do_ni_syscall(struct pt_regs *regs, int scno)
{
-#ifdef CONFIG_COMPAT
- long ret;
if (is_compat_task()) {
- ret = compat_arm_syscall(regs, scno);
+ long ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS)
return ret;
}
-#endif
return sys_ni_syscall();
}
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/vmcore_info.c
index 66cde752cd74..b19d5d6cb8b3 100644
--- a/arch/arm64/kernel/crash_core.c
+++ b/arch/arm64/kernel/vmcore_info.c
@@ -4,7 +4,7 @@
* Copyright (C) Huawei Futurewei Technologies.
*/
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -23,7 +23,6 @@ void arch_crash_save_vmcoreinfo(void)
/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
- vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3cd7e76cc562..755a22d4f840 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -126,9 +126,9 @@ jiffies = jiffies_64;
#ifdef CONFIG_UNWIND_TABLES
#define UNWIND_DATA_SECTIONS \
.eh_frame : { \
- __eh_frame_start = .; \
+ __pi___eh_frame_start = .; \
*(.eh_frame) \
- __eh_frame_end = .; \
+ __pi___eh_frame_end = .; \
}
#else
#define UNWIND_DATA_SECTIONS
@@ -270,15 +270,15 @@ SECTIONS
HYPERVISOR_RELOC_SECTION
.rela.dyn : ALIGN(8) {
- __rela_start = .;
+ __pi_rela_start = .;
*(.rela .rela*)
- __rela_end = .;
+ __pi_rela_end = .;
}
.relr.dyn : ALIGN(8) {
- __relr_start = .;
+ __pi_relr_start = .;
*(.relr.dyn)
- __relr_end = .;
+ __pi_relr_end = .;
}
. = ALIGN(SEGMENT_ALIGN);
@@ -311,12 +311,17 @@ SECTIONS
__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
_edata = .;
+ /* start of zero-init region */
BSS_SECTION(SBSS_ALIGN, 0, 0)
. = ALIGN(PAGE_SIZE);
init_pg_dir = .;
. += INIT_DIR_SIZE;
init_pg_end = .;
+ /* end of zero-init region */
+
+ . += SZ_4K; /* stack for the early C runtime */
+ early_init_stack = .;
. = ALIGN(SEGMENT_ALIGN);
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 27ca89b628a0..58f09370d17e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -19,7 +19,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM
select KVM_COMMON
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
@@ -33,12 +32,11 @@ menuconfig KVM
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
- select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select HAVE_KVM_READONLY_MEM
select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
- select XARRAY_MULTI
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 9dec8c419bf4..879982b1cc73 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
WARN_ON_ONCE(ret);
/*
- * The virtual offset behaviour is "interresting", as it
+ * The virtual offset behaviour is "interesting", as it
* always applies when HCR_EL2.E2H==0, but only when
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
* track E2H when putting the HV timer in "direct" mode.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a25265aca432..3dee5490eea9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+void kvm_arch_create_vm_debugfs(struct kvm *kvm)
+{
+ kvm_sys_regs_create_debugfs(kvm);
+}
/**
* kvm_arch_destroy_vm - destroy the VM data structure
@@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
pkvm_destroy_hyp_vm(kvm);
kfree(kvm->arch.mpidr_data);
+ kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
}
+ /*
+ * This needs to happen after NV has imposed its own restrictions on
+ * the feature set
+ */
+ kvm_init_sysreg(vcpu);
+
ret = kvm_timer_enable(vcpu);
if (ret)
return ret;
@@ -2591,7 +2602,8 @@ static __init int kvm_arm_init(void)
} else if (in_hyp_mode) {
kvm_info("VHE mode initialized successfully\n");
} else {
- kvm_info("Hyp mode initialized successfully\n");
+ char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
+ kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
}
/*
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 8725291cb00a..ce8886122ed3 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -23,7 +23,7 @@
static DEFINE_PER_CPU(u64, mdcr_el2);
-/**
+/*
* save/restore_guest_debug_regs
*
* For some debug operations we need to tweak some guest registers. As
@@ -143,6 +143,7 @@ void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ * @vcpu: the vcpu pointer
*/
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 431fd429932d..4697ba41b3a9 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = {
* [19:14] bit number in the FGT register (6 bits)
* [20] trap polarity (1 bit)
* [25:21] FG filter (5 bits)
- * [62:26] Unused (37 bits)
+ * [35:26] Main SysReg table index (10 bits)
+ * [62:36] Unused (27 bits)
* [63] RES0 - Must be zero, as lost on insertion in the xarray
*/
#define TC_CGT_BITS 10
#define TC_FGT_BITS 4
#define TC_FGF_BITS 5
+#define TC_SRI_BITS 10
union trap_config {
u64 val;
@@ -442,7 +444,8 @@ union trap_config {
unsigned long bit:6; /* Bit number */
unsigned long pol:1; /* Polarity */
unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
- unsigned long unused:37; /* Unused, should be zero */
+ unsigned long sri:TC_SRI_BITS; /* SysReg Index */
+ unsigned long unused:27; /* Unused, should be zero */
unsigned long mbz:1; /* Must Be Zero */
};
};
@@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
static DEFINE_XARRAY(sr_forward_xa);
-enum fgt_group_id {
- __NO_FGT_GROUP__,
- HFGxTR_GROUP,
- HDFGRTR_GROUP,
- HDFGWTR_GROUP,
- HFGITR_GROUP,
- HAFGRTR_GROUP,
-
- /* Must be last */
- __NR_FGT_GROUP_IDS__
-};
-
enum fg_filter_id {
__NO_FGF__,
HCRX_FGTnXS,
@@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
err);
}
+static u32 encoding_next(u32 encoding)
+{
+ u8 op0, op1, crn, crm, op2;
+
+ op0 = sys_reg_Op0(encoding);
+ op1 = sys_reg_Op1(encoding);
+ crn = sys_reg_CRn(encoding);
+ crm = sys_reg_CRm(encoding);
+ op2 = sys_reg_Op2(encoding);
+
+ if (op2 < Op2_mask)
+ return sys_reg(op0, op1, crn, crm, op2 + 1);
+ if (crm < CRm_mask)
+ return sys_reg(op0, op1, crn, crm + 1, 0);
+ if (crn < CRn_mask)
+ return sys_reg(op0, op1, crn + 1, 0, 0);
+ if (op1 < Op1_mask)
+ return sys_reg(op0, op1 + 1, 0, 0, 0);
+
+ return sys_reg(op0 + 1, 0, 0, 0, 0);
+}
+
int __init populate_nv_trap_config(void)
{
int ret = 0;
@@ -1775,23 +1788,18 @@ int __init populate_nv_trap_config(void)
ret = -EINVAL;
}
- if (cgt->encoding != cgt->end) {
- prev = xa_store_range(&sr_forward_xa,
- cgt->encoding, cgt->end,
- xa_mk_value(cgt->tc.val),
- GFP_KERNEL);
- } else {
- prev = xa_store(&sr_forward_xa, cgt->encoding,
+ for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {
+ prev = xa_store(&sr_forward_xa, enc,
xa_mk_value(cgt->tc.val), GFP_KERNEL);
if (prev && !xa_is_err(prev)) {
ret = -EINVAL;
print_nv_trap_error(cgt, "Duplicate CGT", ret);
}
- }
- if (xa_is_err(prev)) {
- ret = xa_err(prev);
- print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ }
}
}
@@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void)
for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
union trap_config tc;
+ void *prev;
if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
ret = -EINVAL;
@@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void)
}
tc.val |= fgt->tc.val;
- xa_store(&sr_forward_xa, fgt->encoding,
- xa_mk_value(tc.val), GFP_KERNEL);
+ prev = xa_store(&sr_forward_xa, fgt->encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(fgt, "Failed FGT insertion", ret);
+ }
}
kvm_info("nv: %ld fine grained trap handlers\n",
@@ -1845,6 +1859,38 @@ check_mcb:
return ret;
}
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+ unsigned int idx)
+{
+ union trap_config tc;
+ u32 encoding;
+ void *ret;
+
+ /*
+ * 0 is a valid value for the index, but not for the storage.
+ * We'll store (idx+1), so check against an offset'd limit.
+ */
+ if (idx >= (BIT(TC_SRI_BITS) - 1)) {
+ kvm_err("sysreg %s (%d) out of range\n", sr->name, idx);
+ return -EINVAL;
+ }
+
+ encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2);
+ tc = get_trap_config(encoding);
+
+ if (tc.sri) {
+ kvm_err("sysreg %s (%d) duplicate entry (%d)\n",
+ sr->name, idx - 1, tc.sri);
+ return -EINVAL;
+ }
+
+ tc.sri = idx + 1;
+ ret = xa_store(&sr_forward_xa, encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ return xa_err(ret);
+}
+
static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
const struct trap_bits *tb)
{
@@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
return __compute_trap_behaviour(vcpu, tc.cgt, b);
}
-static bool check_fgt_bit(u64 val, const union trap_config tc)
+static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
{
- return ((val >> tc.bit) & 1) == tc.pol;
+ struct kvm_sysreg_masks *masks;
+
+ /* Only handle the VNCR-backed regs for now */
+ if (sr < __VNCR_START__)
+ return 0;
+
+ masks = kvm->arch.sysreg_masks;
+
+ return masks->mask[sr - __VNCR_START__].res0;
}
-#define sanitised_sys_reg(vcpu, reg) \
- ({ \
- u64 __val; \
- __val = __vcpu_sys_reg(vcpu, reg); \
- __val &= ~__ ## reg ## _RES0; \
- (__val); \
- })
+static bool check_fgt_bit(struct kvm *kvm, bool is_read,
+ u64 val, const union trap_config tc)
+{
+ enum vcpu_sysreg sr;
+
+ if (tc.pol)
+ return (val & BIT(tc.bit));
+
+ /*
+ * FGTs with negative polarities are an absolute nightmare, as
+ * we need to evaluate the bit in the light of the feature
+ * that defines it. WTF were they thinking?
+ *
+ * So let's check if the bit has been earmarked as RES0, as
+ * this indicates an unimplemented feature.
+ */
+ if (val & BIT(tc.bit))
+ return false;
+
+ switch ((enum fgt_group_id)tc.fgt) {
+ case HFGxTR_GROUP:
+ sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
+ break;
+
+ case HDFGRTR_GROUP:
+ sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
+ break;
+
+ case HAFGRTR_GROUP:
+ sr = HAFGRTR_EL2;
+ break;
+
+ case HFGITR_GROUP:
+ sr = HFGITR_EL2;
+ break;
+
+ default:
+ WARN_ONCE(1, "Unhandled FGT group");
+ return false;
+ }
+
+ return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
+}
-bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
{
union trap_config tc;
enum trap_behaviour b;
@@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
u32 sysreg;
u64 esr, val;
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return false;
-
esr = kvm_vcpu_get_esr(vcpu);
sysreg = esr_sys64_to_sysreg(esr);
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
@@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
* A value of 0 for the whole entry means that we know nothing
* for this sysreg, and that it cannot be re-injected into the
* nested hypervisor. In this situation, let's cut it short.
- *
- * Note that ultimately, we could also make use of the xarray
- * to store the index of the sysreg in the local descriptor
- * array, avoiding another search... Hint, hint...
*/
if (!tc.val)
- return false;
+ goto local;
+
+ /*
+ * If a sysreg can be trapped using a FGT, first check whether we
+ * trap for the purpose of forbidding the feature. In that case,
+ * inject an UNDEF.
+ */
+ if (tc.fgt != __NO_FGT_GROUP__ &&
+ (vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) {
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ /*
+ * If we're not nesting, immediately return to the caller, with the
+ * sysreg index, should we have it.
+ */
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ goto local;
switch ((enum fgt_group_id)tc.fgt) {
case __NO_FGT_GROUP__:
@@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case HFGxTR_GROUP:
if (is_read)
- val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGRTR_EL2);
else
- val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGWTR_EL2);
break;
case HDFGRTR_GROUP:
- case HDFGWTR_GROUP:
if (is_read)
- val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2);
else
- val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2);
break;
case HAFGRTR_GROUP:
- val = sanitised_sys_reg(vcpu, HAFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2);
break;
case HFGITR_GROUP:
- val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGITR_EL2);
switch (tc.fgf) {
u64 tmp;
@@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
break;
case HCRX_FGTnXS:
- tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+ tmp = __vcpu_sys_reg(vcpu, HCRX_EL2);
if (tmp & HCRX_EL2_FGTnXS)
tc.fgt = __NO_FGT_GROUP__;
}
@@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case __NR_FGT_GROUP_IDS__:
/* Something is really wrong, bail out */
WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
- return false;
+ goto local;
}
- if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
+ val, tc))
goto inject;
b = compute_trap_behaviour(vcpu, tc);
@@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject;
+local:
+ if (!tc.sri) {
+ struct sys_reg_params params;
+
+ params = esr_sys64_to_params(esr);
+
+ /*
+ * Check for the IMPDEF range, as per DDI0487 J.a,
+ * D18.3.2 Reserved encodings for IMPLEMENTATION
+ * DEFINED registers.
+ */
+ if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011))
+ print_sys_reg_msg(&params,
+ "Unsupported guest access at: %lx\n",
+ *vcpu_pc(vcpu));
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ *sr_index = tc.sri - 1;
return false;
inject:
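
An aside on check_fgt_bit() above: for a negative-polarity trap bit, a clear bit only means "forward the trap" when the bit is actually implemented, i.e. not part of the per-guest RES0 mask. A stand-alone sketch of just that decision, with invented names and values (fgt_bit_traps() is not a kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * pol == true: a set bit traps. pol == false: a clear bit traps, but only
 * if the bit is implemented, i.e. not covered by the RES0 mask.
 */
static bool fgt_bit_traps(uint64_t fgt_val, uint64_t res0, int bit, bool pol)
{
	if (pol)
		return fgt_val & (1ULL << bit);
	if (fgt_val & (1ULL << bit))
		return false;
	return !(res0 & (1ULL << bit));
}

int main(void)
{
	printf("%d\n", fgt_bit_traps(0, 1ULL << 5, 5, false)); /* 0: bit is RES0 */
	printf("%d\n", fgt_bit_traps(0, 0, 5, false));         /* 1: implemented and clear */
	return 0;
}

The first call returns 0 because the feature behind bit 5 is absent (the bit is RES0); the second returns 1 because the bit is implemented and clear.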
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 8c1d0d4853df..826307e19e3a 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
}
/*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
* and interrupts are disabled. If we have managed to run anything using
* FP while we were preemptible (such as off the back of an interrupt),
* then neither the host nor the guest own the FP hardware (and it was the
@@ -153,6 +153,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.sve_vl = vcpu->arch.sve_max_vl;
fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
+ fp_state.fpmr = &vcpu->arch.fpmr;
fp_state.fp_type = &vcpu->arch.fp_type;
if (vcpu_has_sve(vcpu))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index aaf1d4939739..e2f762d959bb 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -711,6 +711,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ * @vcpu: the vCPU pointer
*
* This is for all registers.
*/
@@ -729,6 +730,8 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
+ * @vcpu: the vCPU pointer
+ * @uindices: register list to copy
*
* We do core registers right here, then we append system regs.
*/
@@ -902,8 +905,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
/**
* kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
- * @kvm: pointer to the KVM struct
- * @kvm_guest_debug: the ioctl data buffer
+ * @vcpu: the vCPU pointer
+ * @dbg: the ioctl data buffer
*
* This sets up and enables the VM for guest debugging. Userspace
* passes in a control flag to enable different debug types and
@@ -1072,7 +1075,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
} else {
/*
* Only locking to serialise with a concurrent
- * set_pte_at() in the VMM but still overriding the
+ * __set_ptes() in the VMM but still overriding the
* tags, hence ignoring the return value.
*/
try_page_mte_tagging(page);
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index f98cbe2626a1..8d9670e6615d 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -84,7 +84,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
}
/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
@@ -120,7 +120,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
}
/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr32(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a038320cdb08..e3fcf8c4d5b4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
clr |= ~hfg & __ ## reg ## _nMASK; \
} while(0)
-#define update_fgt_traps_cs(vcpu, reg, clr, set) \
+#define reg_to_fgt_group_id(reg) \
+ ({ \
+ enum fgt_group_id id; \
+ switch(reg) { \
+ case HFGRTR_EL2: \
+ case HFGWTR_EL2: \
+ id = HFGxTR_GROUP; \
+ break; \
+ case HFGITR_EL2: \
+ id = HFGITR_GROUP; \
+ break; \
+ case HDFGRTR_EL2: \
+ case HDFGWTR_EL2: \
+ id = HDFGRTR_GROUP; \
+ break; \
+ case HAFGRTR_EL2: \
+ id = HAFGRTR_GROUP; \
+ break; \
+ default: \
+ BUILD_BUG_ON(1); \
+ } \
+ \
+ id; \
+ })
+
+#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
+ do { \
+ u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
+ set |= hfg & __ ## reg ## _MASK; \
+ clr |= hfg & __ ## reg ## _nMASK; \
+ } while(0)
+
+#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \
- struct kvm_cpu_context *hctxt = \
- &this_cpu_ptr(&kvm_host_data)->host_ctxt; \
u64 c = 0, s = 0; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
- compute_clr_set(vcpu, reg, c, s); \
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
+ compute_clr_set(vcpu, reg, c, s); \
+ \
+ compute_undef_clr_set(vcpu, kvm, reg, c, s); \
+ \
s |= set; \
c |= clr; \
if (c || s) { \
@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
} \
} while(0)
-#define update_fgt_traps(vcpu, reg) \
- update_fgt_traps_cs(vcpu, reg, 0, 0)
+#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
+ update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
/*
* Validate the fine grain trap masks.
@@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
- u64 r_val, w_val;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
CHECK_FGT_MASKS(HFGRTR_EL2);
CHECK_FGT_MASKS(HFGWTR_EL2);
@@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
- ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
-
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
-
- r_clr |= tmp;
- w_clr |= tmp;
- }
-
- /*
- * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
- */
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- w_set |= HFGxTR_EL2_TCR_EL1_MASK;
-
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
- compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
- compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
- }
-
- /* The default to trap everything not handled or supported in KVM. */
- tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
-
- r_val = __HFGRTR_EL2_nMASK & ~tmp;
- r_val |= r_set;
- r_val &= ~r_clr;
-
- w_val = __HFGWTR_EL2_nMASK & ~tmp;
- w_val |= w_set;
- w_val &= ~w_clr;
-
- write_sysreg_s(r_val, SYS_HFGRTR_EL2);
- write_sysreg_s(w_val, SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- update_fgt_traps(vcpu, HFGITR_EL2);
- update_fgt_traps(vcpu, HDFGRTR_EL2);
- update_fgt_traps(vcpu, HDFGWTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
+ cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
+ HFGxTR_EL2_TCR_EL1_MASK : 0);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- update_fgt_traps(vcpu, HAFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
+#define __deactivate_fgt(hctxt, vcpu, kvm, reg) \
+ do { \
+ if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
+ kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
+ write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
+ SYS_ ## reg); \
+ } while(0)
+
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
+ else
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
- u64 hcrx = HCRX_GUEST_FLAGS;
+ u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 clr = 0, set = 0;
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index bb6b571ec627..4be6a7fa0070 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
-static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
if (!vcpu)
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+ return vcpu;
+}
+
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}
+static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
}
@@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 4558c02eb352..7746ea507b6f 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
- write_sysreg_s(0, SYS_PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+ write_sysreg_el1(0, SYS_PMSCR);
isb();
/* Now drain all buffered data to memory */
@@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+ write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
@@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
- *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
- write_sysreg_s(0, SYS_TRFCR_EL1);
+ *trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+ write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
@@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
return;
/* Restore trace filter controls */
- write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+ write_sysreg_el1(trfcr_el1, SYS_TRFCR);
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 7693a6757cd7..135cfb294ee5 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Prepare and exit to the host's panic funciton. */
+ /* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index b01a3d1078a8..8850b591d775 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
start = hyp_memory[i].base;
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
/*
- * The begining of the hyp_vmemmap region for the current
+ * The beginning of the hyp_vmemmap region for the current
* memblock may already be backed by the page backing the end
* the previous region, so avoid mapping it twice.
*/
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc)
{
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index ab9d05fcf98b..3fae5830f8d2 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -717,15 +717,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
kvm_pte_t *ptep)
{
- bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
- KVM_S2_MEMATTR(pgt, NORMAL);
+ kvm_pte_t attr;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
+ switch (prot & (KVM_PGTABLE_PROT_DEVICE |
+ KVM_PGTABLE_PROT_NORMAL_NC)) {
+ case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
+ return -EINVAL;
+ case KVM_PGTABLE_PROT_DEVICE:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
+ break;
+ case KVM_PGTABLE_PROT_NORMAL_NC:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
+ break;
+ default:
+ attr = KVM_S2_MEMATTR(pgt, NORMAL);
+ }
+
if (!(prot & KVM_PGTABLE_PROT_X))
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- else if (device)
- return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 8e1e0d5033b6..a8b9ea496706 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -95,7 +95,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
}
/**
- * __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 0bd93a5f21ce..a640e839848e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
} else {
- /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+ /* no need to shuffle FS[4] into DFSR[10] as it's 0 */
fsr = DFSR_FSC_EXTABT_nLPAE;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d14504821b79..18680771cdb0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -305,7 +305,7 @@ static void invalidate_icache_guest_page(void *va, size_t size)
* does.
*/
/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
* @mmu: The KVM stage-2 MMU pointer
* @start: The intermediate physical base address of the range to unmap
* @size: The size of the area to unmap
@@ -805,7 +805,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
.pgd = (kvm_pteref_t)kvm->mm->pgd,
.ia_bits = vabits_actual,
.start_level = (KVM_PGTABLE_LAST_LEVEL -
- CONFIG_PGTABLE_LEVELS + 1),
+ ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),
.mm_ops = &kvm_user_mm_ops,
};
unsigned long flags;
@@ -1381,7 +1381,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int ret = 0;
bool write_fault, writable, force_pte = false;
bool exec_fault, mte_allowed;
- bool device = false;
+ bool device = false, vfio_allow_any_uc = false;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1472,6 +1472,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn = fault_ipa >> PAGE_SHIFT;
mte_allowed = kvm_vma_mte_allowed(vma);
+ vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
+
/* Don't use the VMA after the unlock -- it may have vanished */
vma = NULL;
@@ -1557,10 +1559,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- if (device)
- prot |= KVM_PGTABLE_PROT_DEVICE;
- else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
+ if (device) {
+ if (vfio_allow_any_uc)
+ prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+ else
+ prot |= KVM_PGTABLE_PROT_DEVICE;
+ } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
prot |= KVM_PGTABLE_PROT_X;
+ }
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
@@ -1874,16 +1880,9 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
/*
- * The ID map may be configured to use an extended virtual address
- * range. This is only the case if system RAM is out of range for the
- * currently configured page size and VA_BITS_MIN, in which case we will
- * also need the extended virtual range for the HYP ID map, or we won't
- * be able to enable the EL2 MMU.
- *
- * However, in some cases the ID map may be configured for fewer than
- * the number of VA bits used by the regular kernel stage 1. This
- * happens when VA_BITS=52 and the kernel image is placed in PA space
- * below 48 bits.
+ * The ID map is always configured for 48 bits of translation, which
+ * may be fewer than the number of VA bits used by the regular kernel
+ * stage 1, when VA_BITS=52.
*
* At EL2, there is only one TTBR register, and we can't switch between
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
@@ -1894,7 +1893,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
* 1 VA bits to assure that the hypervisor can both ID map its code page
* and map any kernel memory.
*/
- idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+ idmap_bits = IDMAP_VA_BITS;
kernel_bits = vabits_actual;
*hyp_va_bits = max(idmap_bits, kernel_bits);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index ba95d044bc98..ced30c90521a 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -133,6 +133,13 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
break;
+ case SYS_ID_AA64MMFR4_EL1:
+ val = 0;
+ if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
+ ID_AA64MMFR4_EL1_E2H0_NI_NV1);
+ break;
+
case SYS_ID_AA64DFR0_EL1:
/* Only limited support for PMU, Debug, BPs and WPs */
val &= (NV_FTR(DFR0, PMUVer) |
@@ -156,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
return val;
}
+
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
+{
+ u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
+ struct kvm_sysreg_masks *masks;
+
+ masks = vcpu->kvm->arch.sysreg_masks;
+
+ if (masks) {
+ sr -= __VNCR_START__;
+
+ v &= ~masks->mask[sr].res0;
+ v |= masks->mask[sr].res1;
+ }
+
+ return v;
+}
+
+static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+{
+ int i = sr - __VNCR_START__;
+
+ kvm->arch.sysreg_masks->mask[i].res0 = res0;
+ kvm->arch.sysreg_masks->mask[i].res1 = res1;
+}
+
int kvm_init_nv_sysregs(struct kvm *kvm)
{
+ u64 res0, res1;
+ int ret = 0;
+
mutex_lock(&kvm->arch.config_lock);
+ if (kvm->arch.sysreg_masks)
+ goto out;
+
+ kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
+ GFP_KERNEL);
+ if (!kvm->arch.sysreg_masks) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
kvm->arch.id_regs[i]);
+ /* VTTBR_EL2 */
+ res0 = res1 = 0;
+ if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
+ res0 |= GENMASK(63, 56);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
+ res0 |= VTTBR_CNP_BIT;
+ set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
+
+ /* VTCR_EL2 */
+ res0 = GENMASK(63, 32) | GENMASK(30, 20);
+ res1 = BIT(31);
+ set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
+
+ /* VMPIDR_EL2 */
+ res0 = GENMASK(63, 40) | GENMASK(30, 24);
+ res1 = BIT(31);
+ set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
+
+ /* HCR_EL2 */
+ res0 = BIT(48);
+ res1 = HCR_RW;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
+ res0 |= GENMASK(63, 59);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
+ res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
+ res0 |= (HCR_TTLBIS | HCR_TTLBOS);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
+ !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
+ res0 |= HCR_ENSCXT;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
+ res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
+ res0 |= HCR_AMVOFFEN;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
+ res0 |= HCR_FIEN;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
+ res0 |= HCR_FWB;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
+ res0 |= HCR_NV2;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
+ res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
+ if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ res0 |= (HCR_API | HCR_APK);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
+ res0 |= BIT(39);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
+ res0 |= (HCR_TEA | HCR_TERR);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ res0 |= HCR_TLOR;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
+ res1 |= HCR_E2H;
+ set_sysreg_masks(kvm, HCR_EL2, res0, res1);
+
+ /* HCRX_EL2 */
+ res0 = HCRX_EL2_RES0;
+ res1 = HCRX_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
+ res0 |= HCRX_EL2_PACMEn;
+ if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
+ res0 |= HCRX_EL2_EnFPM;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= HCRX_EL2_GCSEn;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
+ res0 |= HCRX_EL2_EnIDCP128;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
+ res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
+ res0 |= HCRX_EL2_TMEA;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
+ res0 |= HCRX_EL2_D128En;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
+ res0 |= HCRX_EL2_PTTWI;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
+ res0 |= HCRX_EL2_SCTLR2En;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
+ res0 |= HCRX_EL2_TCR2En;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
+ res0 |= HCRX_EL2_CMOW;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
+ res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
+ !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
+ res0 |= HCRX_EL2_SMPME;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+ res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+ res0 |= HCRX_EL2_EnASR;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+ res0 |= HCRX_EL2_EnALS;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+ res0 |= HCRX_EL2_EnAS0;
+ set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
+
+ /* HFG[RW]TR_EL2 */
+ res0 = res1 = 0;
+ if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
+ HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
+ HFGxTR_EL2_APIBKey);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
+ HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
+ HFGxTR_EL2_LORSA_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
+ !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
+ res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
+ res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
+ res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
+ HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
+ HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
+ HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
+ HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+ res0 |= HFGxTR_EL2_nACCDATA_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
+ res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
+ res0 |= HFGxTR_EL2_nRCWMASK_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
+ res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+ res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
+ res0 |= HFGxTR_EL2_nS2POR_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
+ res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
+ set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
+ set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);
+
+ /* HDFG[RW]TR_EL2 */
+ res0 = res1 = 0;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
+ res0 |= HDFGRTR_EL2_OSDLR_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
+ res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
+ HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
+ HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
+ HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
+ HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
+ HDFGRTR_EL2_PMCEIDn_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
+ res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
+ HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
+ HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
+ HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
+ HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
+ HDFGRTR_EL2_PMBIDR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
+ res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
+ HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
+ HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
+ HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
+ HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
+ HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
+ HDFGRTR_EL2_TRCVICTLR);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
+ res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
+ HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
+ HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
+ HDFGRTR_EL2_TRBTRG_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
+ res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
+ HDFGRTR_EL2_nBRBDATA);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
+ res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
+ set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);
+
+ /* Reuse the bits from the read-side and add the write-specific stuff */
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
+ res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
+ res0 |= HDFGWTR_EL2_TRCOSLAR;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
+ res0 |= HDFGWTR_EL2_TRFCR_EL1;
+ set_sysreg_masks(kvm, HDFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);
+
+ /* HFGITR_EL2 */
+ res0 = HFGITR_EL2_RES0;
+ res1 = HFGITR_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
+ res0 |= HFGITR_EL2_DCCVADP;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
+ res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
+ HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
+ HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
+ HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
+ HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
+ HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
+ HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
+ HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
+ res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
+ HFGITR_EL2_CPPRCTX);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
+ res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
+ HFGITR_EL2_nGCSEPP);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
+ res0 |= HFGITR_EL2_COSPRCTX;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
+ res0 |= HFGITR_EL2_ATS1E1A;
+ set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
+
+ /* HAFGRTR_EL2 - not a lot to see here */
+ res0 = HAFGRTR_EL2_RES0;
+ res1 = HAFGRTR_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
+ res0 |= ~(res0 | res1);
+ set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
+out:
mutex_unlock(&kvm->arch.config_lock);
- return 0;
+ return ret;
}
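
The set_sysreg_masks() calls above record, per VM, which bits of each EL2 register become RES0 or RES1 given the guest's ID register configuration. As a hedged illustration of what a consumer of such a (res0, res1) pair ends up doing (the helper name and its placement are assumptions, not the in-tree code):

/* Minimal sketch: sanitise a guest-written value against a recorded
 * (res0, res1) pair. Illustrative only. */
static inline u64 apply_res_masks(u64 val, u64 res0, u64 res1)
{
	val &= ~res0;	/* reserved-as-zero bits read as 0 */
	val |= res1;	/* reserved-as-one bits read as 1 */
	return val;
}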
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 3d9467ff73bc..a35ce10e0a9f 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
kvm_pmu_event_mask(kvm);
- u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
mask |= ARMV8_PMU_INCLUDE_EL2;
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
ARMV8_PMU_EXCLUDE_NS_EL1 |
ARMV8_PMU_EXCLUDE_EL3;
@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
*/
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
- kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
+ kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
@@ -419,7 +420,7 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
kvm_pmu_update_state(vcpu);
}
-/**
+/*
* When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
* to the event.
* This is why we need a callback to do it once outside of the NMI context.
@@ -490,7 +491,7 @@ static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
return val;
}
-/**
+/*
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
return;
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
- if (!kvm_pmu_is_3p5(vcpu))
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
/* The reset bits don't indicate any state, and shouldn't be saved. */
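
The conversions in this hunk replace open-coded IDREG()/SYS_FIELD_GET() comparisons with kvm_has_feat(), which tests a field of the VM's sanitised ID register copy against a named minimum value. A rough sketch of its shape, inferred from the call sites here (the real macro lives in asm/kvm_host.h and also copes with signed ID fields):

/* Sketch only; the constant naming follows the generated sysreg
 * convention, e.g. ID_AA64DFR0_EL1_PMUVer_V3P5. */
#define EXAMPLE_KVM_HAS_FEAT(kvm, reg, field, min)			\
	(SYS_FIELD_GET(reg, field, IDREG(kvm, SYS_##reg)) >=		\
	 reg##_##field##_##min)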
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 30253bd19917..c9f4f387155f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -12,6 +12,7 @@
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
+#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -505,10 +506,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
- if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
kvm_inject_undefined(vcpu);
return false;
}
@@ -1685,7 +1685,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
(val) &= ~reg##_##field##_MASK; \
(val) |= FIELD_PREP(reg##_##field##_MASK, \
- min(__f_val, (u64)reg##_##field##_##limit)); \
+ min(__f_val, \
+ (u64)SYS_FIELD_VALUE(reg, field, limit))); \
(val); \
})
@@ -2174,6 +2175,16 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
return true;
}
+static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 val = r->val;
+
+ if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ val |= HCR_E2H;
+
+ return __vcpu_sys_reg(vcpu, r->reg) = val;
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2186,16 +2197,6 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
* guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
- { SYS_DESC(SYS_DC_ISW), access_dcsw },
- { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CSW), access_dcsw },
- { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CISW), access_dcsw },
- { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
-
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
@@ -2349,7 +2350,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR2_EL1_NV |
ID_AA64MMFR2_EL1_CCIDX)),
ID_SANITISED(ID_AA64MMFR3_EL1),
- ID_UNALLOCATED(7,4),
+ ID_SANITISED(ID_AA64MMFR4_EL1),
ID_UNALLOCATED(7,5),
ID_UNALLOCATED(7,6),
ID_UNALLOCATED(7,7),
@@ -2665,7 +2666,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
- EL2_REG_VNCR(HCR_EL2, reset_val, 0),
+ EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
@@ -2727,6 +2728,18 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
+static struct sys_reg_desc sys_insn_descs[] = {
+ { SYS_DESC(SYS_DC_ISW), access_dcsw },
+ { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CSW), access_dcsw },
+ { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CISW), access_dcsw },
+ { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
+};
+
static const struct sys_reg_desc *first_idreg;
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
@@ -2737,8 +2750,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
} else {
u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
- u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
- u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+ u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
@@ -3159,7 +3171,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *global,
@@ -3326,7 +3339,9 @@ static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @params: &struct sys_reg_params
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
@@ -3384,12 +3399,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
-static bool is_imp_def_sys_reg(struct sys_reg_params *params)
-{
- // See ARM DDI 0487E.a, section D12.3.2
- return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
-}
-
/**
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
* @vcpu: The VCPU pointer
@@ -3398,26 +3407,106 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
* Return: true if the system register access was successful, false otherwise.
*/
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
- struct sys_reg_params *params)
+ struct sys_reg_params *params)
{
const struct sys_reg_desc *r;
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
-
if (likely(r)) {
perform_access(vcpu, params, r);
return true;
}
- if (is_imp_def_sys_reg(params)) {
- kvm_inject_undefined(vcpu);
+ print_sys_reg_msg(params,
+ "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
+ kvm_inject_undefined(vcpu);
+
+ return false;
+}
+
+static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+ u8 *iter;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ iter = &kvm->arch.idreg_debugfs_iter;
+ if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
+ *iter == (u8)~0) {
+ *iter = *pos;
+ if (*iter >= KVM_ARM_ID_REG_NUM)
+ iter = NULL;
} else {
- print_sys_reg_msg(params,
- "Unsupported guest sys_reg access at: %lx [%08lx]\n",
- *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
- kvm_inject_undefined(vcpu);
+ iter = ERR_PTR(-EBUSY);
}
- return false;
+
+ mutex_unlock(&kvm->arch.config_lock);
+
+ return iter;
+}
+
+static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+
+ (*pos)++;
+
+ if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
+ kvm->arch.idreg_debugfs_iter++;
+
+ return &kvm->arch.idreg_debugfs_iter;
+ }
+
+ return NULL;
+}
+
+static void idregs_debug_stop(struct seq_file *s, void *v)
+{
+ struct kvm *kvm = s->private;
+
+ if (IS_ERR(v))
+ return;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
+static int idregs_debug_show(struct seq_file *s, void *v)
+{
+ struct kvm *kvm = s->private;
+ const struct sys_reg_desc *desc;
+
+ desc = first_idreg + kvm->arch.idreg_debugfs_iter;
+
+ if (!desc->name)
+ return 0;
+
+ seq_printf(s, "%20s:\t%016llx\n",
+ desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));
+
+ return 0;
+}
+
+static const struct seq_operations idregs_debug_sops = {
+ .start = idregs_debug_start,
+ .next = idregs_debug_next,
+ .stop = idregs_debug_stop,
+ .show = idregs_debug_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(idregs_debug);
+
+void kvm_sys_regs_create_debugfs(struct kvm *kvm)
+{
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
+ &idregs_debug_fops);
}
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
@@ -3467,28 +3556,39 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
}
/**
- * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
+ * trap on a guest execution
* @vcpu: The VCPU pointer
*/
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
+ const struct sys_reg_desc *desc = NULL;
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int sr_idx;
trace_kvm_handle_sys_reg(esr);
- if (__check_nv_sr_forward(vcpu))
+ if (triage_sysreg_trap(vcpu, &sr_idx))
return 1;
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
- if (!emulate_sys_reg(vcpu, &params))
- return 1;
+ /* System registers have Op0=={2,3}, as per DDI 0487J.a C5.1.2 */
+ if (params.Op0 == 2 || params.Op0 == 3)
+ desc = &sys_reg_descs[sr_idx];
+ else
+ desc = &sys_insn_descs[sr_idx];
+
+ perform_access(vcpu, &params, desc);
- if (!params.is_write)
+ /* Read from system register? */
+ if (!params.is_write &&
+ (params.Op0 == 2 || params.Op0 == 3))
vcpu_set_reg(vcpu, Rt, params.regval);
+
return 1;
}
@@ -3930,11 +4030,84 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
return 0;
}
+void kvm_init_sysreg(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ /*
+ * In the absence of FGT, we cannot independently trap TLBI
+ * Range instructions. This isn't great, but trapping all
+ * TLBIs would be far worse. Live with it...
+ */
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ vcpu->arch.hcr_el2 |= HCR_TTLBOS;
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+ }
+
+ if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
+ goto out;
+
+ kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
+ HFGxTR_EL2_nMAIR2_EL1 |
+ HFGxTR_EL2_nS2POR_EL1 |
+ HFGxTR_EL2_nPOR_EL1 |
+ HFGxTR_EL2_nPOR_EL0 |
+ HFGxTR_EL2_nACCDATA_EL1 |
+ HFGxTR_EL2_nSMPRI_EL1_MASK |
+ HFGxTR_EL2_nTPIDR2_EL0_MASK);
+
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
+ HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS |
+ HFGITR_EL2_TLBIRVAE1OS |
+ HFGITR_EL2_TLBIVAALE1OS |
+ HFGITR_EL2_TLBIVALE1OS |
+ HFGITR_EL2_TLBIVAAE1OS |
+ HFGITR_EL2_TLBIASIDE1OS |
+ HFGITR_EL2_TLBIVAE1OS |
+ HFGITR_EL2_TLBIVMALLE1OS);
+
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
+ HFGITR_EL2_TLBIRVALE1 |
+ HFGITR_EL2_TLBIRVAAE1 |
+ HFGITR_EL2_TLBIRVAE1 |
+ HFGITR_EL2_TLBIRVAALE1IS|
+ HFGITR_EL2_TLBIRVALE1IS |
+ HFGITR_EL2_TLBIRVAAE1IS |
+ HFGITR_EL2_TLBIRVAE1IS |
+ HFGITR_EL2_TLBIRVAALE1OS|
+ HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS |
+ HFGITR_EL2_TLBIRVAE1OS);
+
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
+ kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
+ HFGxTR_EL2_nPIR_EL1);
+
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
+ kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
+ HAFGRTR_EL2_RES1);
+
+ set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
+out:
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
int __init kvm_sys_reg_table_init(void)
{
struct sys_reg_params params;
bool valid = true;
unsigned int i;
+ int ret = 0;
/* Make sure tables are unique and in order. */
valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
@@ -3943,6 +4116,7 @@ int __init kvm_sys_reg_table_init(void)
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
+ valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
if (!valid)
return -EINVAL;
@@ -3957,8 +4131,13 @@ int __init kvm_sys_reg_table_init(void)
if (!first_idreg)
return -EINVAL;
- if (kvm_get_mode() == KVM_MODE_NV)
- return populate_nv_trap_config();
+ ret = populate_nv_trap_config();
- return 0;
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
+ ret = populate_sysreg_config(sys_reg_descs + i, i);
+
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
+ ret = populate_sysreg_config(sys_insn_descs + i, i);
+
+ return ret;
}
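
A note on the debugfs plumbing added above: DEFINE_SEQ_ATTRIBUTE(idregs_debug) is what produces the idregs_debug_fops handed to debugfs_create_file(). From memory (treat this as a sketch; see linux/seq_file.h for the authoritative macro), it expands to roughly:

static int idregs_debug_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &idregs_debug_sops);

	if (!ret && inode->i_private) {
		struct seq_file *seq_f = file->private_data;

		/* debugfs stored the kvm pointer in i_private */
		seq_f->private = inode->i_private;
	}
	return ret;
}

static const struct file_operations idregs_debug_fops = {
	.owner	 = THIS_MODULE,
	.open	 = idregs_debug_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

Reading the resulting "idregs" file then dumps one sanitised ID register per line for that VM, via idregs_debug_show().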
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index c65c129b3500..997eea21ba2a 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -233,6 +233,8 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num);
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
+
#define AA32(_x) .aarch32_map = AA32_##_x
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index 85606a531dc3..389025ce7749 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -149,7 +149,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
if (v3)
- seq_printf(s, "nr_lpis:\t%d\n", dist->lpi_list_count);
+ seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
seq_printf(s, "enabled:\t%d\n", dist->enabled);
seq_printf(s, "\n");
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index e949e1d0fd9f..f20941f83a07 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -53,9 +53,9 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- INIT_LIST_HEAD(&dist->lpi_list_head);
INIT_LIST_HEAD(&dist->lpi_translation_cache);
raw_spin_lock_init(&dist->lpi_list_lock);
+ xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}
/* CREATION */
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
vgic_lpi_translation_cache_init(kvm);
/*
- * If we have GICv4.1 enabled, unconditionnaly request enable the
+ * If we have GICv4.1 enabled, unconditionally request to enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
* enable it if we present a virtual ITS to the guest.
*/
@@ -366,6 +366,8 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (vgic_supports_direct_msis(kvm))
vgic_v4_teardown(kvm);
+
+ xa_destroy(&dist->lpi_xa);
}
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -445,13 +447,15 @@ int vgic_lazy_init(struct kvm *kvm)
/* RESOURCE MAPPING */
/**
+ * kvm_vgic_map_resources - map the MMIO regions
+ * @kvm: kvm struct pointer
+ *
* Map the MMIO regions depending on the VGIC model exposed to the guest;
* called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
* v2 calls vgic_init() if not already done.
* v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
- * @kvm: kvm struct pointer
*/
int kvm_vgic_map_resources(struct kvm *kvm)
{
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 28a93074eca1..e85a495ada9c 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -52,7 +52,12 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (!irq)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&irq->lpi_list);
+ ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+ if (ret) {
+ kfree(irq);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&irq->ap_list);
raw_spin_lock_init(&irq->irq_lock);
@@ -68,30 +73,30 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
* There could be a race with another vgic_add_lpi(), so we need to
* check that we don't add a second list entry with the same LPI.
*/
- list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
- if (oldirq->intid != intid)
- continue;
-
+ oldirq = xa_load(&dist->lpi_xa, intid);
+ if (vgic_try_get_irq_kref(oldirq)) {
/* Someone was faster with adding this LPI, lets use that. */
kfree(irq);
irq = oldirq;
- /*
- * This increases the refcount, the caller is expected to
- * call vgic_put_irq() on the returned pointer once it's
- * finished with the IRQ.
- */
- vgic_get_irq_kref(irq);
+ goto out_unlock;
+ }
+ ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+ if (ret) {
+ xa_release(&dist->lpi_xa, intid);
+ kfree(irq);
goto out_unlock;
}
- list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
- dist->lpi_list_count++;
+ atomic_inc(&dist->lpi_count);
out_unlock:
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
/*
* We "cache" the configuration table entries in our struct vgic_irq's.
* However we only have those structs for mapped IRQs, so we read in
@@ -158,7 +163,7 @@ struct vgic_translation_cache_entry {
* @cte_esz: collection table entry size
* @dte_esz: device table entry size
* @ite_esz: interrupt translation table entry size
- * @save tables: save the ITS tables into guest RAM
+ * @save_tables: save the ITS tables into guest RAM
* @restore_tables: restore the ITS internal structs from tables
* stored in guest RAM
* @commit: initialize the registers which expose the ABI settings,
@@ -311,6 +316,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
return 0;
}
+#define GIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
+
/*
* Create a snapshot of the current LPIs targeting @vcpu, so that we can
* enumerate those LPIs without holding any lock.
@@ -319,6 +326,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
struct vgic_dist *dist = &kvm->arch.vgic;
+ XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
struct vgic_irq *irq;
unsigned long flags;
u32 *intids;
@@ -331,13 +339,15 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
* command). If coming from another path (such as enabling LPIs),
* we must be careful not to overrun the array.
*/
- irq_count = READ_ONCE(dist->lpi_list_count);
+ irq_count = atomic_read(&dist->lpi_count);
intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
if (!intids)
return -ENOMEM;
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ rcu_read_lock();
+
+ xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
if (i == irq_count)
break;
/* We don't need to "get" the IRQ, as we hold the list lock. */
@@ -345,6 +355,8 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
+
+ rcu_read_unlock();
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
*intid_ptr = intids;
@@ -595,8 +607,8 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
irq = __vgic_its_check_cache(dist, db, devid, eventid);
- if (irq)
- vgic_get_irq_kref(irq);
+ if (!vgic_try_get_irq_kref(irq))
+ irq = NULL;
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
@@ -640,8 +652,13 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
* was in the cache, and increment it on the new interrupt.
*/
if (cte->irq)
- __vgic_put_lpi_locked(kvm, cte->irq);
+ vgic_put_irq(kvm, cte->irq);
+ /*
+ * The irq refcount is guaranteed to be nonzero while holding the
+ * its_lock, as the ITE (and the reference it holds) cannot be freed.
+ */
+ lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);
cte->db = db;
@@ -672,7 +689,7 @@ void vgic_its_invalidate_cache(struct kvm *kvm)
if (!cte->irq)
break;
- __vgic_put_lpi_locked(kvm, cte->irq);
+ vgic_put_irq(kvm, cte->irq);
cte->irq = NULL;
}
@@ -1345,8 +1362,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
}
/**
- * vgic_its_invall - invalidate all LPIs targetting a given vcpu
- * @vcpu: the vcpu for which the RD is targetted by an invalidation
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
*
* Contrary to the INVALL command, this targets a RD instead of a
* collection, and we don't need to hold the its_lock, since no ITS is
@@ -2144,7 +2161,7 @@ static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
}
/**
- * entry_fn_t - Callback called on a table entry restore path
+ * typedef entry_fn_t - Callback called on a table entry restore path
* @its: its handle
* @id: id of the entry
* @entry: pointer to the entry
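
The vgic_add_lpi() rework above follows a reserve-then-publish pattern: the xarray slot is preallocated with xa_reserve_irq() where sleeping is allowed, and the entry is only published under the lock after re-checking for a racing insert. A condensed sketch of the pattern (illustrative only; the in-tree code serialises on dist->lpi_list_lock and keeps fuller error handling):

static struct vgic_irq *publish_lpi(struct xarray *xa, u32 intid,
				    struct vgic_irq *irq)
{
	struct vgic_irq *old;

	if (xa_reserve_irq(xa, intid, GFP_KERNEL_ACCOUNT))
		return NULL;			/* preallocation failed */

	xa_lock_irq(xa);
	old = xa_load(xa, intid);
	if (vgic_try_get_irq_kref(old)) {
		/* Lost the race: drop ours, reuse the winner's entry. */
		xa_unlock_irq(xa);
		xa_release(xa, intid);
		kfree(irq);
		return old;
	}
	__xa_store(xa, intid, irq, 0);
	xa_unlock_irq(xa);

	return irq;
}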
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9465d3706ab9..4ea3340786b9 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -380,6 +380,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
struct vgic_irq *irq;
gpa_t last_ptr = ~(gpa_t)0;
bool vlpi_avail = false;
+ unsigned long index;
int ret = 0;
u8 val;
@@ -396,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
vlpi_avail = true;
}
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ xa_for_each(&dist->lpi_xa, index, irq) {
int byte_offset, bit_nr;
struct kvm_vcpu *vcpu;
gpa_t pendbase, ptr;
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index db2a95762b1b..4ec93587c8cd 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -30,7 +30,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
@@ -54,32 +55,22 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
*/
/*
- * Iterate over the VM's list of mapped LPIs to find the one with a
- * matching interrupt ID and return a reference to the IRQ structure.
+ * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
+ * structure. The caller is expected to call vgic_put_irq() later once it's
+ * finished with the IRQ.
*/
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
- if (irq->intid != intid)
- continue;
+ rcu_read_lock();
- /*
- * This increases the refcount, the caller is expected to
- * call vgic_put_irq() later once it's finished with the IRQ.
- */
- vgic_get_irq_kref(irq);
- goto out_unlock;
- }
- irq = NULL;
+ irq = xa_load(&dist->lpi_xa, intid);
+ if (!vgic_try_get_irq_kref(irq))
+ irq = NULL;
-out_unlock:
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ rcu_read_unlock();
return irq;
}
@@ -120,22 +111,6 @@ static void vgic_irq_release(struct kref *ref)
{
}
-/*
- * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
- */
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- if (!kref_put(&irq->refcount, vgic_irq_release))
- return;
-
- list_del(&irq->lpi_list);
- dist->lpi_list_count--;
-
- kfree(irq);
-}
-
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -144,9 +119,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- __vgic_put_lpi_locked(kvm, irq);
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ if (!kref_put(&irq->refcount, vgic_irq_release))
+ return;
+
+ xa_lock_irqsave(&dist->lpi_xa, flags);
+ __xa_erase(&dist->lpi_xa, irq->intid);
+ xa_unlock_irqrestore(&dist->lpi_xa, flags);
+
+ atomic_dec(&dist->lpi_count);
+ kfree_rcu(irq, rcu);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
@@ -203,7 +184,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
}
/**
- * kvm_vgic_target_oracle - compute the target vcpu for an irq
+ * vgic_target_oracle - compute the target vcpu for an irq
*
* @irq: The irq to route. Must be already locked.
*
@@ -404,7 +385,8 @@ retry:
/*
* Grab a reference to the irq to reflect the fact that it is
- * now in the ap_list.
+ * now in the ap_list. This is safe as the caller must already hold a
+ * reference on the irq.
*/
vgic_get_irq_kref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 8d134569d0a1..0c2b82de8fa3 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -180,7 +180,6 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
@@ -220,12 +219,20 @@ void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
+ if (!irq)
+ return false;
+
if (irq->intid < VGIC_MIN_LPI)
- return;
+ return true;
- kref_get(&irq->refcount);
+ return kref_get_unless_zero(&irq->refcount);
+}
+
+static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+{
+ WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
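
Taken together, xa_load() under rcu_read_lock() plus kref_get_unless_zero() on the lookup side, and __xa_erase() plus kfree_rcu() on the release side, form the usual RCU-protected refcount lifecycle. A sketch of why a reader can never revive a dying LPI:

/*
 * reader					last vgic_put_irq()
 * ------					-------------------
 * rcu_read_lock();
 * irq = xa_load(&lpi_xa, intid);		kref_put() hits zero
 *						__xa_erase(&lpi_xa, intid);
 * vgic_try_get_irq_kref() sees a zero
 * refcount and fails, so the reader
 * returns NULL;				kfree_rcu(irq, rcu);
 * rcu_read_unlock();				(freed after a grace period,
 *						 so the reader's access to
 *						 irq->refcount was safe)
 */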
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index dbd1bc95967d..60454256945b 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,6 +3,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o fixmap.o
+obj-$(CONFIG_ARM64_CONTPTE) += contpte.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
new file mode 100644
index 000000000000..1b64b4c3f8bf
--- /dev/null
+++ b/arch/arm64/mm/contpte.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include <linux/mm.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <asm/tlbflush.h>
+
+static inline bool mm_is_user(struct mm_struct *mm)
+{
+ /*
+ * Don't attempt to apply the contig bit to kernel mappings, because
+ * dynamically adding/removing the contig bit can cause page faults.
+ * These racing faults are ok for user space, since they get serialized
+ * on the PTL. But kernel mappings can't tolerate faults.
+ */
+ if (unlikely(mm_is_efi(mm)))
+ return false;
+ return mm != &init_mm;
+}
+
+static inline pte_t *contpte_align_down(pte_t *ptep)
+{
+ return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
+}
+
+static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ /*
+ * Unfold any partially covered contpte block at the beginning and end
+ * of the range.
+ */
+
+ if (ptep != contpte_align_down(ptep) || nr < CONT_PTES)
+ contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+
+ if (ptep + nr != contpte_align_down(ptep + nr)) {
+ unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
+ pte_t *last_ptep = ptep + nr - 1;
+
+ contpte_try_unfold(mm, last_addr, last_ptep,
+ __ptep_get(last_ptep));
+ }
+}
+
+static void contpte_convert(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+ unsigned long start_addr;
+ pte_t *start_ptep;
+ int i;
+
+ start_ptep = ptep = contpte_align_down(ptep);
+ start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+ pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte));
+
+ for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
+ pte_t ptent = __ptep_get_and_clear(mm, addr, ptep);
+
+ if (pte_dirty(ptent))
+ pte = pte_mkdirty(pte);
+
+ if (pte_young(ptent))
+ pte = pte_mkyoung(pte);
+ }
+
+ __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+
+ __set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
+}
+
+void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * We have already checked that the virtual and physical addresses are
+ * correctly aligned for a contpte mapping in contpte_try_fold() so the
+ * remaining checks are to ensure that the contpte range is fully
+ * covered by a single folio, and ensure that all the ptes are valid
+ * with contiguous PFNs and matching prots. We ignore the state of the
+ * access and dirty bits for the purpose of deciding if it's a contiguous
+ * range; the folding process will generate a single contpte entry which
+ * has a single access and dirty bit. Those 2 bits are the logical OR of
+ * their respective bits in the constituent pte entries. In order to
+ * ensure the contpte range is covered by a single folio, we must
+ * recover the folio from the pfn, but special mappings don't have a
+ * folio backing them. Fortunately contpte_try_fold() already checked
+ * that the pte is not special - we never try to fold special mappings.
+ * Note we can't use vm_normal_page() for this since we don't have the
+ * vma.
+ */
+
+ unsigned long folio_start, folio_end;
+ unsigned long cont_start, cont_end;
+ pte_t expected_pte, subpte;
+ struct folio *folio;
+ struct page *page;
+ unsigned long pfn;
+ pte_t *orig_ptep;
+ pgprot_t prot;
+
+ int i;
+
+ if (!mm_is_user(mm))
+ return;
+
+ page = pte_page(pte);
+ folio = page_folio(page);
+ folio_start = addr - (page - &folio->page) * PAGE_SIZE;
+ folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
+ cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+ cont_end = cont_start + CONT_PTE_SIZE;
+
+ if (folio_start > cont_start || folio_end < cont_end)
+ return;
+
+ pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES);
+ prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
+ expected_pte = pfn_pte(pfn, prot);
+ orig_ptep = ptep;
+ ptep = contpte_align_down(ptep);
+
+ for (i = 0; i < CONT_PTES; i++) {
+ subpte = pte_mkold(pte_mkclean(__ptep_get(ptep)));
+ if (!pte_same(subpte, expected_pte))
+ return;
+ expected_pte = pte_advance_pfn(expected_pte, 1);
+ ptep++;
+ }
+
+ pte = pte_mkcont(pte);
+ contpte_convert(mm, addr, orig_ptep, pte);
+}
+EXPORT_SYMBOL_GPL(__contpte_try_fold);
+
+void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * We have already checked that the ptes are contiguous in
+ * contpte_try_unfold(), so just check that the mm is user space.
+ */
+ if (!mm_is_user(mm))
+ return;
+
+ pte = pte_mknoncont(pte);
+ contpte_convert(mm, addr, ptep, pte);
+}
+EXPORT_SYMBOL_GPL(__contpte_try_unfold);
+
+pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
+{
+ /*
+ * Gather access/dirty bits, which may be populated in any of the ptes
+ * of the contig range. We are guaranteed to be holding the PTL, so any
+ * contiguous range cannot be unfolded or otherwise modified under our
+ * feet.
+ */
+
+ pte_t pte;
+ int i;
+
+ ptep = contpte_align_down(ptep);
+
+ for (i = 0; i < CONT_PTES; i++, ptep++) {
+ pte = __ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+
+ return orig_pte;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_get);
+
+pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
+{
+ /*
+ * The ptep_get_lockless() API requires us to read and return *orig_ptep
+ * so that it is self-consistent, without the PTL held, so we may be
+ * racing with other threads modifying the pte. Usually a READ_ONCE()
+ * would suffice, but for the contpte case, we also need to gather the
+ * access and dirty bits from across all ptes in the contiguous block,
+ * and we can't read all of those neighbouring ptes atomically, so any
+ * contiguous range may be unfolded/modified/refolded under our feet.
+ * Therefore we ensure we read a _consistent_ contpte range by checking
+ * that all ptes in the range are valid and have CONT_PTE set, that all
+ * pfns are contiguous and that all pgprots are the same (ignoring
+ * access/dirty). If we find a pte that is not consistent, then we must
+ * be racing with an update so start again. If the target pte does not
+ * have CONT_PTE set then that is considered consistent on its own
+ * because it is not part of a contpte range.
+ */
+
+ pgprot_t orig_prot;
+ unsigned long pfn;
+ pte_t orig_pte;
+ pgprot_t prot;
+ pte_t *ptep;
+ pte_t pte;
+ int i;
+
+retry:
+ orig_pte = __ptep_get(orig_ptep);
+
+ if (!pte_valid_cont(orig_pte))
+ return orig_pte;
+
+ orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte)));
+ ptep = contpte_align_down(orig_ptep);
+ pfn = pte_pfn(orig_pte) - (orig_ptep - ptep);
+
+ for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
+ pte = __ptep_get(ptep);
+ prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
+
+ if (!pte_valid_cont(pte) ||
+ pte_pfn(pte) != pfn ||
+ pgprot_val(prot) != pgprot_val(orig_prot))
+ goto retry;
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+
+ return orig_pte;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless);
+
+void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ unsigned long next;
+ unsigned long end;
+ unsigned long pfn;
+ pgprot_t prot;
+
+ /*
+ * The set_ptes() spec guarantees that when nr > 1, the initial state of
+ * all ptes is not-present. Therefore we never need to unfold or
+ * otherwise invalidate a range before we set the new ptes.
+ * contpte_set_ptes() should never be called for nr < 2.
+ */
+ VM_WARN_ON(nr == 1);
+
+ if (!mm_is_user(mm))
+ return __set_ptes(mm, addr, ptep, pte, nr);
+
+ end = addr + (nr << PAGE_SHIFT);
+ pfn = pte_pfn(pte);
+ prot = pte_pgprot(pte);
+
+ do {
+ next = pte_cont_addr_end(addr, end);
+ nr = (next - addr) >> PAGE_SHIFT;
+ pte = pfn_pte(pfn, prot);
+
+ if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0)
+ pte = pte_mkcont(pte);
+ else
+ pte = pte_mknoncont(pte);
+
+ __set_ptes(mm, addr, ptep, pte, nr);
+
+ addr = next;
+ ptep += nr;
+ pfn += nr;
+
+ } while (addr != end);
+}
+EXPORT_SYMBOL_GPL(contpte_set_ptes);
+
+void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full)
+{
+ contpte_try_unfold_partial(mm, addr, ptep, nr);
+ __clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL_GPL(contpte_clear_full_ptes);
+
+pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, int full)
+{
+ contpte_try_unfold_partial(mm, addr, ptep, nr);
+ return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);
+
+int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ /*
+ * ptep_clear_flush_young() technically requires us to clear the access
+ * flag for a _single_ pte. However, the core-mm code actually tracks
+ * access/dirty per folio, not per page. And since we only create a
+ * contig range when the range is covered by a single folio, we can get
+ * away with clearing young for the whole contig range here, so we avoid
+ * having to unfold.
+ */
+
+ int young = 0;
+ int i;
+
+ ptep = contpte_align_down(ptep);
+ addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+
+ for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
+ young |= __ptep_test_and_clear_young(vma, addr, ptep);
+
+ return young;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);
+
+int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ int young;
+
+ young = contpte_ptep_test_and_clear_young(vma, addr, ptep);
+
+ if (young) {
+ /*
+ * See comment in __ptep_clear_flush_young(); same rationale for
+ * eliding the trailing DSB applies here.
+ */
+ addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+ __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
+ PAGE_SIZE, true, 3);
+ }
+
+ return young;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);
+
+void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ /*
+ * If wrprotecting an entire contig range, we can avoid unfolding. Just
+ * set wrprotect and wait for the later mmu_gather flush to invalidate
+ * the tlb. Until the flush, the page may or may not be wrprotected.
+ * After the flush, it is guaranteed wrprotected. If it's a partial
+ * range though, we must unfold, because we can't have a case where
+ * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this
+ * would cause it to continue to be unpredictable after the flush.
+ */
+
+ contpte_try_unfold_partial(mm, addr, ptep, nr);
+ __wrprotect_ptes(mm, addr, ptep, nr);
+}
+EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
+
+int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ unsigned long start_addr;
+ pte_t orig_pte;
+ int i;
+
+ /*
+ * Gather the access/dirty bits for the contiguous range. If nothing has
+ * changed, it's a no-op.
+ */
+ orig_pte = pte_mknoncont(ptep_get(ptep));
+ if (pte_val(orig_pte) == pte_val(entry))
+ return 0;
+
+ /*
+ * We can fix up access/dirty bits without having to unfold the contig
+ * range. But if the write bit is changing, we must unfold.
+ */
+ if (pte_write(orig_pte) == pte_write(entry)) {
+ /*
+ * For HW access management, we technically only need to update
+ * the flag on a single pte in the range. But for SW access
+ * management, we need to update all the ptes to prevent extra
+ * faults. Avoid per-page tlb flush in __ptep_set_access_flags()
+ * and instead flush the whole range at the end.
+ */
+ ptep = contpte_align_down(ptep);
+ start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+
+ for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
+ __ptep_set_access_flags(vma, addr, ptep, entry, 0);
+
+ if (dirty)
+ __flush_tlb_range(vma, start_addr, addr,
+ PAGE_SIZE, true, 3);
+ } else {
+ __contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte);
+ __ptep_set_access_flags(vma, addr, ptep, entry, dirty);
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags);
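
To make the folding geometry concrete: assuming a 4K granule, where CONT_PTES is 16 and CONT_PTE_SIZE is 64K (other granules use different contiguous block sizes), the eligibility test in contpte_set_ptes() works out as:

/*
 * Worked example (4K granule assumed: ~CONT_PTE_MASK == 0xffff):
 *
 *   addr              = 0xffff8a450000   low 16 bits clear
 *   next              = 0xffff8a460000   low 16 bits clear
 *   pfn << PAGE_SHIFT = 0x0423410000     low 16 bits clear
 *
 * (addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK == 0, so this
 * 16-pte block is set with pte_mkcont(); if any of the three values
 * were not 64K aligned, the block would be mapped as ordinary ptes.
 */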
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 55f6455a8284..8251e2fea9c7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -191,7 +191,7 @@ static void show_pte(unsigned long addr)
if (!ptep)
break;
- pte = READ_ONCE(*ptep);
+ pte = __ptep_get(ptep);
pr_cont(", pte=%016llx", pte_val(pte));
pte_unmap(ptep);
} while(0);
@@ -205,16 +205,16 @@ static void show_pte(unsigned long addr)
*
* It needs to cope with hardware update of the accessed/dirty state by other
* agents in the system and can safely skip the __sync_icache_dcache() call as,
- * like set_pte_at(), the PTE is never changed from no-exec to exec here.
+ * like __set_ptes(), the PTE is never changed from no-exec to exec here.
*
* Returns whether or not the PTE actually changed.
*/
-int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty)
+int __ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
{
pteval_t old_pteval, pteval;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = __ptep_get(ptep);
if (pte_same(pte, entry))
return 0;
@@ -257,16 +257,14 @@ static bool is_el1_data_abort(unsigned long esr)
static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
- unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;
-
if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
- if (fsc_type == ESR_ELx_FSC_PERM)
+ if (esr_fsc_is_permission_fault(esr))
return true;
if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
- return fsc_type == ESR_ELx_FSC_FAULT &&
+ return esr_fsc_is_translation_fault(esr) &&
(regs->pstate & PSR_PAN_BIT);
return false;
@@ -279,8 +277,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
unsigned long flags;
u64 par, dfsc;
- if (!is_el1_data_abort(esr) ||
- (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
+ if (!is_el1_data_abort(esr) || !esr_fsc_is_translation_fault(esr))
return false;
local_irq_save(flags);
@@ -301,7 +298,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
* treat the translation fault as spurious.
*/
dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
- return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
+ return !esr_fsc_is_translation_fault(dfsc);
}
static void die_kernel_fault(const char *msg, unsigned long addr,
@@ -368,11 +365,6 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
return false;
}
-static bool is_translation_fault(unsigned long esr)
-{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
-}
-
static void __do_kernel_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
@@ -405,7 +397,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
- if (is_translation_fault(esr) &&
+ if (esr_fsc_is_translation_fault(esr) &&
kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
return;
@@ -782,18 +774,18 @@ static const struct fault_info fault_info[] = {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 8" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 0 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 12" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 0 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" },
{ do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 18" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 19" },
+ { do_sea, SIGKILL, SI_KERNEL, "level -1 (translation table walk)" },
{ do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" },
{ do_sea, SIGKILL, SI_KERNEL, "level 1 (translation table walk)" },
{ do_sea, SIGKILL, SI_KERNEL, "level 2 (translation table walk)" },
@@ -801,7 +793,7 @@ static const struct fault_info fault_info[] = {
{ do_sea, SIGBUS, BUS_OBJERR, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
{ do_bad, SIGKILL, SI_KERNEL, "unknown 25" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 26" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 27" },
+ { do_sea, SIGKILL, SI_KERNEL, "level -1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
{ do_sea, SIGKILL, SI_KERNEL, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
{ do_sea, SIGKILL, SI_KERNEL, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
{ do_sea, SIGKILL, SI_KERNEL, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
@@ -815,9 +807,9 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 38" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 39" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 40" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 41" },
+ { do_bad, SIGKILL, SI_KERNEL, "level -1 address size fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 42" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 43" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level -1 translation fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 44" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 45" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 46" },
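
For reference, the predicates the fault path now relies on must cover the table above: translation faults are FSC 0x04-0x07 for walk levels 0-3, plus 0x2b for the LPA2 level -1 entry added here. A hedged sketch (the in-tree helpers live in asm/esr.h; the constants below are written out literally for illustration):

static inline bool example_fsc_is_translation_fault(unsigned long esr)
{
	unsigned long fsc = esr & 0x3fUL;	/* ESR_ELx.FSC field */

	return (fsc >= 0x04 && fsc <= 0x07) || fsc == 0x2b;
}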
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
index c0a3301203bd..de1e09d986ad 100644
--- a/arch/arm64/mm/fixmap.c
+++ b/arch/arm64/mm/fixmap.c
@@ -16,6 +16,9 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+/* ensure that the fixmap region does not grow down into the PCI I/O region */
+static_assert(FIXADDR_TOT_START > PCI_IO_END);
+
#define NR_BM_PTE_TABLES \
SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
@@ -101,7 +104,7 @@ void __init early_fixmap_init(void)
unsigned long end = FIXADDR_TOP;
pgd_t *pgdp = pgd_offset_k(addr);
- p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t *p4dp = p4d_offset_kimg(pgdp, addr);
early_fixmap_init_pud(p4dp, addr, end);
}
@@ -121,9 +124,9 @@ void __set_fixmap(enum fixed_addresses idx,
ptep = fixmap_pte(addr);
if (pgprot_val(flags)) {
- set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+ __set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
} else {
- pte_clear(&init_mm, addr, ptep);
+ __pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
@@ -167,37 +170,3 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
-
-/*
- * Copy the fixmap region into a new pgdir.
- */
-void __init fixmap_copy(pgd_t *pgdir)
-{
- if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
- READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- pgd_t *bm_pgdp;
- p4d_t *bm_p4dp;
- pud_t *bm_pudp;
- /*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
- bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
- bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
- pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
- pud_clear_fixmap();
- } else {
- BUG();
- }
-}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..0f0e10bb0a95 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
else
order = CONT_PMD_SHIFT - PAGE_SHIFT;
- /*
- * HugeTLB CMA reservation is required for gigantic
- * huge pages which could not be allocated via the
- * page allocator. Just warn if there is any change
- * breaking this assumption.
- */
- WARN_ON(order <= MAX_PAGE_ORDER);
hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
@@ -152,14 +145,14 @@ pte_t huge_ptep_get(pte_t *ptep)
{
int ncontig, i;
size_t pgsize;
- pte_t orig_pte = ptep_get(ptep);
+ pte_t orig_pte = __ptep_get(ptep);
if (!pte_present(orig_pte) || !pte_cont(orig_pte))
return orig_pte;
ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
for (i = 0; i < ncontig; i++, ptep++) {
- pte_t pte = ptep_get(ptep);
+ pte_t pte = __ptep_get(ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
@@ -184,11 +177,11 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = ptep_get(ptep);
+ pte_t orig_pte = __ptep_get(ptep);
unsigned long i;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
- pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
/*
* If HW_AFDBM is enabled, then the HW could turn on
@@ -236,7 +229,7 @@ static void clear_flush(struct mm_struct *mm,
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
- ptep_clear(mm, addr, ptep);
+ __ptep_get_and_clear(mm, addr, ptep);
flush_tlb_range(&vma, saddr, addr);
}
@@ -254,12 +247,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
if (!pte_present(pte)) {
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
- set_pte_at(mm, addr, ptep, pte);
+ __set_ptes(mm, addr, ptep, pte, 1);
return;
}
if (!pte_cont(pte)) {
- set_pte_at(mm, addr, ptep, pte);
+ __set_ptes(mm, addr, ptep, pte, 1);
return;
}
@@ -270,7 +263,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
clear_flush(mm, addr, ptep, pgsize, ncontig);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -400,7 +393,7 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
ncontig = num_contig_ptes(sz, &pgsize);
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
- pte_clear(mm, addr, ptep);
+ __pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -408,10 +401,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = ptep_get(ptep);
+ pte_t orig_pte = __ptep_get(ptep);
if (!pte_cont(orig_pte))
- return ptep_get_and_clear(mm, addr, ptep);
+ return __ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
@@ -431,11 +424,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
- if (pte_write(pte) != pte_write(ptep_get(ptep)))
+ if (pte_write(pte) != pte_write(__ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
- pte_t orig_pte = ptep_get(ptep + i);
+ pte_t orig_pte = __ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
@@ -459,7 +452,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t orig_pte;
if (!pte_cont(pte))
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
@@ -478,7 +471,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
hugeprot = pte_pgprot(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
return 1;
}
@@ -492,8 +485,8 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
size_t pgsize;
pte_t pte;
- if (!pte_cont(READ_ONCE(*ptep))) {
- ptep_set_wrprotect(mm, addr, ptep);
+ if (!pte_cont(__ptep_get(ptep))) {
+ __ptep_set_wrprotect(mm, addr, ptep);
return;
}
@@ -507,7 +500,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
pfn = pte_pfn(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -517,7 +510,7 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
size_t pgsize;
int ncontig;
- if (!pte_cont(READ_ONCE(*ptep)))
+ if (!pte_cont(__ptep_get(ptep)))
return ptep_clear_flush(vma, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
@@ -550,7 +543,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
* when the permission changes from executable to non-executable
* in cases where cpu is affected with errata #2645198.
*/
- if (pte_user_exec(READ_ONCE(*ptep)))
+ if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
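
As an aside, a minimal standalone sketch (not part of the patch) of the contiguous-entry arithmetic the hugetlbpage.c loops above rely on: each helper walks 'ncontig' adjacent entries per huge page using the new __ptep_*()/__set_ptes() accessors. All constants below are local assumptions matching a 4K granule.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define CONT_PTE_SHIFT	(PAGE_SHIFT + 4)	/* 16 contiguous PTEs -> 64 KiB */
#define CONT_PMD_SHIFT	(PMD_SHIFT + 4)		/* 16 contiguous PMDs -> 32 MiB */

/* How many entries the per-entry loops would touch for each huge page size. */
static int contig_entries(unsigned long size, unsigned long *pgsize)
{
	if (size == 1UL << PUD_SHIFT || size == 1UL << PMD_SHIFT) {
		*pgsize = size;			/* a single block entry */
		return 1;
	}
	if (size == 1UL << CONT_PMD_SHIFT) {
		*pgsize = 1UL << PMD_SHIFT;
		return 1 << (CONT_PMD_SHIFT - PMD_SHIFT);
	}
	*pgsize = 1UL << PAGE_SHIFT;		/* CONT_PTE-sized huge page */
	return 1 << (CONT_PTE_SHIFT - PAGE_SHIFT);
}

int main(void)
{
	unsigned long sizes[] = { 1UL << CONT_PTE_SHIFT, 1UL << PMD_SHIFT,
				  1UL << CONT_PMD_SHIFT, 1UL << PUD_SHIFT };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long pgsize;
		int n = contig_entries(sizes[i], &pgsize);

		printf("%8lu KiB huge page -> %2d entries of %lu KiB\n",
		       sizes[i] >> 10, n, pgsize >> 10);
	}
	return 0;
}
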
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 74c1db8ce271..03efd86dce0a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -100,7 +100,7 @@ static void __init arch_reserve_crashkernel(void)
bool high = false;
int ret;
- if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
@@ -238,7 +238,7 @@ void __init arm64_memblock_init(void)
* physical address of PAGE_OFFSET, we have to *subtract* from it.
*/
if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
- memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+ memstart_addr -= _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52);
/*
* Apply the memory limit if it was set. Since the kernel may be loaded
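
A minimal standalone sketch (not part of the patch) of the offset correction in the hunk above: _PAGE_OFFSET(va) is the start of the linear map for 'va' VA bits, so a kernel built for 52-bit VAs but running with only 48 usable bits has to pull memstart_addr back by 2^52 - 2^48; using vabits_actual instead of a hard-coded 48 keeps the expression valid for other reduced VA sizes too. The macro definition below is a local assumption matching that meaning.

#include <stdio.h>
#include <stdint.h>

/* assumed: start of the linear map for a kernel with 'va' VA bits */
#define _PAGE_OFFSET(va)	(-(1ULL << (va)))

int main(void)
{
	unsigned int vabits_actual = 48;	/* runtime VA size on this CPU */
	uint64_t delta = _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52);

	/* 2^52 - 2^48 == 0x000f000000000000 */
	printf("memstart_addr -= 0x%016llx\n", (unsigned long long)delta);
	return 0;
}
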
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4c7ad574b946..b65a29440a0c 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -23,7 +23,7 @@
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);
/*
* The p*d_populate functions call virt_to_phys implicitly so they can't be used
@@ -99,6 +99,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}
+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node,
+ bool early)
+{
+ if (pgd_none(READ_ONCE(*pgdp))) {
+ phys_addr_t p4d_phys = early ?
+ __pa_symbol(kasan_early_shadow_p4d)
+ : kasan_alloc_zeroed_page(node);
+ __pgd_populate(pgdp, p4d_phys, PGD_TYPE_TABLE);
+ }
+
+ return early ? p4d_offset_kimg(pgdp, addr) : p4d_offset(pgdp, addr);
+}
+
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
@@ -112,8 +125,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
if (!early)
memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
- set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
- } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
+ __set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+ } while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@@ -144,12 +157,12 @@ static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
do {
next = p4d_addr_end(addr, end);
kasan_pud_populate(p4dp, addr, next, node, early);
- } while (p4dp++, addr = next, addr != end);
+ } while (p4dp++, addr = next, addr != end && p4d_none(READ_ONCE(*p4dp)));
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -165,19 +178,48 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
} while (pgdp++, addr = next, addr != end);
}
+#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS > 4
+#define SHADOW_ALIGN P4D_SIZE
+#else
+#define SHADOW_ALIGN PUD_SIZE
+#endif
+
+/*
+ * Return whether 'addr' is aligned to the size covered by a root level
+ * descriptor.
+ */
+static bool __init root_level_aligned(u64 addr)
+{
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);
+
+ return (addr % (PAGE_SIZE << shift)) == 0;
+}
+
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
- /*
- * We cannot check the actual value of KASAN_SHADOW_START during build,
- * as it depends on vabits_actual. As a best-effort approach, check
- * potential values calculated based on VA_BITS and VA_BITS_MIN.
- */
- BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), SHADOW_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, SHADOW_ALIGN));
+
+ if (!root_level_aligned(KASAN_SHADOW_START)) {
+ /*
+ * The start address is misaligned, and so the next level table
+ * will be shared with the linear region. This can happen with
+ * 4 or 5 level paging, so install a generic pte_t[] as the
+ * next level. This prevents the kasan_pgd_populate call below
+ * from inserting an entry that refers to the shared KASAN zero
+ * shadow pud_t[]/p4d_t[], which could end up getting corrupted
+ * when the linear region is mapped.
+ */
+ static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss;
+ pgd_t *pgdp = pgd_offset_k(KASAN_SHADOW_START);
+
+ set_pgd(pgdp, __pgd(__pa_symbol(tbl) | PGD_TYPE_TABLE));
+ }
+
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
true);
}
@@ -190,34 +232,74 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
}
/*
- * Copy the current shadow region into a new pgdir.
+ * Return the descriptor index of 'addr' in the root level table
*/
-void __init kasan_copy_shadow(pgd_t *pgdir)
+static int __init root_level_idx(u64 addr)
{
- pgd_t *pgdp, *pgdp_new, *pgdp_end;
+ /*
+ * On 64k pages, the TTBR1 range root tables are extended for 52-bit
+ * virtual addressing, and TTBR1 will simply point to the pgd_t entry
+ * that covers the start of the 48-bit addressable VA space if LVA is
+ * not implemented. This means we need to index the table as usual,
+ * instead of masking off bits based on vabits_actual.
+ */
+ u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
+ : vabits_actual;
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);
- pgdp = pgd_offset_k(KASAN_SHADOW_START);
- pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
- pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
- do {
- set_pgd(pgdp_new, READ_ONCE(*pgdp));
- } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
+ return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
+}
+
+/*
+ * Clone a next level table from swapper_pg_dir into tmp_pg_dir
+ */
+static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
+{
+ int idx = root_level_idx(addr);
+ pgd_t pgd = READ_ONCE(swapper_pg_dir[idx]);
+ pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));
+
+ memcpy(pud, pudp, PAGE_SIZE);
+ tmp_pg_dir[idx] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) |
+ PUD_TYPE_TABLE);
}
-static void __init clear_pgds(unsigned long start,
- unsigned long end)
+/*
+ * Return the descriptor index of 'addr' in the next level table
+ */
+static int __init next_level_idx(u64 addr)
{
- /*
- * Remove references to kasan page tables from
- * swapper_pg_dir. pgd_clear() can't be used
- * here because it's nop on 2,3-level pagetable setups
- */
- for (; start < end; start += PGDIR_SIZE)
- set_pgd(pgd_offset_k(start), __pgd(0));
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);
+
+ return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
+}
+
+/*
+ * Dereference the table descriptor at 'pgd_idx' and clear the entries from
+ * 'start' to 'end' (exclusive) from the table.
+ */
+static void __init clear_next_level(int pgd_idx, int start, int end)
+{
+ pgd_t pgd = READ_ONCE(swapper_pg_dir[pgd_idx]);
+ pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));
+
+ memset(&pudp[start], 0, (end - start) * sizeof(pud_t));
+}
+
+static void __init clear_shadow(u64 start, u64 end)
+{
+ int l = root_level_idx(start), m = root_level_idx(end);
+
+ if (!root_level_aligned(start))
+ clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE);
+ if (!root_level_aligned(end))
+ clear_next_level(m, 0, next_level_idx(end));
+ memset(&swapper_pg_dir[l], 0, (m - l) * sizeof(pgd_t));
}
static void __init kasan_init_shadow(void)
{
+ static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start;
u64 vmalloc_shadow_end;
@@ -239,10 +321,23 @@ static void __init kasan_init_shadow(void)
* setup will be finished.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+
+ /*
+ * If the start or end address of the shadow region is not aligned to
+ * the root level size, we have to allocate a temporary next-level table
+ * in each case, clone the next level of descriptors, and install the
+ * table into tmp_pg_dir. Note that with 5 levels of paging, the next
+ * level will in fact be p4d_t, but that makes no difference in this
+ * case.
+ */
+ if (!root_level_aligned(KASAN_SHADOW_START))
+ clone_next_level(KASAN_SHADOW_START, tmp_pg_dir, pud[0]);
+ if (!root_level_aligned(KASAN_SHADOW_END))
+ clone_next_level(KASAN_SHADOW_END, tmp_pg_dir, pud[1]);
dsb(ishst);
- cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
+ cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
- clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ clear_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
@@ -271,12 +366,12 @@ static void __init kasan_init_shadow(void)
* so we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_early_shadow_pte[i],
+ __set_pte(&kasan_early_shadow_pte[i],
pfn_pte(sym_to_pfn(kasan_early_shadow_page),
PAGE_KERNEL_RO));
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}
static void __init kasan_init_depth(void)
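
A standalone sketch (not part of the patch) of the root-level arithmetic the new kasan_init.c helpers use: with (levels - 1) * (PAGE_SHIFT - 3) bits translated below the root table, each root descriptor covers PAGE_SIZE << shift bytes, and root_level_idx() masks off the upper VA bits before shifting. Evaluated here for a 4K granule with 48-bit VAs; the helper macros and the sample address are assumptions local to the sketch.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define _PAGE_OFFSET(va)	(-(1ULL << (va)))
/* assumed: translation levels needed to map 'va' VA bits with this granule */
#define HW_PGTABLE_LEVELS(va)	(((va) - 4) / (PAGE_SHIFT - 3))

static const unsigned int vabits_actual = 48;

static int root_level_shift(void)
{
	return (HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);
}

static bool root_level_aligned(uint64_t addr)
{
	return (addr % (PAGE_SIZE << root_level_shift())) == 0;
}

static int root_level_idx(uint64_t addr)
{
	return (addr & ~_PAGE_OFFSET(vabits_actual)) >>
	       (root_level_shift() + PAGE_SHIFT);
}

int main(void)
{
	uint64_t addr = 0xffff600000000000ULL;	/* hypothetical shadow start */

	printf("each root entry covers %llu GiB\n",
	       (unsigned long long)((PAGE_SIZE << root_level_shift()) >> 30));
	printf("root_level_aligned = %d, root_level_idx = %d\n",
	       root_level_aligned(addr), root_level_idx(addr));
	return 0;
}
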
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 645fe60d000f..642bdf908b22 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -73,6 +73,10 @@ static int __init adjust_protection_map(void)
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}
+ if (lpa2_is_enabled())
+ for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
+ pgprot_val(protection_map[i]) &= ~PTE_SHARED;
+
return 0;
}
arch_initcall(adjust_protection_map);
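
A standalone sketch (not part of the patch) of why adjust_protection_map() strips PTE_SHARED when LPA2 is enabled: the SH[1:0] descriptor bits are repurposed to hold output-address bits [51:50], so a stale shareability value would corrupt the PA (the proc.S hunk further down clears the same bits from the KPTI pte_flags for the same reason). Bit positions are assumptions based on the FEAT_LPA2 layout.

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define PTE_SHARED		GENMASK_ULL(9, 8)	/* SH[1:0], pre-LPA2 meaning */

/* Pack a 52-bit PA into a descriptor the LPA2 way: PA[51:50] land in [9:8]. */
static uint64_t lpa2_mk_pte(uint64_t pa, uint64_t prot)
{
	return (pa & GENMASK_ULL(49, 12)) |
	       (((pa >> 50) & 0x3) << 8) |
	       (prot & ~PTE_SHARED);		/* SH bits must stay clear */
}

int main(void)
{
	uint64_t pa = 0x000c123456789000ULL;	/* a PA with bits 51:50 set */

	printf("pte = 0x%016llx\n", (unsigned long long)lpa2_mk_pte(pa, 0x703));
	return 0;
}
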
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1ac7467d34c9..495b732d5af3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,18 +45,13 @@
#define NO_CONT_MAPPINGS BIT(1)
#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
-int idmap_t0sz __ro_after_init;
-
-#if VA_BITS > 48
-u64 vabits_actual __ro_after_init = VA_BITS_MIN;
-EXPORT_SYMBOL(vabits_actual);
-#endif
-
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };
+static bool rodata_is_rw __ro_after_init = true;
+
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
@@ -73,10 +68,21 @@ EXPORT_SYMBOL(empty_zero_page);
static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);
-void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
+void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
pgd_t *fixmap_pgdp;
+ /*
+ * Don't bother with the fixmap if swapper_pg_dir is still mapped
+ * writable in the kernel mapping.
+ */
+ if (rodata_is_rw) {
+ WRITE_ONCE(*pgdp, pgd);
+ dsb(ishst);
+ isb();
+ return;
+ }
+
spin_lock(&swapper_pgdir_lock);
fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
WRITE_ONCE(*fixmap_pgdp, pgd);
@@ -173,16 +179,16 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
ptep = pte_set_fixmap_offset(pmdp, addr);
do {
- pte_t old_pte = READ_ONCE(*ptep);
+ pte_t old_pte = __ptep_get(ptep);
- set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
+ __set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
/*
* After the PTE entry has been populated once, we
* only allow updates to the permission attributes.
*/
BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
- READ_ONCE(pte_val(*ptep))));
+ pte_val(__ptep_get(ptep))));
phys += PAGE_SIZE;
} while (ptep++, addr += PAGE_SIZE, addr != end);
@@ -307,15 +313,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
} while (addr = next, addr != end);
}
-static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long next;
- pud_t *pudp;
- p4d_t *p4dp = p4d_offset(pgdp, addr);
p4d_t p4d = READ_ONCE(*p4dp);
+ pud_t *pudp;
if (p4d_none(p4d)) {
p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
@@ -363,6 +368,46 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
pud_clear_fixmap();
}
+static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
+{
+ unsigned long next;
+ pgd_t pgd = READ_ONCE(*pgdp);
+ p4d_t *p4dp;
+
+ if (pgd_none(pgd)) {
+ pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN;
+ phys_addr_t p4d_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pgdval |= PGD_TABLE_PXN;
+ BUG_ON(!pgtable_alloc);
+ p4d_phys = pgtable_alloc(P4D_SHIFT);
+ __pgd_populate(pgdp, p4d_phys, pgdval);
+ pgd = READ_ONCE(*pgdp);
+ }
+ BUG_ON(pgd_bad(pgd));
+
+ p4dp = p4d_set_fixmap_offset(pgdp, addr);
+ do {
+ p4d_t old_p4d = READ_ONCE(*p4dp);
+
+ next = p4d_addr_end(addr, end);
+
+ alloc_init_pud(p4dp, addr, next, phys, prot,
+ pgtable_alloc, flags);
+
+ BUG_ON(p4d_val(old_p4d) != 0 &&
+ p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp)));
+
+ phys += next - addr;
+ } while (p4dp++, addr = next, addr != end);
+
+ p4d_clear_fixmap();
+}
+
static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
@@ -385,7 +430,7 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
+ alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc,
flags);
phys += next - addr;
} while (pgdp++, addr = next, addr != end);
@@ -576,8 +621,12 @@ static void __init map_mem(pgd_t *pgdp)
* entries at any level are being shared between the linear region and
* the vmalloc region. Check whether this is true for the PGD level, in
* which case it is guaranteed to be true for all other levels as well.
+ * (Unless we are running with support for LPA2, in which case the
+ * entire reduced VA space is covered by a single pgd_t which will have
+ * been populated without the PXNTable attribute by the time we get here.)
*/
- BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
+ pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);
early_kfence_pool = arm64_kfence_alloc_pool();
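
A standalone sketch (not part of the patch) of what the BUILD_BUG_ON above checks in the common case: with a 4K granule and 48-bit VAs, the last byte of the linear region and the first byte after it land in different PGD slots, so no root-level entry is shared between the linear map and the vmalloc region. direct_map_end is assumed here to be _PAGE_END(VA_BITS_MIN); all definitions are local to the sketch.

#include <stdio.h>
#include <stdint.h>

#define VA_BITS_MIN	48
#define PGDIR_SHIFT	39			/* 4K granule, 4 levels */
#define PTRS_PER_PGD	(1 << (VA_BITS_MIN - PGDIR_SHIFT))
#define _PAGE_END(va)	(-(1ULL << ((va) - 1)))	/* assumed end of linear map */

static unsigned int pgd_index(uint64_t addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	uint64_t direct_map_end = _PAGE_END(VA_BITS_MIN);

	/* 255 vs 256: the boundary sits exactly on a PGD entry boundary */
	printf("pgd_index(end - 1) = %u, pgd_index(end) = %u\n",
	       pgd_index(direct_map_end - 1), pgd_index(direct_map_end));
	return 0;
}
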
@@ -630,15 +679,14 @@ void mark_rodata_ro(void)
* to cover NOTES and EXCEPTION_TABLE.
*/
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
+ WRITE_ONCE(rodata_is_rw, false);
update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
-
- debug_checkwx();
}
-static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma,
- int flags, unsigned long vm_flags)
+static void __init declare_vma(struct vm_struct *vma,
+ void *va_start, void *va_end,
+ unsigned long vm_flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -646,9 +694,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(pa_start));
BUG_ON(!PAGE_ALIGNED(size));
- __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, flags);
-
if (!(vm_flags & VM_NO_GUARD))
size += PAGE_SIZE;
@@ -661,12 +706,12 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static pgprot_t kernel_exec_prot(void)
{
return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;
@@ -701,80 +746,36 @@ core_initcall(map_entry_trampoline);
#endif
/*
- * Open coded check for BTI, only for use to determine configuration
- * for early mappings for before the cpufeature code has run.
+ * Declare the VMA areas for the kernel
*/
-static bool arm64_early_this_cpu_has_bti(void)
+static void __init declare_kernel_vmas(void)
{
- u64 pfr1;
-
- if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
- return false;
+ static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
- pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
- return cpuid_feature_extract_unsigned_field(pfr1,
- ID_AA64PFR1_EL1_BT_SHIFT);
+ declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[4], _data, _end, 0);
}
-/*
- * Create fine-grained mappings for the kernel.
- */
-static void __init map_kernel(pgd_t *pgdp)
-{
- static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
- vmlinux_initdata, vmlinux_data;
-
- /*
- * External debuggers may need to write directly to the text
- * mapping to install SW breakpoints. Allow this (only) when
- * explicitly requested with rodata=off.
- */
- pgprot_t text_prot = kernel_exec_prot();
-
- /*
- * If we have a CPU that supports BTI and a kernel built for
- * BTI then mark the kernel executable text as guarded pages
- * now so we don't have to rewrite the page tables later.
- */
- if (arm64_early_this_cpu_has_bti())
- text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
-
- /*
- * Only rodata will be remapped with different permissions later on,
- * all other segments are allowed to use contiguous mappings.
- */
- map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
- VM_NO_GUARD);
- map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
- &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
- map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
- &vmlinux_inittext, 0, VM_NO_GUARD);
- map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
- &vmlinux_initdata, 0, VM_NO_GUARD);
- map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
+void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
+ int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
- fixmap_copy(pgdp);
- kasan_copy_shadow(pgdp);
-}
+static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
+ kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
static void __init create_idmap(void)
{
u64 start = __pa_symbol(__idmap_text_start);
- u64 size = __pa_symbol(__idmap_text_end) - start;
- pgd_t *pgd = idmap_pg_dir;
- u64 pgd_phys;
-
- /* check if we need an additional level of translation */
- if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
- pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
- set_pgd(&idmap_pg_dir[start >> VA_BITS],
- __pgd(pgd_phys | P4D_TYPE_TABLE));
- pgd = __va(pgd_phys);
- }
- __create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
- early_pgtable_alloc, 0);
+ u64 end = __pa_symbol(__idmap_text_end);
+ u64 ptep = __pa_symbol(idmap_ptes);
- if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ __pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX,
+ IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
+ __phys_to_virt(ptep) - ptep);
+
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) {
extern u32 __idmap_kpti_flag;
u64 pa = __pa_symbol(&__idmap_kpti_flag);
@@ -782,32 +783,21 @@ static void __init create_idmap(void)
* The KPTI G-to-nG conversion code needs a read-write mapping
* of its synchronization flag in the ID map.
*/
- __create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
- early_pgtable_alloc, 0);
+ ptep = __pa_symbol(kpti_ptes);
+ __pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
+ IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
+ __phys_to_virt(ptep) - ptep);
}
}
void __init paging_init(void)
{
- pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
- extern pgd_t init_idmap_pg_dir[];
-
- idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
-
- map_kernel(pgdp);
- map_mem(pgdp);
-
- pgd_clear_fixmap();
-
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
- init_mm.pgd = swapper_pg_dir;
-
- memblock_phys_free(__pa_symbol(init_pg_dir),
- __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+ map_mem(swapper_pg_dir);
memblock_allow_resize();
create_idmap();
+ declare_kernel_vmas();
}
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -854,12 +844,12 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
do {
ptep = pte_offset_kernel(pmdp, addr);
- pte = READ_ONCE(*ptep);
+ pte = __ptep_get(ptep);
if (pte_none(pte))
continue;
WARN_ON(!pte_present(pte));
- pte_clear(&init_mm, addr, ptep);
+ __pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
if (free_mapped)
free_hotplug_page_range(pte_page(pte),
@@ -987,7 +977,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
do {
ptep = pte_offset_kernel(pmdp, addr);
- pte = READ_ONCE(*ptep);
+ pte = __ptep_get(ptep);
/*
* This is just a sanity check here which verifies that
@@ -1006,7 +996,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
*/
ptep = pte_offset_kernel(pmdp, 0UL);
for (i = 0; i < PTRS_PER_PTE; i++) {
- if (!pte_none(READ_ONCE(ptep[i])))
+ if (!pte_none(__ptep_get(&ptep[i])))
return;
}
@@ -1073,10 +1063,10 @@ static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
free_empty_pmd_table(pudp, addr, next, floor, ceiling);
} while (addr = next, addr < end);
- if (CONFIG_PGTABLE_LEVELS <= 3)
+ if (!pgtable_l4_enabled())
return;
- if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
+ if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
return;
/*
@@ -1099,8 +1089,8 @@ static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling)
{
- unsigned long next;
p4d_t *p4dp, p4d;
+ unsigned long i, next, start = addr;
do {
next = p4d_addr_end(addr, end);
@@ -1112,6 +1102,27 @@ static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
WARN_ON(!p4d_present(p4d));
free_empty_pud_table(p4dp, addr, next, floor, ceiling);
} while (addr = next, addr < end);
+
+ if (!pgtable_l5_enabled())
+ return;
+
+ if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
+ return;
+
+ /*
+ * Check whether we can free the p4d page if the rest of the
+	 * entries are empty. Overlaps with other regions have been
+ * handled by the floor/ceiling check.
+ */
+ p4dp = p4d_offset(pgdp, 0UL);
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ if (!p4d_none(READ_ONCE(p4dp[i])))
+ return;
+ }
+
+ pgd_clear(pgdp);
+ __flush_tlb_kernel_pgtable(start);
+ free_hotplug_pgtable_page(virt_to_page(p4dp));
}
static void free_empty_tables(unsigned long addr, unsigned long end,
@@ -1196,6 +1207,12 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
return 1;
}
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_huge(p4d_t *p4dp)
+{
+}
+#endif
+
int pud_clear_huge(pud_t *pudp)
{
if (!pud_sect(READ_ONCE(*pudp)))
@@ -1475,7 +1492,7 @@ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte
* when the permission changes from executable to non-executable
* in cases where cpu is affected with errata #2645198.
*/
- if (pte_user_exec(READ_ONCE(*ptep)))
+ if (pte_user_exec(ptep_get(ptep)))
return ptep_clear_flush(vma, addr, ptep);
}
return ptep_get_and_clear(vma->vm_mm, addr, ptep);
@@ -1486,3 +1503,35 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte
{
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
+{
+ typedef void (ttbr_replace_func)(phys_addr_t);
+ extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+ ttbr_replace_func *replace_phys;
+ unsigned long daif;
+
+ /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
+ phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
+
+ if (cnp)
+ ttbr1 |= TTBR_CNP_BIT;
+
+ replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+
+ cpu_install_idmap();
+
+ /*
+ * We really don't want to take *any* exceptions while TTBR1 is
+ * in the process of being replaced so mask everything.
+ */
+ daif = local_daif_save();
+ replace_phys(ttbr1);
+ local_daif_restore(daif);
+
+ cpu_uninstall_idmap();
+}
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 924843f1f661..0c4e3ecf989d 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -36,12 +36,12 @@ bool can_set_direct_map(void)
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = __ptep_get(ptep);
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
- set_pte(ptep, pte);
+ __set_pte(ptep, pte);
return 0;
}
@@ -245,5 +245,5 @@ bool kernel_page_present(struct page *page)
return true;
ptep = pte_offset_kernel(pmdp, addr);
- return pte_valid(READ_ONCE(*ptep));
+ return pte_valid(__ptep_get(ptep));
}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 4a64089e5771..0c501cabc238 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -17,11 +17,22 @@
static struct kmem_cache *pgd_cache __ro_after_init;
+static bool pgdir_is_page_size(void)
+{
+ if (PGD_SIZE == PAGE_SIZE)
+ return true;
+ if (CONFIG_PGTABLE_LEVELS == 4)
+ return !pgtable_l4_enabled();
+ if (CONFIG_PGTABLE_LEVELS == 5)
+ return !pgtable_l5_enabled();
+ return false;
+}
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
gfp_t gfp = GFP_PGTABLE_USER;
- if (PGD_SIZE == PAGE_SIZE)
+ if (pgdir_is_page_size())
return (pgd_t *)__get_free_page(gfp);
else
return kmem_cache_alloc(pgd_cache, gfp);
@@ -29,7 +40,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- if (PGD_SIZE == PAGE_SIZE)
+ if (pgdir_is_page_size())
free_page((unsigned long)pgd);
else
kmem_cache_free(pgd_cache, pgd);
@@ -37,7 +48,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
void __init pgtable_cache_init(void)
{
- if (PGD_SIZE == PAGE_SIZE)
+ if (pgdir_is_page_size())
return;
#ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f66c37a1610e..9d40f3ffd8d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -195,27 +195,36 @@ SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
+SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)
+#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
+ PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)
.pushsection ".idmap.text", "a"
+ .macro pte_to_phys, phys, pte
+ and \phys, \pte, #PTE_ADDR_LOW
+#ifdef CONFIG_ARM64_PA_BITS_52
+ and \pte, \pte, #PTE_ADDR_HIGH
+ orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
+#endif
+ .endm
+
.macro kpti_mk_tbl_ng, type, num_entries
add end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
- ldr \type, [cur_\type\()p] // Load the entry
+ ldr \type, [cur_\type\()p], #8 // Load the entry and advance
tbz \type, #0, .Lnext_\type // Skip invalid and
tbnz \type, #11, .Lnext_\type // non-global entries
orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\type\()p] // Update the entry
+ str \type, [cur_\type\()p, #-8] // Update the entry
.ifnc \type, pte
tbnz \type, #1, .Lderef_\type
.endif
.Lnext_\type:
- add cur_\type\()p, cur_\type\()p, #8
cmp cur_\type\()p, end_\type\()p
b.ne .Ldo_\type
.endm
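
A C rendition (not part of the patch) of the pte_to_phys macro above, for reference: the low output-address bits are used in place and the high bits are shifted up from their descriptor position. The masks assume the 64K-granule FEAT_LPA layout, where PA[51:48] live in descriptor bits [15:12]; LPA2 configurations keep the high bits in [9:8] with a different shift.

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define PTE_ADDR_LOW		GENMASK_ULL(47, 16)	/* PA[47:16] */
#define PTE_ADDR_HIGH		GENMASK_ULL(15, 12)	/* PA[51:48] */
#define PTE_ADDR_HIGH_SHIFT	36			/* bit 12 -> bit 48 */

static uint64_t pte_to_phys(uint64_t pte)
{
	uint64_t phys = pte & PTE_ADDR_LOW;

	phys |= (pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT;
	return phys;
}

int main(void)
{
	uint64_t pte = 0x00400000abcd0f03ULL;	/* hypothetical table entry */

	printf("phys = 0x%013llx\n", (unsigned long long)pte_to_phys(pte));
	return 0;
}
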
@@ -225,18 +234,18 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)
* fixmap slot associated with the current level.
*/
.macro kpti_map_pgtbl, type, level
- str xzr, [temp_pte, #8 * (\level + 1)] // break before make
+ str xzr, [temp_pte, #8 * (\level + 2)] // break before make
dsb nshst
- add pte, temp_pte, #PAGE_SIZE * (\level + 1)
+ add pte, temp_pte, #PAGE_SIZE * (\level + 2)
lsr pte, pte, #12
tlbi vaae1, pte
dsb nsh
isb
phys_to_pte pte, cur_\type\()p
- add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
+ add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
orr pte, pte, pte_flags
- str pte, [temp_pte, #8 * (\level + 1)]
+ str pte, [temp_pte, #8 * (\level + 2)]
dsb nshst
.endm
@@ -269,6 +278,8 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
end_ptep .req x15
pte .req x16
valid .req x17
+ cur_p4dp .req x19
+ end_p4dp .req x20
mov x5, x3 // preserve temp_pte arg
mrs swapper_ttb, ttbr1_el1
@@ -276,6 +287,12 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
cbnz cpu, __idmap_kpti_secondary
+#if CONFIG_PGTABLE_LEVELS > 4
+ stp x29, x30, [sp, #-32]!
+ mov x29, sp
+ stp x19, x20, [sp, #16]
+#endif
+
/* We're the boot CPU. Wait for the others to catch up */
sevl
1: wfe
@@ -293,9 +310,32 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
mov_q pte_flags, KPTI_NG_PTE_FLAGS
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
+
+#ifdef CONFIG_ARM64_LPA2
+ /*
+ * If LPA2 support is configured, but 52-bit virtual addressing is not
+ * enabled at runtime, we will fall back to one level of paging less,
+ * and so we have to walk swapper_pg_dir as if we dereferenced its
+ * address from a PGD level entry, and terminate the PGD level loop
+ * right after.
+ */
+ adrp pgd, swapper_pg_dir // walk &swapper_pg_dir at the next level
+ mov cur_pgdp, end_pgdp // must be equal to terminate the PGD loop
+alternative_if_not ARM64_HAS_VA52
+ b .Lderef_pgd // skip to the next level
+alternative_else_nop_endif
+ /*
+ * LPA2 based 52-bit virtual addressing requires 52-bit physical
+ * addressing to be enabled as well. In this case, the shareability
+ * bits are repurposed as physical address bits, and should not be
+ * set in pte_flags.
+ */
+ bic pte_flags, pte_flags, #PTE_SHARED
+#endif
+
/* PGD */
adrp cur_pgdp, swapper_pg_dir
- kpti_map_pgtbl pgd, 0
+ kpti_map_pgtbl pgd, -1
kpti_mk_tbl_ng pgd, PTRS_PER_PGD
/* Ensure all the updated entries are visible to secondary CPUs */
@@ -308,16 +348,33 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
+#if CONFIG_PGTABLE_LEVELS > 4
+ ldp x19, x20, [sp, #16]
+ ldp x29, x30, [sp], #32
+#endif
ret
.Lderef_pgd:
+ /* P4D */
+ .if CONFIG_PGTABLE_LEVELS > 4
+ p4d .req x30
+ pte_to_phys cur_p4dp, pgd
+ kpti_map_pgtbl p4d, 0
+ kpti_mk_tbl_ng p4d, PTRS_PER_P4D
+ b .Lnext_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 4 */
+ p4d .req pgd
+ .set .Lnext_p4d, .Lnext_pgd
+ .endif
+
+.Lderef_p4d:
/* PUD */
.if CONFIG_PGTABLE_LEVELS > 3
pud .req x10
- pte_to_phys cur_pudp, pgd
+ pte_to_phys cur_pudp, p4d
kpti_map_pgtbl pud, 1
kpti_mk_tbl_ng pud, PTRS_PER_PUD
- b .Lnext_pgd
+ b .Lnext_p4d
.else /* CONFIG_PGTABLE_LEVELS <= 3 */
pud .req pgd
.set .Lnext_pud, .Lnext_pgd
@@ -361,6 +418,9 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
.unreq end_ptep
.unreq pte
.unreq valid
+ .unreq cur_p4dp
+ .unreq end_p4dp
+ .unreq p4d
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
@@ -395,8 +455,6 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
*
* Initialise the processor for turning the MMU on.
*
- * Input:
- * x0 - actual number of VA bits (ignored unless VA_BITS > 48)
* Output:
* Return in x0 the value of the SCTLR_EL1 register.
*/
@@ -420,20 +478,21 @@ SYM_FUNC_START(__cpu_setup)
mair .req x17
tcr .req x16
mov_q mair, MAIR_EL1_SET
- mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+ mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
+ TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
tcr_clear_errata_bits tcr, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
- sub x9, xzr, x0
- add x9, x9, #64
+ mov x9, #64 - VA_BITS
+alternative_if ARM64_HAS_VA52
tcr_set_t1sz tcr, x9
-#else
- idmap_get_t0sz x9
+#ifdef CONFIG_ARM64_LPA2
+ orr tcr, tcr, #TCR_DS
+#endif
+alternative_else_nop_endif
#endif
- tcr_set_t0sz tcr, x9
/*
* Set the IPS bits in TCR_EL1.
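
A standalone sketch (not part of the patch) of the TxSZ arithmetic in the hunk above: TCR_EL1 encodes each VA range as 64 - VA_BITS, so __cpu_setup now programs T0SZ from IDMAP_VA_BITS and T1SZ from VA_BITS_MIN, and only shrinks T1SZ to 64 - 52 once the ARM64_HAS_VA52 alternative is applied. Field offsets and the sample values are assumptions local to the sketch.

#include <stdio.h>
#include <stdint.h>

#define TCR_T0SZ_OFFSET	0
#define TCR_T1SZ_OFFSET	16
#define TCR_T0SZ(va)	((uint64_t)(64 - (va)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(va)	((uint64_t)(64 - (va)) << TCR_T1SZ_OFFSET)
#define TCR_T1SZ_MASK	(0x3fULL << TCR_T1SZ_OFFSET)

int main(void)
{
	unsigned int idmap_va_bits = 48, va_bits_min = 48, va_bits = 52;
	uint64_t tcr = TCR_T0SZ(idmap_va_bits) | TCR_T1SZ(va_bits_min);

	printf("boot-time TxSZ fields:      0x%06llx\n", (unsigned long long)tcr);

	/* Only if the ARM64_HAS_VA52 alternative fires: shrink T1SZ to 64 - 52 */
	tcr = (tcr & ~TCR_T1SZ_MASK) | TCR_T1SZ(va_bits);
	printf("after the VA52 alternative: 0x%06llx\n", (unsigned long long)tcr);
	return 0;
}
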
@@ -458,11 +517,26 @@ SYM_FUNC_START(__cpu_setup)
ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
cbz x1, .Lskip_indirection
+ /*
+ * The PROT_* macros describing the various memory types may resolve to
+ * C expressions if they include the PTE_MAYBE_* macros, and so they
+ * can only be used from C code. The PIE_E* constants below are also
+ * defined in terms of those macros, but will mask out those
+ * PTE_MAYBE_* constants, whether they are set or not. So #define them
+ * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
+ */
+
+#define PTE_MAYBE_NG 0
+#define PTE_MAYBE_SHARED 0
+
mov_q x0, PIE_E0
msr REG_PIRE0_EL1, x0
mov_q x0, PIE_E1
msr REG_PIR_EL1, x0
+#undef PTE_MAYBE_NG
+#undef PTE_MAYBE_SHARED
+
mov x0, TCR2_EL1x_PIE
msr REG_TCR2_EL1, x0
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index e305b6593c4e..6986827e0d64 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -26,34 +26,6 @@
#include <asm/ptdump.h>
-enum address_markers_idx {
- PAGE_OFFSET_NR = 0,
- PAGE_END_NR,
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- KASAN_START_NR,
-#endif
-};
-
-static struct addr_marker address_markers[] = {
- { PAGE_OFFSET, "Linear Mapping start" },
- { 0 /* PAGE_END */, "Linear Mapping end" },
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
- { KASAN_SHADOW_END, "Kasan shadow end" },
-#endif
- { MODULES_VADDR, "Modules start" },
- { MODULES_END, "Modules end" },
- { VMALLOC_START, "vmalloc() area" },
- { VMALLOC_END, "vmalloc() end" },
- { FIXADDR_TOT_START, "Fixmap start" },
- { FIXADDR_TOP, "Fixmap end" },
- { PCI_IO_START, "PCI I/O start" },
- { PCI_IO_END, "PCI I/O end" },
- { VMEMMAP_START, "vmemmap start" },
- { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
- { -1, NULL },
-};
-
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
@@ -76,6 +48,7 @@ struct pg_state {
struct ptdump_state ptdump;
struct seq_file *seq;
const struct addr_marker *marker;
+ const struct mm_struct *mm;
unsigned long start_address;
int level;
u64 current_prot;
@@ -172,12 +145,12 @@ static const struct prot_bits pte_bits[] = {
struct pg_level {
const struct prot_bits *bits;
- const char *name;
- size_t num;
+ char name[4];
+ int num;
u64 mask;
};
-static struct pg_level pg_level[] = {
+static struct pg_level pg_level[] __ro_after_init = {
{ /* pgd */
.name = "PGD",
.bits = pte_bits,
@@ -187,11 +160,11 @@ static struct pg_level pg_level[] = {
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pud */
- .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
+ .name = "PUD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pmd */
- .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
+ .name = "PMD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pte */
@@ -255,6 +228,11 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
static const char units[] = "KMGTPE";
u64 prot = 0;
+ /* check if the current level has been folded dynamically */
+ if ((level == 1 && mm_p4d_folded(st->mm)) ||
+ (level == 2 && mm_pud_folded(st->mm)))
+ level = 0;
+
if (level >= 0)
prot = val & pg_level[level].mask;
@@ -316,6 +294,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
st = (struct pg_state){
.seq = s,
.marker = info->markers,
+ .mm = info->mm,
.level = -1,
.ptdump = {
.note_page = note_page,
@@ -339,13 +318,11 @@ static void __init ptdump_initialize(void)
pg_level[i].mask |= pg_level[i].bits[j].mask;
}
-static struct ptdump_info kernel_ptdump_info = {
+static struct ptdump_info kernel_ptdump_info __ro_after_init = {
.mm = &init_mm,
- .markers = address_markers,
- .base_addr = PAGE_OFFSET,
};
-void ptdump_check_wx(void)
+bool ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
@@ -358,7 +335,7 @@ void ptdump_check_wx(void)
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]) {
- {PAGE_OFFSET, ~0UL},
+ {_PAGE_OFFSET(vabits_actual), ~0UL},
{0, 0}
}
}
@@ -366,19 +343,46 @@ void ptdump_check_wx(void)
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
- if (st.wx_pages || st.uxn_pages)
+ if (st.wx_pages || st.uxn_pages) {
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
st.wx_pages, st.uxn_pages);
- else
+
+ return false;
+ } else {
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+
+ return true;
+ }
}
static int __init ptdump_init(void)
{
- address_markers[PAGE_END_NR].start_address = PAGE_END;
+ u64 page_offset = _PAGE_OFFSET(vabits_actual);
+ u64 vmemmap_start = (u64)virt_to_page((void *)page_offset);
+ struct addr_marker m[] = {
+ { PAGE_OFFSET, "Linear Mapping start" },
+ { PAGE_END, "Linear Mapping end" },
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
+ { KASAN_SHADOW_START, "Kasan shadow start" },
+ { KASAN_SHADOW_END, "Kasan shadow end" },
#endif
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { VMALLOC_START, "vmalloc() area" },
+ { VMALLOC_END, "vmalloc() end" },
+ { vmemmap_start, "vmemmap start" },
+ { VMEMMAP_END, "vmemmap end" },
+ { PCI_IO_START, "PCI I/O start" },
+ { PCI_IO_END, "PCI I/O end" },
+ { FIXADDR_TOT_START, "Fixmap start" },
+ { FIXADDR_TOP, "Fixmap end" },
+ { -1, NULL },
+ };
+ static struct addr_marker address_markers[ARRAY_SIZE(m)] __ro_after_init;
+
+ kernel_ptdump_info.markers = memcpy(address_markers, m, sizeof(m));
+ kernel_ptdump_info.base_addr = page_offset;
+
ptdump_initialize();
ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
return 0;
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 7b14df3c6477..5139a28130c0 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -33,7 +33,7 @@ static void *trans_alloc(struct trans_pgd_info *info)
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
- pte_t pte = READ_ONCE(*src_ptep);
+ pte_t pte = __ptep_get(src_ptep);
if (pte_valid(pte)) {
/*
@@ -41,7 +41,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
- set_pte(dst_ptep, pte_mkwrite_novma(pte));
+ __set_pte(dst_ptep, pte_mkwrite_novma(pte));
} else if ((debug_pagealloc_enabled() ||
is_kfence_address((void *)addr)) && !pte_none(pte)) {
/*
@@ -55,7 +55,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
- set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
+ __set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
}
}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index b912b1409fc0..62b2838a231a 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -26,6 +26,7 @@ HAS_ECV
HAS_ECV_CNTPOFF
HAS_EPAN
HAS_EVT
+HAS_FPMR
HAS_FGT
HAS_FPSIMD
HAS_GENERIC_AUTH
@@ -35,6 +36,7 @@ HAS_GENERIC_AUTH_IMP_DEF
HAS_GIC_CPUIF_SYSREGS
HAS_GIC_PRIO_MASKING
HAS_GIC_PRIO_RELAXED_SYNC
+HAS_HCR_NV1
HAS_HCX
HAS_LDAPR
HAS_LPA2
@@ -50,6 +52,7 @@ HAS_STAGE2_FWB
HAS_TCR2
HAS_TIDCP1
HAS_TLB_RANGE
+HAS_VA52
HAS_VIRT_HOST_EXTN
HAS_WFXT
HW_DBM
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 4c9b67934367..a4c1dd4741a4 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -200,6 +200,7 @@ UnsignedEnum 27:24 PerfMon
0b0110 PMUv3p5
0b0111 PMUv3p7
0b1000 PMUv3p8
+ 0b1001 PMUv3p9
0b1111 IMPDEF
EndEnum
Enum 23:20 MProfDbg
@@ -231,6 +232,7 @@ Enum 3:0 CopDbg
0b1000 Debugv8p2
0b1001 Debugv8p4
0b1010 Debugv8p8
+ 0b1011 Debugv8p9
EndEnum
EndSysreg
@@ -1221,6 +1223,7 @@ UnsignedEnum 35:32 PMSVer
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
+ 0b0101 V1P4
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
@@ -1247,11 +1250,41 @@ UnsignedEnum 3:0 DebugVer
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
+ 0b1011 V8P9
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
-Res0 63:0
+Field 63:56 ABL_CMPs
+UnsignedEnum 55:52 DPFZS
+ 0b0000 IGNR
+ 0b0001 FRZN
+EndEnum
+UnsignedEnum 51:48 EBEP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 47:44 ITE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 43:40 ABLE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 39:36 PMICNTR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 35:32 SPMU
+ 0b0000 NI
+ 0b0001 IMP
+ 0b0010 IMP_SPMZR
+EndEnum
+Field 31:24 CTX_CMPs
+Field 23:16 WRPs
+Field 15:8 BRPs
+Field 7:0 SYSPMUID
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
@@ -1366,6 +1399,7 @@ EndEnum
UnsignedEnum 43:40 SPECRES
0b0000 NI
0b0001 IMP
+ 0b0010 COSP_RCTX
EndEnum
UnsignedEnum 39:36 SB
0b0000 NI
@@ -1492,7 +1526,12 @@ EndEnum
EndSysreg
Sysreg ID_AA64ISAR3_EL1 3 0 0 6 3
-Res0 63:12
+Res0 63:16
+UnsignedEnum 15:12 PACM
+ 0b0000 NI
+ 0b0001 TRIVIAL_IMP
+ 0b0010 FULL_IMP
+EndEnum
UnsignedEnum 11:8 TLBIW
0b0000 NI
0b0001 IMP
@@ -1540,16 +1579,16 @@ Enum 35:32 TGRAN16_2
0b0010 IMP
0b0011 52_BIT
EndEnum
-Enum 31:28 TGRAN4
+SignedEnum 31:28 TGRAN4
0b0000 IMP
0b0001 52_BIT
0b1111 NI
EndEnum
-Enum 27:24 TGRAN64
+SignedEnum 27:24 TGRAN64
0b0000 IMP
0b1111 NI
EndEnum
-Enum 23:20 TGRAN16
+UnsignedEnum 23:20 TGRAN16
0b0000 NI
0b0001 IMP
0b0010 52_BIT
@@ -1697,7 +1736,7 @@ Enum 23:20 CCIDX
0b0000 32
0b0001 64
EndEnum
-Enum 19:16 VARange
+UnsignedEnum 19:16 VARange
0b0000 48
0b0001 52
EndEnum
@@ -1791,6 +1830,43 @@ UnsignedEnum 3:0 TCRX
EndEnum
EndSysreg
+Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4
+Res0 63:40
+UnsignedEnum 39:36 E3DSE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 35:28
+SignedEnum 27:24 E2H0
+ 0b0000 IMP
+ 0b1110 NI_NV1
+ 0b1111 NI
+EndEnum
+UnsignedEnum 23:20 NV_frac
+ 0b0000 NV_NV2
+ 0b0001 NV2_ONLY
+EndEnum
+UnsignedEnum 19:16 FGWTE3
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 15:12 HACDBS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 11:8 ASID2
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+SignedEnum 7:4 EIESB
+ 0b0000 NI
+ 0b0001 ToEL3
+ 0b0010 ToELx
+ 0b1111 ANY
+EndEnum
+Res0 3:0
+EndSysreg
+
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
Field 62 SPINTMASK