Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 41
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-modes.S | 2
-rw-r--r--  arch/arm64/crypto/sha1-ce-core.S | 2
-rw-r--r--  arch/arm64/crypto/sha2-ce-core.S | 2
-rw-r--r--  arch/arm64/crypto/sha3-ce-core.S | 4
-rw-r--r--  arch/arm64/crypto/sha512-ce-core.S | 2
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 2
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 21
-rw-r--r--  arch/arm64/include/asm/asm_pointer_auth.h | 20
-rw-r--r--  arch/arm64/include/asm/assembler.h | 114
-rw-r--r--  arch/arm64/include/asm/barrier.h | 23
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 17
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 10
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 21
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 1
-rw-r--r--  arch/arm64/include/asm/irq.h | 4
-rw-r--r--  arch/arm64/include/asm/irq_work.h | 2
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 16
-rw-r--r--  arch/arm64/include/asm/memory.h | 4
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h | 9
-rw-r--r--  arch/arm64/include/asm/mte.h | 54
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 19
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 15
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 5
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 31
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h | 61
-rw-r--r--  arch/arm64/include/asm/processor.h | 13
-rw-r--r--  arch/arm64/include/asm/ptdump.h | 2
-rw-r--r--  arch/arm64/include/asm/smp.h | 1
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 24
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 13
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 22
-rw-r--r--  arch/arm64/include/asm/vdso/gettimeofday.h | 6
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 7
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 22
-rw-r--r--  arch/arm64/kernel/entry-common.c | 6
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 5
-rw-r--r--  arch/arm64/kernel/entry.S | 174
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 39
-rw-r--r--  arch/arm64/kernel/head.S | 39
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 10
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 26
-rw-r--r--  arch/arm64/kernel/irq.c | 35
-rw-r--r--  arch/arm64/kernel/kaslr.c | 18
-rw-r--r--  arch/arm64/kernel/module.c | 16
-rw-r--r--  arch/arm64/kernel/mte.c | 121
-rw-r--r--  arch/arm64/kernel/perf_event.c | 5
-rw-r--r--  arch/arm64/kernel/pointer_auth.c | 63
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 3
-rw-r--r--  arch/arm64/kernel/process.c | 35
-rw-r--r--  arch/arm64/kernel/ptrace.c | 41
-rw-r--r--  arch/arm64/kernel/smp.c | 1
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 24
-rw-r--r--  arch/arm64/kernel/suspend.c | 6
-rw-r--r--  arch/arm64/kernel/vdso.c | 26
-rw-r--r--  arch/arm64/mm/fault.c | 18
-rw-r--r--  arch/arm64/mm/kasan_init.c | 29
-rw-r--r--  arch/arm64/mm/mmu.c | 41
-rw-r--r--  arch/arm64/mm/proc.S | 48
-rw-r--r--  arch/arm64/mm/ptdump.c | 4
-rw-r--r--  arch/arm64/mm/ptdump_debugfs.c | 2
64 files changed, 1010 insertions, 445 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f1a032ed2274..406b42c05ee1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -108,9 +108,9 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
+ select GENERIC_FIND_FIRST_BIT
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_IPI
- select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -138,6 +138,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
select HAVE_ARCH_KFENCE
@@ -195,6 +196,7 @@ config ARM64
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
+ select KASAN_VMALLOC if KASAN_GENERIC
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
@@ -1069,6 +1071,9 @@ config SYS_SUPPORTS_HUGETLBFS
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
+config ARCH_HAS_FILTER_PGPROT
+ def_bool y
+
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y if PGTABLE_LEVELS > 2
@@ -1430,19 +1435,6 @@ config ARM64_USE_LSE_ATOMICS
built with binutils >= 2.25 in order for the new instructions
to be used.
-config ARM64_VHE
- bool "Enable support for Virtualization Host Extensions (VHE)"
- default y
- help
- Virtualization Host Extensions (VHE) allow the kernel to run
- directly at EL2 (instead of EL1) on processors that support
- it. This leads to better performance for KVM, as they reduce
- the cost of the world switch.
-
- Selecting this option allows the VHE feature to be detected
- at runtime, and does not affect processors that do not
- implement this feature.
-
endmenu
menu "ARMv8.2 architectural features"
@@ -1696,10 +1688,23 @@ config ARM64_MTE
endmenu
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+ bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+ default y
+ depends on ARM64_PAN
+ help
+ Enhanced Privileged Access Never (EPAN) allows Privileged
+ Access Never to be used with Execute-only mappings.
+
+ The feature is detected at runtime, and will remain disabled
+ if the cpu does not implement the feature.
+endmenu
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
- depends on !KVM || ARM64_VHE
help
The Scalable Vector Extension (SVE) is an extension to the AArch64
execution state which complements and extends the SIMD functionality
@@ -1728,12 +1733,6 @@ config ARM64_SVE
booting the kernel. If unsure and you are not observing these
symptoms, you should assume that it is safe to say Y.
- CPUs that support SVE are architecturally required to support the
- Virtualization Host Extensions (VHE), so the kernel makes no
- provision for supporting SVE alongside KVM without VHE enabled.
- Thus, you will need to enable CONFIG_ARM64_VHE if you want to support
- KVM in the same kernel image.
-
config ARM64_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d612f633b771..8793a9cb9d4b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1156,6 +1156,7 @@ CONFIG_CRYPTO_DEV_HISI_TRNG=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 247011356d11..b495de22bb38 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -701,7 +701,7 @@ AES_FUNC_START(aes_mac_update)
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7
+ cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add w3, w3, #4
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 8c02bbc2684e..889ca0f8972b 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -121,7 +121,7 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgav.4s, dgav.4s, dg0v.4s
cbz w2, 2f
- cond_yield 3f, x5
+ cond_yield 3f, x5, x6
b 0b
/*
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 6cdea7d56059..491179922f49 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -129,7 +129,7 @@ CPU_LE( rev32 v19.16b, v19.16b )
/* handled all input blocks? */
cbz w2, 2f
- cond_yield 3f, x5
+ cond_yield 3f, x5, x6
b 0b
/*
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
index 6f5208414fe3..9c77313f5a60 100644
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -184,11 +184,11 @@ SYM_FUNC_START(sha3_ce_transform)
eor v0.16b, v0.16b, v31.16b
cbnz w8, 3b
- cond_yield 3f, x8
+ cond_yield 4f, x8, x9
cbnz w2, 0b
/* save state */
-3: st1 { v0.1d- v3.1d}, [x0], #32
+4: st1 { v0.1d- v3.1d}, [x0], #32
st1 { v4.1d- v7.1d}, [x0], #32
st1 { v8.1d-v11.1d}, [x0], #32
st1 {v12.1d-v15.1d}, [x0], #32
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index d6e7f6c95fa6..b6a3a36e15f5 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -195,7 +195,7 @@ CPU_LE( rev64 v19.16b, v19.16b )
add v10.2d, v10.2d, v2.2d
add v11.2d, v11.2d, v3.2d
- cond_yield 3f, x4
+ cond_yield 3f, x4, x5
/* handled all input blocks? */
cbnz w2, 0b
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 880b9054d75c..934b9be582d2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -173,7 +173,7 @@ static inline void gic_pmr_mask_irqs(void)
static inline void gic_arch_enable_irqs(void)
{
- asm volatile ("msr daifclr, #2" : : : "memory");
+ asm volatile ("msr daifclr, #3" : : : "memory");
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9f0ec21d6327..88d20f04c64a 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do { \
- u64 tmp, _val = (val); \
- \
- asm volatile( \
- " eor %0, %1, %1\n" \
- " add %0, sp, %0\n" \
- " ldr xzr, [%0]" \
- : "=r" (tmp) : "r" (_val)); \
-} while (0)
-
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
return cnt;
}
-#undef arch_counter_enforce_ordering
-
static inline int arch_timer_arch_init(void)
{
return 0;
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 52dead2a8640..8ca2dc0661ee 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -13,30 +13,12 @@
* so use the base value of ldp as thread.keys_user and offset as
* thread.keys_user.ap*.
*/
- .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_USER
add \tmp1, \tsk, \tmp1
-alternative_if_not ARM64_HAS_ADDRESS_AUTH
- b .Laddr_auth_skip_\@
-alternative_else_nop_endif
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
- msr_s SYS_APIBKEYLO_EL1, \tmp2
- msr_s SYS_APIBKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
- msr_s SYS_APDAKEYLO_EL1, \tmp2
- msr_s SYS_APDAKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
- msr_s SYS_APDBKEYLO_EL1, \tmp2
- msr_s SYS_APDBKEYHI_EL1, \tmp3
-.Laddr_auth_skip_\@:
-alternative_if ARM64_HAS_GENERIC_AUTH
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
- msr_s SYS_APGAKEYLO_EL1, \tmp2
- msr_s SYS_APGAKEYHI_EL1, \tmp3
-alternative_else_nop_endif
.endm
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ca31594d3d6c..ab569b0b45fc 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -15,6 +15,7 @@
#include <asm-generic/export.h>
#include <asm/asm-offsets.h>
+#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -23,6 +24,14 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
+ /*
+ * Provide a wxN alias for each wN register so that we can paste an xN
+ * reference after a 'w' to obtain the 32-bit version.
+ */
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ wx\n .req w\n
+ .endr
+
.macro save_and_disable_daif, flags
mrs \flags, daif
msr daifset, #0xf
@@ -40,9 +49,9 @@
msr daif, \flags
.endm
- /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
- .macro enable_da_f
- msr daifclr, #(8 | 4 | 1)
+ /* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
+ .macro enable_da
+ msr daifclr, #(8 | 4)
.endm
/*
@@ -50,7 +59,7 @@
*/
.macro save_and_disable_irq, flags
mrs \flags, daif
- msr daifset, #2
+ msr daifset, #3
.endm
.macro restore_irq, flags
@@ -692,90 +701,33 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
isb
.endm
-/*
- * Check whether to yield to another runnable task from kernel mode NEON code
- * (which runs with preemption disabled).
- *
- * if_will_cond_yield_neon
- * // pre-yield patchup code
- * do_cond_yield_neon
- * // post-yield patchup code
- * endif_yield_neon <label>
- *
- * where <label> is optional, and marks the point where execution will resume
- * after a yield has been performed. If omitted, execution resumes right after
- * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if
- * CONFIG_PREEMPTION is not defined.
- *
- * As a convenience, in the case where no patchup code is required, the above
- * sequence may be abbreviated to
- *
- * cond_yield_neon <label>
- *
- * Note that the patchup code does not support assembler directives that change
- * the output section, any use of such directives is undefined.
- *
- * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1 and a reschedule is also
- * needed. If so, calling of preempt_enable() in kernel_neon_end() will
- * trigger a reschedule. If it is not the case, yielding is pointless.
- * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
- * code.
- *
- * This macro sequence may clobber all CPU state that is not guaranteed by the
- * AAPCS to be preserved across an ordinary function call.
- */
-
- .macro cond_yield_neon, lbl
- if_will_cond_yield_neon
- do_cond_yield_neon
- endif_yield_neon \lbl
- .endm
-
- .macro if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPTION
- get_current_task x0
- ldr x0, [x0, #TSK_TI_PREEMPT]
- sub x0, x0, #PREEMPT_DISABLE_OFFSET
- cbz x0, .Lyield_\@
- /* fall through to endif_yield_neon */
- .subsection 1
-.Lyield_\@ :
-#else
- .section ".discard.cond_yield_neon", "ax"
-#endif
- .endm
-
- .macro do_cond_yield_neon
- bl kernel_neon_end
- bl kernel_neon_begin
- .endm
-
- .macro endif_yield_neon, lbl
- .ifnb \lbl
- b \lbl
- .else
- b .Lyield_out_\@
- .endif
- .previous
-.Lyield_out_\@ :
- .endm
-
/*
- * Check whether preempt-disabled code should yield as soon as it
- * is able. This is the case if re-enabling preemption a single
- * time results in a preempt count of zero, and the TIF_NEED_RESCHED
- * flag is set. (Note that the latter is stored negated in the
- * top word of the thread_info::preempt_count field)
+ * Check whether preempt/bh-disabled asm code should yield as soon as
+ * it is able. This is the case if we are currently running in task
+ * context, and either a softirq is pending, or the TIF_NEED_RESCHED
+ * flag is set and re-enabling preemption a single time would result in
+ * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
+ * stored negated in the top word of the thread_info::preempt_count
+ * field)
*/
- .macro cond_yield, lbl:req, tmp:req
-#ifdef CONFIG_PREEMPTION
+ .macro cond_yield, lbl:req, tmp:req, tmp2:req
get_current_task \tmp
ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
+ /*
+ * If we are serving a softirq, there is no point in yielding: the
+ * softirq will not be preempted no matter what we do, so we should
+ * run to completion as quickly as we can.
+ */
+ tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
+#ifdef CONFIG_PREEMPTION
sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
cbz \tmp, \lbl
#endif
+ adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
+ this_cpu_offset \tmp2
+ ldr w\tmp, [\tmp, \tmp2]
+ cbnz w\tmp, \lbl // yield on pending softirq in task context
+.Lnoyield_\@:
.endm
/*
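As a rough C rendering of the condition the reworked cond_yield macro tests (illustrative only; the helper name is made up and this is not in-tree code):

#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/sched.h>

static bool should_yield(void)
{
	/* While serving a softirq, run to completion rather than yield. */
	if (in_serving_softirq())
		return false;

	/*
	 * Yield if re-enabling preemption once would leave the preempt
	 * count at zero while TIF_NEED_RESCHED is set (the flag is kept,
	 * negated, in the top word of thread_info::preempt_count, so the
	 * whole 64-bit value reads as PREEMPT_DISABLE_OFFSET only then).
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) ==
			PREEMPT_DISABLE_OFFSET)
		return true;

	/* Otherwise yield only for a softirq pending in task context. */
	return local_softirq_pending() != 0;
}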
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c3009b0e5239..065ba482daf0 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,10 +25,6 @@
#define psb_csync() asm volatile("hint #17" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
-#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
- SB_BARRIER_INSN"nop\n", \
- ARM64_HAS_SB))
-
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
@@ -70,6 +66,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
return mask;
}
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index c40f2490cd7b..b0c5eda0498f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -67,7 +67,8 @@
#define ARM64_HAS_LDAPR 59
#define ARM64_KVM_PROTECTED_MODE 60
#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
+#define ARM64_HAS_EPAN 62
-#define ARM64_NCAPS 62
+#define ARM64_NCAPS 63
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 61177bac49fa..338840c00e8e 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -63,6 +63,23 @@ struct arm64_ftr_bits {
s64 safe_val; /* safe value for FTR_EXACT features */
};
+/*
+ * Describe the early feature override to the core override code:
+ *
+ * @val Values that are to be merged into the final
+ * sanitised value of the register. Only the bitfields
+ * set to 1 in @mask are valid
+ * @mask Mask of the features that are overridden by @val
+ *
+ * A @mask field set to full-1 indicates that the corresponding field
+ * in @val is a valid override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-0 denotes that this field has no override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-1 denotes that this field has an invalid override.
+ */
struct arm64_ftr_override {
u64 val;
u64 mask;
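For illustration, a hypothetical helper (not part of this patch) that encodes an override for a single 4-bit ID register field, following the @val/@mask convention described above:

#include <linux/bits.h>

static void set_field_override(struct arm64_ftr_override *ovr,
			       unsigned int shift, u64 value)
{
	/* Mark the 4-bit field as carrying a valid override ... */
	ovr->mask |= GENMASK_ULL(shift + 3, shift);
	/* ... and record the value to be merged into the sanitised register. */
	ovr->val  |= (value << shift) & GENMASK_ULL(shift + 3, shift);
}

A field left at mask==0/val==0 is simply not overridden, while mask==0/val==all-ones marks an invalid override, which the init_cpu_ftr_reg() change later in this patch now warns about and discards.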
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 1c26d7baa67f..5eb7af9c4557 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,8 +13,8 @@
#include <asm/ptrace.h>
#define DAIF_PROCCTX 0
-#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
-#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
+#define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
@@ -47,7 +47,7 @@ static inline unsigned long local_daif_save_flags(void)
if (system_uses_irq_prio_masking()) {
/* If IRQs are masked with PMR, reflect it in the flags */
if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
- flags |= PSR_I_BIT;
+ flags |= PSR_I_BIT | PSR_F_BIT;
}
return flags;
@@ -69,7 +69,7 @@ static inline void local_daif_restore(unsigned long flags)
bool irq_disabled = flags & PSR_I_BIT;
WARN_ON(system_has_prio_mask_debugging() &&
- !(read_sysreg(daif) & PSR_I_BIT));
+ (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
if (!irq_disabled) {
trace_hardirqs_on();
@@ -86,7 +86,7 @@ static inline void local_daif_restore(unsigned long flags)
* If interrupts are disabled but we can take
* asynchronous errors, we can take NMIs
*/
- flags &= ~PSR_I_BIT;
+ flags &= ~(PSR_I_BIT | PSR_F_BIT);
pmr = GIC_PRIO_IRQOFF;
} else {
pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index d77d358f9395..b3f2d3bb0938 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -131,6 +131,26 @@
.Lskip_sve_\@:
.endm
+/* Disable any fine grained traps */
+.macro __init_el2_fgt
+ mrs x1, id_aa64mmfr0_el1
+ ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+ msr_s SYS_HDFGRTR_EL2, xzr
+ msr_s SYS_HDFGWTR_EL2, xzr
+ msr_s SYS_HFGRTR_EL2, xzr
+ msr_s SYS_HFGWTR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, xzr
+
+ mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
+ ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+ msr_s SYS_HAFGRTR_EL2, xzr
+.Lskip_fgt_\@:
+.endm
+
.macro __init_el2_nvhe_prepare_eret
mov x0, #INIT_PSTATE_EL1
msr spsr_el2, x0
@@ -155,6 +175,7 @@
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
+ __init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bec5f14b622a..ebb263b2d3b1 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -73,6 +73,7 @@ extern void sve_flush_live(void);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
+extern void sve_set_vq(unsigned long vq_minus_1);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b2b0c6405eb0..fac08e18bcd5 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,6 +8,10 @@
struct pt_regs;
+int set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#define set_handle_irq set_handle_irq
+int set_handle_fiq(void (*handle_fiq)(struct pt_regs *));
+
static inline int nr_legacy_irqs(void)
{
return 0;
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index a1020285ea75..81bbfa3a035b 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -2,6 +2,8 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
+extern void arch_irq_work_raise(void);
+
static inline bool arch_irq_work_has_interrupt(void)
{
return true;
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index ff328e5bbb75..b57b9b1e4344 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -12,15 +12,13 @@
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
- * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
* order:
* Masking debug exceptions causes all other exceptions to be masked too.
- * Masking SError masks irq, but not debug exceptions. Masking irqs has no
- * side effects for other flags. Keeping to this order makes it easier for
- * entry.S to know which exceptions should be unmasked.
- *
- * FIQ is never expected, but we mask it when we disable debug exceptions, and
- * unmask it at all other times.
+ * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
+ * always masked and unmasked together, and have no side effects for other
+ * flags. Keeping to this order makes it easier for entry.S to know which
+ * exceptions should be unmasked.
*/
/*
@@ -35,7 +33,7 @@ static inline void arch_local_irq_enable(void)
}
asm volatile(ALTERNATIVE(
- "msr daifclr, #2 // arch_local_irq_enable",
+ "msr daifclr, #3 // arch_local_irq_enable",
__msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
@@ -54,7 +52,7 @@ static inline void arch_local_irq_disable(void)
}
asm volatile(ALTERNATIVE(
- "msr daifset, #2 // arch_local_irq_disable",
+ "msr daifset, #3 // arch_local_irq_disable",
__msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
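For reference, the immediate in "msr daifset/daifclr, #imm" selects bits D=8, A=4, I=2, F=1. The macro names below are illustrative only (not kernel definitions), but show why #2 becomes #3 and enable_da_f becomes enable_da once IRQ and FIQ are managed as a pair:

#define DBG_BIT		(1u << 3)	/* 8: debug exceptions */
#define ABT_BIT		(1u << 2)	/* 4: SError (asynchronous abort) */
#define IRQ_BIT		(1u << 1)	/* 2: IRQ */
#define FIQ_BIT		(1u << 0)	/* 1: FIQ */

/* "msr daifclr, #3" unmasks IRQ_BIT | FIQ_BIT together, while */
/* enable_da ("msr daifclr, #(8 | 4)") leaves both of them masked. */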
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 0aabc3be9a75..b943879c1c24 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -243,8 +243,10 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging() mte_enable_kernel()
+#define arch_enable_tagging_sync() mte_enable_kernel_sync()
+#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_set_tagging_report_once(state) mte_set_report_once(state)
+#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 7ab500e2ad17..4acf8bf41cad 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -77,7 +77,8 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
} while (curr != end);
}
-void mte_enable_kernel(void);
+void mte_enable_kernel_sync(void);
+void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);
void mte_set_report_once(bool state);
@@ -104,7 +105,11 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
}
-static inline void mte_enable_kernel(void)
+static inline void mte_enable_kernel_sync(void)
+{
+}
+
+static inline void mte_enable_kernel_async(void)
{
}
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 9b557a457f24..bc88a1ced0d7 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -39,16 +39,15 @@ void mte_free_tag_storage(char *storage);
void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
-void flush_mte_state(void);
+void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
+void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-void mte_assign_mem_tag_range(void *addr, size_t size);
-
#else /* CONFIG_ARM64_MTE */
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
@@ -60,12 +59,15 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
-static inline void flush_mte_state(void)
+static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)
{
}
+static inline void mte_suspend_enter(void)
+{
+}
static inline void mte_suspend_exit(void)
{
}
@@ -84,11 +86,51 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
return -EIO;
}
-static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+#endif /* CONFIG_ARM64_MTE */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DECLARE_STATIC_KEY_FALSE(mte_async_mode);
+
+static inline bool system_uses_mte_async_mode(void)
{
+ return static_branch_unlikely(&mte_async_mode);
}
-#endif /* CONFIG_ARM64_MTE */
+void mte_check_tfsr_el1(void);
+
+static inline void mte_check_tfsr_entry(void)
+{
+ mte_check_tfsr_el1();
+}
+
+static inline void mte_check_tfsr_exit(void)
+{
+ /*
+ * The asynchronous faults are sync'ed automatically with
+ * TFSR_EL1 on kernel entry but for exit an explicit dsb()
+ * is required.
+ */
+ dsb(nsh);
+ isb();
+
+ mte_check_tfsr_el1();
+}
+#else
+static inline bool system_uses_mte_async_mode(void)
+{
+ return false;
+}
+static inline void mte_check_tfsr_el1(void)
+{
+}
+static inline void mte_check_tfsr_entry(void)
+{
+}
+static inline void mte_check_tfsr_exit(void)
+{
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 3c6a7f5988b1..31fbab3d6f99 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -27,7 +27,10 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
- __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
+ pudval_t pudval = PUD_TYPE_TABLE;
+
+ pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;
+ __pud_populate(pudp, __pa(pmdp), pudval);
}
#else
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
@@ -45,7 +48,10 @@ static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
- __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE);
+ p4dval_t p4dval = P4D_TYPE_TABLE;
+
+ p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN;
+ __p4d_populate(p4dp, __pa(pudp), p4dval);
}
#else
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
@@ -70,16 +76,15 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
- /*
- * The pmd must be loaded with the physical address of the PTE table
- */
- __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
+ VM_BUG_ON(mm != &init_mm);
+ __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
+ VM_BUG_ON(mm == &init_mm);
+ __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 42442a0ae2ab..b82575a33f8b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -94,6 +94,17 @@
/*
* Hardware page table definitions.
*
+ * Level 0 descriptor (P4D).
+ */
+#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0)
+#define P4D_TABLE_BIT (_AT(p4dval_t, 1) << 1)
+#define P4D_TYPE_MASK (_AT(p4dval_t, 3) << 0)
+#define P4D_TYPE_SECT (_AT(p4dval_t, 1) << 0)
+#define P4D_SECT_RDONLY (_AT(p4dval_t, 1) << 7) /* AP[2] */
+#define P4D_TABLE_PXN (_AT(p4dval_t, 1) << 59)
+#define P4D_TABLE_UXN (_AT(p4dval_t, 1) << 60)
+
+/*
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
@@ -101,6 +112,8 @@
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
+#define PUD_TABLE_PXN (_AT(pudval_t, 1) << 59)
+#define PUD_TABLE_UXN (_AT(pudval_t, 1) << 60)
/*
* Level 2 descriptor (PMD).
@@ -122,6 +135,8 @@
#define PMD_SECT_CONT (_AT(pmdval_t, 1) << 52)
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
+#define PMD_TABLE_PXN (_AT(pmdval_t, 1) << 59)
+#define PMD_TABLE_UXN (_AT(pmdval_t, 1) << 60)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 9a65fb528110..fab2f573f7a4 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -87,12 +87,13 @@ extern bool arm64_use_ng_mappings;
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_READONLY_EXEC
+#define __P100 PAGE_EXECONLY
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
@@ -101,7 +102,7 @@ extern bool arm64_use_ng_mappings;
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
+#define __S100 PAGE_EXECONLY
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 47027796c2f9..0b10204e72fc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -113,11 +113,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
-
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
@@ -130,12 +131,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
- * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
- * set.
+ * p??_access_permitted() is true for valid user mappings (PTE_USER
+ * bit set, subject to the write permission check). For execute-only
+ * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
+ * not set) must return false. PROT_NONE mappings do not have the
+ * PTE_VALID bit set.
*/
#define pte_access_permitted(pte, write) \
- (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+ (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
@@ -995,6 +998,18 @@ static inline bool arch_wants_old_prefaulted_pte(void)
}
#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ return prot;
+
+ if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+ return prot;
+
+ return PAGE_READONLY_EXEC;
+}
+
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
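A small userspace sketch of the mapping type this enables (the helper name is made up; only standard mmap(2) is assumed): with ARM64_HAS_EPAN the kernel keeps the mapping execute-only, while on CPUs without EPAN arch_filter_pgprot() above silently falls back to read+execute.

#include <stddef.h>
#include <sys/mman.h>

/* Request an execute-only anonymous mapping (PROT_EXEC without PROT_READ). */
static void *map_exec_only(size_t len)
{
	void *p = mmap(NULL, len, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}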
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index b112a11e9302..d50416be99be 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -3,6 +3,7 @@
#define __ASM_POINTER_AUTH_H
#include <linux/bitops.h>
+#include <linux/prctl.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
@@ -34,6 +35,25 @@ struct ptrauth_keys_kernel {
struct ptrauth_key apia;
};
+#define __ptrauth_key_install_nosync(k, v) \
+do { \
+ struct ptrauth_key __pki_v = (v); \
+ write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \
+ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
+} while (0)
+
+static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
+{
+ if (system_supports_address_auth()) {
+ __ptrauth_key_install_nosync(APIB, keys->apib);
+ __ptrauth_key_install_nosync(APDA, keys->apda);
+ __ptrauth_key_install_nosync(APDB, keys->apdb);
+ }
+
+ if (system_supports_generic_auth())
+ __ptrauth_key_install_nosync(APGA, keys->apga);
+}
+
static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
@@ -45,14 +65,9 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
if (system_supports_generic_auth())
get_random_bytes(&keys->apga, sizeof(keys->apga));
-}
-#define __ptrauth_key_install_nosync(k, v) \
-do { \
- struct ptrauth_key __pki_v = (v); \
- write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \
- write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
-} while (0)
+ ptrauth_keys_install_user(keys);
+}
static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
{
@@ -71,6 +86,10 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
+extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+ unsigned long enabled);
+extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
+
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
{
return ptrauth_clear_pac(ptr);
@@ -85,8 +104,23 @@ static __always_inline void ptrauth_enable(void)
isb();
}
-#define ptrauth_thread_init_user(tsk) \
- ptrauth_keys_init_user(&(tsk)->thread.keys_user)
+#define ptrauth_suspend_exit() \
+ ptrauth_keys_install_user(&current->thread.keys_user)
+
+#define ptrauth_thread_init_user() \
+ do { \
+ ptrauth_keys_init_user(&current->thread.keys_user); \
+ \
+ /* enable all keys */ \
+ if (system_supports_address_auth()) \
+ set_task_sctlr_el1(current->thread.sctlr_user | \
+ SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \
+ } while (0)
+
+#define ptrauth_thread_switch_user(tsk) \
+ ptrauth_keys_install_user(&(tsk)->thread.keys_user)
+
#define ptrauth_thread_init_kernel(tsk) \
ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
#define ptrauth_thread_switch_kernel(tsk) \
@@ -95,10 +129,17 @@ static __always_inline void ptrauth_enable(void)
#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
+#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
+#define ptrauth_get_enabled_keys(tsk) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
-#define ptrauth_thread_init_user(tsk)
+#define ptrauth_suspend_exit()
+#define ptrauth_thread_init_user()
#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_user(tsk)
#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH */
+#define PR_PAC_ENABLED_KEYS_MASK \
+ (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
+
#endif /* __ASM_POINTER_AUTH_H */
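The new ptrauth_{set,get}_enabled_keys() hooks back the PR_PAC_SET_ENABLED_KEYS / PR_PAC_GET_ENABLED_KEYS prctls added alongside this series. A hedged userspace sketch (assuming uapi headers that already expose these constants):

#include <sys/prctl.h>

/* Disable the IA, DA and DB keys for this task, leaving only IB enabled. */
static int keep_only_apib(void)
{
	unsigned long all = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
			    PR_PAC_APDAKEY | PR_PAC_APDBKEY;

	return prctl(PR_PAC_SET_ENABLED_KEYS, all, PR_PAC_APIBKEY, 0, 0);
}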
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index efc10e9041a0..9df3feeee890 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -151,11 +151,15 @@ struct thread_struct {
struct ptrauth_keys_kernel keys_kernel;
#endif
#ifdef CONFIG_ARM64_MTE
- u64 sctlr_tcf0;
u64 gcr_user_excl;
#endif
+ u64 sctlr_user;
};
+#define SCTLR_USER_MASK \
+ (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
+ SCTLR_EL1_TCF0_MASK)
+
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
@@ -247,6 +251,8 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
+void set_task_sctlr_el1(u64 sctlr);
+
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
@@ -303,6 +309,11 @@ extern void __init minsigstksz_setup(void);
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
+/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
+#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled) \
+ ptrauth_set_enabled_keys(tsk, keys, enabled)
+#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)
+
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 38187f74e089..b1dd7ecff7ef 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -23,7 +23,7 @@ struct ptdump_info {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_PTDUMP_DEBUGFS
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
const char *name) { }
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index bcb01ca15325..0e357757c0cc 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -145,6 +145,7 @@ bool cpus_are_stuck_in_kernel(void);
extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
+extern void panic_smp_self_stop(void);
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index eb29b1fe8255..4b33ca620679 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -148,27 +148,7 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
return false;
}
-static inline void start_backtrace(struct stackframe *frame,
- unsigned long fp, unsigned long pc)
-{
- frame->fp = fp;
- frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame->graph = 0;
-#endif
-
- /*
- * Prime the first unwind.
- *
- * In unwind_frame() we'll check that the FP points to a valid stack,
- * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
- * treated as a transition to whichever stack that happens to be. The
- * prev_fp value won't be used, but we set it to 0 such that it is
- * definitely not an accessible stack address.
- */
- bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
- frame->prev_fp = 0;
- frame->prev_type = STACK_TYPE_UNKNOWN;
-}
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc);
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d4a5fca984c3..b31ac5ccc8ab 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -475,9 +475,15 @@
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
+#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
+#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
+#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
+#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
+#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
@@ -565,8 +571,10 @@
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_ENIA_SHIFT 31
+
#define SCTLR_ELx_ITFSB (BIT(37))
-#define SCTLR_ELx_ENIA (BIT(31))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))
@@ -597,6 +605,7 @@
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN (BIT(57))
#define SCTLR_EL1_ATA0 (BIT(42))
#define SCTLR_EL1_TCF0_SHIFT 38
@@ -637,7 +646,7 @@
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
- SCTLR_EL1_RES1)
+ SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0deb88467111..b5f08621fa29 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -20,6 +20,7 @@
#include <asm/cpufeature.h>
#include <asm/mmu.h>
+#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>
@@ -188,6 +189,23 @@ static inline void __uaccess_enable_tco(void)
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
+/*
+ * These functions disable tag checking only if in MTE async mode
+ * since the sync mode generates exceptions synchronously and the
+ * nofault or load_unaligned_zeropad can handle them.
+ */
+static inline void __uaccess_disable_tco_async(void)
+{
+ if (system_uses_mte_async_mode())
+ __uaccess_disable_tco();
+}
+
+static inline void __uaccess_enable_tco_async(void)
+{
+ if (system_uses_mte_async_mode())
+ __uaccess_enable_tco();
+}
+
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
@@ -307,8 +325,10 @@ do { \
do { \
int __gkn_err = 0; \
\
+ __uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(dst)), \
(__force type *)(src), __gkn_err); \
+ __uaccess_disable_tco_async(); \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -380,8 +400,10 @@ do { \
do { \
int __pkn_err = 0; \
\
+ __uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(src)), \
(__force type *)(dst), __pkn_err); \
+ __uaccess_disable_tco_async(); \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 631ab1281633..4b4c0dac0e14 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -83,11 +83,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
*/
isb();
asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
- /*
- * This isb() is required to prevent that the seq lock is
- * speculated.#
- */
- isb();
+ arch_counter_enforce_ordering(res);
return res;
}
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index ea487218db79..2dcb104c645b 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -55,6 +55,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, tmp;
+ __uaccess_enable_tco_async();
+
/* Load word from unaligned pointer addr */
asm(
"1: ldr %0, %3\n"
@@ -76,6 +78,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
+ __uaccess_disable_tco_async();
+
return ret;
}
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a36e2fc330d4..e797603e55b7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -43,6 +43,7 @@ int main(void)
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
+ DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
@@ -95,6 +96,8 @@ int main(void)
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+ DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
+ DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
@@ -147,10 +150,6 @@ int main(void)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
- DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
- DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
- DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
- DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
BLANK();
#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e5281e1c8f1d..76c60b3cda53 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -808,6 +808,12 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
reg->name,
ftrp->shift + ftrp->width - 1,
ftrp->shift, str, tmp);
+ } else if ((ftr_mask & reg->override->val) == ftr_mask) {
+ reg->override->val &= ~ftr_mask;
+ pr_warn("%s[%d:%d]: impossible override, ignored\n",
+ reg->name,
+ ftrp->shift + ftrp->width - 1,
+ ftrp->shift);
}
val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -1619,7 +1625,6 @@ int get_cpu_with_amu_feat(void)
}
#endif
-#ifdef CONFIG_ARM64_VHE
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
return is_kernel_in_hyp_mode();
@@ -1638,7 +1643,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
-#endif
static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
{
@@ -1823,6 +1827,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+ {
+ .desc = "Enhanced Privileged Access Never",
+ .capability = ARM64_HAS_EPAN,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR1_EL1,
+ .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 3,
+ },
+#endif /* CONFIG_ARM64_EPAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
{
.desc = "LSE atomic instructions",
@@ -1841,7 +1857,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_no_hw_prefetch,
},
-#ifdef CONFIG_ARM64_VHE
{
.desc = "Virtualization Host Extensions",
.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -1849,7 +1864,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = runs_at_el2,
.cpu_enable = cpu_copy_el2regs,
},
-#endif /* CONFIG_ARM64_VHE */
{
.desc = "32-bit EL0 Support",
.capability = ARM64_HAS_32BIT_EL0,
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 9d3588450473..a1ec351c36bd 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
+
+ mte_check_tfsr_entry();
}
/*
@@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
+ mte_check_tfsr_exit();
+
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
@@ -293,6 +297,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
asmlinkage void noinstr exit_to_user_mode(void)
{
+ mte_check_tfsr_exit();
+
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
user_enter_irqoff();
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 2ca395c25448..3ecec60d3295 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -48,6 +48,11 @@ SYM_FUNC_START(sve_get_vl)
ret
SYM_FUNC_END(sve_get_vl)
+SYM_FUNC_START(sve_set_vq)
+ sve_load_vq x0, x1, x2
+ ret
+SYM_FUNC_END(sve_set_vq)
+
/*
* Load SVE state from FPSIMD state.
*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6acfc5e6b5e0..4ac5455c0ead 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -249,7 +249,29 @@ alternative_else_nop_endif
check_mte_async_tcf x22, x23
apply_ssbd 1, x22, x23
- ptrauth_keys_install_kernel tsk, x20, x22, x23
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * Enable IA for in-kernel PAC if the task had it disabled. Although
+ * this could be implemented with an unconditional MRS which would avoid
+ * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+ *
+ * Install the kernel IA key only if IA was enabled in the task. If IA
+ * was disabled on kernel exit then we would have left the kernel IA
+ * installed so there is no need to install it again.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ orr x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+2:
+ isb
+alternative_else_nop_endif
+#endif
mte_set_kernel_gcr x22, x23
@@ -353,8 +375,26 @@ alternative_else_nop_endif
3:
scs_save tsk, x0
- /* No kernel C function calls after this as user keys are set. */
- ptrauth_keys_install_user tsk, x0, x1, x2
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * IA was enabled for in-kernel PAC. Disable it now if needed, or
+ * alternatively install the user's IA. All other per-task keys and
+ * SCTLR bits were updated on task switch.
+ *
+ * No kernel C function calls after this.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ __ptrauth_keys_install_user tsk, x0, x1, x2
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
mte_set_user_gcr tsk, x0, x1
@@ -493,28 +533,14 @@ tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
- .macro irq_handler
- ldr_l x1, handle_arch_irq
+ .macro irq_handler, handler:req
+ ldr_l x1, \handler
mov x0, sp
irq_stack_entry
blr x1
irq_stack_exit
.endm
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- /*
- * Set res to 0 if irqs were unmasked in interrupted context.
- * Otherwise set res to non-0 value.
- */
- .macro test_irqs_unmasked res:req, pmr:req
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- sub \res, \pmr, #GIC_PRIO_IRQON
-alternative_else
- mov \res, xzr
-alternative_endif
- .endm
-#endif
-
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -533,6 +559,47 @@ alternative_endif
#endif
.endm
+ .macro el1_interrupt_handler, handler:req
+ gic_prio_irq_setup pmr=x20, tmp=x1
+ enable_da
+
+ mov x0, sp
+ bl enter_el1_irq_or_nmi
+
+ irq_handler \handler
+
+#ifdef CONFIG_PREEMPTION
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ /*
+ * DA were cleared at start of handling, and IF are cleared by
+ * the GIC irqchip driver using gic_arch_enable_irqs() for
+ * normal IRQs. If anything is set, it means we come back from
+ * an NMI instead of a normal IRQ, so skip preemption
+ */
+ mrs x0, daif
+ orr x24, x24, x0
+alternative_else_nop_endif
+ cbnz x24, 1f // preempt count != 0 || NMI return path
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
+1:
+#endif
+
+ mov x0, sp
+ bl exit_el1_irq_or_nmi
+ .endm
+
+ .macro el0_interrupt_handler, handler:req
+ gic_prio_irq_setup pmr=x20, tmp=x0
+ user_exit_irqoff
+ enable_da
+
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+1:
+ irq_handler \handler
+ .endm
+
.text
/*
@@ -549,18 +616,18 @@ SYM_CODE_START(vectors)
kernel_ventry 1, sync // Synchronous EL1h
kernel_ventry 1, irq // IRQ EL1h
- kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, fiq // FIQ EL1h
kernel_ventry 1, error // Error EL1h
kernel_ventry 0, sync // Synchronous 64-bit EL0
kernel_ventry 0, irq // IRQ 64-bit EL0
- kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, fiq // FIQ 64-bit EL0
kernel_ventry 0, error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
#else
kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
@@ -626,12 +693,6 @@ SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)
-#ifdef CONFIG_COMPAT
-SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
- inv_entry 0, BAD_FIQ, 32
-SYM_CODE_END(el0_fiq_invalid_compat)
-#endif
-
SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)
@@ -662,35 +723,16 @@ SYM_CODE_END(el1_sync)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
- gic_prio_irq_setup pmr=x20, tmp=x1
- enable_da_f
-
- mov x0, sp
- bl enter_el1_irq_or_nmi
-
- irq_handler
-
-#ifdef CONFIG_PREEMPTION
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- /*
- * DA_F were cleared at start of handling. If anything is set in DAIF,
- * we come back from an NMI, so skip preemption
- */
- mrs x0, daif
- orr x24, x24, x0
-alternative_else_nop_endif
- cbnz x24, 1f // preempt count != 0 || NMI return path
- bl arm64_preempt_schedule_irq // irq en/disable is done inside
-1:
-#endif
-
- mov x0, sp
- bl exit_el1_irq_or_nmi
-
+ el1_interrupt_handler handle_arch_irq
kernel_exit 1
SYM_CODE_END(el1_irq)
+SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
+ kernel_entry 1
+ el1_interrupt_handler handle_arch_fiq
+ kernel_exit 1
+SYM_CODE_END(el1_fiq)
+
/*
* EL0 mode handlers.
*/
@@ -717,6 +759,11 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
b el0_irq_naked
SYM_CODE_END(el0_irq_compat)
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
+ kernel_entry 0, 32
+ b el0_fiq_naked
+SYM_CODE_END(el0_fiq_compat)
+
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
@@ -727,18 +774,17 @@ SYM_CODE_END(el0_error_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
- gic_prio_irq_setup pmr=x20, tmp=x0
- user_exit_irqoff
- enable_da_f
-
- tbz x22, #55, 1f
- bl do_el0_irq_bp_hardening
-1:
- irq_handler
-
+ el0_interrupt_handler handle_arch_irq
b ret_to_user
SYM_CODE_END(el0_irq)
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
+ kernel_entry 0
+el0_fiq_naked:
+ el0_interrupt_handler handle_arch_fiq
+ b ret_to_user
+SYM_CODE_END(el0_fiq)
+
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
@@ -759,7 +805,7 @@ el0_error_naked:
mov x0, sp
mov x1, x25
bl do_serror
- enable_da_f
+ enable_da
b ret_to_user
SYM_CODE_END(el0_error)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 062b21f30f94..ad3dd34a83cf 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void)
*/
static void get_cpu_fpsimd_context(void)
{
- preempt_disable();
+ local_bh_disable();
__get_cpu_fpsimd_context();
}
@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void)
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
- preempt_enable();
+ local_bh_enable();
}
static bool have_cpu_fpsimd_context(void)
@@ -285,7 +285,7 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(&current->thread),
&current->thread.uw.fpsimd_state.fpsr,
sve_vq_from_vl(current->thread.sve_vl) - 1);
@@ -307,7 +307,8 @@ static void fpsimd_save(void)
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ test_thread_flag(TIF_SVE)) {
if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
@@ -926,9 +927,8 @@ void fpsimd_release_task(struct task_struct *dead_task)
* Trapped SVE access
*
* Storage is allocated for the full SVE state, the current FPSIMD
- * register contents are migrated across, and TIF_SVE is set so that
- * the SVE access trap will be disabled the next time this task
- * reaches ret_to_user.
+ * register contents are migrated across, and the access trap is
+ * disabled.
*
* TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
* would have disabled the SVE access trap for userspace during
@@ -946,15 +946,24 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
get_cpu_fpsimd_context();
- fpsimd_save();
-
- /* Force ret_to_user to reload the registers: */
- fpsimd_flush_task_state(current);
-
- fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
+ /*
+ * Convert the FPSIMD state to SVE, zeroing all the state that
+ * is not shared with FPSIMD. If (as is likely) the current
+ * state is live in the registers then do this there and
+ * update our metadata for the current task including
+ * disabling the trap, otherwise update our in-memory copy.
+ */
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
+ sve_flush_live();
+ fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_to_sve(current);
+ }
+
put_cpu_fpsimd_context();
}
@@ -1092,7 +1101,7 @@ void fpsimd_preserve_current_state(void)
void fpsimd_signal_preserve_current_state(void)
{
fpsimd_preserve_current_state();
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (test_thread_flag(TIF_SVE))
sve_to_fpsimd(current);
}
@@ -1181,7 +1190,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
get_cpu_fpsimd_context();
current->thread.uw.fpsimd_state = *state;
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (test_thread_flag(TIF_SVE))
fpsimd_to_sve(current);
task_fpsimd_load();
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 840bda1869e9..96873dfa67fd 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
SYM_FUNC_START(init_kernel_el)
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
-
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
isb
mov_q x0, INIT_PSTATE_EL1
msr spsr_el1, x0
@@ -504,9 +503,43 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr vbar_el2, x0
isb
+ /*
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
+ * making it impossible to start in nVHE mode. Is that
+ * compliant with the architecture? Absolutely not!
+ */
+ mrs x0, hcr_el2
+ and x0, x0, #HCR_E2H
+ cbz x0, 1f
+
+ /* Switching to VHE requires a sane SCTLR_EL1 as a start */
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr_s SYS_SCTLR_EL12, x0
+
+ /*
+ * Force an eret into a helper "function", and let it return
+ * to our original caller... This makes sure that we have
+ * initialised the basic PSTATE state.
+ */
+ mov x0, #INIT_PSTATE_EL2
+ msr spsr_el1, x0
+ adr x0, __cpu_stick_to_vhe
+ msr elr_el1, x0
+ eret
+
+1:
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
+
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2
eret
+
+__cpu_stick_to_vhe:
+ mov x0, #HVC_VHE_RESTART
+ hvc #0
+ mov x0, #BOOT_CPU_MODE_EL2
+ ret
SYM_FUNC_END(init_kernel_el)
/*
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5eccbd62fec8..74ad3db061d1 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -27,12 +27,12 @@ SYM_CODE_START(__hyp_stub_vectors)
ventry el2_fiq_invalid // FIQ EL2t
ventry el2_error_invalid // Error EL2t
- ventry el2_sync_invalid // Synchronous EL2h
+ ventry elx_sync // Synchronous EL2h
ventry el2_irq_invalid // IRQ EL2h
ventry el2_fiq_invalid // FIQ EL2h
ventry el2_error_invalid // Error EL2h
- ventry el1_sync // Synchronous 64-bit EL1
+ ventry elx_sync // Synchronous 64-bit EL1
ventry el1_irq_invalid // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error_invalid // Error 64-bit EL1
@@ -45,7 +45,7 @@ SYM_CODE_END(__hyp_stub_vectors)
.align 11
-SYM_CODE_START_LOCAL(el1_sync)
+SYM_CODE_START_LOCAL(elx_sync)
cmp x0, #HVC_SET_VECTORS
b.ne 1f
msr vbar_el2, x1
@@ -71,7 +71,7 @@ SYM_CODE_START_LOCAL(el1_sync)
9: mov x0, xzr
eret
-SYM_CODE_END(el1_sync)
+SYM_CODE_END(elx_sync)
// nVHE? No way! Give me the real thing!
SYM_CODE_START_LOCAL(mutate_to_vhe)
@@ -224,7 +224,6 @@ SYM_FUNC_END(__hyp_reset_vectors)
* Entry point to switch to VHE if deemed capable
*/
SYM_FUNC_START(switch_to_vhe)
-#ifdef CONFIG_ARM64_VHE
// Need to have booted at EL2
adr_l x1, __boot_cpu_mode
ldr w0, [x1]
@@ -240,6 +239,5 @@ SYM_FUNC_START(switch_to_vhe)
mov x0, #HVC_VHE_RESTART
hvc #0
1:
-#endif
ret
SYM_FUNC_END(switch_to_vhe)
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 83f1c4b92095..e628c8ce1ffe 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -25,14 +25,26 @@ struct ftr_set_desc {
struct {
char name[FTR_DESC_FIELD_LEN];
u8 shift;
+ bool (*filter)(u64 val);
} fields[];
};
+static bool __init mmfr1_vh_filter(u64 val)
+{
+ /*
+ * If we ever reach this point while running VHE, we're
+ * guaranteed to be on one of these funky, VHE-stuck CPUs. If
+ * the user was trying to force nVHE on us, proceed with
+ * attitude adjustment.
+ */
+ return !(is_kernel_in_hyp_mode() && val == 0);
+}
+
static const struct ftr_set_desc mmfr1 __initconst = {
.name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override,
.fields = {
- { "vh", ID_AA64MMFR1_VHE_SHIFT },
+ { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
{}
},
};
@@ -124,6 +136,18 @@ static void __init match_options(const char *cmdline)
if (find_field(cmdline, regs[i], f, &v))
continue;
+ /*
+ * If an override gets filtered out, advertise
+ * it by setting the value to 0xf, but
+ * clearing the mask... Yes, this is fragile.
+ */
+ if (regs[i]->fields[f].filter &&
+ !regs[i]->fields[f].filter(v)) {
+ regs[i]->override->val |= mask;
+ regs[i]->override->mask &= ~mask;
+ continue;
+ }
+
regs[i]->override->val &= ~mask;
regs[i]->override->val |= (v << shift) & mask;
regs[i]->override->mask |= mask;
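
For illustration, with the filter above in place, booting one of these VHE-stuck CPUs with a command line override such as

    id_aa64mmfr1.vh=0

no longer forces nVHE: mmfr1_vh_filter() rejects the value, and match_options() neutralises the override (value bits set, mask cleared) rather than applying it.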
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index dfb1feab867d..bda49430c9ea 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -71,13 +71,44 @@ static void init_irq_stacks(void)
}
#endif
+static void default_handle_irq(struct pt_regs *regs)
+{
+ panic("IRQ taken without a root IRQ handler\n");
+}
+
+static void default_handle_fiq(struct pt_regs *regs)
+{
+ panic("FIQ taken without a root FIQ handler\n");
+}
+
+void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq;
+void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq;
+
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+ if (handle_arch_irq != default_handle_irq)
+ return -EBUSY;
+
+ handle_arch_irq = handle_irq;
+ pr_info("Root IRQ handler: %ps\n", handle_irq);
+ return 0;
+}
+
+int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *))
+{
+ if (handle_arch_fiq != default_handle_fiq)
+ return -EBUSY;
+
+ handle_arch_fiq = handle_fiq;
+ pr_info("Root FIQ handler: %ps\n", handle_fiq);
+ return 0;
+}
+
void __init init_IRQ(void)
{
init_irq_stacks();
init_irq_scs();
irqchip_init();
- if (!handle_arch_irq)
- panic("No interrupt controller found.");
if (system_uses_irq_prio_masking()) {
/*
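
For illustration, a minimal sketch of how a root interrupt controller driver might use the registration functions added above; the driver names are hypothetical and the prototypes are assumed to be exposed through the arch irq header:

#include <linux/init.h>
#include <linux/ptrace.h>
#include <asm/irq.h>            /* assumed home of the new prototypes */

static void my_gic_handle_irq(struct pt_regs *regs)
{
        /* acknowledge, dispatch and EOI Group 1 interrupts here */
}

static void my_gic_handle_fiq(struct pt_regs *regs)
{
        /* Group 0 interrupts arrive here once FIQs are routed to the kernel */
}

static int __init my_gic_init(void)
{
        int err = set_handle_irq(my_gic_handle_irq);

        if (!err)
                err = set_handle_fiq(my_gic_handle_fiq);
        return err;     /* -EBUSY if a root handler was already installed */
}

Either call fails with -EBUSY once a real handler has replaced the default panic stubs, matching the checks in irq.c above.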
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 27f8939deb1b..341342b207f6 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -128,15 +128,17 @@ u64 __init kaslr_early_init(void)
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
- if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
- IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
+ (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+ IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
/*
- * KASAN does not expect the module region to intersect the
- * vmalloc region, since shadow memory is allocated for each
- * module at load time, whereas the vmalloc region is shadowed
- * by KASAN zero pages. So keep modules out of the vmalloc
- * region if KASAN is enabled, and put the kernel well within
- * 4 GB of the module region.
+ * KASAN without KASAN_VMALLOC does not expect the module region
+ * to intersect the vmalloc region, since shadow memory is
+ * allocated for each module at load time, whereas the vmalloc
+ * region is shadowed by KASAN zero pages. So keep modules
+ * out of the vmalloc region if KASAN is enabled without
+ * KASAN_VMALLOC, and put the kernel well within 4 GB of the
+ * module region.
*/
return offset % SZ_2G;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fe21e0f06492..b5ec010c481f 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -40,14 +40,16 @@ void *module_alloc(unsigned long size)
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+ (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
/*
- * KASAN can only deal with module allocations being served
- * from the reserved module region, since the remainder of
- * the vmalloc region is already backed by zero shadow pages,
- * and punching holes into it is non-trivial. Since the module
- * region is not randomized when KASAN is enabled, it is even
+ * KASAN without KASAN_VMALLOC can only deal with module
+ * allocations being served from the reserved module region,
+ * since the remainder of the vmalloc region is already
+ * backed by zero shadow pages, and punching holes into it
+ * is non-trivial. Since the module region is not randomized
+ * when KASAN is enabled without KASAN_VMALLOC, it is even
* less likely that the module region gets exhausted, so we
* can simply omit this fallback in that case.
*/
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index b3c70a612c7a..125a10e413e9 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -26,6 +26,12 @@ u64 gcr_kernel_excl __ro_after_init;
static bool report_fault_once = true;
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DEFINE_STATIC_KEY_FALSE(mte_async_mode);
+EXPORT_SYMBOL_GPL(mte_async_mode);
+#endif
+
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@@ -107,13 +113,45 @@ void mte_init_tags(u64 max_tag)
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
-void mte_enable_kernel(void)
+static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+ sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
isb();
+
+ pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
+}
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_enable_kernel_sync(void)
+{
+ /*
+	 * Warn if some PE has already enabled async mode by the
+	 * time we switch to sync mode here.
+ */
+ WARN_ONCE(system_uses_mte_async_mode(),
+ "MTE async mode enabled system wide!");
+
+ __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}
+void mte_enable_kernel_async(void)
+{
+ __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+
+ /*
+ * MTE async mode is set system wide by the first PE that
+ * executes this function.
+ *
+	 * Note: if KASAN ever gains the ability to switch between
+	 * sync and async modes at runtime, this strategy will need
+	 * to be revisited.
+ */
+ if (!system_uses_mte_async_mode())
+ static_branch_enable(&mte_async_mode);
+}
+#endif
+
void mte_set_report_once(bool state)
{
WRITE_ONCE(report_fault_once, state);
@@ -124,25 +162,28 @@ bool mte_report_once(void)
return READ_ONCE(report_fault_once);
}
-static void update_sctlr_el1_tcf0(u64 tcf0)
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_check_tfsr_el1(void)
{
- /* ISB required for the kernel uaccess routines */
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
- isb();
-}
+ u64 tfsr_el1;
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
- /*
- * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
- * optimisation. Disable preemption so that it does not see
- * the variable update before the SCTLR_EL1.TCF0 one.
- */
- preempt_disable();
- current->thread.sctlr_tcf0 = tcf0;
- update_sctlr_el1_tcf0(tcf0);
- preempt_enable();
+ if (!system_supports_mte())
+ return;
+
+ tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+ if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
+ /*
+ * Note: isb() is not required after this direct write
+ * because there is no indirect read subsequent to it
+ * (per ARM DDI 0487F.c table D13-1).
+ */
+ write_sysreg_s(0, SYS_TFSR_EL1);
+
+ kasan_report_async();
+ }
}
+#endif
static void update_gcr_el1_excl(u64 excl)
{
@@ -166,7 +207,7 @@ static void set_gcr_el1_excl(u64 excl)
*/
}
-void flush_mte_state(void)
+void mte_thread_init_user(void)
{
if (!system_supports_mte())
return;
@@ -176,19 +217,39 @@ void flush_mte_state(void)
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
- set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+ SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
{
+ /*
+ * Check if an async tag exception occurred at EL1.
+ *
+ * Note: On the context switch path we rely on the dsb() present
+ * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
+ * are synchronized before this point.
+ */
+ isb();
+ mte_check_tfsr_el1();
+}
+
+void mte_suspend_enter(void)
+{
if (!system_supports_mte())
return;
- /* avoid expensive SCTLR_EL1 accesses if no change */
- if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
- update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ /*
+ * The barriers are required to guarantee that the indirect writes
+ * to TFSR_EL1 are synchronized before we report the state.
+ */
+ dsb(nsh);
+ isb();
+
+ /* Report SYS_TFSR_EL1 before suspend entry */
+ mte_check_tfsr_el1();
}
void mte_suspend_exit(void)
@@ -201,7 +262,7 @@ void mte_suspend_exit(void)
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
- u64 tcf0;
+ u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
@@ -210,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
switch (arg & PR_MTE_TCF_MASK) {
case PR_MTE_TCF_NONE:
- tcf0 = SCTLR_EL1_TCF0_NONE;
+ sctlr |= SCTLR_EL1_TCF0_NONE;
break;
case PR_MTE_TCF_SYNC:
- tcf0 = SCTLR_EL1_TCF0_SYNC;
+ sctlr |= SCTLR_EL1_TCF0_SYNC;
break;
case PR_MTE_TCF_ASYNC:
- tcf0 = SCTLR_EL1_TCF0_ASYNC;
+ sctlr |= SCTLR_EL1_TCF0_ASYNC;
break;
default:
return -EINVAL;
}
if (task != current) {
- task->thread.sctlr_tcf0 = tcf0;
+ task->thread.sctlr_user = sctlr;
task->thread.gcr_user_excl = gcr_excl;
} else {
- set_sctlr_el1_tcf0(tcf0);
+ set_task_sctlr_el1(sctlr);
set_gcr_el1_excl(gcr_excl);
}
@@ -243,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
ret = incl << PR_MTE_TAG_SHIFT;
- switch (task->thread.sctlr_tcf0) {
+ switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
case SCTLR_EL1_TCF0_NONE:
ret |= PR_MTE_TCF_NONE;
break;
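
For context, a hedged userspace sketch of the control word that set_mte_ctrl() above decodes; it assumes uapi headers new enough to provide the PR_MTE_* and PR_TAGGED_ADDR_* prctl constants:

#include <sys/prctl.h>

/*
 * Sketch: request synchronous tag-check faults and allow IRG to generate
 * tags 1..15; set_mte_ctrl() inverts this include mask into GCR_EL1.Excl
 * and folds the TCF choice into SCTLR_EL1.TCF0 via set_task_sctlr_el1().
 */
static int enable_mte_sync(void)
{
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (0xfffeUL << PR_MTE_TAG_SHIFT);

        return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
}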
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 4658fcf88c2b..f594957e29bd 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -470,9 +470,8 @@ static inline u64 armv8pmu_read_evcntr(int idx)
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
- u64 val = 0;
+ u64 val = armv8pmu_read_evcntr(idx);
- val = armv8pmu_read_evcntr(idx);
if (armv8pmu_event_is_chained(event))
val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
return val;
@@ -520,7 +519,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- u64 value = 0;
+ u64 value;
if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index adb955fd9bdd..60901ab0a7fe 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -43,6 +43,69 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
get_random_bytes(&keys->apdb, sizeof(keys->apdb));
if (arg & PR_PAC_APGAKEY)
get_random_bytes(&keys->apga, sizeof(keys->apga));
+ ptrauth_keys_install_user(keys);
return 0;
}
+
+static u64 arg_to_enxx_mask(unsigned long arg)
+{
+ u64 sctlr_enxx_mask = 0;
+
+ WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK);
+ if (arg & PR_PAC_APIAKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENIA;
+ if (arg & PR_PAC_APIBKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENIB;
+ if (arg & PR_PAC_APDAKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENDA;
+ if (arg & PR_PAC_APDBKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENDB;
+ return sctlr_enxx_mask;
+}
+
+int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+ unsigned long enabled)
+{
+ u64 sctlr = tsk->thread.sctlr_user;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ if (is_compat_thread(task_thread_info(tsk)))
+ return -EINVAL;
+
+ if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
+ return -EINVAL;
+
+ sctlr &= ~arg_to_enxx_mask(keys);
+ sctlr |= arg_to_enxx_mask(enabled);
+ if (tsk == current)
+ set_task_sctlr_el1(sctlr);
+ else
+ tsk->thread.sctlr_user = sctlr;
+
+ return 0;
+}
+
+int ptrauth_get_enabled_keys(struct task_struct *tsk)
+{
+ int retval = 0;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ if (is_compat_thread(task_thread_info(tsk)))
+ return -EINVAL;
+
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
+ retval |= PR_PAC_APIAKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
+ retval |= PR_PAC_APIBKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
+ retval |= PR_PAC_APDAKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
+ retval |= PR_PAC_APDBKEY;
+
+ return retval;
+}
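
For context, a hedged userspace sketch of the per-thread key control that ptrauth_set_enabled_keys() backs; the PR_PAC_SET_ENABLED_KEYS and PR_PAC_GET_ENABLED_KEYS prctl numbers are assumed to be wired up elsewhere in the series:

#include <sys/prctl.h>

/* Sketch: disable the IA key for this thread and read back what is left. */
static long disable_ia_key(void)
{
        if (prctl(PR_PAC_SET_ENABLED_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
                return -1;

        /* returns the still-enabled PR_PAC_AP*KEY bits, as in
         * ptrauth_get_enabled_keys() */
        return prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0);
}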
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 85645b2b0c7a..d607c9912025 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -264,8 +264,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
* normal page fault.
*/
instruction_pointer_set(regs, (unsigned long) cur->addr);
- if (!instruction_pointer(regs))
- BUG();
+ BUG_ON(!instruction_pointer(regs));
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6e60aa3b5ea9..cbf52109583b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -86,7 +86,7 @@ static void noinstr __cpu_do_idle_irqprio(void)
unsigned long daif_bits;
daif_bits = read_sysreg(daif);
- write_sysreg(daif_bits | PSR_I_BIT, daif);
+ write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
/*
* Unmask PMR before going idle to make sure interrupts can
@@ -341,7 +341,6 @@ void flush_thread(void)
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
- flush_mte_state();
}
void release_thread(struct task_struct *dead_task)
@@ -531,6 +530,31 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
+static void update_sctlr_el1(u64 sctlr)
+{
+ /*
+ * EnIA must not be cleared while in the kernel as this is necessary for
+ * in-kernel PAC. It will be cleared on kernel exit if needed.
+ */
+ sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+ /* ISB required for the kernel uaccess routines when setting TCF0. */
+ isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+ /*
+	 * __switch_to() checks current->thread.sctlr_user as an
+ * optimisation. Disable preemption so that it does not see
+ * the variable update before the SCTLR_EL1 one.
+ */
+ preempt_disable();
+ current->thread.sctlr_user = sctlr;
+ update_sctlr_el1(sctlr);
+ preempt_enable();
+}
+
/*
* Thread switching.
*/
@@ -546,6 +570,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
+ ptrauth_thread_switch_user(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -561,6 +586,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
* registers.
*/
mte_thread_switch(next);
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (prev->thread.sctlr_user != next->thread.sctlr_user)
+ update_sctlr_el1(next->thread.sctlr_user);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -610,7 +638,8 @@ void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
- ptrauth_thread_init_user(current);
+ ptrauth_thread_init_user();
+ mte_thread_init_user();
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 170f42fd6101..eb2f73939b7b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -909,6 +909,38 @@ static int pac_mask_get(struct task_struct *target,
return membuf_write(&to, &uregs, sizeof(uregs));
}
+static int pac_enabled_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ long enabled_keys = ptrauth_get_enabled_keys(target);
+
+ if (IS_ERR_VALUE(enabled_keys))
+ return enabled_keys;
+
+ return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
+}
+
+static int pac_enabled_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ long enabled_keys = ptrauth_get_enabled_keys(target);
+
+ if (IS_ERR_VALUE(enabled_keys))
+ return enabled_keys;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
+ sizeof(long));
+ if (ret)
+ return ret;
+
+ return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
+ enabled_keys);
+}
+
#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
@@ -1074,6 +1106,7 @@ enum aarch64_regset {
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
+ REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
REGSET_PACA_KEYS,
REGSET_PACG_KEYS,
@@ -1160,6 +1193,14 @@ static const struct user_regset aarch64_regsets[] = {
.regset_get = pac_mask_get,
/* this cannot be set dynamically */
},
+ [REGSET_PAC_ENABLED_KEYS] = {
+ .core_note_type = NT_ARM_PAC_ENABLED_KEYS,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = pac_enabled_keys_get,
+ .set = pac_enabled_keys_set,
+ },
#ifdef CONFIG_CHECKPOINT_RESTORE
[REGSET_PACA_KEYS] = {
.core_note_type = NT_ARM_PACA_KEYS,
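
As an illustration of the new regset, a sketch of how a debugger might read it from a stopped tracee; NT_ARM_PAC_ENABLED_KEYS is assumed to reach userspace through the uapi elf header as part of this series:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>

/* Sketch: fetch the enabled-keys bitmask (same encoding as the prctl). */
static long read_pac_enabled_keys(pid_t pid)
{
        long keys = 0;
        struct iovec iov = { .iov_base = &keys, .iov_len = sizeof(keys) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_ENABLED_KEYS, &iov))
                return -1;
        return keys;
}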
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 357590beaabb..dcd7041b2b07 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -188,6 +188,7 @@ static void init_gic_priority_masking(void)
cpuflags = read_sysreg(daif);
WARN_ON(!(cpuflags & PSR_I_BIT));
+ WARN_ON(!(cpuflags & PSR_F_BIT));
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d55bdfb7789c..84b676bcf867 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -32,6 +32,30 @@
* add sp, sp, #0x10
*/
+
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc)
+{
+ frame->fp = fp;
+ frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame->graph = 0;
+#endif
+
+ /*
+ * Prime the first unwind.
+ *
+ * In unwind_frame() we'll check that the FP points to a valid stack,
+ * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+ * treated as a transition to whichever stack that happens to be. The
+ * prev_fp value won't be used, but we set it to 0 such that it is
+ * definitely not an accessible stack address.
+ */
+ bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+ frame->prev_fp = 0;
+ frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
/*
* Unwind from one frame record (A) to the next frame record (B).
*
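
For context, a sketch of an in-kernel caller of start_backtrace(); it assumes the unwind_frame() helper already provided by this file, which returns 0 while frames remain and a negative value when the walk terminates:

/* Sketch: walk and print the call chain of an interrupted context. */
static void print_stack_sketch(struct pt_regs *regs)
{
        struct stackframe frame;

        start_backtrace(&frame, regs->regs[29], regs->pc);
        do {
                printk(" %pS\n", (void *)frame.pc);
        } while (!unwind_frame(current, &frame));
}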
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7564891ffe1..e3f72df9509d 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -74,8 +74,9 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);
- /* Restore additional MTE-specific configuration */
+ /* Restore additional feature-specific configuration */
mte_suspend_exit();
+ ptrauth_suspend_exit();
}
/*
@@ -91,6 +92,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
unsigned long flags;
struct sleep_stack_data state;
+ /* Report any MTE async fault before going to suspend */
+ mte_suspend_enter();
+
/*
* From this point debug exceptions are disabled to prevent
* updates to mdscr register (saved and restored along with
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index cee5d04ea9ad..a61fc4f989b3 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -86,7 +86,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-static int __vdso_init(enum vdso_abi abi)
+static int __init __vdso_init(enum vdso_abi abi)
{
int i;
struct page **vdso_pagelist;
@@ -271,6 +271,14 @@ enum aarch32_map {
static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;
+static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ current->mm->context.sigpage = (void *)new_vma->vm_start;
+
+ return 0;
+}
+
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
.name = "[vectors]", /* ABI */
@@ -279,6 +287,7 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
+ .mremap = aarch32_sigpage_mremap,
},
[AA32_MAP_VVAR] = {
.name = "[vvar]",
@@ -299,34 +308,35 @@ static int aarch32_alloc_kuser_vdso_page(void)
if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
return 0;
- vdso_page = get_zeroed_page(GFP_ATOMIC);
+ vdso_page = get_zeroed_page(GFP_KERNEL);
if (!vdso_page)
return -ENOMEM;
memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
kuser_sz);
aarch32_vectors_page = virt_to_page(vdso_page);
- flush_dcache_page(aarch32_vectors_page);
return 0;
}
+#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long sigpage;
+ __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
+ void *sigpage;
- sigpage = get_zeroed_page(GFP_ATOMIC);
+ sigpage = (void *)__get_free_page(GFP_KERNEL);
if (!sigpage)
return -ENOMEM;
- memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
+ memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
- flush_dcache_page(aarch32_sig_page);
return 0;
}
-static int __aarch32_alloc_vdso_pages(void)
+static int __init __aarch32_alloc_vdso_pages(void)
{
if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f37d4e3830b7..871c82ab0a30 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -527,7 +527,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault;
- unsigned long vm_flags = VM_ACCESS_FLAGS;
+ unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
@@ -544,12 +544,28 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
if (user_mode(regs))
mm_flags |= FAULT_FLAG_USER;
+ /*
+ * vm_flags tells us what bits we must have in vma->vm_flags
+	 * for the fault to be benign; __do_page_fault() checks
+	 * vma->vm_flags & vm_flags and returns an error if the
+	 * intersection is empty.
+ */
if (is_el0_instruction_abort(esr)) {
+ /* It was exec fault */
vm_flags = VM_EXEC;
mm_flags |= FAULT_FLAG_INSTRUCTION;
} else if (is_write_abort(esr)) {
+ /* It was write fault */
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
+ } else {
+ /* It was read fault */
+ vm_flags = VM_READ;
+ /* Write implies read */
+ vm_flags |= VM_WRITE;
+ /* If EPAN is absent then exec implies read */
+ if (!cpus_have_const_cap(ARM64_HAS_EPAN))
+ vm_flags |= VM_EXEC;
}
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d8e66c78440e..61b52a92b8b6 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -79,7 +79,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd)
: kasan_alloc_zeroed_page(node);
- __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
}
return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
@@ -92,7 +92,7 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud)
: kasan_alloc_zeroed_page(node);
- __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
}
return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
@@ -214,15 +214,18 @@ static void __init kasan_init_shadow(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end;
+ u64 vmalloc_shadow_end;
phys_addr_t pa_start, pa_end;
u64 i;
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
- kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+ kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
+ vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
/*
* We are going to perform proper setup of shadow memory.
* At first we should unmap early shadow (clear_pgds() call below).
@@ -237,16 +240,22 @@ static void __init kasan_init_shadow(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
- early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+ early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
(void *)mod_shadow_start);
- kasan_populate_early_shadow((void *)kimg_shadow_end,
- (void *)KASAN_SHADOW_END);
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_early_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+ kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+ (void *)KASAN_SHADOW_END);
+ } else {
+ kasan_populate_early_shadow((void *)kimg_shadow_end,
+ (void *)KASAN_SHADOW_END);
+ if (kimg_shadow_start > mod_shadow_end)
+ kasan_populate_early_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
+ }
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = (void *)__phys_to_virt(pa_start);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5d9550fdb9cf..d563335ad43f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -39,6 +39,7 @@
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
+#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
@@ -185,10 +186,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
BUG_ON(pmd_sect(pmd));
if (pmd_none(pmd)) {
+ pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
phys_addr_t pte_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pmdval |= PMD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc(PAGE_SHIFT);
- __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmdp, pte_phys, pmdval);
pmd = READ_ONCE(*pmdp);
}
BUG_ON(pmd_bad(pmd));
@@ -259,10 +264,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
*/
BUG_ON(pud_sect(pud));
if (pud_none(pud)) {
+ pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
phys_addr_t pmd_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pudval |= PUD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc(PMD_SHIFT);
- __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, pudval);
pud = READ_ONCE(*pudp);
}
BUG_ON(pud_bad(pud));
@@ -306,10 +315,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
p4d_t p4d = READ_ONCE(*p4dp);
if (p4d_none(p4d)) {
+ p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
phys_addr_t pud_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ p4dval |= P4D_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc(PUD_SHIFT);
- __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, p4dval);
p4d = READ_ONCE(*p4dp);
}
BUG_ON(p4d_bad(p4d));
@@ -486,14 +499,24 @@ early_param("crashkernel", enable_crash_mem_map);
static void __init map_mem(pgd_t *pgdp)
{
+ static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
- int flags = 0;
+ int flags = NO_EXEC_MAPPINGS;
u64 i;
+ /*
+ * Setting hierarchical PXNTable attributes on table entries covering
+ * the linear region is only possible if it is guaranteed that no table
+ * entries at any level are being shared between the linear region and
+ * the vmalloc region. Check whether this is true for the PGD level, in
+ * which case it is guaranteed to be true for all other levels as well.
+ */
+ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+
if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
* Take care not to create a writable alias for the
@@ -1210,11 +1233,11 @@ void __init early_fixmap_init(void)
pudp = pud_offset_kimg(p4dp, addr);
} else {
if (p4d_none(p4d))
- __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
pudp = fixmap_pud(addr);
}
if (pud_none(READ_ONCE(*pudp)))
- __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
pmdp = fixmap_pmd(addr);
__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
@@ -1480,7 +1503,7 @@ struct range arch_get_mappable_range(void)
int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
- int ret, flags = 0;
+ int ret, flags = NO_EXEC_MAPPINGS;
VM_BUG_ON(!mhp_range_allowed(start, size, true));
@@ -1490,7 +1513,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
*/
if (rodata_full || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE))
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
size, params->pgprot, __pgd_pgtable_alloc,
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c967bfd30d2b..0a48191534ff 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -419,14 +419,17 @@ SYM_FUNC_START(__cpu_setup)
reset_amuserenr_el0 x1 // Disable AMU access from EL0
/*
- * Memory region attributes
+ * Default values for VMSA control registers. These will be adjusted
+ * below depending on detected CPU features.
*/
- mov_q x5, MAIR_EL1_SET
-#ifdef CONFIG_ARM64_MTE
- mte_tcr .req x20
-
- mov mte_tcr, #0
+ mair .req x17
+ tcr .req x16
+ mov_q mair, MAIR_EL1_SET
+ mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
+#ifdef CONFIG_ARM64_MTE
/*
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
* (ID_AA64PFR1_EL1[11:8] > 1).
@@ -438,7 +441,7 @@ SYM_FUNC_START(__cpu_setup)
/* Normal Tagged memory type at the corresponding MAIR index */
mov x10, #MAIR_ATTR_NORMAL_TAGGED
- bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8
+ bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
/* initialize GCR_EL1: all non-zero tags excluded by default */
mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
@@ -449,37 +452,26 @@ SYM_FUNC_START(__cpu_setup)
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
- mov_q mte_tcr, TCR_KASAN_HW_FLAGS
+ mov_q x10, TCR_KASAN_HW_FLAGS
+ orr tcr, tcr, x10
1:
#endif
- msr mair_el1, x5
- /*
- * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
- * adjusted if the kernel is compiled with 52bit VA support.
- */
- mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-#ifdef CONFIG_ARM64_MTE
- orr x10, x10, mte_tcr
- .unreq mte_tcr
-#endif
- tcr_clear_errata_bits x10, x9, x5
+ tcr_clear_errata_bits tcr, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
ldr_l x9, vabits_actual
sub x9, xzr, x9
add x9, x9, #64
- tcr_set_t1sz x10, x9
+ tcr_set_t1sz tcr, x9
#else
ldr_l x9, idmap_t0sz
#endif
- tcr_set_t0sz x10, x9
+ tcr_set_t0sz tcr, x9
/*
* Set the IPS bits in TCR_EL1.
*/
- tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+ tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable hardware update of the Access Flags bit.
@@ -489,13 +481,17 @@ SYM_FUNC_START(__cpu_setup)
mrs x9, ID_AA64MMFR1_EL1
and x9, x9, #0xf
cbz x9, 1f
- orr x10, x10, #TCR_HA // hardware Access flag update
+ orr tcr, tcr, #TCR_HA // hardware Access flag update
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
- msr tcr_el1, x10
+ msr mair_el1, mair
+ msr tcr_el1, tcr
/*
* Prepare SCTLR
*/
mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S
+
+ .unreq mair
+ .unreq tcr
SYM_FUNC_END(__cpu_setup)
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 0e050d76b83a..a50e92ea1878 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -337,7 +337,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
}
-static void ptdump_initialize(void)
+static void __init ptdump_initialize(void)
{
unsigned i, j;
@@ -381,7 +381,7 @@ void ptdump_check_wx(void)
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
address_markers[PAGE_END_NR].start_address = PAGE_END;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index d29d722ec3ec..68bf1a125502 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -16,7 +16,7 @@ static int ptdump_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}