path: root/arch/arm64/include/asm
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/acpi.h | 3
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 78
-rw-r--r--  arch/arm64/include/asm/asm-extable.h | 95
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 7
-rw-r--r--  arch/arm64/include/asm/assembler.h | 83
-rw-r--r--  arch/arm64/include/asm/barrier.h | 16
-rw-r--r--  arch/arm64/include/asm/cputype.h | 4
-rw-r--r--  arch/arm64/include/asm/esr.h | 6
-rw-r--r--  arch/arm64/include/asm/extable.h | 23
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 118
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 21
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 2
-rw-r--r--  arch/arm64/include/asm/futex.h | 25
-rw-r--r--  arch/arm64/include/asm/gpr-num.h | 26
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/asm/kexec.h | 12
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 55
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 5
-rw-r--r--  arch/arm64/include/asm/memory.h | 1
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 24
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h | 5
-rw-r--r--  arch/arm64/include/asm/mte.h | 14
-rw-r--r--  arch/arm64/include/asm/page.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 5
-rw-r--r--  arch/arm64/include/asm/processor.h | 51
-rw-r--r--  arch/arm64/include/asm/sections.h | 1
-rw-r--r--  arch/arm64/include/asm/setup.h | 6
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 4
-rw-r--r--  arch/arm64/include/asm/string.h | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 34
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 3
-rw-r--r--  arch/arm64/include/asm/trans_pgd.h | 14
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 30
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 2
-rw-r--r--  arch/arm64/include/asm/vdso/compat_barrier.h | 7
-rw-r--r--  arch/arm64/include/asm/virt.h | 7
-rw-r--r--  arch/arm64/include/asm/vmalloc.h | 4
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 21
43 files changed, 593 insertions(+), 237 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 7535dc7cc5aa..bd68e1b7f29f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
typedef u64 phys_cpuid_t;
#define PHYS_CPUID_INVALID INVALID_HWID
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 88d20f04c64a..af1fafbe7e1d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -32,7 +32,7 @@
({ \
const struct arch_timer_erratum_workaround *__wa; \
__wa = __this_cpu_read(timer_unstable_counter_workaround); \
- (__wa && __wa->h) ? __wa->h : arch_timer_##h; \
+ (__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
})
#else
@@ -52,8 +52,6 @@ struct arch_timer_erratum_workaround {
enum arch_timer_erratum_match_type match_type;
const void *id;
const char *desc;
- u32 (*read_cntp_tval_el0)(void);
- u32 (*read_cntv_tval_el0)(void);
u64 (*read_cntpct_el0)(void);
u64 (*read_cntvct_el0)(void);
int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
@@ -64,25 +62,28 @@ struct arch_timer_erratum_workaround {
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
timer_unstable_counter_workaround);
-/* inline sysreg accessors that make erratum_handler() work */
-static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
+static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
- return read_sysreg(cntp_tval_el0);
-}
+ u64 cnt;
-static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
-{
- return read_sysreg(cntv_tval_el0);
-}
+ asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
+ "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
+ ARM64_HAS_ECV)
+ : "=r" (cnt));
-static inline notrace u64 arch_timer_read_cntpct_el0(void)
-{
- return read_sysreg(cntpct_el0);
+ return cnt;
}
static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
- return read_sysreg(cntvct_el0);
+ u64 cnt;
+
+ asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
+ "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
+ ARM64_HAS_ECV)
+ : "=r" (cnt));
+
+ return cnt;
}
#define arch_timer_reg_read_stable(reg) \
@@ -102,51 +103,58 @@ static inline notrace u64 arch_timer_read_cntvct_el0(void)
* the code.
*/
static __always_inline
-void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
write_sysreg(val, cntp_ctl_el0);
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- write_sysreg(val, cntp_tval_el0);
+ case ARCH_TIMER_REG_CVAL:
+ write_sysreg(val, cntp_cval_el0);
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
write_sysreg(val, cntv_ctl_el0);
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- write_sysreg(val, cntv_tval_el0);
+ case ARCH_TIMER_REG_CVAL:
+ write_sysreg(val, cntv_cval_el0);
break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
-
- isb();
}
static __always_inline
-u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
+u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
return read_sysreg(cntp_ctl_el0);
- case ARCH_TIMER_REG_TVAL:
- return arch_timer_reg_read_stable(cntp_tval_el0);
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
return read_sysreg(cntv_ctl_el0);
- case ARCH_TIMER_REG_TVAL:
- return arch_timer_reg_read_stable(cntv_tval_el0);
+ default:
+ BUILD_BUG();
}
}
- BUG();
+ BUILD_BUG();
+ unreachable();
}
static inline u32 arch_timer_get_cntfrq(void)
@@ -169,7 +177,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
- isb();
cnt = arch_timer_reg_read_stable(cntpct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
@@ -179,8 +186,10 @@ static __always_inline u64 __arch_counter_get_cntpct(void)
{
u64 cnt;
- isb();
- cnt = read_sysreg(cntpct_el0);
+ asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
+ "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
+ ARM64_HAS_ECV)
+ : "=r" (cnt));
arch_counter_enforce_ordering(cnt);
return cnt;
}
@@ -189,7 +198,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
u64 cnt;
- isb();
cnt = arch_timer_reg_read_stable(cntvct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
@@ -199,8 +207,10 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
{
u64 cnt;
- isb();
- cnt = read_sysreg(cntvct_el0);
+ asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
+ "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
+ ARM64_HAS_ECV)
+ : "=r" (cnt));
arch_counter_enforce_ordering(cnt);
return cnt;
}
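The hunks above replace the 32-bit relative TVAL accessors with 64-bit absolute CVAL ones, which is why arch_timer_reg_write_cp15() now takes a u64. A minimal userspace sketch of the idea, using hypothetical stand-in variables rather than the real system registers or the clockevent driver:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins (hypothetical) for CNTVCT_EL0 and CNTV_CVAL_EL0. */
static uint64_t counter = 1000;
static uint64_t cval;

/* Programming an event is now "counter value at which to fire" (CVAL)
 * rather than a 32-bit downcounter (TVAL). */
static void set_next_event(uint32_t delta_ticks)
{
	cval = counter + delta_ticks;
}

int main(void)
{
	set_next_event(50);
	printf("counter=%llu cval=%llu, fires in %llu ticks\n",
	       (unsigned long long)counter, (unsigned long long)cval,
	       (unsigned long long)(cval - counter));
	return 0;
}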
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
new file mode 100644
index 000000000000..c39f2437e08e
--- /dev/null
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ASM_EXTABLE_H
+#define __ASM_ASM_EXTABLE_H
+
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UACCESS_ERR_ZERO 3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+
+#ifdef __ASSEMBLY__
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ .pushsection __ex_table, "a"; \
+ .align 2; \
+ .long ((insn) - .); \
+ .long ((fixup) - .); \
+ .short (type); \
+ .short (data); \
+ .popsection;
+
+/*
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
+ */
+ .macro _asm_extable, insn, fixup
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .endm
+
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+ .macro _cond_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable \insn, \fixup
+ .endif
+ .endm
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+
+#include <asm/gpr-num.h>
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ ".pushsection __ex_table, \"a\"\n" \
+ ".align 2\n" \
+ ".long ((" insn ") - .)\n" \
+ ".long ((" fixup ") - .)\n" \
+ ".short (" type ")\n" \
+ ".short (" data ")\n" \
+ ".popsection\n"
+
+#define _ASM_EXTABLE(insn, fixup) \
+ __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr) \
+ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_UACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
+
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR GENMASK(9, 5)
+
+#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \
+ "(" \
+ EX_DATA_REG(DATA, data) " | " \
+ EX_DATA_REG(ADDR, addr) \
+ ")")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EXTABLE_H */
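The UACCESS_ERR_ZERO and LOAD_UNALIGNED_ZEROPAD entry types pack two 5-bit GPR numbers into the 16-bit `data` word using the GENMASK fields above. A minimal sketch of that packing and unpacking in plain C (illustrative only; the real encoding happens at assembly time through the .L__gpr_num_* symbols from <asm/gpr-num.h>):

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the EX_DATA_REG_* definitions above. */
#define EX_DATA_REG_ERR_SHIFT	0
#define EX_DATA_REG_ERR_MASK	(0x1fu << EX_DATA_REG_ERR_SHIFT)
#define EX_DATA_REG_ZERO_SHIFT	5
#define EX_DATA_REG_ZERO_MASK	(0x1fu << EX_DATA_REG_ZERO_SHIFT)

/* Pack two GPR numbers the way _ASM_EXTABLE_UACCESS_ERR_ZERO does
 * (31 stands for wzr/xzr). */
static uint16_t pack_uaccess_data(unsigned int err_gpr, unsigned int zero_gpr)
{
	return (uint16_t)((err_gpr << EX_DATA_REG_ERR_SHIFT) |
			  (zero_gpr << EX_DATA_REG_ZERO_SHIFT));
}

int main(void)
{
	/* e.g. error code written to w0, loaded value zeroed in w1 */
	uint16_t data = pack_uaccess_data(0, 1);

	printf("data=0x%04x err=w%u zero=w%u\n", data,
	       (data & EX_DATA_REG_ERR_MASK) >> EX_DATA_REG_ERR_SHIFT,
	       (data & EX_DATA_REG_ZERO_MASK) >> EX_DATA_REG_ZERO_SHIFT);
	return 0;
}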
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index ccedf548dac9..0557af834e03 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -3,10 +3,11 @@
#define __ASM_ASM_UACCESS_H
#include <asm/alternative-macros.h>
+#include <asm/asm-extable.h>
+#include <asm/assembler.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
-#include <asm/assembler.h>
/*
* User access enabling/disabling macros.
@@ -58,6 +59,10 @@ alternative_else_nop_endif
.endm
#endif
+#define USER(l, x...) \
+9999: x; \
+ _asm_extable 9999b, l
+
/*
* Generate the assembly for LDTR/STTR with exception table entries.
* This is complicated as there is no post-increment or pair versions of the
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 89faca0e740d..136d13f3d6e9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -14,9 +14,10 @@
#include <asm-generic/export.h>
-#include <asm/asm-offsets.h>
#include <asm/alternative.h>
#include <asm/asm-bug.h>
+#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -130,32 +131,6 @@ alternative_endif
.endm
/*
- * Create an exception table entry for `insn`, which will branch to `fixup`
- * when an unhandled fault is taken.
- */
- .macro _asm_extable, insn, fixup
- .pushsection __ex_table, "a"
- .align 3
- .long (\insn - .), (\fixup - .)
- .popsection
- .endm
-
-/*
- * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
- * do nothing.
- */
- .macro _cond_extable, insn, fixup
- .ifnc \fixup,
- _asm_extable \insn, \fixup
- .endif
- .endm
-
-
-#define USER(l, x...) \
-9999: x; \
- _asm_extable 9999b, l
-
-/*
* Register aliases.
*/
lr .req x30 // link register
@@ -405,19 +380,19 @@ alternative_endif
/*
* Macro to perform a data cache maintenance for the interval
- * [start, end)
+ * [start, end) with dcache line size explicitly provided.
*
* op: operation passed to dc instruction
* domain: domain used in dsb instruciton
* start: starting virtual address of the region
* end: end virtual address of the region
+ * linesz: dcache line size
* fixup: optional label to branch to on user fault
- * Corrupts: start, end, tmp1, tmp2
+ * Corrupts: start, end, tmp
*/
- .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
- dcache_line_size \tmp1, \tmp2
- sub \tmp2, \tmp1, #1
- bic \start, \start, \tmp2
+ .macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
+ sub \tmp, \linesz, #1
+ bic \start, \start, \tmp
.Ldcache_op\@:
.ifc \op, cvau
__dcache_op_workaround_clean_cache \op, \start
@@ -436,7 +411,7 @@ alternative_endif
.endif
.endif
.endif
- add \start, \start, \tmp1
+ add \start, \start, \linesz
cmp \start, \end
b.lo .Ldcache_op\@
dsb \domain
@@ -445,6 +420,22 @@ alternative_endif
.endm
/*
+ * Macro to perform a data cache maintenance for the interval
+ * [start, end)
+ *
+ * op: operation passed to dc instruction
+ * domain: domain used in dsb instruciton
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
+ */
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
+ dcache_line_size \tmp1, \tmp2
+ dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
+ .endm
+
+/*
* Macro to perform an instruction cache maintenance for the interval
* [start, end)
*
@@ -468,6 +459,25 @@ alternative_endif
.endm
/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+ .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
+ phys_to_ttbr \tmp, \zero_page
+ msr ttbr1_el1, \tmp
+ isb
+ tlbi vmalle1
+ dsb nsh
+ phys_to_ttbr \tmp, \page_table
+ offset_ttbr1 \tmp, \tmp2
+ msr ttbr1_el1, \tmp
+ isb
+ .endm
+
+/*
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
.macro reset_pmuserenr_el0, tmpreg
@@ -525,6 +535,11 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
+#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 451e11e5fd23..1c5a00598458 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -23,7 +23,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
-#define tsb_csync() asm volatile("hint #18" : : : "memory")
+#define __tsb_csync() asm volatile("hint #18" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
#ifdef CONFIG_ARM64_PSEUDO_NMI
@@ -46,6 +46,20 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+
+#define tsb_csync() \
+ do { \
+ /* \
+ * CPUs affected by Arm Erratum 2054223 or 2067961 needs \
+ * another TSB to ensure the trace is flushed. The barriers \
+ * don't have to be strictly back to back, as long as the \
+ * CPU is in trace prohibited state. \
+ */ \
+ if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
+ __tsb_csync(); \
+ __tsb_csync(); \
+ } while (0)
+
/*
* Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
* and 0 otherwise.
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 6231e1f0abe7..19b8441aa8f2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -73,6 +73,8 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A710 0xD47
+#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define APM_CPU_PART_POTENZA 0x000
@@ -113,6 +115,8 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 29f97eb3dad4..a305ce256090 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -227,6 +227,9 @@
#define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
ESR_ELx_SYS64_ISS_DIR_READ)
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCTSS (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 6, 14, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
ESR_ELx_SYS64_ISS_DIR_READ)
@@ -317,6 +320,9 @@
#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
ESR_ELx_CP15_64_ISS_DIR_READ)
+#define ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS (ESR_ELx_CP15_64_ISS_SYS_VAL(9, 14) | \
+ ESR_ELx_CP15_64_ISS_DIR_READ)
+
#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
ESR_ELx_CP15_32_ISS_DIR_READ)
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index b15eb4a3e6b2..8b300dd28def 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -18,10 +18,21 @@
struct exception_table_entry
{
int insn, fixup;
+ short type, data;
};
#define ARCH_HAS_RELATIVE_EXTABLE
+#define swap_ex_entry_fixup(a, b, tmp, delta) \
+do { \
+ (a)->fixup = (b)->fixup + (delta); \
+ (b)->fixup = (tmp).fixup - (delta); \
+ (a)->type = (b)->type; \
+ (b)->type = (tmp).type; \
+ (a)->data = (b)->data; \
+ (b)->data = (tmp).data; \
+} while (0)
+
static inline bool in_bpf_jit(struct pt_regs *regs)
{
if (!IS_ENABLED(CONFIG_BPF_JIT))
@@ -32,16 +43,16 @@ static inline bool in_bpf_jit(struct pt_regs *regs)
}
#ifdef CONFIG_BPF_JIT
-int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
- struct pt_regs *regs);
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline
-int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
- struct pt_regs *regs)
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
{
- return 0;
+ return false;
}
#endif /* !CONFIG_BPF_JIT */
-extern int fixup_exception(struct pt_regs *regs);
+bool fixup_exception(struct pt_regs *regs);
#endif
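With the extra `type` and `data` members, each entry is still a pair of 32-bit self-relative offsets (ARCH_HAS_RELATIVE_EXTABLE), which is why swap_ex_entry_fixup() must adjust the fixup by `delta` when entries are sorted. A hedged sketch of how such an entry resolves back to absolute addresses, written in the style of the kernel's helpers but with illustrative names:

#include <stdint.h>

/* Same shape as the struct above: self-relative offsets plus type/data. */
struct example_extable_entry {
	int insn, fixup;
	short type, data;
};

/* Each offset is relative to the address of its own field. */
static unsigned long ex_to_insn(const struct example_extable_entry *ex)
{
	return (unsigned long)&ex->insn + ex->insn;
}

static unsigned long ex_to_fixup(const struct example_extable_entry *ex)
{
	return (unsigned long)&ex->fixup + ex->fixup;
}

int main(void)
{
	static int target;			/* stands in for a faulting insn */
	static struct example_extable_entry ex;

	ex.insn  = (int)((unsigned long)&target - (unsigned long)&ex.insn);
	ex.fixup = (int)((unsigned long)&target - (unsigned long)&ex.fixup);
	ex.type  = 1;				/* EX_TYPE_FIXUP */
	ex.data  = 0;

	return (ex_to_insn(&ex) == (unsigned long)&target &&
		ex_to_fixup(&ex) == (unsigned long)&target) ? 0 : 1;
}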
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 9a62884183e5..dbb4b30a5648 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -62,15 +62,13 @@ static inline size_t sve_ffr_offset(int vl)
static inline void *sve_pffr(struct thread_struct *thread)
{
- return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
+ return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread));
}
-extern void sve_save_state(void *state, u32 *pfpsr);
+extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
- unsigned long vq_minus_1);
-extern void sve_flush_live(unsigned long vq_minus_1);
-extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
- unsigned long vq_minus_1);
+ int restore_ffr);
+extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
@@ -79,10 +77,6 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
-extern int __ro_after_init sve_max_vl;
-extern int __ro_after_init sve_max_virtualisable_vl;
-extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
-
/*
* Helpers to translate bit indices in sve_vq_map to VQ values (and
* vice versa). This allows find_next_bit() to be used to find the
@@ -98,15 +92,29 @@ static inline unsigned int __bit_to_vq(unsigned int bit)
return SVE_VQ_MAX - bit;
}
-/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
-static inline bool sve_vq_available(unsigned int vq)
-{
- return test_bit(__vq_to_bit(vq), sve_vq_map);
-}
-#ifdef CONFIG_ARM64_SVE
+struct vl_info {
+ enum vec_type type;
+ const char *name; /* For display purposes */
-extern size_t sve_state_size(struct task_struct const *task);
+ /* Minimum supported vector length across all CPUs */
+ int min_vl;
+
+ /* Maximum supported vector length across all CPUs */
+ int max_vl;
+ int max_virtualisable_vl;
+
+ /*
+ * Set of available vector lengths,
+ * where length vq encoded as bit __vq_to_bit(vq):
+ */
+ DECLARE_BITMAP(vq_map, SVE_VQ_MAX);
+
+ /* Set of vector lengths present on at least one cpu: */
+ DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
+};
+
+#ifdef CONFIG_ARM64_SVE
extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
@@ -143,11 +151,63 @@ static inline void sve_user_enable(void)
* Probing and setup functions.
* Calls to these functions must be serialised with one another.
*/
-extern void __init sve_init_vq_map(void);
-extern void sve_update_vq_map(void);
-extern int sve_verify_vq_map(void);
+enum vec_type;
+
+extern void __init vec_init_vq_map(enum vec_type type);
+extern void vec_update_vq_map(enum vec_type type);
+extern int vec_verify_vq_map(enum vec_type type);
extern void __init sve_setup(void);
+extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
+
+static inline void write_vl(enum vec_type type, u64 val)
+{
+ u64 tmp;
+
+ switch (type) {
+#ifdef CONFIG_ARM64_SVE
+ case ARM64_VEC_SVE:
+ tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
+ write_sysreg_s(tmp | val, SYS_ZCR_EL1);
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+static inline int vec_max_vl(enum vec_type type)
+{
+ return vl_info[type].max_vl;
+}
+
+static inline int vec_max_virtualisable_vl(enum vec_type type)
+{
+ return vl_info[type].max_virtualisable_vl;
+}
+
+static inline int sve_max_vl(void)
+{
+ return vec_max_vl(ARM64_VEC_SVE);
+}
+
+static inline int sve_max_virtualisable_vl(void)
+{
+ return vec_max_virtualisable_vl(ARM64_VEC_SVE);
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool vq_available(enum vec_type type, unsigned int vq)
+{
+ return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
+}
+
+static inline bool sve_vq_available(unsigned int vq)
+{
+ return vq_available(ARM64_VEC_SVE, vq);
+}
+
#else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task) { }
@@ -155,6 +215,11 @@ static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
+static inline int sve_max_virtualisable_vl(void)
+{
+ return 0;
+}
+
static inline int sve_set_current_vl(unsigned long arg)
{
return -EINVAL;
@@ -165,14 +230,21 @@ static inline int sve_get_current_vl(void)
return -EINVAL;
}
+static inline int sve_max_vl(void)
+{
+ return -EINVAL;
+}
+
+static inline bool sve_vq_available(unsigned int vq) { return false; }
+
static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }
#define sve_cond_update_zcr_vq(val, reg) do { } while (0)
-static inline void sve_init_vq_map(void) { }
-static inline void sve_update_vq_map(void) { }
-static inline int sve_verify_vq_map(void) { return 0; }
+static inline void vec_init_vq_map(enum vec_type t) { }
+static inline void vec_update_vq_map(enum vec_type t) { }
+static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
#endif /* ! CONFIG_ARM64_SVE */
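The vq_map bitmaps in struct vl_info keep the existing encoding: vector quantum vq (the vector length in units of 16 bytes) is stored at bit SVE_VQ_MAX - vq, so scanning for the first set bit always finds the largest supported length. A small sketch of the mapping, assuming SVE_VQ_MAX is 512 as in the uapi headers:

#include <stdio.h>

#define SVE_VQ_MIN	1
#define SVE_VQ_MAX	512

/* Same mapping as __vq_to_bit()/__bit_to_vq() above. */
static unsigned int vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static unsigned int bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}

int main(void)
{
	/* VQ 1 (128-bit vectors) maps to the highest bit, VQ 16 (2048-bit,
	 * the current architectural maximum) to a lower one, so the first
	 * set bit found is always the largest supported length. */
	for (unsigned int vq = SVE_VQ_MIN; vq <= 16; vq *= 2)
		printf("vq %2u (%4u-bit VL) -> bit %u -> vq %u\n",
		       vq, vq * 128, vq_to_bit(vq), bit_to_vq(vq_to_bit(vq)));
	return 0;
}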
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 00a2c0b69c2b..2509d7dde55a 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -217,28 +217,36 @@
.macro sve_flush_z
_for n, 0, 31, _sve_flush_z \n
.endm
-.macro sve_flush_p_ffr
+.macro sve_flush_p
_for n, 0, 15, _sve_pfalse \n
+.endm
+.macro sve_flush_ffr
_sve_wrffr 0
.endm
-.macro sve_save nxbase, xpfpsr, nxtmp
+.macro sve_save nxbase, xpfpsr, save_ffr, nxtmp
_for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
_for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
+ cbz \save_ffr, 921f
_sve_rdffr 0
_sve_str_p 0, \nxbase
_sve_ldr_p 0, \nxbase, -16
-
+ b 922f
+921:
+ str xzr, [x\nxbase] // Zero out FFR
+922:
mrs x\nxtmp, fpsr
str w\nxtmp, [\xpfpsr]
mrs x\nxtmp, fpcr
str w\nxtmp, [\xpfpsr, #4]
.endm
-.macro __sve_load nxbase, xpfpsr, nxtmp
+.macro sve_load nxbase, xpfpsr, restore_ffr, nxtmp
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
+ cbz \restore_ffr, 921f
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
+921:
_for n, 0, 15, _sve_ldr_p \n, \nxbase, \n - 16
ldr w\nxtmp, [\xpfpsr]
@@ -246,8 +254,3 @@
ldr w\nxtmp, [\xpfpsr, #4]
msr fpcr, x\nxtmp
.endm
-
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
- sve_load_vq \xvqminus1, x\nxtmp, \xtmp2
- __sve_load \nxbase, \xpfpsr, \nxtmp
-.endm
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 91fa4baa1a93..347b0cc68f07 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -15,7 +15,7 @@
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
-#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)function_nocfi(_mcount))
#endif
/* The BL at the callsite's adjusted rec->ip */
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 8e41faa37c69..bc06691d2062 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -25,19 +25,14 @@ do { \
" cbz %w0, 3f\n" \
" sub %w4, %w4, %w0\n" \
" cbnz %w4, 1b\n" \
-" mov %w0, %w7\n" \
+" mov %w0, %w6\n" \
"3:\n" \
" dmb ish\n" \
-" .pushsection .fixup,\"ax\"\n" \
-" .align 2\n" \
-"4: mov %w0, %w6\n" \
-" b 3b\n" \
-" .popsection\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
"+r" (loops) \
- : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
+ : "r" (oparg), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable_privileged(); \
} while (0)
@@ -105,18 +100,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
" cbz %w3, 3f\n"
" sub %w4, %w4, %w3\n"
" cbnz %w4, 1b\n"
-" mov %w0, %w8\n"
+" mov %w0, %w7\n"
"3:\n"
" dmb ish\n"
"4:\n"
-" .pushsection .fixup,\"ax\"\n"
-"5: mov %w0, %w7\n"
-" b 4b\n"
-" .popsection\n"
- _ASM_EXTABLE(1b, 5b)
- _ASM_EXTABLE(2b, 5b)
+ _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
+ _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
- : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
+ : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
: "memory");
uaccess_disable_privileged();
diff --git a/arch/arm64/include/asm/gpr-num.h b/arch/arm64/include/asm/gpr-num.h
new file mode 100644
index 000000000000..05da4a7c5788
--- /dev/null
+++ b/arch/arm64/include/asm/gpr-num.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ .equ .L__gpr_num_x\num, \num
+ .equ .L__gpr_num_w\num, \num
+ .endr
+ .equ .L__gpr_num_xzr, 31
+ .equ .L__gpr_num_wzr, 31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+" .equ .L__gpr_num_x\\num, \\num\n" \
+" .equ .L__gpr_num_w\\num, \\num\n" \
+" .endr\n" \
+" .equ .L__gpr_num_xzr, 31\n" \
+" .equ .L__gpr_num_wzr, 31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 8c129db8232a..b100e0055eab 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -105,6 +105,7 @@
#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
+#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 00dbcc71aeb2..9839bfc163d7 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,12 +90,24 @@ static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
+#if defined(CONFIG_KEXEC_CORE)
+void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2);
+#endif
+
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
void *dtb;
phys_addr_t dtb_mem;
phys_addr_t kern_reloc;
+ phys_addr_t el2_vectors;
+ phys_addr_t ttbr0;
+ phys_addr_t ttbr1;
+ phys_addr_t zero_page;
+ unsigned long phys_offset;
+ unsigned long t0sz;
};
#ifdef CONFIG_KEXEC_FILE
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 5d38ff4a4806..05cd82eeca13 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -39,7 +39,7 @@ void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 327120c0089f..a39fcf318c77 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -295,6 +295,7 @@
#define MDCR_EL2_HPMFZO (UL(1) << 29)
#define MDCR_EL2_MTPME (UL(1) << 28)
#define MDCR_EL2_TDCC (UL(1) << 27)
+#define MDCR_EL2_HLP (UL(1) << 26)
#define MDCR_EL2_HCCD (UL(1) << 23)
#define MDCR_EL2_TTRF (UL(1) << 19)
#define MDCR_EL2_HPMD (UL(1) << 17)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index e86045ac43ba..50d5e4de244c 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -44,31 +44,39 @@
#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0
-#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run 1
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
-#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
-#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
-#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
-#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
-#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16
-#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
-#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
-#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
#ifndef __ASSEMBLY__
#include <linux/mm.h>
+enum __kvm_host_smccc_func {
+ /* Hypercalls available only prior to pKVM finalisation */
+ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
+ __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init,
+ __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
+ __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
+ __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+ __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
+
+ /* Hypercalls available after pKVM finalisation */
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+ __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
+ __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
+ __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
+ __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
+};
+
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym) extern char kvm_nvhe_sym(sym)[]
@@ -263,9 +271,10 @@ extern u64 __kvm_get_mdcr_el2(void);
/*
* KVM extable for unexpected exceptions.
- * In the same format _asm_extable, but output to a different section so that
- * it can be mapped to EL2. The KVM version is not sorted. The caller must
- * ensure:
+ * Create a struct kvm_exception_table_entry output to a section that can be
+ * mapped by EL2. The table is not sorted.
+ *
+ * The caller must ensure:
* x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
* code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
*/
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd418955e31e..f4871e47b2d0 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -396,7 +396,10 @@ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
if (vcpu_mode_is_32bit(vcpu))
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+ if (vcpu_mode_priv(vcpu))
+ return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
+ else
+ return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f8be56d5342b..4be8486042a7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -58,6 +58,7 @@
enum kvm_mode {
KVM_MODE_DEFAULT,
KVM_MODE_PROTECTED,
+ KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
@@ -771,7 +772,6 @@ int kvm_set_ipa_limit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
@@ -780,6 +780,8 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
return false;
}
+void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
+
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 657d0c94cf82..5afd14ab15b9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -115,7 +115,12 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
+extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f1745a843414..1b9a1e242612 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
+#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f4ba93d4ffeb..6770667b34a3 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -116,6 +116,30 @@ static inline void cpu_install_idmap(void)
}
/*
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
+ */
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
+{
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ __cpu_set_tcr_t0sz(t0sz);
+
+ /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+ write_sysreg(ttbr0, ttbr0_el1);
+ isb();
+}
+
+/*
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 22420e1f8c03..478b9bcf69ad 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
+void mte_enable_kernel_asymm(void);
#else /* CONFIG_ARM64_MTE */
@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void)
{
}
+static inline void mte_enable_kernel_asymm(void)
+{
+}
+
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 3f93b9e0b339..075539f5f1c8 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -88,22 +88,28 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
-DECLARE_STATIC_KEY_FALSE(mte_async_mode);
+DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
-static inline bool system_uses_mte_async_mode(void)
+static inline bool system_uses_mte_async_or_asymm_mode(void)
{
- return static_branch_unlikely(&mte_async_mode);
+ return static_branch_unlikely(&mte_async_or_asymm_mode);
}
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
+ if (!system_supports_mte())
+ return;
+
mte_check_tfsr_el1();
}
static inline void mte_check_tfsr_exit(void)
{
+ if (!system_supports_mte())
+ return;
+
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
@@ -115,7 +121,7 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1();
}
#else
-static inline bool system_uses_mte_async_mode(void)
+static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index f98c91bbd7c1..993a27ea6f54 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -41,7 +41,6 @@ void tag_clear_highpage(struct page *to);
typedef struct page *pgtable_t;
-int pfn_valid(unsigned long pfn);
int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index dfa76afa0ccf..84fbb52b4224 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1022,6 +1022,11 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
return PAGE_READONLY_EXEC;
}
+static inline bool pud_sect_supported(void)
+{
+ return PAGE_SIZE == SZ_4K;
+}
+
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ee2bdc1b9f5b..6f41b65f9962 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -115,6 +115,11 @@ struct debug_info {
#endif
};
+enum vec_type {
+ ARM64_VEC_SVE = 0,
+ ARM64_VEC_MAX,
+};
+
struct cpu_context {
unsigned long x19;
unsigned long x20;
@@ -147,8 +152,8 @@ struct thread_struct {
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
- unsigned int sve_vl; /* SVE vector length */
- unsigned int sve_vl_onexec; /* SVE vl after next exec */
+ unsigned int vl[ARM64_VEC_MAX]; /* vector length */
+ unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
@@ -164,6 +169,46 @@ struct thread_struct {
u64 sctlr_user;
};
+static inline unsigned int thread_get_vl(struct thread_struct *thread,
+ enum vec_type type)
+{
+ return thread->vl[type];
+}
+
+static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
+{
+ return thread_get_vl(thread, ARM64_VEC_SVE);
+}
+
+unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
+void task_set_vl(struct task_struct *task, enum vec_type type,
+ unsigned long vl);
+void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
+ unsigned long vl);
+unsigned int task_get_vl_onexec(const struct task_struct *task,
+ enum vec_type type);
+
+static inline unsigned int task_get_sve_vl(const struct task_struct *task)
+{
+ return task_get_vl(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
+{
+ task_set_vl(task, ARM64_VEC_SVE, vl);
+}
+
+static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
+{
+ return task_get_vl_onexec(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl_onexec(struct task_struct *task,
+ unsigned long vl)
+{
+ task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
+}
+
#define SCTLR_USER_MASK \
(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
SCTLR_EL1_TCF0_MASK)
@@ -257,7 +302,7 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
void update_sctlr_el1(u64 sctlr);
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index e4ad9db53af1..152cb35bf9df 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
index d3320618ed14..6437df661700 100644
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -8,4 +8,10 @@
void *get_early_fdt_ptr(void);
void early_fdt_map(u64 dt_phys);
+/*
+ * These two variables are used in the head.S file.
+ */
+extern phys_addr_t __fdt_pointer __initdata;
+extern u64 __cacheline_aligned boot_args[4];
+
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 8aebc00c1718..a4e046ef4568 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
+#include <linux/llist.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
@@ -59,6 +60,9 @@ struct stackframe {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph;
#endif
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
};
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 3a3264ff47b9..95f7686b728d 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
+#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b268082d67ed..16b3f1a1d468 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -13,6 +13,8 @@
#include <linux/stringify.h>
#include <linux/kasan-tags.h>
+#include <asm/gpr-num.h>
+
/*
* ARMv8 ARM reserves the following encoding for system registers:
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
@@ -507,6 +509,9 @@
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
+#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
+
#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
@@ -621,6 +626,7 @@
#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_ENIA_SHIFT 31
@@ -666,6 +672,7 @@
#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_BT1 (BIT(36))
@@ -807,6 +814,7 @@
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
#define ID_AA64PFR1_MTE 0x2
+#define ID_AA64PFR1_MTE_ASYMM 0x3
/* id_aa64zfr0 */
#define ID_AA64ZFR0_F64MM_SHIFT 56
@@ -1152,6 +1160,7 @@
#define ICH_HCR_TC (1 << 10)
#define ICH_HCR_TALL0 (1 << 11)
#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_TDIR (1 << 14)
#define ICH_HCR_EOIcount_SHIFT 27
#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
@@ -1184,6 +1193,8 @@
#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
#define ICH_VTR_A3V_SHIFT 21
#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+#define ICH_VTR_TDS_SHIFT 19
+#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
#define ARM64_FEATURE_FIELD_BITS 4
@@ -1192,17 +1203,12 @@
#ifdef __ASSEMBLY__
- .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
- .equ .L__reg_num_x\num, \num
- .endr
- .equ .L__reg_num_xzr, 31
-
.macro mrs_s, rt, sreg
- __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
+ __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
.endm
.macro msr_s, sreg, rt
- __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
+ __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
.endm
#else
@@ -1211,22 +1217,16 @@
#include <linux/types.h>
#include <asm/alternative.h>
-#define __DEFINE_MRS_MSR_S_REGNUM \
-" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
-" .equ .L__reg_num_x\\num, \\num\n" \
-" .endr\n" \
-" .equ .L__reg_num_xzr, 31\n"
-
#define DEFINE_MRS_S \
- __DEFINE_MRS_MSR_S_REGNUM \
+ __DEFINE_ASM_GPR_NUMS \
" .macro mrs_s, rt, sreg\n" \
- __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
+ __emit_inst(0xd5200000|(\\sreg)|(.L__gpr_num_\\rt)) \
" .endm\n"
#define DEFINE_MSR_S \
- __DEFINE_MRS_MSR_S_REGNUM \
+ __DEFINE_ASM_GPR_NUMS \
" .macro msr_s, sreg, rt\n" \
- __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
+ __emit_inst(0xd5000000|(\\sreg)|(.L__gpr_num_\\rt)) \
" .endm\n"
#define UNDEFINE_MRS_S \
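The new SYS_CNTPCTSS_EL0/SYS_CNTVCTSS_EL0 definitions and the reworked mrs_s/msr_s macros all build an instruction word by OR-ing a base opcode, the sys_reg() encoding and a GPR number (now taken from the shared .L__gpr_num_* symbols). A standalone sketch of that arithmetic, assuming the sys_reg() bit layout defined elsewhere in this header (op0 at bit 19, op1 at 16, CRn at 12, CRm at 8, op2 at 5):

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the kernel's sys_reg() layout in sysreg.h. */
#define sys_reg(op0, op1, crn, crm, op2) \
	(((op0) << 19) | ((op1) << 16) | ((crn) << 12) | \
	 ((crm) << 8) | ((op2) << 5))

#define SYS_CNTVCT_EL0		sys_reg(3, 3, 14, 0, 2)
#define SYS_CNTVCTSS_EL0	sys_reg(3, 3, 14, 0, 6)

/* What mrs_s emits: base MRS opcode | register encoding | GPR number. */
static uint32_t mrs_opcode(uint32_t sreg, unsigned int gpr_num)
{
	return 0xd5200000u | sreg | gpr_num;
}

int main(void)
{
	/* mrs x0, cntvct_el0 is 0xd53be040; the self-synchronising variant
	 * differs only in op2, giving 0xd53be0c0. */
	printf("mrs x0, cntvct_el0   -> 0x%08x\n",
	       mrs_opcode(SYS_CNTVCT_EL0, 0));
	printf("mrs x0, cntvctss_el0 -> 0x%08x\n",
	       mrs_opcode(SYS_CNTVCTSS_EL0, 0));
	return 0;
}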
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 6623c99f0984..e1317b7c4525 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -42,6 +42,7 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+ u32 cpu;
};
#define thread_saved_pc(tsk) \
@@ -78,7 +79,7 @@ int arch_dup_task_struct(struct task_struct *dst,
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
#define TIF_SVE 23 /* Scalable Vector Extension in use */
-#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
+#define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h
index 5d08e5adf3d5..033d400a4ea4 100644
--- a/arch/arm64/include/asm/trans_pgd.h
+++ b/arch/arm64/include/asm/trans_pgd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2020, Microsoft Corporation.
- * Pavel Tatashin <pasha.tatashin@soleen.com>
+ * Copyright (c) 2021, Microsoft Corporation.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
*/
#ifndef _ASM_TRANS_TABLE_H
@@ -15,7 +15,7 @@
/*
* trans_alloc_page
* - Allocator that should return exactly one zeroed page, if this
- * allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ * allocator fails, trans_pgd_create_copy() and trans_pgd_idmap_page()
* return -ENOMEM error.
*
* trans_alloc_arg
@@ -30,10 +30,12 @@ struct trans_pgd_info {
int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
unsigned long start, unsigned long end);
-int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
- void *page, unsigned long dst_addr, pgprot_t pgprot);
-
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
unsigned long *t0sz, void *page);
+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+ phys_addr_t *el2_vectors);
+
+extern char trans_pgd_stub_vectors[];
+
#endif /* _ASM_TRANS_TABLE_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 190b494e22ab..6e2e0b7031ab 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,7 @@
#include <linux/kasan-checks.h>
#include <linux/string.h>
+#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
@@ -70,12 +71,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
#define access_ok(addr, size) __range_ok(addr, size)
-#define _ASM_EXTABLE(from, to) \
- " .pushsection __ex_table, \"a\"\n" \
- " .align 3\n" \
- " .long (" #from " - .), (" #to " - .)\n" \
- " .popsection\n"
-
/*
* User access enabling/disabling.
*/
@@ -196,13 +191,13 @@ static inline void __uaccess_enable_tco(void)
*/
static inline void __uaccess_disable_tco_async(void)
{
- if (system_uses_mte_async_mode())
+ if (system_uses_mte_async_or_asymm_mode())
__uaccess_disable_tco();
}
static inline void __uaccess_enable_tco_async(void)
{
- if (system_uses_mte_async_mode())
+ if (system_uses_mte_async_or_asymm_mode())
__uaccess_enable_tco();
}
@@ -260,15 +255,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
- " .section .fixup, \"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %3\n" \
- " mov %1, #0\n" \
- " b 2b\n" \
- " .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT))
+ : "r" (addr))
#define __raw_get_mem(ldr, x, ptr, err) \
do { \
@@ -337,14 +326,9 @@ do { \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %3\n" \
- " b 2b\n" \
- " .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
- : "r" (x), "r" (addr), "i" (-EFAULT))
+ : "r" (x), "r" (addr))
#define __raw_put_mem(str, x, ptr, err) \
do { \
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 3cb206aea3db..6bdb5f5db438 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 449
+#define __NR_compat_syscalls 450
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 844f6ae58662..41ea1195e44b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -903,6 +903,8 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h
index 3fd8fd6d8fc2..3ac35f4a667c 100644
--- a/arch/arm64/include/asm/vdso/compat_barrier.h
+++ b/arch/arm64/include/asm/vdso/compat_barrier.h
@@ -20,16 +20,9 @@
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
-#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
#define aarch32_smp_mb() dmb(ish)
#define aarch32_smp_rmb() dmb(ishld)
#define aarch32_smp_wmb() dmb(ishst)
-#else
-#define aarch32_smp_mb() dmb(ish)
-#define aarch32_smp_rmb() aarch32_smp_mb()
-#define aarch32_smp_wmb() dmb(ishst)
-#endif
-
#undef smp_mb
#undef smp_rmb
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7379f35ae2c6..3c8af033a997 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -67,6 +67,8 @@
*/
extern u32 __boot_cpu_mode[2];
+#define ARM64_VECTOR_TABLE_LEN SZ_2K
+
void __hyp_set_vectors(phys_addr_t phys_vector_base);
void __hyp_reset_vectors(void);
@@ -128,6 +130,11 @@ static __always_inline bool is_protected_kvm_enabled(void)
return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
}
+static inline bool is_hyp_nvhe(void)
+{
+ return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
+}
+
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 7a22aeea9bb5..b9185503feae 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -2,6 +2,7 @@
#define _ASM_ARM64_VMALLOC_H
#include <asm/page.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
@@ -9,10 +10,9 @@
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
/*
- * Only 4k granule supports level 1 block mappings.
* SW table walks can't handle removal of intermediate entries.
*/
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ return pud_sect_supported() &&
!IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 2dcb104c645b..1c8e4f2490bf 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -53,29 +53,16 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long ret, tmp;
+ unsigned long ret;
__uaccess_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
- "1: ldr %0, %3\n"
+ "1: ldr %0, %2\n"
"2:\n"
- " .pushsection .fixup,\"ax\"\n"
- " .align 2\n"
- "3: bic %1, %2, #0x7\n"
- " ldr %0, [%1]\n"
- " and %1, %2, #0x7\n"
- " lsl %1, %1, #0x3\n"
-#ifndef __AARCH64EB__
- " lsr %0, %0, %1\n"
-#else
- " lsl %0, %0, %1\n"
-#endif
- " b 2b\n"
- " .popsection\n"
- _ASM_EXTABLE(1b, 3b)
- : "=&r" (ret), "=&r" (tmp)
+ _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
+ : "=&r" (ret)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
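The removed .fixup sequence did the aligned re-load and shift inline; with EX_TYPE_LOAD_UNALIGNED_ZEROPAD that logic moves into the C exception handler keyed by the data/addr register numbers recorded in the entry. A little-endian sketch of the equivalent arithmetic (hypothetical helper, not the kernel's handler):

#include <stdint.h>

static uint64_t zeropad_fixup_le(const void *addr)
{
	uintptr_t p = (uintptr_t)addr;
	/* Re-load from the 8-byte-aligned address, which cannot fault. */
	uint64_t aligned_val = *(const uint64_t *)(p & ~(uintptr_t)7);
	unsigned int offset = p & 7;

	/* The shift discards the bytes below addr and zero-fills the high
	 * bytes that would have come from the faulting page. */
	return aligned_val >> (offset * 8);
}

int main(void)
{
	/* An aligned 8-byte buffer standing in for the last mapped bytes. */
	static const uint64_t page_end[1] = { 0x4746454443424140ull };
	const char *addr = (const char *)page_end + 3;	/* unaligned */

	return zeropad_fixup_le(addr) == 0x0000004746454443ull ? 0 : 1;
}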