Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/Kconfig                             | 31
-rw-r--r--  arch/arm64/Makefile                            |  4
-rw-r--r--  arch/arm64/include/asm/assembler.h             |  4
-rw-r--r--  arch/arm64/include/asm/brk-imm.h               |  2
-rw-r--r--  arch/arm64/include/asm/elf.h                   | 10
-rw-r--r--  arch/arm64/include/asm/exception.h             |  2
-rw-r--r--  arch/arm64/include/asm/fpsimd.h                |  2
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h         |  1
-rw-r--r--  arch/arm64/include/asm/io.h                    | 12
-rw-r--r--  arch/arm64/include/asm/memory.h                | 14
-rw-r--r--  arch/arm64/include/asm/pgtable.h               | 10
-rw-r--r--  arch/arm64/include/uapi/asm/sve_context.h      | 11
-rw-r--r--  arch/arm64/kernel/asm-offsets.c                |  2
-rw-r--r--  arch/arm64/kernel/cpufeature.c                 |  8
-rw-r--r--  arch/arm64/kernel/entry-common.c               | 36
-rw-r--r--  arch/arm64/kernel/fpsimd.c                     |  5
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c              |  3
-rw-r--r--  arch/arm64/kernel/image-vars.h                 |  2
-rw-r--r--  arch/arm64/kernel/pi/kaslr_early.c             | 11
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c             | 21
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S  | 78
-rw-r--r--  arch/arm64/kernel/process.c                    |  3
-rw-r--r--  arch/arm64/kernel/ptrace.c                     |  8
-rw-r--r--  arch/arm64/kernel/setup.c                      |  5
-rw-r--r--  arch/arm64/kernel/signal.c                     | 39
-rw-r--r--  arch/arm64/kernel/syscall.c                    |  5
-rw-r--r--  arch/arm64/mm/fixmap.c                         |  3
-rw-r--r--  arch/arm64/mm/ptdump.c                         | 56
-rw-r--r--  arch/arm64/tools/sysreg                        | 35
-rw-r--r--  arch/loongarch/Makefile                        |  1
-rw-r--r--  arch/x86/Makefile                              |  1
31 files changed, 206 insertions(+), 219 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index aa7c1d435139..e14e92eb5ba5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -120,6 +120,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
+ select CPUMASK_OFFSTACK if NR_CPUS > 256
select CRC32
select DCACHE_WORD_ACCESS
select DYNAMIC_FTRACE if FUNCTION_TRACER
@@ -198,7 +199,7 @@ config ARM64
if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
- !CC_OPTIMIZE_FOR_SIZE)
+ (CC_IS_CLANG || !CC_OPTIMIZE_FOR_SIZE))
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_SAMPLE_FTRACE_DIRECT
@@ -229,6 +230,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
+ select HAVE_RUST if CPU_LITTLE_ENDIAN
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -547,9 +549,8 @@ config ARM64_ERRATUM_832075
If unsure, say Y.
config ARM64_ERRATUM_834220
- bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+ bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault (rare)"
depends on KVM
- default y
help
This option adds an alternative code sequence to work around ARM
erratum 834220 on Cortex-A57 parts up to r1p2.
@@ -565,7 +566,7 @@ config ARM64_ERRATUM_834220
as it depends on the alternative framework, which will only patch
the kernel if an affected CPU is detected.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1742098
bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
@@ -692,8 +693,7 @@ config ARM64_WORKAROUND_REPEAT_TLBI
bool
config ARM64_ERRATUM_2441007
- bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
- default y
+ bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A55 erratum #2441007.
@@ -706,11 +706,10 @@ config ARM64_ERRATUM_2441007
Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1286807
- bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
- default y
+ bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A76 erratum 1286807.
@@ -724,6 +723,8 @@ config ARM64_ERRATUM_1286807
invalidated has been observed by other observers. The
workaround repeats the TLBI+DSB operation.
+ If unsure, say N.
+
config ARM64_ERRATUM_1463225
bool "Cortex-A76: Software Step might prevent interrupt recognition"
default y
@@ -743,8 +744,7 @@ config ARM64_ERRATUM_1463225
If unsure, say Y.
config ARM64_ERRATUM_1542419
- bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
- default y
+ bool "Neoverse-N1: workaround mis-ordering of instruction fetches (rare)"
help
This option adds a workaround for ARM Neoverse-N1 erratum
1542419.
@@ -756,7 +756,7 @@ config ARM64_ERRATUM_1542419
Workaround the issue by hiding the DIC feature from EL0. This
forces user-space to perform cache maintenance.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_1508412
bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
@@ -931,8 +931,7 @@ config ARM64_ERRATUM_2224489
If unsure, say Y.
config ARM64_ERRATUM_2441009
- bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
- default y
+ bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI (rare)"
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A510 erratum #2441009.
@@ -945,7 +944,7 @@ config ARM64_ERRATUM_2441009
Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.
- If unsure, say Y.
+ If unsure, say N.
config ARM64_ERRATUM_2064142
bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
@@ -1427,7 +1426,7 @@ config SCHED_SMT
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
- default "256"
+ default "512"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index a88cdf910687..0e075d3c546b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,6 +41,8 @@ KBUILD_CFLAGS += -mgeneral-regs-only \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
+KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
+
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
@@ -65,7 +67,9 @@ endif
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti
+ KBUILD_RUSTFLAGS += -Zbranch-protection=bti,pac-ret
else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
+ KBUILD_RUSTFLAGS += -Zbranch-protection=pac-ret
ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y)
KBUILD_CFLAGS += -mbranch-protection=pac-ret
else
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 513787e43329..96b18a707507 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -38,10 +38,6 @@
msr daifset, #0xf
.endm
- .macro enable_daif
- msr daifclr, #0xf
- .endm
-
/*
* Save/restore interrupts.
*/
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index 1abdcd508a11..beb42c62b6ac 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -11,6 +11,7 @@
* 0x004: for installing kprobes
* 0x005: for installing uprobes
* 0x006: for kprobe software single-step
+ * 0x007: for kretprobe return
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
@@ -23,6 +24,7 @@
#define KPROBES_BRK_IMM 0x004
#define UPROBES_BRK_IMM 0x005
#define KPROBES_BRK_SS_IMM 0x006
+#define KRETPROBES_BRK_IMM 0x007
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 97932fbf973d..3f93f4eef953 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -201,16 +201,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
-#ifdef CONFIG_COMPAT
-
-/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
-
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
typedef unsigned int compat_elf_greg_t;
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
+#ifdef CONFIG_COMPAT
+
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
+
/* AArch32 EABI. */
#define EF_ARM_EABI_MASK 0xff000000
int compat_elf_check_arch(const struct elf32_hdr *);
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ad688e157c9b..f296662590c7 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -74,7 +74,7 @@ void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
+void do_signal(struct pt_regs *regs);
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 74afca3bd312..47cbd1da40b4 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,6 @@
#include <linux/stddef.h>
#include <linux/types.h>
-#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
#define VFP_FPSCR_CTRL_MASK 0x07f79f00
@@ -30,7 +29,6 @@
* control/status register.
*/
#define VFP_STATE_SIZE ((32 * 8) + 4)
-#endif
static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 84055329cd8b..bd81cf17744a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -59,7 +59,6 @@ static inline void decode_ctrl_reg(u32 reg,
/* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2
-#define AARCH64_ESR_ACCESS_MASK (1 << 6)
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 3b694511b98f..8d825522c55c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -24,25 +24,29 @@
#define __raw_writeb __raw_writeb
static __always_inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
- asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u8 __iomem *ptr = addr;
+ asm volatile("strb %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writew __raw_writew
static __always_inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
- asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u16 __iomem *ptr = addr;
+ asm volatile("strh %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writel __raw_writel
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
- asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u32 __iomem *ptr = addr;
+ asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_writeq __raw_writeq
static __always_inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
- asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
+ volatile u64 __iomem *ptr = addr;
+ asm volatile("str %x0, %1" : : "rZ" (val), "Qo" (*ptr));
}
#define __raw_readb __raw_readb
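
Note: the switch from an "r" (addr) register operand to a "Qo" memory operand
lets the compiler use base-plus-offset addressing for MMIO stores instead of
first materialising every target address in a register. A toy illustration
(not kernel code) of the codegen difference:

    /* Toy illustration only. */
    struct foo { unsigned int pad; unsigned int reg; };

    static inline void write_reg(struct foo *f, unsigned int val)
    {
            /* "Qo" makes f->reg an offsettable memory operand, so the
             * compiler can emit  str w1, [x0, #4]  directly; the old
             * "r" (&f->reg) form forced a separate address computation. */
            asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (f->reg));
    }
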
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d82305ab420f..60904a6c4b42 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
* of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SHIFT (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
+#define VMEMMAP_RANGE (_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
+#define VMEMMAP_SIZE ((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))
/*
* PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -47,11 +47,11 @@
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE (SZ_2G)
-#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
-#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
-#define PCI_IO_END (VMEMMAP_START - SZ_8M)
-#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
-#define FIXADDR_TOP (VMEMMAP_START - SZ_32M)
+#define VMEMMAP_START (VMEMMAP_END - VMEMMAP_SIZE)
+#define VMEMMAP_END (-UL(SZ_1G))
+#define PCI_IO_START (VMEMMAP_END + SZ_8M)
+#define PCI_IO_END (PCI_IO_START + PCI_IO_SIZE)
+#define FIXADDR_TOP (-UL(SZ_8M))
#if VA_BITS > 48
#define VA_BITS_MIN (48)
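
Note: with these definitions the regions at the top of the kernel VA space are
anchored to a fixed VMEMMAP_END rather than to a VMEMMAP_START that varied with
VA_BITS. A rough sketch of the resulting layout, derived from the new defines
(guard gaps and exact sizes depend on configuration):

    /*
     *  ...                               vmalloc area
     *  VMEMMAP_START .. VMEMMAP_END      vmemmap, ending 1GiB below the
     *                                    top of the address space
     *  PCI_IO_START  .. PCI_IO_END       PCI I/O space (PCI_IO_SIZE),
     *                                    8MiB above the vmemmap end
     *  FIXADDR_TOT_START .. FIXADDR_TOP  fixmap, ending 8MiB below the
     *                                    top of the address space
     */
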
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 79ce70fbb751..522c21348ae8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -18,11 +18,15 @@
* VMALLOC range.
*
* VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
- * and fixed mappings
+ * VMALLOC_END: extends to the available space below vmemmap
*/
#define VMALLOC_START (MODULES_END)
-#define VMALLOC_END (VMEMMAP_START - SZ_256M)
+#if VA_BITS == VA_BITS_MIN
+#define VMALLOC_END (VMEMMAP_START - SZ_8M)
+#else
+#define VMEMMAP_UNUSED_NPAGES ((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
+#endif
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h
index 754ab751b523..72aefc081061 100644
--- a/arch/arm64/include/uapi/asm/sve_context.h
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -13,6 +13,17 @@
#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */
+/*
+ * Yes, __SVE_VQ_MAX is 512 QUADWORDS.
+ *
+ * To help ensure forward portability, this is much larger than the
+ * current maximum value defined by the SVE architecture. While arrays
+ * or static allocations can be sized based on this value, watch out!
+ * It will waste a surprisingly large amount of memory.
+ *
+ * Dynamic sizing based on the actual runtime vector length is likely to
+ * be preferable for most purposes.
+ */
#define __SVE_VQ_MIN 1
#define __SVE_VQ_MAX 512
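
Note: following the advice in the new comment, userspace can size SVE state
buffers from the runtime vector length rather than __SVE_VQ_MAX. A hedged
userspace sketch, assuming the standard PR_SVE_GET_VL prctl:

    #include <linux/prctl.h>
    #include <sys/prctl.h>
    #include <asm/sve_context.h>

    static int current_vq(void)
    {
            int ret = prctl(PR_SVE_GET_VL);

            if (ret < 0)
                    return ret;             /* SVE not supported */
            /* The low bits hold the vector length in bytes per Z register;
             * one quadword is __SVE_VQ_BYTES (16) bytes. */
            return (ret & PR_SVE_VL_LEN_MASK) / __SVE_VQ_BYTES;
    }
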
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5a7dbbe0ce63..81496083c041 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,8 +75,8 @@ int main(void)
DEFINE(S_FP, offsetof(struct pt_regs, regs[29]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
- DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc));
+ DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aefda789f510..6e1cca7b2098 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -3124,13 +3124,9 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
for (i = 0; i < ARM64_NCAPS; i++) {
- unsigned int num;
-
caps = cpucap_ptrs[i];
- if (!caps || !(caps->type & scope_mask))
- continue;
- num = caps->capability;
- if (!cpus_have_cap(num))
+ if (!caps || !(caps->type & scope_mask) ||
+ !cpus_have_cap(caps->capability))
continue;
if (boot_scope && caps->cpu_enable)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 0fc94207e69a..b77a15955f28 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
+#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>
@@ -126,16 +127,49 @@ static __always_inline void __exit_to_user_mode(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
+static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
+{
+ do {
+ local_irq_enable();
+
+ if (thread_flags & _TIF_NEED_RESCHED)
+ schedule();
+
+ if (thread_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR,
+ (void __user *)NULL, current);
+ }
+
+ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+
+ if (thread_flags & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+
+ local_irq_disable();
+ thread_flags = read_thread_flags();
+ } while (thread_flags & _TIF_WORK_MASK);
+}
+
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long flags;
- local_daif_mask();
+ local_irq_disable();
flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
+ local_daif_mask();
+
lockdep_sys_exit();
}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8e24b5e5e192..0cd2bfb38bb0 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1146,6 +1146,8 @@ void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb();
+
+ write_sysreg_s(0, SYS_ZCR_EL1);
}
void __init sve_setup(void)
@@ -1257,6 +1259,9 @@ void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
isb();
+ /* Ensure all bits in SMCR are set to known values */
+ write_sysreg_s(0, SYS_SMCR_EL1);
+
/* Allow EL0 to access TPIDR2 */
write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
isb();
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 35225632d70a..2f5755192c2b 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -21,6 +21,7 @@
#include <asm/current.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
@@ -779,7 +780,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
* Check that the access type matches.
* 0 => load, otherwise => store
*/
- access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+ access = (esr & ESR_ELx_WNR) ? HW_BREAKPOINT_W :
HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
continue;
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 5e4dc72ab1bd..e931ce078a00 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -36,6 +36,8 @@ PROVIDE(__pi___memcpy = __pi_memcpy);
PROVIDE(__pi___memmove = __pi_memmove);
PROVIDE(__pi___memset = __pi_memset);
+PROVIDE(__pi_vabits_actual = vabits_actual);
+
#ifdef CONFIG_KVM
/*
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
index 17bff6e399e4..b9e0bb4bc6a9 100644
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -14,6 +14,7 @@
#include <asm/archrandom.h>
#include <asm/memory.h>
+#include <asm/pgtable.h>
/* taken from lib/string.c */
static char *__strstr(const char *s1, const char *s2)
@@ -87,7 +88,7 @@ static u64 get_kaslr_seed(void *fdt)
asmlinkage u64 kaslr_early_init(void *fdt)
{
- u64 seed;
+ u64 seed, range;
if (is_kaslr_disabled_cmdline(fdt))
return 0;
@@ -102,9 +103,9 @@ asmlinkage u64 kaslr_early_init(void *fdt)
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
- * the lower and upper quarters to avoid colliding with other
- * allocations.
+ * 'middle' half of the VMALLOC area, and stay clear of the lower and
+ * upper quarters to avoid colliding with other allocations.
*/
- return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0));
+ range = (VMALLOC_END - KIMAGE_VADDR) / 2;
+ return range / 2 + (((__uint128_t)range * seed) >> 64);
}
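
Note: the replacement return expression is a multiply-shift. For a uniform
64-bit seed, ((__uint128_t)range * seed) >> 64 is uniform over [0, range)
without the modulo bias of seed % range, and adding range / 2 re-centres the
offset into [range/2, 3*range/2), i.e. the middle half of the 2*range vmalloc
window. In isolation:

    /* seed uniform in [0, 2^64)  =>  result uniform in [0, range) */
    static unsigned long long scale(unsigned long long seed,
                                    unsigned long long range)
    {
            return ((unsigned __int128)range * seed) >> 64;
    }
    /* KASLR offset = range / 2 + scale(seed, range) */
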
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 70b91a8c6bb3..327855a11df2 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -371,6 +371,21 @@ static struct break_hook kprobes_break_ss_hook = {
.fn = kprobe_breakpoint_ss_handler,
};
+static int __kprobes
+kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+{
+ if (regs->pc != (unsigned long)__kretprobe_trampoline)
+ return DBG_HOOK_ERROR;
+
+ regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook kretprobes_break_hook = {
+ .imm = KRETPROBES_BRK_IMM,
+ .fn = kretprobe_breakpoint_handler,
+};
+
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -396,11 +411,6 @@ int __init arch_populate_kprobe_blacklist(void)
return ret;
}
-void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
-{
- return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
-}
-
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
@@ -420,6 +430,7 @@ int __init arch_init_kprobes(void)
{
register_kernel_break_hook(&kprobes_break_hook);
register_kernel_break_hook(&kprobes_break_ss_hook);
+ register_kernel_break_hook(&kretprobes_break_hook);
return 0;
}
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 9a6499bed58b..a362f3dbb3d1 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -4,83 +4,17 @@
*/
#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
+#include <asm/asm-bug.h>
#include <asm/assembler.h>
.text
- .macro save_all_base_regs
- stp x0, x1, [sp, #S_X0]
- stp x2, x3, [sp, #S_X2]
- stp x4, x5, [sp, #S_X4]
- stp x6, x7, [sp, #S_X6]
- stp x8, x9, [sp, #S_X8]
- stp x10, x11, [sp, #S_X10]
- stp x12, x13, [sp, #S_X12]
- stp x14, x15, [sp, #S_X14]
- stp x16, x17, [sp, #S_X16]
- stp x18, x19, [sp, #S_X18]
- stp x20, x21, [sp, #S_X20]
- stp x22, x23, [sp, #S_X22]
- stp x24, x25, [sp, #S_X24]
- stp x26, x27, [sp, #S_X26]
- stp x28, x29, [sp, #S_X28]
- add x0, sp, #PT_REGS_SIZE
- stp lr, x0, [sp, #S_LR]
- /*
- * Construct a useful saved PSTATE
- */
- mrs x0, nzcv
- mrs x1, daif
- orr x0, x0, x1
- mrs x1, CurrentEL
- orr x0, x0, x1
- mrs x1, SPSel
- orr x0, x0, x1
- stp xzr, x0, [sp, #S_PC]
- .endm
-
- .macro restore_all_base_regs
- ldr x0, [sp, #S_PSTATE]
- and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
- msr nzcv, x0
- ldp x0, x1, [sp, #S_X0]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldp x8, x9, [sp, #S_X8]
- ldp x10, x11, [sp, #S_X10]
- ldp x12, x13, [sp, #S_X12]
- ldp x14, x15, [sp, #S_X14]
- ldp x16, x17, [sp, #S_X16]
- ldp x18, x19, [sp, #S_X18]
- ldp x20, x21, [sp, #S_X20]
- ldp x22, x23, [sp, #S_X22]
- ldp x24, x25, [sp, #S_X24]
- ldp x26, x27, [sp, #S_X26]
- ldp x28, x29, [sp, #S_X28]
- .endm
-
SYM_CODE_START(__kretprobe_trampoline)
- sub sp, sp, #PT_REGS_SIZE
-
- save_all_base_regs
-
- /* Setup a frame pointer. */
- add x29, sp, #S_FP
-
- mov x0, sp
- bl trampoline_probe_handler
/*
- * Replace trampoline address in lr with actual orig_ret_addr return
- * address.
+ * Trigger a breakpoint exception. The PC will be adjusted by
+ * kretprobe_breakpoint_handler(), and no subsequent instructions will
+ * be executed from the trampoline.
*/
- mov lr, x0
-
- /* The frame pointer (x29) is restored with other registers. */
- restore_all_base_regs
-
- add sp, sp, #PT_REGS_SIZE
- ret
-
+ brk #KRETPROBES_BRK_IMM
+ ASM_BUG()
SYM_CODE_END(__kretprobe_trampoline)
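
Note: the net effect of the two probes/ hunks is that the hand-rolled register
save/restore sequence is replaced by the regular debug-exception path, which
already builds a full pt_regs. The resulting control flow, sketched from the
hunks above:

    /*
     * probed function returns; LR = __kretprobe_trampoline
     *   -> BRK #KRETPROBES_BRK_IMM raises a debug exception
     *   -> kretprobe_breakpoint_handler() checks regs->pc against the
     *      trampoline and rewrites it via kretprobe_trampoline_handler()
     *   -> exception return resumes at the original return address;
     *      ASM_BUG() traps if the BRK ever falls through.
     */
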
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7387b68c745b..4ae31b7af6c3 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -290,9 +290,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
fpsimd_preserve_current_state();
*dst = *src;
- /* We rely on the above assignment to initialize dst's thread_flags: */
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
-
/*
* Detach src's sve_state (if any) from dst so that it does not
* get erroneously used or freed prematurely. dst's copies
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index aacb45bd36e6..b096c8be3bcf 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -174,7 +174,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
const char *desc = "Hardware breakpoint trap (ptrace)";
-#ifdef CONFIG_COMPAT
if (is_compat_task()) {
int si_errno = 0;
int i;
@@ -196,7 +195,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
desc);
return;
}
-#endif
+
arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
@@ -1638,7 +1637,6 @@ static const struct user_regset_view user_aarch64_view = {
.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
-#ifdef CONFIG_COMPAT
enum compat_regset {
REGSET_COMPAT_GPR,
REGSET_COMPAT_VFP,
@@ -1895,6 +1893,7 @@ static const struct user_regset_view user_aarch32_ptrace_view = {
.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
+#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -2156,7 +2155,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
-#ifdef CONFIG_COMPAT
/*
* Core dumping of 32-bit tasks or compat ptrace requests must use the
* user_aarch32_view compatible with arm32. Native ptrace requests on
@@ -2167,7 +2165,7 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_aarch32_view;
else if (is_compat_thread(task_thread_info(task)))
return &user_aarch32_ptrace_view;
-#endif
+
return &user_aarch64_view;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 42c690bb2d60..ab43bfa85368 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -320,9 +320,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
dynamic_scs_init();
/*
- * Unmask asynchronous aborts and fiq after bringing up possible
- * earlycon. (Report possible System Errors once we can report this
- * occurred).
+ * Unmask SError as soon as possible after initializing earlycon so
+ * that we can report any SErrors immediately.
*/
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 460823baa603..ac69b604cac9 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -16,8 +16,8 @@
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
-#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
+#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <asm/daifflags.h>
@@ -1266,7 +1266,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-static void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
{
unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0;
@@ -1337,41 +1337,6 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
-{
- do {
- if (thread_flags & _TIF_NEED_RESCHED) {
- /* Unmask Debug and SError for the next task */
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
- schedule();
- } else {
- local_daif_restore(DAIF_PROCCTX);
-
- if (thread_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
- if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
- clear_thread_flag(TIF_MTE_ASYNC_FAULT);
- send_sig_fault(SIGSEGV, SEGV_MTEAERR,
- (void __user *)NULL, current);
- }
-
- if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- do_signal(regs);
-
- if (thread_flags & _TIF_NOTIFY_RESUME)
- resume_user_mode_work(regs);
-
- if (thread_flags & _TIF_FOREIGN_FPSTATE)
- fpsimd_restore_current_state();
- }
-
- local_daif_mask();
- thread_flags = read_thread_flags();
- } while (thread_flags & _TIF_WORK_MASK);
-}
-
unsigned long __ro_after_init signal_minsigstksz;
/*
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 9a70d9746b66..ad198262b981 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -20,14 +20,11 @@ long sys_ni_syscall(void);
static long do_ni_syscall(struct pt_regs *regs, int scno)
{
-#ifdef CONFIG_COMPAT
- long ret;
if (is_compat_task()) {
- ret = compat_arm_syscall(regs, scno);
+ long ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS)
return ret;
}
-#endif
return sys_ni_syscall();
}
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
index c0a3301203bd..6fc17b2e1714 100644
--- a/arch/arm64/mm/fixmap.c
+++ b/arch/arm64/mm/fixmap.c
@@ -16,6 +16,9 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+/* ensure that the fixmap region does not grow down into the PCI I/O region */
+static_assert(FIXADDR_TOT_START > PCI_IO_END);
+
#define NR_BM_PTE_TABLES \
SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index e305b6593c4e..5f0849528ccf 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -26,34 +26,6 @@
#include <asm/ptdump.h>
-enum address_markers_idx {
- PAGE_OFFSET_NR = 0,
- PAGE_END_NR,
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- KASAN_START_NR,
-#endif
-};
-
-static struct addr_marker address_markers[] = {
- { PAGE_OFFSET, "Linear Mapping start" },
- { 0 /* PAGE_END */, "Linear Mapping end" },
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
- { KASAN_SHADOW_END, "Kasan shadow end" },
-#endif
- { MODULES_VADDR, "Modules start" },
- { MODULES_END, "Modules end" },
- { VMALLOC_START, "vmalloc() area" },
- { VMALLOC_END, "vmalloc() end" },
- { FIXADDR_TOT_START, "Fixmap start" },
- { FIXADDR_TOP, "Fixmap end" },
- { PCI_IO_START, "PCI I/O start" },
- { PCI_IO_END, "PCI I/O end" },
- { VMEMMAP_START, "vmemmap start" },
- { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
- { -1, NULL },
-};
-
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
@@ -339,9 +311,8 @@ static void __init ptdump_initialize(void)
pg_level[i].mask |= pg_level[i].bits[j].mask;
}
-static struct ptdump_info kernel_ptdump_info = {
+static struct ptdump_info kernel_ptdump_info __ro_after_init = {
.mm = &init_mm,
- .markers = address_markers,
.base_addr = PAGE_OFFSET,
};
@@ -375,10 +346,31 @@ void ptdump_check_wx(void)
static int __init ptdump_init(void)
{
- address_markers[PAGE_END_NR].start_address = PAGE_END;
+ u64 page_offset = _PAGE_OFFSET(vabits_actual);
+ u64 vmemmap_start = (u64)virt_to_page((void *)page_offset);
+ struct addr_marker m[] = {
+ { PAGE_OFFSET, "Linear Mapping start" },
+ { PAGE_END, "Linear Mapping end" },
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
- address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
+ { KASAN_SHADOW_START, "Kasan shadow start" },
+ { KASAN_SHADOW_END, "Kasan shadow end" },
#endif
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { VMALLOC_START, "vmalloc() area" },
+ { VMALLOC_END, "vmalloc() end" },
+ { vmemmap_start, "vmemmap start" },
+ { VMEMMAP_END, "vmemmap end" },
+ { PCI_IO_START, "PCI I/O start" },
+ { PCI_IO_END, "PCI I/O end" },
+ { FIXADDR_TOT_START, "Fixmap start" },
+ { FIXADDR_TOP, "Fixmap end" },
+ { -1, NULL },
+ };
+ static struct addr_marker address_markers[ARRAY_SIZE(m)] __ro_after_init;
+
+ kernel_ptdump_info.markers = memcpy(address_markers, m, sizeof(m));
+
ptdump_initialize();
ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
return 0;
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 4c9b67934367..508224a0e078 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -200,6 +200,7 @@ UnsignedEnum 27:24 PerfMon
0b0110 PMUv3p5
0b0111 PMUv3p7
0b1000 PMUv3p8
+ 0b1001 PMUv3p9
0b1111 IMPDEF
EndEnum
Enum 23:20 MProfDbg
@@ -231,6 +232,7 @@ Enum 3:0 CopDbg
0b1000 Debugv8p2
0b1001 Debugv8p4
0b1010 Debugv8p8
+ 0b1011 Debugv8p9
EndEnum
EndSysreg
@@ -1221,6 +1223,7 @@ UnsignedEnum 35:32 PMSVer
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
+ 0b0101 V1P4
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
@@ -1247,11 +1250,41 @@ UnsignedEnum 3:0 DebugVer
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
+ 0b1011 V8P9
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
-Res0 63:0
+Field 63:56 ABL_CMPs
+UnsignedEnum 55:52 DPFZS
+ 0b0000 IGNR
+ 0b0001 FRZN
+EndEnum
+UnsignedEnum 51:48 EBEP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 47:44 ITE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 43:40 ABLE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 39:36 PMICNTR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 35:32 SPMU
+ 0b0000 NI
+ 0b0001 IMP
+ 0b0010 IMP_SPMZR
+EndEnum
+Field 31:24 CTX_CMPs
+Field 23:16 WRPs
+Field 15:8 BRPs
+Field 7:0 SYSPMUID
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 983aa2b1629a..fa4fb09909ae 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -82,6 +82,7 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
ifeq ($(CONFIG_RELOCATABLE),y)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2264db14a25d..18cf8f0cf7cd 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -68,6 +68,7 @@ export BITS
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
ifeq ($(CONFIG_X86_KERNEL_IBT),y)