Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kbuild                        |   6
-rw-r--r--  arch/arm64/Kconfig                       |  11
-rw-r--r--  arch/arm64/Makefile                      |   6
-rw-r--r--  arch/arm64/include/asm/assembler.h       |  11
-rw-r--r--  arch/arm64/include/asm/atomic.h          |   5
-rw-r--r--  arch/arm64/include/asm/cache.h           |   2
-rw-r--r--  arch/arm64/include/asm/compat.h          |   2
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h         |   3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h      |  28
-rw-r--r--  arch/arm64/include/asm/cputype.h         |  21
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h  |   3
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h     |   3
-rw-r--r--  arch/arm64/include/asm/esr.h             |   3
-rw-r--r--  arch/arm64/include/asm/exception.h       |   2
-rw-r--r--  arch/arm64/include/asm/fpsimd.h          |   2
-rw-r--r--  arch/arm64/include/asm/futex.h           |   3
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h   |   3
-rw-r--r--  arch/arm64/include/asm/io.h              |   7
-rw-r--r--  arch/arm64/include/asm/irqflags.h        |   5
-rw-r--r--  arch/arm64/include/asm/memory.h          |   2
-rw-r--r--  arch/arm64/include/asm/mmu.h             |   2
-rw-r--r--  arch/arm64/include/asm/pci.h             |   2
-rw-r--r--  arch/arm64/include/asm/pgtable.h         |  13
-rw-r--r--  arch/arm64/include/asm/proc-fns.h        |   2
-rw-r--r--  arch/arm64/include/asm/processor.h       |  11
-rw-r--r--  arch/arm64/include/asm/ptrace.h          |   5
-rw-r--r--  arch/arm64/include/asm/signal32.h        |   2
-rw-r--r--  arch/arm64/include/asm/sysreg.h          |  32
-rw-r--r--  arch/arm64/include/asm/thread_info.h     |  29
-rw-r--r--  arch/arm64/include/asm/tlbflush.h        |   1
-rw-r--r--  arch/arm64/include/asm/topology.h        |  23
-rw-r--r--  arch/arm64/include/asm/uaccess.h         |  12
-rw-r--r--  arch/arm64/include/asm/vdso.h            |   4
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h   |   4
-rw-r--r--  arch/arm64/include/uapi/asm/stat.h       |  17
-rw-r--r--  arch/arm64/kernel/cpufeature.c           |   2
-rw-r--r--  arch/arm64/kernel/cpuidle.c              |  50
-rw-r--r--  arch/arm64/kernel/cpuinfo.c              |   2
-rw-r--r--  arch/arm64/kernel/entry.S                |  36
-rw-r--r--  arch/arm64/kernel/head.S                 | 104
-rw-r--r--  arch/arm64/kernel/image-vars.h           |  51
-rw-r--r--  arch/arm64/kernel/image.h                |  42
-rw-r--r--  arch/arm64/kernel/insn.c                 |   2
-rw-r--r--  arch/arm64/kernel/kaslr.c                |   5
-rw-r--r--  arch/arm64/kernel/kexec_image.c          |   2
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c   |  22
-rw-r--r--  arch/arm64/kernel/module-plts.c          |   2
-rw-r--r--  arch/arm64/kernel/perf_event.c           |   2
-rw-r--r--  arch/arm64/kernel/process.c              |  76
-rw-r--r--  arch/arm64/kernel/psci.c                 |  10
-rw-r--r--  arch/arm64/kernel/ptrace.c               |   2
-rw-r--r--  arch/arm64/kernel/setup.c                |  20
-rw-r--r--  arch/arm64/kernel/smp.c                  |   4
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c       |   2
-rw-r--r--  arch/arm64/kernel/topology.c             | 312
-rw-r--r--  arch/arm64/kernel/traps.c                |  20
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S          |  11
-rw-r--r--  arch/arm64/kvm/hyp/switch.c              |   2
-rw-r--r--  arch/arm64/lib/Makefile                  |   2
-rw-r--r--  arch/arm64/lib/error-inject.c            |  18
-rw-r--r--  arch/arm64/mm/fault.c                    |  37
-rw-r--r--  arch/arm64/mm/init.c                     |   6
-rw-r--r--  arch/arm64/mm/mmu.c                      |  15
-rw-r--r--  arch/arm64/mm/numa.c                     |   2
-rw-r--r--  arch/arm64/mm/pageattr.c                 |   2
-rw-r--r--  arch/arm64/mm/proc.S                     |   9
66 files changed, 555 insertions, 604 deletions
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
new file mode 100644
index 000000000000..d6465823b281
--- /dev/null
+++ b/arch/arm64/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/ mm/
+obj-$(CONFIG_NET) += net/
+obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_XEN) += xen/
+obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f5f7cb75a698..6481964b6425 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -148,6 +148,7 @@ config ARM64
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
@@ -1127,6 +1128,15 @@ config ARM64_SW_TTBR0_PAN
zeroed area and reserved ASID. The user access routines
restore the valid TTBR0_EL1 temporarily.
+config ARM64_TAGGED_ADDR_ABI
+ bool "Enable the tagged user addresses syscall ABI"
+ default y
+ help
+ When this option is enabled, user applications can opt in to a
+ relaxed ABI via prctl() allowing tagged addresses to be passed
+ to system calls as pointer arguments. For details, see
+ Documentation/arm64/tagged-address-abi.txt.
+
menuconfig COMPAT
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
@@ -1484,6 +1494,7 @@ endif
config RELOCATABLE
bool
+ select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
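[Editor's note: the ARM64_TAGGED_ADDR_ABI entry above is exercised from userspace through prctl(). A minimal, hypothetical sketch of the opt-in follows; the constants come from <linux/prctl.h>, guarded here in case older headers lack them.]

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	/* Opt this task in to the relaxed ABI; the unused args must be 0. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");

	/* Read the current setting back (1 == tagged addresses allowed). */
	printf("tagged addr ABI: %d\n",
	       prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
	return 0;
}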
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index a8d2a241ac58..2847b36f72ed 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -128,11 +128,7 @@ KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
export TEXT_OFFSET GZFLAGS
-core-y += arch/arm64/kernel/ arch/arm64/mm/
-core-$(CONFIG_NET) += arch/arm64/net/
-core-$(CONFIG_KVM) += arch/arm64/kvm/
-core-$(CONFIG_XEN) += arch/arm64/xen/
-core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c066fc4976cd..b8cf7c85ffa2 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -124,17 +124,6 @@ alternative_endif
.endm
/*
- * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
- * of bounds.
- */
- .macro mask_nospec64, idx, limit, tmp
- sub \tmp, \idx, \limit
- bic \tmp, \tmp, \idx
- and \idx, \idx, \tmp, asr #63
- csdb
- .endm
-
-/*
* NOP sequence
*/
.macro nops, num
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 657b0457d83c..a5ca23950cfd 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/lse.h>
-#ifdef __KERNEL__
-
#define __ARM64_IN_ATOMIC_IMPL
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
@@ -157,5 +155,4 @@
#include <asm-generic/atomic-instrumented.h>
-#endif
-#endif
+#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 64eeaa41e7ca..43da6dd29592 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void)
return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
}
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
static inline int cache_line_size_of_cpu(void)
{
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fb8ad4616b3b..b0d53a265f1d 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -4,7 +4,6 @@
*/
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H
-#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
/*
@@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread)
}
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
#endif /* __ASM_COMPAT_H */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c09d633c3109..86aabf1e0199 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -23,6 +23,8 @@
* @cpu_boot: Boots a cpu into the kernel.
* @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
* synchronisation. Called from the cpu being booted.
+ * @cpu_can_disable: Determines whether a CPU can be disabled based on
+ * mechanism-specific information.
* @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
* reason, which will cause the hot unplug to be aborted. Called
* from the cpu to be killed.
@@ -42,6 +44,7 @@ struct cpu_operations {
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
#ifdef CONFIG_HOTPLUG_CPU
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c96ffa4722d3..9cde5d2e768f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -289,9 +289,16 @@ struct arm64_cpu_capabilities {
u16 type;
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
/*
- * Take the appropriate actions to enable this capability for this CPU.
- * For each successfully booted CPU, this method is called for each
- * globally detected capability.
+ * Take the appropriate actions to configure this capability
+ * for this CPU. If the capability is detected by the kernel
+ * this will be called on all the CPUs in the system,
+ * including the hotplugged CPUs, regardless of whether the
+ * capability is available on that specific CPU. This is
+ * useful for some capabilities (e.g., working around CPU
+ * errata), where all the CPUs must take some action (e.g.,
+ * changing system control/configuration). Thus, if an action
+ * is required only if the CPU has the capability, then the
+ * routine must check it before taking any action.
*/
void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
union {
@@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
return false;
}
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static inline void
-cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
- const struct arm64_cpu_capabilities *caps;
-
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
- caps->cpu_enable)
- caps->cpu_enable(caps);
-}
-
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index e7d46631cc42..b1454d117cd2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -51,14 +51,6 @@
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
-({ \
- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
- \
- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
- })
-
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@@ -159,10 +151,19 @@ struct midr_range {
#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+ u32 rv_max)
+{
+ u32 _model = midr & MIDR_CPU_MODEL_MASK;
+ u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+ return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
{
- return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
- range->rv_min, range->rv_max);
+ return midr_is_cpu_model_range(midr, range->model,
+ range->rv_min, range->rv_max);
}
static inline bool
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index d8ec5bb881c2..7619f473155f 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -5,8 +5,6 @@
#ifndef __ASM_DEBUG_MONITORS_H
#define __ASM_DEBUG_MONITORS_H
-#ifdef __KERNEL__
-
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/brk-imm.h>
@@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
int aarch32_break_handler(struct pt_regs *regs);
#endif /* __ASSEMBLY */
-#endif /* __KERNEL__ */
#endif /* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index bdcb0922a40c..fb3e5044f473 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -5,8 +5,6 @@
#ifndef __ASM_DMA_MAPPING_H
#define __ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->dma_coherent;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 65ac18400979..cb29253ae86b 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -34,7 +34,8 @@
#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
-/* Unallocated EC: 0x1A - 0x1E */
+#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+/* Unallocated EC: 0x1b - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ed57b760f38c..a17393ff6677 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
+asmlinkage void enter_from_user_mode(void);
+
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b6a2c352f4c3..59f10dd13f12 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
#define VFP_FPSCR_CTRL_MASK 0x07f79f00
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 6211e3105491..6cc26a127819 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -5,8 +5,6 @@
#ifndef __ASM_FUTEX_H
#define __ASM_FUTEX_H
-#ifdef __KERNEL__
-
#include <linux/futex.h>
#include <linux/uaccess.h>
@@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
return ret;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_FUTEX_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index db9ab760e6fd..bc7aaed4b34e 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -10,8 +10,6 @@
#include <asm/sysreg.h>
#include <asm/virt.h>
-#ifdef __KERNEL__
-
struct arch_hw_breakpoint_ctrl {
u32 __reserved : 19,
len : 8,
@@ -156,5 +154,4 @@ static inline int get_num_wrps(void)
ID_AA64DFR0_WRPS_SHIFT);
}
-#endif /* __KERNEL__ */
#endif /* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7ed92626949d..e9763831186a 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -8,8 +8,6 @@
#ifndef __ASM_IO_H
#define __ASM_IO_H
-#ifdef __KERNEL__
-
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
({ \
unsigned long tmp; \
\
- rmb(); \
+ dma_rmb(); \
\
/* \
* Create a dummy control dependency from the IO read to any \
@@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
})
#define __io_par(v) __iormb(v)
-#define __iowmb() wmb()
+#define __iowmb() dma_wmb()
/*
* Relaxed I/O memory access primitives. These follow the Device memory
@@ -207,5 +205,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
-#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 7872f260c9ee..1a59f0ed1ae3 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -5,8 +5,6 @@
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H
-#ifdef __KERNEL__
-
#include <asm/alternative.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
@@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags)
: "memory");
}
-#endif
-#endif
+#endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a713bad71db5..b61b50bf68b1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -216,7 +216,7 @@ static inline unsigned long kaslr_offset(void)
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) \
- ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+ ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
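[Editor's note: the untagged_addr() change above only adds __force casts; the underlying trick is unchanged. Sign-extending from bit 55 clears the tag byte of a user address (bit 55 is 0 below the user/kernel split) while leaving kernel addresses all-ones. A standalone sketch, with sign_extend64() re-implemented locally for illustration:]

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend64(): sign-extend from
 * bit `index` to the full 64 bits. */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* Tag 0xab in bits 63:56 of a user-space-looking address. */
	uint64_t tagged = 0xab00007fdeadbeefULL;

	/* Bit 55 is 0 for a user address, so bits 63:56 collapse to 0
	 * and the tag disappears; prints 0x7fdeadbeef. */
	printf("%#llx\n", (unsigned long long)sign_extend64(tagged, 55));
	return 0;
}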
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index fd6161336653..f217e3292919 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -126,7 +126,7 @@ extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 9e690686e8aa..70b323cf8300 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PCI_H
#define __ASM_PCI_H
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/slab.h>
@@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI */
-#endif /* __KERNEL__ */
#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 979e24fadf35..9a8f7e51c2b1 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* Only if the new pte is valid and kernel, otherwise TLB maintenance
* or update_mmu_cache() have the necessary barriers.
*/
- if (pte_valid_not_user(pte))
+ if (pte_valid_not_user(pte)) {
dsb(ishst);
+ isb();
+ }
}
extern void __sync_icache_dcache(pte_t pteval);
@@ -481,8 +483,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
WRITE_ONCE(*pmdp, pmd);
- if (pmd_valid(pmd))
+ if (pmd_valid(pmd)) {
dsb(ishst);
+ isb();
+ }
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -540,8 +544,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
WRITE_ONCE(*pudp, pud);
- if (pud_valid(pud))
+ if (pud_valid(pud)) {
dsb(ishst);
+ isb();
+ }
}
static inline void pud_clear(pud_t *pudp)
@@ -599,6 +605,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
WRITE_ONCE(*pgdp, pgd);
dsb(ishst);
+ isb();
}
static inline void pgd_clear(pgd_t *pgdp)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 368d90a9d0e5..a2ce65a0c1fa 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -9,7 +9,6 @@
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c93945e477..c67848c55009 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -20,7 +20,6 @@
#define NET_IP_ALIGN 0
#ifndef __ASSEMBLY__
-#ifdef __KERNEL__
#include <linux/build_bug.h>
#include <linux/cache.h>
@@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-#endif
-
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
@@ -306,6 +303,14 @@ extern void __init minsigstksz_setup(void);
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
+long set_tagged_addr_ctrl(unsigned long arg);
+long get_tagged_addr_ctrl(void);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl()
+#endif
+
/*
* For CONFIG_GCC_PLUGIN_STACKLEAK
*
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 1dcf63a9ac1f..fbebb411ae20 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->regs[0];
}
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->regs[0] = rc;
+}
+
/**
* regs_get_kernel_argument() - get Nth function argument in kernel
* @regs: pt_regs of that context
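[Editor's note: regs_set_return_value() above is the arch hook needed by HAVE_FUNCTION_ERROR_INJECTION, selected in the Kconfig hunk earlier. The companion arch/arm64/lib/error-inject.c (18 lines in the diffstat, its hunk not shown in this section) plausibly amounts to the sketch below: skip the probed function and return straight to its caller, with the injected errno placed in x0 via regs_set_return_value() by the generic error-injection code.]

// SPDX-License-Identifier: GPL-2.0

#include <linux/error-injection.h>
#include <linux/kprobes.h>

void override_function_with_return(struct pt_regs *regs)
{
	/*
	 * Skip the body of the probed function entirely: resume at the
	 * return address held in the link register (x30), as captured
	 * by the kprobe on function entry.
	 */
	instruction_pointer_set(regs, procedure_link_pointer(regs));
}
NOKPROBE_SYMBOL(override_function_with_return);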
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index bd43d1cf724b..7e9f163d02ec 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -5,7 +5,6 @@
#ifndef __ASM_SIGNAL32_H
#define __ASM_SIGNAL32_H
-#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
@@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs)
{
}
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
#endif /* __ASM_SIGNAL32_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 06ebcfef73df..972d196c7714 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,6 +212,9 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+#define SYS_PAR_EL1_F BIT(0)
+#define SYS_PAR_EL1_FST GENMASK(6, 1)
+
/*** Statistical Profiling Extension ***/
/* ID registers */
#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
@@ -499,28 +502,11 @@
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
(BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
(BIT(29)))
-#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \
- (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \
- (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \
- (BIT(27)) | (BIT(30)) | (BIT(31)) | \
- (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
-#define ENDIAN_CLEAR_EL2 0
#else
#define ENDIAN_SET_EL2 0
-#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
-#endif
-
-/* SCTLR_EL2 value used for the hyp-stub */
-#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
-#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
- SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
- SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL2 set/clear bits"
#endif
/* SCTLR_EL1 specific flags. */
@@ -539,16 +525,11 @@
#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
(BIT(29)))
-#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \
- (BIT(27)) | (BIT(30)) | (BIT(31)) | \
- (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
-#define ENDIAN_CLEAR_EL1 0
#else
#define ENDIAN_SET_EL1 0
-#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
#endif
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
@@ -556,13 +537,6 @@
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
-#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
- SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
- SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
-
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL1 set/clear bits"
-#endif
/* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 180b34ec5965..f0cec4160136 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -8,8 +8,6 @@
#ifndef __ASM_THREAD_INFO_H
#define __ASM_THREAD_INFO_H
-#ifdef __KERNEL__
-
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
@@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk);
#endif
-/*
- * thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
- * TIF_SYSCALL_AUDIT - syscall auditing
- * TIF_SECCOMP - syscall secure computing
- * TIF_SYSCALL_EMU - syscall emulation active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
- */
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11
-#define TIF_SYSCALL_EMU 12
+#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
+#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
+#define TIF_SECCOMP 11 /* syscall secure computing */
+#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
@@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_SVE 23 /* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
+#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk);
.addr_limit = KERNEL_DS, \
}
-#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8af7a85f76bd..bc3949064725 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
__tlbi(vaae1is, addr);
dsb(ish);
+ isb();
}
#endif
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0524f2438649..a4d945db95a2 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -4,29 +4,6 @@
#include <linux/cpumask.h>
-struct cpu_topology {
- int thread_id;
- int core_id;
- int package_id;
- int llc_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
- cpumask_t llc_sibling;
-};
-
-extern struct cpu_topology cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
-
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-void remove_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
-
#ifdef CONFIG_NUMA
struct pci_bus;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5a1c32260c1f..097d6bfac0b7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,6 +62,10 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
{
unsigned long ret, limit = current_thread_info()->addr_limit;
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
+ test_thread_flag(TIF_TAGGED_ADDR))
+ addr = untagged_addr(addr);
+
__chk_user_ptr(addr);
asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -215,7 +219,8 @@ static inline void uaccess_enable_not_uao(void)
/*
* Sanitise a uaccess pointer such that it becomes NULL if above the
- * current addr_limit.
+ * current addr_limit. In case the pointer is tagged (has the top byte set),
+ * untag the pointer before checking.
*/
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -223,10 +228,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
void __user *safe_ptr;
asm volatile(
- " bics xzr, %1, %2\n"
+ " bics xzr, %3, %2\n"
" csel %0, %1, xzr, eq\n"
: "=&r" (safe_ptr)
- : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit),
+ "r" (untagged_addr(ptr))
: "cc");
csdb();
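[Editor's note: in C terms, the reworked __uaccess_mask_ptr() above now checks the untagged pointer against addr_limit while still returning the original (possibly tagged) pointer. A rough, non-speculation-safe equivalent of the asm — illustrative only, since the real code stays in asm precisely so the bics/csel/csdb sequence remains safe under mispredicted branches:]

static inline void __user *__uaccess_mask_ptr_sketch(const void __user *ptr)
{
	unsigned long limit = current_thread_info()->addr_limit;
	unsigned long untagged = (unsigned long)untagged_addr(ptr);

	/* "bics xzr, untagged, limit": addr_limit has the form 2^n - 1,
	 * so any bit set above it fails the check and the pointer is
	 * replaced with NULL. */
	return (untagged & ~limit) == 0 ? (void __user *)ptr : NULL;
}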
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 9c15e0a06301..07468428fd29 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -5,8 +5,6 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
-#ifdef __KERNEL__
-
/*
* Default link address for the vDSO.
* Since we randomise the VDSO mapping, there's little point in trying
@@ -28,6 +26,4 @@
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __ASM_VDSO_H */
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index ba6dbc3de864..1f38bf330a6e 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -5,8 +5,6 @@
#ifndef __ASM_VDSO_DATAPAGE_H
#define __ASM_VDSO_DATAPAGE_H
-#ifdef __KERNEL__
-
#ifndef __ASSEMBLY__
struct vdso_data {
@@ -32,6 +30,4 @@ struct vdso_data {
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
deleted file mode 100644
index 313325fa22fa..000000000000
--- a/arch/arm64/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <asm-generic/stat.h>
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d19d14ba9ae4..95201e5ff5e1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -886,7 +886,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
u32 midr = read_cpuid_id();
/* Cavium ThunderX pass 1.x and 2.x */
- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+ return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index d1048173fd8a..e4d6af2fdec7 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -11,6 +11,7 @@
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/psci.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
@@ -46,17 +47,58 @@ int arm_cpuidle_suspend(int index)
#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized,
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
- return arm_cpuidle_init(cpu);
+ return psci_acpi_cpu_init_idle(cpu);
}
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
+ u32 state = lpi->address;
+
if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
- return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
- lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
else
- return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
}
#endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 876055e37352..05933c065732 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -33,7 +33,7 @@
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
-static char *icache_policy_str[] = {
+static const char *icache_policy_str[] = {
[0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN",
[ICACHE_POLICY_VIPT] = "VIPT",
[ICACHE_POLICY_PIPT] = "PIPT",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 320a30dbe35e..84a822748c84 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,9 +30,9 @@
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
- .macro ct_user_exit
+ .macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_exit
+ bl enter_from_user_mode
#endif
.endm
@@ -792,8 +792,8 @@ el0_cp15:
/*
* Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_cp15instr
@@ -805,8 +805,8 @@ el0_da:
* Data abort handling
*/
mrs x26, far_el1
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
clear_address_tag x0, x26
mov x1, x25
mov x2, sp
@@ -818,11 +818,11 @@ el0_ia:
*/
mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
+ ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -832,8 +832,8 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
@@ -842,8 +842,8 @@ el0_sve_acc:
/*
* Scalable Vector Extension access
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_sve_acc
@@ -852,8 +852,8 @@ el0_fpsimd_exc:
/*
* Floating Point, Advanced SIMD or SVE exception
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_exc
@@ -868,11 +868,11 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
gic_prio_kentry_setup tmp=x0
+ ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -882,8 +882,8 @@ el0_undef:
/*
* Undefined instruction
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, sp
bl do_undefinstr
b ret_to_user
@@ -891,8 +891,8 @@ el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_sysinstr
@@ -902,17 +902,18 @@ el0_dbg:
* Debug exception handling
*/
tbnz x24, #0, el0_inv // EL0 only
+ mrs x24, far_el1
gic_prio_kentry_setup tmp=x3
- mrs x0, far_el1
+ ct_user_exit_irqoff
+ mov x0, x24
mov x1, x25
mov x2, sp
bl do_debug_exception
enable_da_f
- ct_user_exit
b ret_to_user
el0_inv:
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
@@ -925,13 +926,13 @@ el0_irq:
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
+ ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
@@ -958,13 +959,14 @@ ENDPROC(el1_error)
el0_error:
kernel_entry 0
el0_error_naked:
- mrs x1, esr_el1
+ mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
+ ct_user_exit_irqoff
enable_dbg
mov x0, sp
+ mov x1, x25
bl do_serror
enable_da_f
- ct_user_exit
b ret_to_user
ENDPROC(el0_error)
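[Editor's note: ct_user_exit_irqoff now funnels through a C helper declared in the exception.h hunk above. Its definition lands in traps.c (20 changed lines in the diffstat, hunk not shown in this section); presumably along the lines of the sketch below: tell context tracking that we left user mode while IRQs are still masked, so lockdep and RCU see a consistent state before the handlers unmask them.]

#include <linux/context_tracking.h>
#include <linux/kprobes.h>

asmlinkage void notrace enter_from_user_mode(void)
{
	/* Must run with IRQs still masked, straight after kernel_entry. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);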
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 949b001a73bb..989b1944cb71 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -102,6 +102,8 @@ pe_header:
* x23 stext() .. start_kernel() physical misalignment/KASLR offset
* x28 __create_page_tables() callee preserved temp register
* x19/x20 __primary_switch() callee preserved temp registers
+ * x24 __primary_switch() .. relocate_kernel()
+ * current RELR displacement
*/
ENTRY(stext)
bl preserve_boot_args
@@ -724,14 +726,22 @@ __secondary_switched:
adr_l x0, secondary_data
ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ cbz x1, __secondary_too_slow
mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
+ cbz x2, __secondary_too_slow
msr sp_el0, x2
mov x29, #0
mov x30, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
+__secondary_too_slow:
+ wfe
+ wfi
+ b __secondary_too_slow
+ENDPROC(__secondary_too_slow)
+
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
@@ -834,14 +844,93 @@ __relocate_kernel:
0: cmp x9, x10
b.hs 1f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
+ ldp x12, x13, [x9], #24
+ ldr x14, [x9, #-8]
+ cmp w13, #R_AARCH64_RELATIVE
b.ne 0b
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
+ add x14, x14, x23 // relocate
+ str x14, [x12, x23]
b 0b
-1: ret
+
+1:
+#ifdef CONFIG_RELR
+ /*
+ * Apply RELR relocations.
+ *
+ * RELR is a compressed format for storing relative relocations. The
+ * encoded sequence of entries looks like:
+ * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
+ *
+ * i.e. start with an address, followed by any number of bitmaps. The
+ * address entry encodes 1 relocation. The subsequent bitmap entries
+ * encode up to 63 relocations each, at subsequent offsets following
+ * the last address entry.
+ *
+ * The bitmap entries must have 1 in the least significant bit. The
+ * assumption here is that an address cannot have 1 in lsb. Odd
+ * addresses are not supported. Any odd addresses are stored in the RELA
+ * section, which is handled above.
+ *
+ * Excluding the least significant bit in the bitmap, each non-zero
+ * bit in the bitmap represents a relocation to be applied to
+ * a corresponding machine word that follows the base address
+ * word. The second least significant bit represents the machine
+ * word immediately following the initial address, and each bit
+ * that follows represents the next word, in linear order. As such,
+ * a single bitmap can encode up to 63 relocations in a 64-bit object.
+ *
+ * In this implementation we store the address of the next RELR table
+ * entry in x9, the address being relocated by the current address or
+ * bitmap entry in x13 and the address being relocated by the current
+ * bit in x14.
+ *
+ * Because addends are stored in place in the binary, RELR relocations
+ * cannot be applied idempotently. We use x24 to keep track of the
+ * currently applied displacement so that we can correctly relocate if
+ * __relocate_kernel is called twice with non-zero displacements (i.e.
+ * if there is both a physical misalignment and a KASLR displacement).
+ */
+ ldr w9, =__relr_offset // offset to reloc table
+ ldr w10, =__relr_size // size of reloc table
+ add x9, x9, x11 // __va(.relr)
+ add x10, x9, x10 // __va(.relr) + sizeof(.relr)
+
+ sub x15, x23, x24 // delta from previous offset
+ cbz x15, 7f // nothing to do if unchanged
+ mov x24, x23 // save new offset
+
+2: cmp x9, x10
+ b.hs 7f
+ ldr x11, [x9], #8
+ tbnz x11, #0, 3f // branch to handle bitmaps
+ add x13, x11, x23
+ ldr x12, [x13] // relocate address entry
+ add x12, x12, x15
+ str x12, [x13], #8 // adjust to start of bitmap
+ b 2b
+
+3: mov x14, x13
+4: lsr x11, x11, #1
+ cbz x11, 6f
+ tbz x11, #0, 5f // skip bit if not set
+ ldr x12, [x14] // relocate bit
+ add x12, x12, x15
+ str x12, [x14]
+
+5: add x14, x14, #8 // move to next bit's address
+ b 4b
+
+6: /*
+ * Move to the next bitmap's address. 8 is the word size, and 63 is the
+ * number of significant bits in a bitmap entry.
+ */
+ add x13, x13, #(8 * 63)
+ b 2b
+
+7:
+#endif
+ ret
+
ENDPROC(__relocate_kernel)
#endif
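[Editor's note: for reference, the RELR scheme the assembly above implements can be sketched in portable C. This is an illustrative decoder, not kernel code: entries are taken as byte offsets into the loaded image, and `delta` is the displacement to apply (x23 - x24 in the assembly).]

#include <stddef.h>
#include <stdint.h>

static void apply_relr(uint64_t *image, const uint64_t *relr,
		       size_t count, uint64_t delta)
{
	uint64_t *where = NULL;

	for (size_t i = 0; i < count; i++) {
		uint64_t entry = relr[i];

		if (!(entry & 1)) {
			/* Address entry (lsb clear): relocate one word
			 * and remember the start of the bitmap span. */
			where = image + entry / sizeof(uint64_t);
			*where++ += delta;
		} else {
			/* Bitmap entry (lsb set): bit n relocates the
			 * (n-1)th word after the current base. */
			for (int bit = 1; bit < 64; bit++)
				if ((entry >> bit) & 1)
					where[bit - 1] += delta;
			where += 63;	/* skip this bitmap's 63 words */
		}
	}
}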
@@ -854,6 +943,9 @@ __primary_switch:
adrp x1, init_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_RELR
+ mov x24, #0 // no RELR displacement yet
+#endif
bl __relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
ldr x8, =__primary_switched
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
new file mode 100644
index 000000000000..25a2a9b479c2
--- /dev/null
+++ b/arch/arm64/kernel/image-vars.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linker script variables to be set after section resolution, as
+ * ld.lld does not like variables assigned before SECTIONS is processed.
+ */
+#ifndef __ARM64_KERNEL_IMAGE_VARS_H
+#define __ARM64_KERNEL_IMAGE_VARS_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#ifdef CONFIG_EFI
+
+__efistub_stext_offset = stext - _text;
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at another offset than they were
+ * linked at. The routines below are all implemented in assembler in a
+ * position independent manner
+ */
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strnlen = __pi_strnlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub_strrchr = __pi_strrchr;
+__efistub___flush_dcache_area = __pi___flush_dcache_area;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
+#endif
+
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
+__efistub_screen_info = screen_info;
+
+#endif
+
+#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 2b85c0d6fa3d..c7d38c660372 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -65,46 +65,4 @@
DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
-#ifdef CONFIG_EFI
-
-/*
- * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
- * https://github.com/ClangBuiltLinux/linux/issues/561
- */
-__efistub_stext_offset = ABSOLUTE(stext - _text);
-
-/*
- * The EFI stub has its own symbol namespace prefixed by __efistub_, to
- * isolate it from the kernel proper. The following symbols are legally
- * accessed by the stub, so provide some aliases to make them accessible.
- * Only include data symbols here, or text symbols of functions that are
- * guaranteed to be safe when executed at another offset than they were
- * linked at. The routines below are all implemented in assembler in a
- * position independent manner
- */
-__efistub_memcmp = __pi_memcmp;
-__efistub_memchr = __pi_memchr;
-__efistub_memcpy = __pi_memcpy;
-__efistub_memmove = __pi_memmove;
-__efistub_memset = __pi_memset;
-__efistub_strlen = __pi_strlen;
-__efistub_strnlen = __pi_strnlen;
-__efistub_strcmp = __pi_strcmp;
-__efistub_strncmp = __pi_strncmp;
-__efistub_strrchr = __pi_strrchr;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
-
-#ifdef CONFIG_KASAN
-__efistub___memcpy = __pi_memcpy;
-__efistub___memmove = __pi_memmove;
-__efistub___memset = __pi_memset;
-#endif
-
-__efistub__text = _text;
-__efistub__end = _end;
-__efistub__edata = _edata;
-__efistub_screen_info = screen_info;
-
-#endif
-
#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 84b059ed04fc..d801a7094076 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -26,7 +26,7 @@
#define AARCH64_INSN_N_BIT BIT(22)
#define AARCH64_INSN_LSL_12 BIT(22)
-static int aarch64_insn_encoding_class[] = {
+static const int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 5a59f7567f9c..416f537bf614 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -62,9 +62,6 @@ out:
return default_cmdline;
}
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
- pgprot_t prot);
-
/*
* This routine will be executed with the kernel mapped at its default virtual
* address, and if it returns successfully, the kernel will be remapped, and
@@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* attempt at mapping the FDT in setup_machine()
*/
early_fixmap_init();
- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
if (!fdt)
return 0;
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
index 2514fd6f12cb..29a9428486a5 100644
--- a/arch/arm64/kernel/kexec_image.c
+++ b/arch/arm64/kernel/kexec_image.c
@@ -84,7 +84,7 @@ static void *image_load(struct kimage *image,
kbuf.buffer = kernel;
kbuf.bufsz = kernel_len;
- kbuf.mem = 0;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = le64_to_cpu(h->image_size);
text_offset = le64_to_cpu(h->text_offset);
kbuf.buf_align = MIN_KIMG_ALIGN;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 58871333737a..7b08bf9499b6 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -27,6 +27,8 @@
#define FDT_PROP_INITRD_END "linux,initrd-end"
#define FDT_PROP_BOOTARGS "bootargs"
#define FDT_PROP_KASLR_SEED "kaslr-seed"
+#define FDT_PROP_RNG_SEED "rng-seed"
+#define RNG_SEED_SIZE 128
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_image_ops,
@@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image,
FDT_PROP_KASLR_SEED);
}
+ /* add rng-seed */
+ if (rng_is_initialized()) {
+ u8 rng_seed[RNG_SEED_SIZE];
+ get_random_bytes(rng_seed, RNG_SEED_SIZE);
+ ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed,
+ RNG_SEED_SIZE);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_RNG_SEED);
+ }
+
out:
if (ret)
return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
@@ -110,7 +125,8 @@ out:
}
/*
- * More space needed so that we can add initrd, bootargs and kaslr-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed, and
+ * rng-seed.
*/
#define DTB_EXTRA_SPACE 0x1000
@@ -177,7 +193,7 @@ int load_other_segments(struct kimage *image,
if (initrd) {
kbuf.buffer = initrd;
kbuf.bufsz = initrd_len;
- kbuf.mem = 0;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = initrd_len;
kbuf.buf_align = 0;
/* within 1GB-aligned window of up to 32GB in size */
@@ -204,7 +220,7 @@ int load_other_segments(struct kimage *image,
dtb_len = fdt_totalsize(dtb);
kbuf.buffer = dtb;
kbuf.bufsz = dtb_len;
- kbuf.mem = 0;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = dtb_len;
/* not across 2MB boundary */
kbuf.buf_align = SZ_2M;
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 044c0ae4d6c8..b182442b87a3 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -302,7 +302,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
/* sort by type, symbol index and addend */
sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
- if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+ if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
core_plts += count_plts(syms, rels, numrels,
sechdrs[i].sh_info, dstsec);
else
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 96e90e270042..a0b4f1bca491 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
+#include <linux/smp.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -157,7 +158,6 @@ armv8pmu_events_sysfs_show(struct device *dev,
return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
-#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
config, armv8pmu_events_sysfs_show)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f674f28df663..03689c0beb34 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
+#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
@@ -38,6 +39,7 @@
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
+#include <linux/prctl.h>
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
@@ -307,11 +309,18 @@ static void tls_thread_flush(void)
}
}
+static void flush_tagged_addr_state(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+ clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
+ flush_tagged_addr_state();
}
void release_thread(struct task_struct *dead_task)
@@ -565,3 +574,70 @@ void arch_setup_new_exec(void)
ptrauth_thread_init_user(current);
}
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(unsigned long arg)
+{
+ if (is_compat_task())
+ return -EINVAL;
+ if (arg & ~PR_TAGGED_ADDR_ENABLE)
+ return -EINVAL;
+
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+ return -EINVAL;
+
+ update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+ return 0;
+}
+
+long get_tagged_addr_ctrl(void)
+{
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TAGGED_ADDR))
+ return PR_TAGGED_ADDR_ENABLE;
+
+ return 0;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+static int zero;
+static int one = 1;
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+
+static int __init tagged_addr_init(void)
+{
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+ return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
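[Editor's note: putting the pieces together, a hypothetical userspace demo of the semantics implemented above. Once the task has opted in, a pointer carrying an arbitrary tag in bits 63:56 is a valid syscall argument (hardware TBI already makes the userspace load in strlen() work regardless). Setting the abi.tagged_addr_disabled sysctl to 1 beforehand would make the prctl() fail, but would not revoke the ABI for tasks that already opted in.]

#include <stdint.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	char buf[] = "hello\n";
	/* Plant an arbitrary tag (0x2a) in the top byte of the pointer. */
	char *tagged = (char *)((uintptr_t)buf | (0x2aUL << 56));

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return 1;

	/* __range_ok() untags the address, so the write succeeds;
	 * without the prctl it would fail with EFAULT. */
	return write(STDOUT_FILENO, tagged, strlen(tagged)) < 0;
}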
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 85ee7d07889e..c9f72b2665f1 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
+static bool cpu_psci_cpu_can_disable(unsigned int cpu)
+{
+ return !psci_tos_resident_on(cpu);
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
@@ -105,14 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
-#ifdef CONFIG_CPU_IDLE
- .cpu_init_idle = psci_cpu_init_idle,
- .cpu_suspend = psci_cpu_suspend_enter,
-#endif
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
.cpu_boot = cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_can_disable = cpu_psci_cpu_can_disable,
.cpu_disable = cpu_psci_cpu_disable,
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 3cf3b135027e..21176d02e21a 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -870,7 +870,7 @@ static int sve_set(struct task_struct *target,
goto out;
/*
- * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by
+ * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
* sve_set_vector_length(), which will also validate them for us:
*/
ret = sve_set_vector_length(target, header.vl,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 9c4bad7d7131..56f664561754 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- void *dt_virt = fixmap_remap_fdt(dt_phys);
+ int size;
+ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
const char *name;
+ if (dt_virt)
+ memblock_reserve(dt_phys, size);
+
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("\n"
"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
cpu_relax();
}
+ /* Early fixups are done, map the FDT as read-only now */
+ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
name = of_flat_dt_get_machine_name();
if (!name)
return;
@@ -357,6 +364,15 @@ void __init setup_arch(char **cmdline_p)
}
}
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
+ return cpu_ops[cpu]->cpu_can_disable(cpu);
+#endif
+ return false;
+}
+
static int __init topology_init(void)
{
int i;
@@ -366,7 +382,7 @@ static int __init topology_init(void)
for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
- cpu->hotpluggable = 1;
+ cpu->hotpluggable = cpu_can_disable(i);
register_cpu(cpu, i);
}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 018a33e01b0e..dc9fe879c279 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -123,7 +123,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* time out.
*/
wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(5000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
@@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
secondary_data.task = NULL;
secondary_data.stack = NULL;
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (ret && status) {
@@ -146,6 +147,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
default:
pr_err("CPU%u: failed in unknown state : 0x%lx\n",
cpu, status);
+ cpus_stuck_in_kernel++;
break;
case CPU_KILL_ME:
if (!op_cpu_kill(cpu)) {
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 76c2739ba8a4..c8a3fee00c11 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -19,7 +19,7 @@
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
-volatile unsigned long __section(".mmuoff.data.read")
+volatile unsigned long __section(.mmuoff.data.read)
secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0825c4a856e3..fa9528dfd0ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -14,250 +14,13 @@
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
-#include <linux/node.h>
-#include <linux/nodemask.h>
-#include <linux/of.h>
-#include <linux/sched.h>
-#include <linux/sched/topology.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/string.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
-static int __init get_cpu_for_node(struct device_node *node)
-{
- struct device_node *cpu_node;
- int cpu;
-
- cpu_node = of_parse_phandle(node, "cpu", 0);
- if (!cpu_node)
- return -1;
-
- cpu = of_cpu_node_to_id(cpu_node);
- if (cpu >= 0)
- topology_parse_cpu_capacity(cpu_node, cpu);
- else
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
-
- of_node_put(cpu_node);
- return cpu;
-}
-
-static int __init parse_core(struct device_node *core, int package_id,
- int core_id)
-{
- char name[10];
- bool leaf = true;
- int i = 0;
- int cpu;
- struct device_node *t;
-
- do {
- snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else {
- pr_err("%pOF: Can't get CPU for thread\n",
- t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
- }
- i++;
- } while (t);
-
- cpu = get_cpu_for_node(core);
- if (cpu >= 0) {
- if (!leaf) {
- pr_err("%pOF: Core has both threads and CPU\n",
- core);
- return -EINVAL;
- }
-
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- } else if (leaf) {
- pr_err("%pOF: Can't get CPU for leaf core\n", core);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init parse_cluster(struct device_node *cluster, int depth)
-{
- char name[10];
- bool leaf = true;
- bool has_cores = false;
- struct device_node *c;
- static int package_id __initdata;
- int core_id = 0;
- int i, ret;
-
- /*
- * First check for child clusters; we currently ignore any
- * information about the nesting of clusters and present the
- * scheduler with a flat list of them.
- */
- i = 0;
- do {
- snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, depth + 1);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- /* Now check for cores */
- i = 0;
- do {
- snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
-
- if (leaf) {
- ret = parse_core(c, package_id, core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
-
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- if (leaf && !has_cores)
- pr_warn("%pOF: empty cluster\n", cluster);
-
- if (leaf)
- package_id++;
-
- return 0;
-}
-
-static int __init parse_dt_topology(void)
-{
- struct device_node *cn, *map;
- int ret = 0;
- int cpu;
-
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return 0;
- }
-
- /*
- * When topology is provided cpu-map is essentially a root
- * cluster with restricted subnodes.
- */
- map = of_get_child_by_name(cn, "cpu-map");
- if (!map)
- goto out;
-
- ret = parse_cluster(map, 0);
- if (ret != 0)
- goto out_map;
-
- topology_normalize_cpu_scale();
-
- /*
- * Check that all cores are in the topology; the SMP code will
- * only mark cores described in the DT as possible.
- */
- for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].package_id == -1)
- ret = -EINVAL;
-
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
- return ret;
-}
-
-/*
- * cpu topology table
- */
-struct cpu_topology cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
- const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
-
- /* Find the smaller of NUMA, core or LLC siblings */
- if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
- /* not numa in package, lets use the package siblings */
- core_mask = &cpu_topology[cpu].core_sibling;
- }
- if (cpu_topology[cpu].llc_id != -1) {
- if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
- core_mask = &cpu_topology[cpu].llc_sibling;
- }
-
- return core_mask;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
- struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
-
- /* update core and thread sibling masks */
- for_each_online_cpu(cpu) {
- cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
- cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
- cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
- }
-
- if (cpuid_topo->package_id != cpu_topo->package_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id != cpu_topo->core_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
- }
-}
-
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
@@ -296,60 +59,31 @@ topology_populated:
update_siblings_masks(cpuid);
}
-static void clear_cpu_topology(int cpu)
-{
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpumask_clear(&cpu_topo->llc_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
-
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
-}
-
-static void __init reset_cpu_topology(void)
-{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpu_topo->thread_id = -1;
- cpu_topo->core_id = 0;
- cpu_topo->package_id = -1;
- cpu_topo->llc_id = -1;
-
- clear_cpu_topology(cpu);
- }
-}
-
-void remove_cpu_topology(unsigned int cpu)
+#ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
{
- int sibling;
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
- for_each_cpu(sibling, topology_core_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
- for_each_cpu(sibling, topology_sibling_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
- for_each_cpu(sibling, topology_llc_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+ /*
+ * If the PPTT doesn't have thread information, assume a homogeneous
+ * machine and return the current CPU's thread state.
+ */
+ if (is_threaded < 0)
+ is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
- clear_cpu_topology(cpu);
+ return !!is_threaded;
}
-#ifdef CONFIG_ACPI
/*
* Propagate the topology information of the processor_topology_node tree to the
* cpu_topology array.
*/
-static int __init parse_acpi_topology(void)
+int __init parse_acpi_topology(void)
{
- bool is_threaded;
int cpu, topology_id;
- is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+ if (acpi_disabled)
+ return 0;
for_each_possible_cpu(cpu) {
int i, cache_id;
@@ -358,7 +92,7 @@ static int __init parse_acpi_topology(void)
if (topology_id < 0)
return topology_id;
- if (is_threaded) {
+ if (acpi_cpu_is_threaded(cpu)) {
cpu_topology[cpu].thread_id = topology_id;
topology_id = find_acpi_cpu_topology(cpu, 1);
cpu_topology[cpu].core_id = topology_id;
@@ -384,24 +118,6 @@ static int __init parse_acpi_topology(void)
return 0;
}
-
-#else
-static inline int __init parse_acpi_topology(void)
-{
- return -EINVAL;
-}
#endif
-void __init init_cpu_topology(void)
-{
- reset_cpu_topology();
- /*
- * Discard anything that was parsed if we hit an error so we
- * don't use partial information.
- */
- if (!acpi_disabled && parse_acpi_topology())
- reset_cpu_topology();
- else if (of_have_populated_dt() && parse_dt_topology())
- reset_cpu_topology();
-}
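Most of the code deleted above (DT cpu-map parsing, sibling-mask maintenance, init_cpu_topology()) moves to the generic drivers/base/arch_topology.c rather than disappearing; parse_acpi_topology() now checks acpi_disabled itself so the generic layer can call it unconditionally. A sketch of the shared init flow, paraphrasing the deleted arm64 version rather than quoting the generic code verbatim:

void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information. ACPI is tried first; on a DT
	 * system parse_acpi_topology() returns 0 without doing anything.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}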
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d3313797cca9..6e950908eb97 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -7,9 +7,11 @@
*/
#include <linux/bug.h>
+#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -511,7 +513,7 @@ struct sys64_hook {
void (*handler)(unsigned int esr, struct pt_regs *regs);
};
-static struct sys64_hook sys64_hooks[] = {
+static const struct sys64_hook sys64_hooks[] = {
{
.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
@@ -636,7 +638,7 @@ static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
arm64_compat_skip_faulting_instruction(regs, 4);
}
-static struct sys64_hook cp15_32_hooks[] = {
+static const struct sys64_hook cp15_32_hooks[] = {
{
.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
@@ -656,7 +658,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
arm64_compat_skip_faulting_instruction(regs, 4);
}
-static struct sys64_hook cp15_64_hooks[] = {
+static const struct sys64_hook cp15_64_hooks[] = {
{
.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
@@ -667,7 +669,7 @@ static struct sys64_hook cp15_64_hooks[] = {
asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
- struct sys64_hook *hook, *hook_base;
+ const struct sys64_hook *hook, *hook_base;
if (!cp15_cond_valid(esr, regs)) {
/*
@@ -707,7 +709,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
- struct sys64_hook *hook;
+ const struct sys64_hook *hook;
for (hook = sys64_hooks; hook->handler; hook++)
if ((hook->esr_mask & esr) == hook->esr_val) {
@@ -743,6 +745,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
[ESR_ELx_EC_SVE] = "SVE",
+ [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -899,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
nmi_exit();
}
+asmlinkage void enter_from_user_mode(void)
+{
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+}
+NOKPROBE_SYMBOL(enter_from_user_mode);
+
void __pte_error(const char *file, int line, unsigned long val)
{
pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7fa008374907..aa76f7259668 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -200,6 +200,15 @@ SECTIONS
__rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela.dyn);
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ *(.relr.dyn)
+ }
+
+ __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
+ __relr_size = SIZEOF(.relr.dyn);
+#endif
+
. = ALIGN(SEGMENT_ALIGN);
__initdata_end = .;
__init_end = .;
@@ -245,6 +254,8 @@ SECTIONS
HEAD_SYMBOLS
}
+#include "image-vars.h"
+
/*
* The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.
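RELR encodes relative relocations as a compact bitmap stream instead of 24-byte Elf64_Rela records: an even entry is the address of a word to relocate, an odd entry is a bitmap whose bits 1..63 flag which of the following 63 words also need the load offset added. A hedged C sketch of the decode loop (the kernel performs this in head.S assembly during early boot; names here are illustrative):

#include <linux/types.h>

static void apply_relr(const u64 *relr, const u64 *end, u64 offset)
{
	u64 *place = NULL;

	while (relr < end) {
		u64 entry = *relr++;

		if (!(entry & 1)) {
			/* Address entry: relocate it and remember where we are. */
			place = (u64 *)(entry + offset);
			*place++ += offset;
		} else {
			/* Bitmap entry: bit n flags the word place[n - 1]. */
			int n;

			for (n = 1; n < 64; n++)
				if (entry & (1ULL << n))
					place[n - 1] += offset;
			place += 63;
		}
	}
}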
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index adaf266d8de8..bd978ad71936 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -264,7 +264,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
tmp = read_sysreg(par_el1);
write_sysreg(par, par_el1);
- if (unlikely(tmp & 1))
+ if (unlikely(tmp & SYS_PAR_EL1_F))
return false; /* Translation failed, back to guest */
/* Convert PAR to HPFAR format */
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 33c2a4abda04..f182ccb0438e 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -33,3 +33,5 @@ UBSAN_SANITIZE_atomic_ll_sc.o := n
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
obj-$(CONFIG_CRC32) += crc32.o
+
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c
new file mode 100644
index 000000000000..ed15021da3ed
--- /dev/null
+++ b/arch/arm64/lib/error-inject.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+void override_function_with_return(struct pt_regs *regs)
+{
+ /*
+ * 'regs' represents the state on entry to a predefined function in
+ * the kernel or a module, as captured by a kprobe.
+ *
+ * When the kprobe returns from the exception, the rest of the probed
+ * function is skipped and control returns directly to the probed
+ * function's caller.
+ */
+ instruction_pointer_set(regs, procedure_link_pointer(regs));
+}
+NOKPROBE_SYMBOL(override_function_with_return);
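This hook backs CONFIG_FUNCTION_ERROR_INJECTION: procedure_link_pointer() reads the saved link register (x30), so setting the PC to it makes the probed function appear to return immediately with whatever return value the injection framework placed in x0. A function opts in with ALLOW_ERROR_INJECTION(); a small sketch (demo_read() is hypothetical, the macro comes from linux/error-injection.h):

#include <linux/error-injection.h>
#include <linux/types.h>

/* Hypothetical function whose callers already handle errno returns. */
static int demo_read(void *buf, size_t len)
{
	return 0;
}
/* Mark it injectable so e.g. bpf can force an errno-style early return. */
ALLOW_ERROR_INJECTION(demo_read, ERRNO);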
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bb4e4f3fffd8..115d7a0e4b08 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
@@ -86,8 +87,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert("Mem abort info:\n");
pr_alert(" ESR = 0x%08x\n", esr);
- pr_alert(" Exception class = %s, IL = %u bits\n",
- esr_get_class_string(esr),
+ pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n",
+ ESR_ELx_EC(esr), esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
pr_alert(" SET = %lu, FnV = %lu\n",
(esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
@@ -241,6 +242,34 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
return false;
}
+static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ unsigned long flags;
+ u64 par, dfsc;
+
+ if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
+ return false;
+
+ local_irq_save(flags);
+ asm volatile("at s1e1r, %0" :: "r" (addr));
+ isb();
+ par = read_sysreg(par_el1);
+ local_irq_restore(flags);
+
+ /*
+ * If we now have a valid translation, treat the original translation
+ * fault as spurious.
+ */
+ if (!(par & SYS_PAR_EL1_F))
+ return true;
+
+ /*
+ * If we got a different type of fault from the AT instruction,
+ * treat the translation fault as spurious.
+ */
+ dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
+ return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
+}
+
static void die_kernel_fault(const char *msg, unsigned long addr,
unsigned int esr, struct pt_regs *regs)
{
@@ -269,6 +298,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
+ if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
+ "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
+ return;
+
if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 531c497c5758..45c00a54909c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -583,8 +583,12 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
+ unsigned long aligned_start, aligned_end;
+
+ aligned_start = __virt_to_phys(start) & PAGE_MASK;
+ aligned_end = PAGE_ALIGN(__virt_to_phys(end));
+ memblock_free(aligned_start, aligned_end - aligned_start);
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- memblock_free(__virt_to_phys(start), end - start);
}
#endif
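The rounding matters because memblock_free() is handed the covering page range while free_reserved_area() still operates on the exact byte range. A worked example, assuming 4 KiB pages and a hypothetical initrd placement:

#include <linux/mm.h>	/* PAGE_MASK, PAGE_ALIGN */

static void demo_initrd_round(void)
{
	/* hypothetical initrd spanning [0x80001234, 0x80042abc) */
	unsigned long aligned_start = 0x80001234UL & PAGE_MASK;	/* 0x80001000 */
	unsigned long aligned_end = PAGE_ALIGN(0x80042abcUL);	/* 0x80043000 */

	/* memblock gets back the full 0x42000-byte covering range */
}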
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3ed44008230e..53dc6f24cfb7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -879,7 +879,7 @@ void __set_fixmap(enum fixed_addresses idx,
}
}
-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
int offset;
@@ -932,19 +932,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
-{
- void *dt_virt;
- int size;
-
- dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
- if (!dt_virt)
- return NULL;
-
- memblock_reserve(dt_phys, size);
- return dt_virt;
-}
-
int __init arch_ioremap_p4d_supported(void)
{
return 0;
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 4f241cc7cc3b..4decf1659700 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -29,7 +29,7 @@ static __init int numa_parse_early_param(char *opt)
{
if (!opt)
return -EINVAL;
- if (!strncmp(opt, "off", 3))
+ if (str_has_prefix(opt, "off"))
numa_off = true;
return 0;
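str_has_prefix() removes the easy-to-desync magic length of strncmp() and returns the prefix length on a match. A minimal sketch of the same test:

#include <linux/string.h>
#include <linux/types.h>

static bool demo_is_off(const char *opt)
{
	/* equivalent to !strncmp(opt, "off", 3), without the literal 3 */
	return str_has_prefix(opt, "off");
}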
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 03c53f16ee77..9ce7bd9d4d9c 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -128,7 +128,6 @@ int set_memory_nx(unsigned long addr, int numpages)
__pgprot(PTE_PXN),
__pgprot(0));
}
-EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
@@ -136,7 +135,6 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(0),
__pgprot(PTE_PXN));
}
-EXPORT_SYMBOL_GPL(set_memory_x);
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 391f9cabfe60..a1e0592d1fbc 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -286,6 +286,15 @@ skip_pgd:
msr sctlr_el1, x18
isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
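The same maintenance sequence can be wrapped for use from C wherever a translation-table switch or code patching may leave stale instructions in the local I-cache; a hedged sketch (the helper name is hypothetical):

/* Discard instructions the local PE may have fetched speculatively. */
static inline void local_icache_inval_all(void)
{
	asm volatile("ic	iallu\n\t"
		     "dsb	nsh\n\t"
		     "isb"
		     : : : "memory");
}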