Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile                 |   3
-rw-r--r--  arch/riscv/kernel/acpi.c                   |   4
-rw-r--r--  arch/riscv/kernel/alternative.c            |  19
-rw-r--r--  arch/riscv/kernel/cfi.c                    |  77
-rw-r--r--  arch/riscv/kernel/compat_syscall_table.c   |   8
-rw-r--r--  arch/riscv/kernel/compat_vdso/Makefile     |   8
-rw-r--r--  arch/riscv/kernel/copy-unaligned.S         |  71
-rw-r--r--  arch/riscv/kernel/copy-unaligned.h         |  13
-rw-r--r--  arch/riscv/kernel/cpu.c                    | 186
-rw-r--r--  arch/riscv/kernel/cpufeature.c             | 635
-rw-r--r--  arch/riscv/kernel/crash_core.c             |   2
-rw-r--r--  arch/riscv/kernel/elf_kexec.c              |   7
-rw-r--r--  arch/riscv/kernel/head.S                   |   6
-rw-r--r--  arch/riscv/kernel/irq.c                    |   3
-rw-r--r--  arch/riscv/kernel/mcount.S                 |   9
-rw-r--r--  arch/riscv/kernel/probes/decode-insn.c     |  11
-rw-r--r--  arch/riscv/kernel/probes/simulate-insn.c   | 105
-rw-r--r--  arch/riscv/kernel/probes/simulate-insn.h   |   5
-rw-r--r--  arch/riscv/kernel/ptrace.c                 |  69
-rw-r--r--  arch/riscv/kernel/setup.c                  |   6
-rw-r--r--  arch/riscv/kernel/smp.c                    |   5
-rw-r--r--  arch/riscv/kernel/smpboot.c                |   3
-rw-r--r--  arch/riscv/kernel/suspend_entry.S          |   5
-rw-r--r--  arch/riscv/kernel/sys_riscv.c              |   6
-rw-r--r--  arch/riscv/kernel/syscall_table.c          |   8
-rw-r--r--  arch/riscv/kernel/traps.c                  |  13
26 files changed, 858 insertions, 429 deletions
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 506cc4a9a45a..95cf25d48405 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
obj-y += head.o
obj-y += soc.o
obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y += copy-unaligned.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
@@ -91,6 +92,8 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_CFI_CLANG) += cfi.o
+
obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_COMPAT) += compat_syscall_table.o
obj-$(CONFIG_COMPAT) += compat_signal.o
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index 5ee03ebab80e..56cb2c986c48 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -215,9 +215,9 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
early_iounmap(map, size);
}
-void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
- return memremap(phys, size, MEMREMAP_WB);
+ return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
}
#ifdef CONFIG_PCI
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index 6b75788c18e6..85056153fa23 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -27,8 +27,6 @@ struct cpu_manufacturer_info_t {
void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
- void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
- unsigned long impid);
};
static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
@@ -43,7 +41,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif
- cpu_mfr_info->feature_probe_func = NULL;
switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_SIFIVE
case SIFIVE_VENDOR_ID:
@@ -53,7 +50,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
#ifdef CONFIG_ERRATA_THEAD
case THEAD_VENDOR_ID:
cpu_mfr_info->patch_func = thead_errata_patch_func;
- cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
break;
#endif
default:
@@ -143,20 +139,6 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
}
}
-/* Called on each CPU as it starts */
-void probe_vendor_features(unsigned int cpu)
-{
- struct cpu_manufacturer_info_t cpu_mfr_info;
-
- riscv_fill_cpu_mfr_info(&cpu_mfr_info);
- if (!cpu_mfr_info.feature_probe_func)
- return;
-
- cpu_mfr_info.feature_probe_func(cpu,
- cpu_mfr_info.arch_id,
- cpu_mfr_info.imp_id);
-}
-
/*
* This is called very early in the boot process (directly after we run
* a feature detect on the boot CPU). No need to worry about other CPUs
@@ -211,7 +193,6 @@ void __init apply_boot_alternatives(void)
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
- probe_vendor_features(0);
_apply_alternatives((struct alt_entry *)__alt_start,
(struct alt_entry *)__alt_end,
RISCV_ALTERNATIVES_BOOT);
diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c
new file mode 100644
index 000000000000..820158d7a291
--- /dev/null
+++ b/arch/riscv/kernel/cfi.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+#include <asm/cfi.h>
+#include <asm/insn.h>
+
+/*
+ * Returns the target address and the expected type when regs->epc points
+ * to a compiler-generated CFI trap.
+ */
+static bool decode_cfi_insn(struct pt_regs *regs, unsigned long *target,
+ u32 *type)
+{
+ unsigned long *regs_ptr = (unsigned long *)regs;
+ int rs1_num;
+ u32 insn;
+
+ *target = *type = 0;
+
+ /*
+ * The compiler generates the following instruction sequence
+ * for indirect call checks:
+ *
+ *   lw t1, -4(<reg>)
+ * lui t2, <hi20>
+ * addiw t2, t2, <lo12>
+ * beq t1, t2, .Ltmp1
+ * ebreak ; <- regs->epc
+ * .Ltmp1:
+ * jalr <reg>
+ *
+ * We can read the expected type and the target address from the
+ * registers passed to the beq/jalr instructions.
+ */
+ if (get_kernel_nofault(insn, (void *)regs->epc - 4))
+ return false;
+ if (!riscv_insn_is_beq(insn))
+ return false;
+
+ *type = (u32)regs_ptr[RV_EXTRACT_RS1_REG(insn)];
+
+ if (get_kernel_nofault(insn, (void *)regs->epc) ||
+ get_kernel_nofault(insn, (void *)regs->epc + GET_INSN_LENGTH(insn)))
+ return false;
+
+ if (riscv_insn_is_jalr(insn))
+ rs1_num = RV_EXTRACT_RS1_REG(insn);
+ else if (riscv_insn_is_c_jalr(insn))
+ rs1_num = RVC_EXTRACT_C2_RS1_REG(insn);
+ else
+ return false;
+
+ *target = regs_ptr[rs1_num];
+
+ return true;
+}
+
+/*
+ * Checks if the ebreak trap is because of a CFI failure, and handles the trap
+ * if needed. Returns a bug_trap_type value similarly to report_bug.
+ */
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+ unsigned long target;
+ u32 type;
+
+ if (!is_cfi_trap(regs->epc))
+ return BUG_TRAP_TYPE_NONE;
+
+ if (!decode_cfi_insn(regs, &target, &type))
+ return report_cfi_failure_noaddr(regs, regs->epc);
+
+ return report_cfi_failure(regs, regs->epc, &target, type);
+}
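
The decoder above only needs two register fields: rs1 of the beq that carries the expected type hash, and rs1 of the (possibly compressed) jalr that carries the call target. As a reference for what RV_EXTRACT_RS1_REG and RVC_EXTRACT_C2_RS1_REG are expected to yield (their definitions live in <asm/insn.h> and are not part of this diff), here is a minimal userspace sketch of the field extraction, with hand-encoded example instructions:

/*
 * Userspace sketch of the register-field extraction the CFI decoder relies on.
 * Assumption: RV_EXTRACT_RS1_REG reads rs1 from bits [19:15] of a 32-bit
 * instruction and RVC_EXTRACT_C2_RS1_REG reads it from bits [11:7] of a
 * compressed CR-format one; the real macros come from <asm/insn.h>.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int rv_rs1(uint32_t insn)      /* rs1 field, bits 19:15 */
{
	return (insn >> 15) & 0x1f;
}

static unsigned int rvc_c2_rs1(uint32_t insn)  /* CR-format rs1/rd, bits 11:7 */
{
	return (insn >> 7) & 0x1f;
}

int main(void)
{
	uint32_t beq = 0x00730463;    /* beq t1, t2, +8 (hand-encoded) */
	uint32_t c_jalr = 0x9502;     /* c.jalr a0 */

	printf("rs1 of beq    = x%u\n", rv_rs1(beq));        /* x6  == t1 */
	printf("rs1 of c.jalr = x%u\n", rvc_c2_rs1(c_jalr)); /* x10 == a0 */
	return 0;
}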
diff --git a/arch/riscv/kernel/compat_syscall_table.c b/arch/riscv/kernel/compat_syscall_table.c
index 651f2b009c28..ad7f2d712f5f 100644
--- a/arch/riscv/kernel/compat_syscall_table.c
+++ b/arch/riscv/kernel/compat_syscall_table.c
@@ -9,11 +9,15 @@
#include <asm/syscall.h>
#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
+#include <asm/unistd.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = __riscv_##call,
asmlinkage long compat_sys_rt_sigreturn(void);
void * const compat_sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
#include <asm/unistd.h>
};
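
The table is now built by expanding <asm/unistd.h> twice: the first __SYSCALL definition emits a prototype for each __riscv_-prefixed wrapper, the second emits the designated initializer. A self-contained analogue of the trick, using a stand-in macro list instead of re-including the real header; the entry names and numbers here are made up purely for illustration:

/* Two-pass __SYSCALL expansion in miniature. SYSCALL_LIST stands in for
 * re-including <asm/unistd.h>; the entries are invented for the example.
 */
#include <stdio.h>

#define SYSCALL_LIST(X)      \
	X(0, sys_read)       \
	X(1, sys_write)

/* Pass 1: declare (and here, define) one wrapper per entry. */
#define __SYSCALL(nr, call) static long __riscv_##call(void) { return nr; }
SYSCALL_LIST(__SYSCALL)
#undef __SYSCALL

/* Pass 2: build the table with designated initializers. */
#define __SYSCALL(nr, call) [nr] = __riscv_##call,
static long (*const sys_call_table[])(void) = {
	SYSCALL_LIST(__SYSCALL)
};
#undef __SYSCALL

int main(void)
{
	printf("table[1]() = %ld\n", sys_call_table[1]());  /* prints 1 */
	return 0;
}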
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index 189345773e7e..b86e5e2c3aea 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache
COMPAT_CC := $(CC)
COMPAT_LD := $(LD)
-COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+# binutils 2.35 does not support the zifencei extension, but in the ISA
+# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI.
+ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+else
+ COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32
+endif
COMPAT_LD_FLAGS := -melf32lriscv
# Disable attributes, as they're useless and break the build.
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
new file mode 100644
index 000000000000..cfdecfbaad62
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+ .text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+ andi a4, a2, ~((8*SZREG)-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ REG_L a4, 0(a1)
+ REG_L a5, SZREG(a1)
+ REG_L a6, 2*SZREG(a1)
+ REG_L a7, 3*SZREG(a1)
+ REG_L t0, 4*SZREG(a1)
+ REG_L t1, 5*SZREG(a1)
+ REG_L t2, 6*SZREG(a1)
+ REG_L t3, 7*SZREG(a1)
+ REG_S a4, 0(a0)
+ REG_S a5, SZREG(a0)
+ REG_S a6, 2*SZREG(a0)
+ REG_S a7, 3*SZREG(a0)
+ REG_S t0, 4*SZREG(a0)
+ REG_S t1, 5*SZREG(a0)
+ REG_S t2, 6*SZREG(a0)
+ REG_S t3, 7*SZREG(a0)
+ addi a0, a0, 8*SZREG
+ addi a1, a1, 8*SZREG
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+ andi a4, a2, ~(8-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ lb a4, 0(a1)
+ lb a5, 1(a1)
+ lb a6, 2(a1)
+ lb a7, 3(a1)
+ lb t0, 4(a1)
+ lb t1, 5(a1)
+ lb t2, 6(a1)
+ lb t3, 7(a1)
+ sb a4, 0(a0)
+ sb a5, 1(a0)
+ sb a6, 2(a0)
+ sb a7, 3(a0)
+ sb t0, 4(a0)
+ sb t1, 5(a0)
+ sb t2, 6(a0)
+ sb t3, 7(a0)
+ addi a0, a0, 8
+ addi a1, a1, 8
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_bytes_unaligned)
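
A rough C rendering of the word-copy routine above, assuming SZREG is sizeof(unsigned long): the size is silently rounded down to a multiple of 8 * SZREG and any remainder is left uncopied. The kernel keeps this in assembly because the whole point is to issue genuinely misaligned word loads and stores, which portable C does not guarantee.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Sketch of __riscv_copy_words_unaligned with SZREG == sizeof(unsigned long).
 * Only full 8-word groups are copied; the tail of the buffer is untouched.
 */
static void copy_words_unaligned(void *dst, const void *src, size_t size)
{
	unsigned long *d = dst;
	const unsigned long *s = src;
	size_t words = (size & ~(8 * sizeof(unsigned long) - 1)) /
		       sizeof(unsigned long);

	for (size_t i = 0; i < words; i += 8) {
		d[i + 0] = s[i + 0]; d[i + 1] = s[i + 1];
		d[i + 2] = s[i + 2]; d[i + 3] = s[i + 3];
		d[i + 4] = s[i + 4]; d[i + 5] = s[i + 5];
		d[i + 6] = s[i + 6]; d[i + 7] = s[i + 7];
	}
}

int main(void)
{
	unsigned long src[25], dst[25];

	memset(src, 0xab, sizeof(src));
	memset(dst, 0, sizeof(dst));
	copy_words_unaligned(dst, src, sizeof(src));
	/* 25 words is not a multiple of 8, so the last word is skipped */
	printf("last word copied? %s\n", dst[24] == src[24] ? "yes" : "no");
	return 0;
}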
diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h
new file mode 100644
index 000000000000..e3d70d35b708
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index a2fc952318e9..c17dacb1141c 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -17,6 +17,11 @@
#include <asm/smp.h>
#include <asm/pgtable.h>
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_hartid_map(cpu);
+}
+
/*
* Returns the hart ID of the given device tree node, or -ENODEV if the node
* isn't an enabled and valid RISC-V hart node.
@@ -41,7 +46,7 @@ int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
return 0;
}
-int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
+int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
const char *isa;
@@ -61,16 +66,53 @@ int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *har
return -ENODEV;
}
+ if (of_property_read_string(node, "riscv,isa-base", &isa))
+ goto old_interface;
+
+ if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
+ pr_warn("CPU with hartid=%lu does not support rv32i", *hart);
+ return -ENODEV;
+ }
+
+ if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
+ pr_warn("CPU with hartid=%lu does not support rv64i", *hart);
+ return -ENODEV;
+ }
+
+ if (!of_property_present(node, "riscv,isa-extensions"))
+ return -ENODEV;
+
+ if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
+ of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
+ of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
+ pr_warn("CPU with hartid=%lu does not support ima", *hart);
+ return -ENODEV;
+ }
+
+ return 0;
+
+old_interface:
+ if (!riscv_isa_fallback) {
+ pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"",
+ *hart);
+ return -ENODEV;
+ }
+
if (of_property_read_string(node, "riscv,isa", &isa)) {
- pr_warn("CPU with hartid=%lu has no \"riscv,isa\" property\n", *hart);
+ pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
+ *hart);
return -ENODEV;
}
- if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7))
+ if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
+ pr_warn("CPU with hartid=%lu does not support rv32ima", *hart);
return -ENODEV;
+ }
- if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7))
+ if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
+ pr_warn("CPU with hartid=%lu does not support rv64ima", *hart);
return -ENODEV;
+ }
return 0;
}
@@ -160,132 +202,46 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
-#define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
- { \
- .uprop = #UPROP, \
- .isa_ext_id = EXTID, \
- }
-
-/*
- * The canonical order of ISA extension names in the ISA string is defined in
- * chapter 27 of the unprivileged specification.
- *
- * Ordinarily, for in-kernel data structures, this order is unimportant but
- * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
- *
- * The specification uses vague wording, such as should, when it comes to
- * ordering, so for our purposes the following rules apply:
- *
- * 1. All multi-letter extensions must be separated from other extensions by an
- * underscore.
- *
- * 2. Additional standard extensions (starting with 'Z') must be sorted after
- * single-letter extensions and before any higher-privileged extensions.
-
- * 3. The first letter following the 'Z' conventionally indicates the most
- * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
- * If multiple 'Z' extensions are named, they must be ordered first by
- * category, then alphabetically within a category.
- *
- * 3. Standard supervisor-level extensions (starting with 'S') must be listed
- * after standard unprivileged extensions. If multiple supervisor-level
- * extensions are listed, they must be ordered alphabetically.
- *
- * 4. Standard machine-level extensions (starting with 'Zxm') must be listed
- * after any lower-privileged, standard extensions. If multiple
- * machine-level extensions are listed, they must be ordered
- * alphabetically.
- *
- * 5. Non-standard extensions (starting with 'X') must be listed after all
- * standard extensions. If multiple non-standard extensions are listed, they
- * must be ordered alphabetically.
- *
- * An example string following the order is:
- * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
- *
- * New entries to this struct should follow the ordering rules described above.
- */
-static struct riscv_isa_ext_data isa_ext_arr[] = {
- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
- __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
- __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
- __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
- __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
- __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
- __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
- __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
- __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
- __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
- __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
- __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
- __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
- __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
- __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
- __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
- __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
-};
-
-static void print_isa_ext(struct seq_file *f)
+static void print_isa(struct seq_file *f)
{
- struct riscv_isa_ext_data *edata;
- int i = 0, arr_sz;
-
- arr_sz = ARRAY_SIZE(isa_ext_arr) - 1;
+ seq_puts(f, "isa\t\t: ");
- /* No extension support available */
- if (arr_sz <= 0)
- return;
+ if (IS_ENABLED(CONFIG_32BIT))
+ seq_write(f, "rv32", 4);
+ else
+ seq_write(f, "rv64", 4);
- for (i = 0; i <= arr_sz; i++) {
- edata = &isa_ext_arr[i];
- if (!__riscv_isa_extension_available(NULL, edata->isa_ext_id))
+ for (int i = 0; i < riscv_isa_ext_count; i++) {
+ if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
continue;
- seq_printf(f, "_%s", edata->uprop);
- }
-}
-
-/*
- * These are the only valid base (single letter) ISA extensions as per the spec.
- * It also specifies the canonical order in which it appears in the spec.
- * Some of the extension may just be a place holder for now (B, K, P, J).
- * This should be updated once corresponding extensions are ratified.
- */
-static const char base_riscv_exts[13] = "imafdqcbkjpvh";
-static void print_isa(struct seq_file *f, const char *isa)
-{
- int i;
+ /* Only multi-letter extensions are split by underscores */
+ if (strnlen(riscv_isa_ext[i].name, 2) != 1)
+ seq_puts(f, "_");
- seq_puts(f, "isa\t\t: ");
- /* Print the rv[64/32] part */
- seq_write(f, isa, 4);
- for (i = 0; i < sizeof(base_riscv_exts); i++) {
- if (__riscv_isa_extension_available(NULL, base_riscv_exts[i] - 'a'))
- /* Print only enabled the base ISA extensions */
- seq_write(f, &base_riscv_exts[i], 1);
+ seq_printf(f, "%s", riscv_isa_ext[i].name);
}
- print_isa_ext(f);
+
seq_puts(f, "\n");
}
static void print_mmu(struct seq_file *f)
{
- char sv_type[16];
+ const char *sv_type;
#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
- strncpy(sv_type, "sv32", 5);
+ sv_type = "sv32";
#elif defined(CONFIG_64BIT)
if (pgtable_l5_enabled)
- strncpy(sv_type, "sv57", 5);
+ sv_type = "sv57";
else if (pgtable_l4_enabled)
- strncpy(sv_type, "sv48", 5);
+ sv_type = "sv48";
else
- strncpy(sv_type, "sv39", 5);
+ sv_type = "sv39";
#endif
#else
- strncpy(sv_type, "none", 5);
+ sv_type = "none";
#endif /* CONFIG_MMU */
seq_printf(f, "mmu\t\t: %s\n", sv_type);
}
@@ -316,27 +272,21 @@ static int c_show(struct seq_file *m, void *v)
unsigned long cpu_id = (unsigned long)v - 1;
struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
struct device_node *node;
- const char *compat, *isa;
+ const char *compat;
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+ print_isa(m);
+ print_mmu(m);
if (acpi_disabled) {
node = of_get_cpu_node(cpu_id, NULL);
- if (!of_property_read_string(node, "riscv,isa", &isa))
- print_isa(m, isa);
- print_mmu(m);
if (!of_property_read_string(node, "compatible", &compat) &&
strcmp(compat, "riscv"))
seq_printf(m, "uarch\t\t: %s\n", compat);
of_node_put(node);
- } else {
- if (!acpi_get_riscv_isa(NULL, cpu_id, &isa))
- print_isa(m, isa);
-
- print_mmu(m);
}
seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index bdcf460ea53d..1cfbba65d11a 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -13,18 +13,24 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
+#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/vector.h>
+#include "copy-unaligned.h"
+
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
@@ -99,29 +105,252 @@ static bool riscv_isa_extension_check(int id)
return true;
}
-void __init riscv_fill_hwcap(void)
+#define __RISCV_ISA_EXT_DATA(_name, _id) { \
+ .name = #_name, \
+ .property = #_name, \
+ .id = _id, \
+}
+
+/*
+ * The canonical order of ISA extension names in the ISA string is defined in
+ * chapter 27 of the unprivileged specification.
+ *
+ * Ordinarily, for in-kernel data structures, this order is unimportant but
+ * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
+ *
+ * The specification uses vague wording, such as should, when it comes to
+ * ordering, so for our purposes the following rules apply:
+ *
+ * 1. All multi-letter extensions must be separated from other extensions by an
+ * underscore.
+ *
+ * 2. Additional standard extensions (starting with 'Z') must be sorted after
+ * single-letter extensions and before any higher-privileged extensions.
+ *
+ * 3. The first letter following the 'Z' conventionally indicates the most
+ * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
+ * If multiple 'Z' extensions are named, they must be ordered first by
+ * category, then alphabetically within a category.
+ *
+ * 3. Standard supervisor-level extensions (starting with 'S') must be listed
+ * after standard unprivileged extensions. If multiple supervisor-level
+ * extensions are listed, they must be ordered alphabetically.
+ *
+ * 4. Standard machine-level extensions (starting with 'Zxm') must be listed
+ * after any lower-privileged, standard extensions. If multiple
+ * machine-level extensions are listed, they must be ordered
+ * alphabetically.
+ *
+ * 5. Non-standard extensions (starting with 'X') must be listed after all
+ * standard extensions. If multiple non-standard extensions are listed, they
+ * must be ordered alphabetically.
+ *
+ * An example string following the order is:
+ * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
+ *
+ * New entries to this struct should follow the ordering rules described above.
+ */
+const struct riscv_isa_ext_data riscv_isa_ext[] = {
+ __RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
+ __RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
+ __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
+ __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
+ __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
+ __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
+ __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
+ __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
+ __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
+ __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
+ __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
+ __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
+ __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
+ __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
+ __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
+ __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
+ __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
+ __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
+ __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
+ __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
+ __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
+ __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
+ __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
+ __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
+ __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
+ __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+ __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
+ __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
+ __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
+ __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+};
+
+const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
+
+static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
+ unsigned long *isa2hwcap, const char *isa)
+{
+ /*
+ * For all possible cpus, we have already validated in
+ * the boot process that they at least contain "rv" and
+ * whichever of "32"/"64" this kernel supports, and so this
+ * section can be skipped.
+ */
+ isa += 4;
+
+ while (*isa) {
+ const char *ext = isa++;
+ const char *ext_end = isa;
+ bool ext_long = false, ext_err = false;
+
+ switch (*ext) {
+ case 's':
+ /*
+ * Workaround for invalid single-letter 's' & 'u'(QEMU).
+ * No need to set the bit in riscv_isa as 's' & 'u' are
+ * not valid ISA extensions. It works until multi-letter
+ * extension starting with "Su" appears.
+ */
+ if (ext[-1] != '_' && ext[1] == 'u') {
+ ++isa;
+ ext_err = true;
+ break;
+ }
+ fallthrough;
+ case 'S':
+ case 'x':
+ case 'X':
+ case 'z':
+ case 'Z':
+ /*
+ * Before attempting to parse the extension itself, we find its end.
+ * As multi-letter extensions must be split from other multi-letter
+ * extensions with an "_", the end of a multi-letter extension will
+ * either be the null character or the "_" at the start of the next
+ * multi-letter extension.
+ *
+ * Next, as the extensions version is currently ignored, we
+ * eliminate that portion. This is done by parsing backwards from
+ * the end of the extension, removing any numbers. This may be a
+ * major or minor number however, so the process is repeated if a
+ * minor number was found.
+ *
+ * ext_end is intended to represent the first character *after* the
+ * name portion of an extension, but will be decremented to the last
+ * character itself while eliminating the extensions version number.
+ * A simple re-increment solves this problem.
+ */
+ ext_long = true;
+ for (; *isa && *isa != '_'; ++isa)
+ if (unlikely(!isalnum(*isa)))
+ ext_err = true;
+
+ ext_end = isa;
+ if (unlikely(ext_err))
+ break;
+
+ if (!isdigit(ext_end[-1]))
+ break;
+
+ while (isdigit(*--ext_end))
+ ;
+
+ if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
+ ++ext_end;
+ break;
+ }
+
+ while (isdigit(*--ext_end))
+ ;
+
+ ++ext_end;
+ break;
+ default:
+ /*
+ * Things are a little easier for single-letter extensions, as they
+ * are parsed forwards.
+ *
+ * After checking that our starting position is valid, we need to
+ * ensure that, when isa was incremented at the start of the loop,
+ * that it arrived at the start of the next extension.
+ *
+ * If we are already on a non-digit, there is nothing to do. Either
+ * we have a multi-letter extension's _, or the start of an
+ * extension.
+ *
+ * Otherwise we have found the current extension's major version
+ * number. Parse past it, and a subsequent p/minor version number
+ * if present. The `p` extension must not appear immediately after
+ * a number, so there is no fear of missing it.
+ *
+ */
+ if (unlikely(!isalpha(*ext))) {
+ ext_err = true;
+ break;
+ }
+
+ if (!isdigit(*isa))
+ break;
+
+ while (isdigit(*++isa))
+ ;
+
+ if (tolower(*isa) != 'p')
+ break;
+
+ if (!isdigit(*++isa)) {
+ --isa;
+ break;
+ }
+
+ while (isdigit(*++isa))
+ ;
+
+ break;
+ }
+
+ /*
+ * The parser expects that at the start of an iteration isa points to the
+ * first character of the next extension. As we stop parsing an extension
+ * on meeting a non-alphanumeric character, an extra increment is needed
+ * where the succeeding extension is a multi-letter prefixed with an "_".
+ */
+ if (*isa == '_')
+ ++isa;
+
+#define SET_ISA_EXT_MAP(name, bit) \
+ do { \
+ if ((ext_end - ext == strlen(name)) && \
+ !strncasecmp(ext, name, strlen(name)) && \
+ riscv_isa_extension_check(bit)) \
+ set_bit(bit, isainfo->isa); \
+ } while (false) \
+
+ if (unlikely(ext_err))
+ continue;
+ if (!ext_long) {
+ int nr = tolower(*ext) - 'a';
+
+ if (riscv_isa_extension_check(nr)) {
+ *this_hwcap |= isa2hwcap[nr];
+ set_bit(nr, isainfo->isa);
+ }
+ } else {
+ for (int i = 0; i < riscv_isa_ext_count; i++)
+ SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
+ riscv_isa_ext[i].id);
+ }
+#undef SET_ISA_EXT_MAP
+ }
+}
+
+static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
struct device_node *node;
const char *isa;
- char print_str[NUM_ALPHA_EXTS + 1];
- int i, j, rc;
- unsigned long isa2hwcap[26] = {0};
+ int rc;
struct acpi_table_header *rhct;
acpi_status status;
unsigned int cpu;
- isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
- isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
- isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
- isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
- isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
- isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
- isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
-
- elf_hwcap = 0;
-
- bitmap_zero(riscv_isa, RISCV_ISA_EXT_MAX);
-
if (!acpi_disabled) {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
if (ACPI_FAILURE(status))
@@ -153,176 +382,7 @@ void __init riscv_fill_hwcap(void)
}
}
- /*
- * For all possible cpus, we have already validated in
- * the boot process that they at least contain "rv" and
- * whichever of "32"/"64" this kernel supports, and so this
- * section can be skipped.
- */
- isa += 4;
-
- while (*isa) {
- const char *ext = isa++;
- const char *ext_end = isa;
- bool ext_long = false, ext_err = false;
-
- switch (*ext) {
- case 's':
- /*
- * Workaround for invalid single-letter 's' & 'u'(QEMU).
- * No need to set the bit in riscv_isa as 's' & 'u' are
- * not valid ISA extensions. It works until multi-letter
- * extension starting with "Su" appears.
- */
- if (ext[-1] != '_' && ext[1] == 'u') {
- ++isa;
- ext_err = true;
- break;
- }
- fallthrough;
- case 'S':
- case 'x':
- case 'X':
- case 'z':
- case 'Z':
- /*
- * Before attempting to parse the extension itself, we find its end.
- * As multi-letter extensions must be split from other multi-letter
- * extensions with an "_", the end of a multi-letter extension will
- * either be the null character or the "_" at the start of the next
- * multi-letter extension.
- *
- * Next, as the extensions version is currently ignored, we
- * eliminate that portion. This is done by parsing backwards from
- * the end of the extension, removing any numbers. This may be a
- * major or minor number however, so the process is repeated if a
- * minor number was found.
- *
- * ext_end is intended to represent the first character *after* the
- * name portion of an extension, but will be decremented to the last
- * character itself while eliminating the extensions version number.
- * A simple re-increment solves this problem.
- */
- ext_long = true;
- for (; *isa && *isa != '_'; ++isa)
- if (unlikely(!isalnum(*isa)))
- ext_err = true;
-
- ext_end = isa;
- if (unlikely(ext_err))
- break;
-
- if (!isdigit(ext_end[-1]))
- break;
-
- while (isdigit(*--ext_end))
- ;
-
- if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
- ++ext_end;
- break;
- }
-
- while (isdigit(*--ext_end))
- ;
-
- ++ext_end;
- break;
- default:
- /*
- * Things are a little easier for single-letter extensions, as they
- * are parsed forwards.
- *
- * After checking that our starting position is valid, we need to
- * ensure that, when isa was incremented at the start of the loop,
- * that it arrived at the start of the next extension.
- *
- * If we are already on a non-digit, there is nothing to do. Either
- * we have a multi-letter extension's _, or the start of an
- * extension.
- *
- * Otherwise we have found the current extension's major version
- * number. Parse past it, and a subsequent p/minor version number
- * if present. The `p` extension must not appear immediately after
- * a number, so there is no fear of missing it.
- *
- */
- if (unlikely(!isalpha(*ext))) {
- ext_err = true;
- break;
- }
-
- if (!isdigit(*isa))
- break;
-
- while (isdigit(*++isa))
- ;
-
- if (tolower(*isa) != 'p')
- break;
-
- if (!isdigit(*++isa)) {
- --isa;
- break;
- }
-
- while (isdigit(*++isa))
- ;
-
- break;
- }
-
- /*
- * The parser expects that at the start of an iteration isa points to the
- * first character of the next extension. As we stop parsing an extension
- * on meeting a non-alphanumeric character, an extra increment is needed
- * where the succeeding extension is a multi-letter prefixed with an "_".
- */
- if (*isa == '_')
- ++isa;
-
-#define SET_ISA_EXT_MAP(name, bit) \
- do { \
- if ((ext_end - ext == sizeof(name) - 1) && \
- !strncasecmp(ext, name, sizeof(name) - 1) && \
- riscv_isa_extension_check(bit)) \
- set_bit(bit, isainfo->isa); \
- } while (false) \
-
- if (unlikely(ext_err))
- continue;
- if (!ext_long) {
- int nr = tolower(*ext) - 'a';
-
- if (riscv_isa_extension_check(nr)) {
- this_hwcap |= isa2hwcap[nr];
- set_bit(nr, isainfo->isa);
- }
- } else {
- /* sorted alphabetically */
- SET_ISA_EXT_MAP("smaia", RISCV_ISA_EXT_SMAIA);
- SET_ISA_EXT_MAP("ssaia", RISCV_ISA_EXT_SSAIA);
- SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
- SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
- SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL);
- SET_ISA_EXT_MAP("svnapot", RISCV_ISA_EXT_SVNAPOT);
- SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
- SET_ISA_EXT_MAP("zba", RISCV_ISA_EXT_ZBA);
- SET_ISA_EXT_MAP("zbb", RISCV_ISA_EXT_ZBB);
- SET_ISA_EXT_MAP("zbs", RISCV_ISA_EXT_ZBS);
- SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
- SET_ISA_EXT_MAP("zicboz", RISCV_ISA_EXT_ZICBOZ);
- SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
- }
-#undef SET_ISA_EXT_MAP
- }
-
- /*
- * Linux requires the following extensions, so we may as well
- * always set them.
- */
- set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
- set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
+ riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);
/*
* These ones were as they were part of the base ISA when the
@@ -330,6 +390,8 @@ void __init riscv_fill_hwcap(void)
* unconditionally where `i` is in riscv,isa on DT systems.
*/
if (acpi_disabled) {
+ set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
+ set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
}
@@ -352,9 +414,107 @@ void __init riscv_fill_hwcap(void)
if (!acpi_disabled && rhct)
acpi_put_table((struct acpi_table_header *)rhct);
+}
+
+static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ unsigned long this_hwcap = 0;
+ struct device_node *cpu_node;
+ struct riscv_isainfo *isainfo = &hart_isa[cpu];
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node) {
+ pr_warn("Unable to find cpu node\n");
+ continue;
+ }
+
+ if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
+ of_node_put(cpu_node);
+ continue;
+ }
+
+ for (int i = 0; i < riscv_isa_ext_count; i++) {
+ if (of_property_match_string(cpu_node, "riscv,isa-extensions",
+ riscv_isa_ext[i].property) < 0)
+ continue;
+
+ if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
+ continue;
+
+ /* Only single letter extensions get set in hwcap */
+ if (strnlen(riscv_isa_ext[i].name, 2) == 1)
+ this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+
+ set_bit(riscv_isa_ext[i].id, isainfo->isa);
+ }
+
+ of_node_put(cpu_node);
+
+ /*
+ * All "okay" harts should have same isa. Set HWCAP based on
+ * common capabilities of every "okay" hart, in case they don't.
+ */
+ if (elf_hwcap)
+ elf_hwcap &= this_hwcap;
+ else
+ elf_hwcap = this_hwcap;
+
+ if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
+ bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
+ else
+ bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
+ }
- /* We don't support systems with F but without D, so mask those out
- * here. */
+ if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
+ return -ENOENT;
+
+ return 0;
+}
+
+#ifdef CONFIG_RISCV_ISA_FALLBACK
+bool __initdata riscv_isa_fallback = true;
+#else
+bool __initdata riscv_isa_fallback;
+static int __init riscv_isa_fallback_setup(char *__unused)
+{
+ riscv_isa_fallback = true;
+ return 1;
+}
+early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
+#endif
+
+void __init riscv_fill_hwcap(void)
+{
+ char print_str[NUM_ALPHA_EXTS + 1];
+ unsigned long isa2hwcap[26] = {0};
+ int i, j;
+
+ isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
+ isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
+ isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
+ isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
+ isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
+
+ if (!acpi_disabled) {
+ riscv_fill_hwcap_from_isa_string(isa2hwcap);
+ } else {
+ int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
+
+ if (ret && riscv_isa_fallback) {
+ pr_info("Falling back to deprecated \"riscv,isa\"\n");
+ riscv_fill_hwcap_from_isa_string(isa2hwcap);
+ }
+ }
+
+ /*
+ * We don't support systems with F but without D, so mask those out
+ * here.
+ */
if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
pr_info("This kernel does not support systems with F but not D\n");
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
@@ -396,6 +556,103 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
+void check_unaligned_access(int cpu)
+{
+ u64 start_cycles, end_cycles;
+ u64 word_cycles;
+ u64 byte_cycles;
+ int ratio;
+ unsigned long start_jiffies, now;
+ struct page *page;
+ void *dst;
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+ page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+ if (!page) {
+ pr_warn("Can't alloc pages to measure memcpy performance");
+ return;
+ }
+
+ /* Make an unaligned destination buffer. */
+ dst = (void *)((unsigned long)page_address(page) | 0x1);
+ /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+ src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+ src += 2;
+ word_cycles = -1ULL;
+ /* Do a warmup. */
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ preempt_disable();
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ /*
+ * For a fixed amount of time, repeatedly try the function, and take
+ * the best time in cycles as the measurement.
+ */
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ /* Ensure the CSR read can't reorder WRT to the copy. */
+ mb();
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ /* Ensure the copy ends before the end time is snapped. */
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < word_cycles)
+ word_cycles = end_cycles - start_cycles;
+ }
+
+ byte_cycles = -1ULL;
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ mb();
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < byte_cycles)
+ byte_cycles = end_cycles - start_cycles;
+ }
+
+ preempt_enable();
+
+ /* Don't divide by zero. */
+ if (!word_cycles || !byte_cycles) {
+ pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+ cpu);
+
+ goto out;
+ }
+
+ if (word_cycles < byte_cycles)
+ speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+ ratio = div_u64((byte_cycles * 100), word_cycles);
+ pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+ cpu,
+ ratio / 100,
+ ratio % 100,
+ (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+ per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+ __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
+static int check_unaligned_access_boot_cpu(void)
+{
+ check_unaligned_access(0);
+ return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch
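
The classification at the end of check_unaligned_access() boils down to comparing the best word-copy and byte-copy timings and reporting their ratio as a fixed-point number. A standalone sketch of just that arithmetic, with made-up cycle counts:

/* Sketch of the fast/slow decision and the ratio message. The cycle
 * numbers are invented; in the kernel they come from get_cycles64()
 * around the two copy loops.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t word_cycles = 1200;   /* best word-copy time, hypothetical */
	uint64_t byte_cycles = 5300;   /* best byte-copy time, hypothetical */
	const char *speed = "slow";

	if (word_cycles < byte_cycles)
		speed = "fast";

	uint64_t ratio = byte_cycles * 100 / word_cycles;
	printf("Ratio of byte access time to unaligned word access is "
	       "%llu.%02llu, unaligned accesses are %s\n",
	       (unsigned long long)(ratio / 100),
	       (unsigned long long)(ratio % 100), speed);
	return 0;
}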
diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c
index b351a3c01355..55f1d7856b54 100644
--- a/arch/riscv/kernel/crash_core.c
+++ b/arch/riscv/kernel/crash_core.c
@@ -18,4 +18,6 @@ void arch_crash_save_vmcoreinfo(void)
vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+ vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
+ kernel_map.va_kernel_pa_offset);
}
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 5372b708fae2..f4099059ed8f 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -260,7 +260,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
cmdline = modified_cmdline;
}
-#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+#ifdef CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY
/* Add purgatory to the image */
kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
@@ -274,14 +274,14 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
sizeof(kernel_start), 0);
if (ret)
pr_err("Error update purgatory ret=%d\n", ret);
-#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
+#endif /* CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY */
/* Add the initrd to the image */
if (initrd != NULL) {
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
- kbuf.top_down = false;
+ kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
* sym, instead of searching the whole relsec.
*/
case R_RISCV_PCREL_HI20:
+ case R_RISCV_CALL_PLT:
case R_RISCV_CALL:
*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
ENCODE_UJTYPE_IMM(val - addr);
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 11c3b94c4534..3710ea5d160f 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -289,10 +289,6 @@ clear_bss:
blt a3, a4, clear_bss
clear_bss_done:
#endif
- /* Save hart ID and DTB physical address */
- mv s0, a0
- mv s1, a1
-
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
REG_S a0, (a2)
@@ -306,7 +302,7 @@ clear_bss_done:
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
#else
- mv a0, s1
+ mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
call setup_vm
#ifdef CONFIG_MMU
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index d0577cc6a081..a8efa053c4a5 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -84,6 +84,9 @@ void do_softirq_own_stack(void)
: [sp] "r" (sp)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
+#ifndef CONFIG_FRAME_POINTER
+ "s0",
+#endif
"memory");
} else
#endif
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8a6e5a9e842a..8818a8fa9ff3 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
@@ -47,15 +48,19 @@
addi sp, sp, 4*SZREG
.endm
-ENTRY(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global MCOUNT_NAME
.set MCOUNT_NAME, ftrace_stub
#endif
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ ret
+SYM_FUNC_END(ftrace_stub_graph)
+
ENTRY(return_to_handler)
/*
* On implementing the frame point test, the ideal way is to compare the
diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c
index 64f6183b4717..65d9590bfb9f 100644
--- a/arch/riscv/kernel/probes/decode-insn.c
+++ b/arch/riscv/kernel/probes/decode-insn.c
@@ -29,13 +29,14 @@ riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
* TODO: the REJECTED ones below need to be implemented
*/
#ifdef CONFIG_RISCV_ISA_C
- RISCV_INSN_REJECTED(c_j, insn);
- RISCV_INSN_REJECTED(c_jr, insn);
RISCV_INSN_REJECTED(c_jal, insn);
- RISCV_INSN_REJECTED(c_jalr, insn);
- RISCV_INSN_REJECTED(c_beqz, insn);
- RISCV_INSN_REJECTED(c_bnez, insn);
RISCV_INSN_REJECTED(c_ebreak, insn);
+
+ RISCV_INSN_SET_SIMULATE(c_j, insn);
+ RISCV_INSN_SET_SIMULATE(c_jr, insn);
+ RISCV_INSN_SET_SIMULATE(c_jalr, insn);
+ RISCV_INSN_SET_SIMULATE(c_beqz, insn);
+ RISCV_INSN_SET_SIMULATE(c_bnez, insn);
#endif
RISCV_INSN_SET_SIMULATE(jal, insn);
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
index 7441ac8a6843..d3099d67816d 100644
--- a/arch/riscv/kernel/probes/simulate-insn.c
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -188,3 +188,108 @@ bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *r
return true;
}
+
+bool __kprobes simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * 15 13 12 2 1 0
+ * | funct3 | offset[11|4|9:8|10|6|7|3:1|5] | opcode |
+ * 3 11 2
+ */
+
+ s32 offset;
+
+ offset = ((opcode >> 3) & 0x7) << 1;
+ offset |= ((opcode >> 11) & 0x1) << 4;
+ offset |= ((opcode >> 2) & 0x1) << 5;
+ offset |= ((opcode >> 7) & 0x1) << 6;
+ offset |= ((opcode >> 6) & 0x1) << 7;
+ offset |= ((opcode >> 9) & 0x3) << 8;
+ offset |= ((opcode >> 8) & 0x1) << 10;
+ offset |= ((opcode >> 12) & 0x1) << 11;
+
+ instruction_pointer_set(regs, addr + sign_extend32(offset, 11));
+
+ return true;
+}
+
+static bool __kprobes simulate_c_jr_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs,
+ bool is_jalr)
+{
+ /*
+ * 15 12 11 7 6 2 1 0
+ * | funct4 | rs1 | rs2 | op |
+ * 4 5 5 2
+ */
+
+ unsigned long jump_addr;
+
+ u32 rs1 = (opcode >> 7) & 0x1f;
+
+ if (rs1 == 0) /* C.JR is only valid when rs1 != x0 */
+ return false;
+
+ if (!rv_insn_reg_get_val(regs, rs1, &jump_addr))
+ return false;
+
+ if (is_jalr && !rv_insn_reg_set_val(regs, 1, addr + 2))
+ return false;
+
+ instruction_pointer_set(regs, jump_addr);
+
+ return true;
+}
+
+bool __kprobes simulate_c_jr(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_jr_jalr(opcode, addr, regs, false);
+}
+
+bool __kprobes simulate_c_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_jr_jalr(opcode, addr, regs, true);
+}
+
+static bool __kprobes simulate_c_bnez_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs,
+ bool is_bnez)
+{
+ /*
+ * 15 13 12 10 9 7 6 2 1 0
+ * | funct3 | offset[8|4:3] | rs1' | offset[7:6|2:1|5] | op |
+ * 3 3 3 5 2
+ */
+
+ s32 offset;
+ u32 rs1;
+ unsigned long rs1_val;
+
+ rs1 = 0x8 | ((opcode >> 7) & 0x7);
+
+ if (!rv_insn_reg_get_val(regs, rs1, &rs1_val))
+ return false;
+
+ if ((rs1_val != 0 && is_bnez) || (rs1_val == 0 && !is_bnez)) {
+ offset = ((opcode >> 3) & 0x3) << 1;
+ offset |= ((opcode >> 10) & 0x3) << 3;
+ offset |= ((opcode >> 2) & 0x1) << 5;
+ offset |= ((opcode >> 5) & 0x3) << 6;
+ offset |= ((opcode >> 12) & 0x1) << 8;
+ offset = sign_extend32(offset, 8);
+ } else {
+ offset = 2;
+ }
+
+ instruction_pointer_set(regs, addr + offset);
+
+ return true;
+}
+
+bool __kprobes simulate_c_bnez(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_bnez_beqz(opcode, addr, regs, true);
+}
+
+bool __kprobes simulate_c_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_bnez_beqz(opcode, addr, regs, false);
+}
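
The C.J immediate above is scattered across bits [12:2] of the opcode, so the decode is easiest to sanity-check in isolation. Below is a standalone version of the same reassembly, with a local stand-in for the kernel's sign_extend32() and two example encodings (0xa001 is the self-branching "c.j ."; 0xbffd should decode to an offset of -2):

/* Standalone check of the C.J immediate reassembly from simulate_c_j().
 * sign_extend32() is a local stand-in for the kernel helper.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

static int32_t c_j_offset(uint32_t opcode)
{
	uint32_t offset;

	offset  = ((opcode >> 3)  & 0x7) << 1;
	offset |= ((opcode >> 11) & 0x1) << 4;
	offset |= ((opcode >> 2)  & 0x1) << 5;
	offset |= ((opcode >> 7)  & 0x1) << 6;
	offset |= ((opcode >> 6)  & 0x1) << 7;
	offset |= ((opcode >> 9)  & 0x3) << 8;
	offset |= ((opcode >> 8)  & 0x1) << 10;
	offset |= ((opcode >> 12) & 0x1) << 11;

	return sign_extend32(offset, 11);
}

int main(void)
{
	printf("c.j .  (0xa001) -> offset %d\n", c_j_offset(0xa001));  /*  0 */
	printf("c.j -2 (0xbffd) -> offset %d\n", c_j_offset(0xbffd));  /* -2 */
	return 0;
}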
diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h
index 61e35db31001..44ebbc444db9 100644
--- a/arch/riscv/kernel/probes/simulate-insn.h
+++ b/arch/riscv/kernel/probes/simulate-insn.h
@@ -24,5 +24,10 @@ bool simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_jr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_bnez(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs);
#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 1d572cf3140f..487303e3ef22 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -25,9 +25,6 @@ enum riscv_regset {
#ifdef CONFIG_FPU
REGSET_F,
#endif
-#ifdef CONFIG_RISCV_ISA_V
- REGSET_V,
-#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -84,61 +81,6 @@ static int riscv_fpr_set(struct task_struct *target,
}
#endif
-#ifdef CONFIG_RISCV_ISA_V
-static int riscv_vr_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- struct __riscv_v_ext_state *vstate = &target->thread.vstate;
-
- if (!riscv_v_vstate_query(task_pt_regs(target)))
- return -EINVAL;
-
- /*
- * Ensure the vector registers have been saved to the memory before
- * copying them to membuf.
- */
- if (target == current)
- riscv_v_vstate_save(current, task_pt_regs(current));
-
- /* Copy vector header from vstate. */
- membuf_write(&to, vstate, offsetof(struct __riscv_v_ext_state, datap));
- membuf_zero(&to, sizeof(vstate->datap));
-
- /* Copy all the vector registers from vstate. */
- return membuf_write(&to, vstate->datap, riscv_v_vsize);
-}
-
-static int riscv_vr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret, size;
- struct __riscv_v_ext_state *vstate = &target->thread.vstate;
-
- if (!riscv_v_vstate_query(task_pt_regs(target)))
- return -EINVAL;
-
- /* Copy rest of the vstate except datap */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0,
- offsetof(struct __riscv_v_ext_state, datap));
- if (unlikely(ret))
- return ret;
-
- /* Skip copy datap. */
- size = sizeof(vstate->datap);
- count -= size;
- ubuf += size;
-
- /* Copy all the vector registers. */
- pos = 0;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
- 0, riscv_v_vsize);
- return ret;
-}
-#endif
-
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
.core_note_type = NT_PRSTATUS,
@@ -158,17 +100,6 @@ static const struct user_regset riscv_user_regset[] = {
.set = riscv_fpr_set,
},
#endif
-#ifdef CONFIG_RISCV_ISA_V
- [REGSET_V] = {
- .core_note_type = NT_RISCV_VECTOR,
- .align = 16,
- .n = ((32 * RISCV_MAX_VLENB) +
- sizeof(struct __riscv_v_ext_state)) / sizeof(__u32),
- .size = sizeof(__u32),
- .regset_get = riscv_vr_get,
- .set = riscv_vr_set,
- },
-#endif
};
static const struct user_regset_view riscv_user_native_view = {
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 971fe776e2f8..32c2e1eb71bd 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -178,6 +178,11 @@ static void __init init_resources(void)
if (ret < 0)
goto error;
}
+ if (crashk_low_res.start != crashk_low_res.end) {
+ ret = add_resource(&iomem_resource, &crashk_low_res);
+ if (ret < 0)
+ goto error;
+ }
#endif
#ifdef CONFIG_CRASH_DUMP
@@ -311,6 +316,7 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
riscv_noncoherent_supported();
+ riscv_set_dma_cache_alignment();
}
static int __init topology_init(void)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 85bbce0f758c..40420afbb1a0 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -61,11 +61,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
return -ENOENT;
}
-bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
-{
- return phys_id == cpuid_to_hartid_map(cpu);
-}
-
static void ipi_stop(void)
{
set_cpu_online(smp_processor_id(), false);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f4d6acb38dd0..1b8da4e40a4d 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,6 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
@@ -245,7 +246,7 @@ asmlinkage __visible void smp_callin(void)
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- probe_vendor_features(curr_cpuid);
+ check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index 12b52afe09a4..f7960c7c5f9e 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
@@ -58,7 +59,7 @@ ENTRY(__cpu_suspend_enter)
ret
END(__cpu_suspend_enter)
-ENTRY(__cpu_resume_enter)
+SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
@@ -94,4 +95,4 @@ ENTRY(__cpu_resume_enter)
/* Return to C code */
ret
-END(__cpu_resume_enter)
+SYM_FUNC_END(__cpu_resume_enter)
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 26ef5526bfb4..473159b5f303 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -335,3 +335,9 @@ SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
return do_riscv_hwprobe(pairs, pair_count, cpu_count,
cpus, flags);
}
+
+/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return -ENOSYS;
+}
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index 44b1420a2270..dda913764903 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -10,9 +10,13 @@
#include <asm/syscall.h>
#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
+#include <asm/unistd.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = __riscv_##call,
void * const sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index f910dfccbf5d..19807c4d3805 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -21,6 +21,7 @@
#include <asm/asm-prototypes.h>
#include <asm/bug.h>
+#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -271,7 +272,8 @@ void handle_break(struct pt_regs *regs)
== NOTIFY_STOP)
return;
#endif
- else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
+ else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
+ handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
regs->epc += get_break_insn_length(regs->epc);
else
die(regs, "Kernel BUG");
@@ -297,7 +299,7 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
{
if (user_mode(regs)) {
- ulong syscall = regs->a7;
+ long syscall = regs->a7;
regs->epc += 4;
regs->orig_a0 = regs->a0;
@@ -306,9 +308,9 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
syscall = syscall_enter_from_user_mode(regs, syscall);
- if (syscall < NR_syscalls)
+ if (syscall >= 0 && syscall < NR_syscalls)
syscall_handler(regs, syscall);
- else
+ else if (syscall != -1)
regs->a0 = -ENOSYS;
syscall_exit_to_user_mode(regs);
@@ -372,6 +374,9 @@ asmlinkage void noinstr do_irq(struct pt_regs *regs)
: [sp] "r" (sp), [regs] "r" (regs)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
+#ifndef CONFIG_FRAME_POINTER
+ "s0",
+#endif
"memory");
} else
#endif
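
The traps.c hunk above switches the syscall number to a signed long so the dispatch can tell an out-of-range number apart from -1, the value handed back by syscall_enter_from_user_mode() when the syscall is to be skipped; in that case a0 is left untouched so a return value set by the tracer survives. A minimal sketch of that control flow, where NR_syscalls and the handler body are stand-ins:

/* Sketch of the new dispatch in do_trap_ecall_u(): in-range numbers are
 * handled, -1 is silently skipped, everything else gets -ENOSYS.
 */
#include <stdio.h>
#include <errno.h>

#define NR_syscalls 4

static void dispatch(long syscall, long *a0)
{
	if (syscall >= 0 && syscall < NR_syscalls)
		*a0 = 42;              /* stands in for syscall_handler() */
	else if (syscall != -1)
		*a0 = -ENOSYS;
	/* syscall == -1: leave a0 alone (skipped syscall) */
}

int main(void)
{
	long a0;

	a0 = 0;    dispatch(1, &a0);    printf("nr 1   -> a0 = %ld\n", a0);
	a0 = 0;    dispatch(999, &a0);  printf("nr 999 -> a0 = %ld\n", a0);
	a0 = -100; dispatch(-1, &a0);   printf("nr -1  -> a0 = %ld\n", a0);
	return 0;
}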