Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                                           |  27
-rw-r--r--  arch/riscv/Makefile                                          |   8
-rw-r--r--  arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts |   4
-rw-r--r--  arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi           |   2
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts         |   2
-rw-r--r--  arch/riscv/configs/defconfig                                 |   1
-rw-r--r--  arch/riscv/configs/rv32_defconfig                            |   1
-rw-r--r--  arch/riscv/include/asm/efi.h                                 |   9
-rw-r--r--  arch/riscv/include/asm/page.h                                |  28
-rw-r--r--  arch/riscv/include/asm/processor.h                           |   8
-rw-r--r--  arch/riscv/include/asm/vdso.h                                |  23
-rw-r--r--  arch/riscv/kernel/Makefile                                   |   2
-rw-r--r--  arch/riscv/kernel/probes/decode-insn.c                       |   5
-rw-r--r--  arch/riscv/kernel/probes/simulate-insn.c                     | 112
-rw-r--r--  arch/riscv/kernel/ptrace.c                                   |   4
-rw-r--r--  arch/riscv/kernel/setup.c                                    |   6
-rw-r--r--  arch/riscv/kernel/stacktrace.c                               |   8
-rw-r--r--  arch/riscv/kernel/traps.c                                    |   5
-rw-r--r--  arch/riscv/kernel/vdso/Makefile                              |  26
-rwxr-xr-x  arch/riscv/kernel/vdso/gen_vdso_offsets.sh                   |   5
-rwxr-xr-x  arch/riscv/kernel/vdso/so2s.sh                               |   6
-rw-r--r--  arch/riscv/lib/uaccess.S                                     |  27
-rw-r--r--  arch/riscv/mm/init.c                                         | 243
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c                              |   4
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c                              |   4
25 files changed, 340 insertions(+), 230 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 8fcceb8eda07..c79955655fa4 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -13,9 +13,7 @@ config 32BIT
config RISCV
def_bool y
select ARCH_CLOCKSOURCE_INIT
- select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
- select ARCH_STACKWALK
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
@@ -31,8 +29,12 @@ config RISCV
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_STACKWALK
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -48,16 +50,16 @@ config RISCV
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
- select GENERIC_IOREMAP
+ select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IOREMAP if MMU
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select GENERIC_PCI_IOMAP
select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_STRNCPY_FROM_USER if MMU
- select GENERIC_STRNLEN_USER if MMU
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
@@ -72,6 +74,7 @@ config RISCV
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
+ select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING
@@ -97,6 +100,7 @@ config RISCV
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
+ select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
select OF
@@ -109,6 +113,7 @@ config RISCV
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
+ select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select ZONE_DMA32 if 64BIT
@@ -178,9 +183,6 @@ config ARCH_SUPPORTS_UPROBES
config STACKTRACE_SUPPORT
def_bool y
-config TRACE_IRQFLAGS_SUPPORT
- def_bool y
-
config GENERIC_BUG
def_bool y
depends on BUG
@@ -492,10 +494,16 @@ config CC_HAVE_STACKPROTECTOR_TLS
config STACKPROTECTOR_PER_TASK
def_bool y
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
+config PHYS_RAM_BASE_FIXED
+ bool "Explicitly specified physical RAM address"
+ default n
+
config PHYS_RAM_BASE
hex "Platform Physical RAM address"
+ depends on PHYS_RAM_BASE_FIXED
default "0x80000000"
help
This is the physical address of RAM in the system. It has to be
@@ -508,6 +516,7 @@ config XIP_KERNEL
# This prevents XIP from being enabled by all{yes,mod}config, which
# fail to build since XIP doesn't support large kernels.
depends on !COMPILE_TEST
+ select PHYS_RAM_BASE_FIXED
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
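Note on the PHYS_RAM_BASE_FIXED split above: CONFIG_PHYS_RAM_BASE now only takes effect when something selects PHYS_RAM_BASE_FIXED (currently XIP_KERNEL). A minimal sketch of how boot code can consume the pair, assuming the non-XIP side discovers the base from memblock as the mm/init.c changes below do:

    /* Sketch (assumption): resolve the physical RAM base at boot. */
    #ifdef CONFIG_PHYS_RAM_BASE_FIXED
        phys_ram_base = CONFIG_PHYS_RAM_BASE;       /* fixed, e.g. XIP */
    #else
        phys_ram_base = memblock_start_of_DRAM();   /* discovered from DT */
    #endif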
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index bc74afdbf31e..01906a90c501 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -41,7 +41,7 @@ endif
ifeq ($(CONFIG_LD_IS_LLD),y)
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
-ifneq ($(LLVM_IAS),1)
+ifndef CONFIG_AS_IS_LLVM
KBUILD_CFLAGS += -Wa,-mno-relax
KBUILD_AFLAGS += -Wa,-mno-relax
endif
@@ -108,6 +108,12 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+ifeq ($(CONFIG_MMU),y)
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
+endif
+
ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index ec79944065c9..baea7d204639 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -14,6 +14,10 @@
model = "Microchip PolarFire-SoC Icicle Kit";
compatible = "microchip,mpfs-icicle-kit";
+ aliases {
+ ethernet0 = &emac1;
+ };
+
chosen {
stdout-path = &serial0;
};
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
index b9819570a7d1..9d2fbbc1f777 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -317,7 +317,7 @@
reg = <0x0 0x20112000 0x0 0x2000>;
interrupt-parent = <&plic>;
interrupts = <70 71 72 73>;
- mac-address = [00 00 00 00 00 00];
+ local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg 5>, <&clkcfg 2>;
status = "disabled";
clock-names = "pclk", "hclk";
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index b1c3c596578f..2e4ea84f27e7 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -24,7 +24,7 @@
memory@80000000 {
device_type = "memory";
- reg = <0x0 0x80000000 0x2 0x00000000>;
+ reg = <0x0 0x80000000 0x4 0x00000000>;
};
soc {
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 1f2be234b11c..bc68231a8fb7 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -132,7 +132,6 @@ CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 8dd02b842fef..434ef5b64599 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -127,7 +127,6 @@ CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 6d98cd999680..49b398fe99f1 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -13,6 +13,7 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
@@ -27,10 +28,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return image_addr + SZ_256M;
+ return ULONG_MAX;
}
#define alloc_screen_info(x...) (&screen_info)
@@ -39,10 +40,6 @@ static inline void free_screen_info(struct screen_info *si)
{
}
-static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
-{
-}
-
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index cca8764aed83..109c97e991a6 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -79,8 +79,8 @@ typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_MMU
-extern unsigned long pfn_base;
-#define ARCH_PFN_OFFSET (pfn_base)
+extern unsigned long riscv_pfn_base;
+#define ARCH_PFN_OFFSET (riscv_pfn_base)
#else
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
@@ -91,10 +91,8 @@ struct kernel_mapping {
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
-#endif
unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
uintptr_t xiprom;
@@ -103,26 +101,27 @@ struct kernel_mapping {
};
extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
-#ifdef CONFIG_64BIT
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = y; \
- (_y >= CONFIG_PHYS_RAM_BASE) ? \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) : \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
})
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = y; \
- (_y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
@@ -132,15 +131,6 @@ extern struct kernel_mapping kernel_map;
is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
-#else
-#define is_kernel_mapping(x) \
- ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
-#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET)
-
-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
-#endif /* CONFIG_64BIT */
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
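With the 32-bit/64-bit unification above, both translation directions share a single set of macros. A hedged worked example for a non-XIP 64-bit kernel (XIP_OFFSET is 0 there; addresses illustrative):

    /*
     * Assume kernel_map.phys_addr == 0x80200000, so
     * va_pa_offset        == PAGE_OFFSET      - 0x80200000 and
     * va_kernel_pa_offset == KERNEL_LINK_ADDR - 0x80200000. Then:
     *
     *   linear_mapping_pa_to_va(0x80201000) == PAGE_OFFSET      + 0x1000
     *   kernel_mapping_pa_to_va(0x80201000) == KERNEL_LINK_ADDR + 0x1000
     *
     * __va_to_pa_nodebug() picks the inverse via is_linear_mapping(),
     * which on 32-bit now degenerates to (x) >= PAGE_OFFSET.
     */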
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 021ed64ee608..46b492c78cbb 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -37,6 +37,14 @@ struct thread_struct {
unsigned long bad_cause;
};
+/* Whitelist the fstate from the task_struct for hardened usercopy */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = offsetof(struct thread_struct, fstate);
+ *size = sizeof_field(struct thread_struct, fstate);
+}
+
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long)&init_stack, \
}
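For context on what the new whitelist buys: with HAVE_ARCH_THREAD_STRUCT_WHITELIST selected (see the Kconfig hunk above), the task_struct slab cache gets a usercopy window covering only fstate, so regset code may copy FP state to and from userspace while HARDENED_USERCOPY rejects the rest of thread_struct. A sketch of the generic consumer, assuming the kernel/fork.c flow:

    unsigned long offset, size;

    arch_thread_struct_whitelist(&offset, &size);
    /* The window is relative to task_struct, hence the .thread adjustment. */
    task_struct_cachep = kmem_cache_create_usercopy("task_struct",
                arch_task_struct_size, align, SLAB_PANIC,
                offsetof(struct task_struct, thread) + offset,
                size, NULL);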
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 1453a2f563bc..893e47195e30 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -8,26 +8,25 @@
#ifndef _ASM_RISCV_VDSO_H
#define _ASM_RISCV_VDSO_H
+
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.

+ */
+#ifdef CONFIG_MMU
+
#include <linux/types.h>
+#include <generated/vdso-offsets.h>
#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif
-/*
- * The VDSO symbols are mapped into Linux so we can just use regular symbol
- * addressing to get their offsets in userspace. The symbols are mapped at an
- * offset of 0, but since the linker must support setting weak undefined
- * symbols to the absolute address 0 it also happens to support other low
- * addresses even when the code model suggests those low addresses would not
- * otherwise be availiable.
- */
#define VDSO_SYMBOL(base, name) \
-({ \
- extern const char __vdso_##name[]; \
- (void __user *)((unsigned long)(base) + __vdso_##name); \
-})
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#endif /* CONFIG_MMU */
asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
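Callers are unaffected by the switch from linker-resolved symbols to generated offsets; e.g. the signal path can keep resolving the return trampoline the same way (a representative use, assuming the existing rt_sigreturn vDSO entry):

    /* Sketch: point the return address at the vDSO trampoline. */
    regs->ra = (unsigned long)VDSO_SYMBOL(
            current->mm->context.vdso, rt_sigreturn);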
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d3081e4d9600..3397ddac1a30 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -11,7 +11,7 @@ endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif
extra-y += head.o
diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c
index 0ed043acc882..64f6183b4717 100644
--- a/arch/riscv/kernel/probes/decode-insn.c
+++ b/arch/riscv/kernel/probes/decode-insn.c
@@ -38,11 +38,10 @@ riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
RISCV_INSN_REJECTED(c_ebreak, insn);
#endif
- RISCV_INSN_REJECTED(auipc, insn);
- RISCV_INSN_REJECTED(branch, insn);
-
RISCV_INSN_SET_SIMULATE(jal, insn);
RISCV_INSN_SET_SIMULATE(jalr, insn);
+ RISCV_INSN_SET_SIMULATE(auipc, insn);
+ RISCV_INSN_SET_SIMULATE(branch, insn);
return INSN_GOOD;
}
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
index 2519ce26377d..d73e96f6ed7c 100644
--- a/arch/riscv/kernel/probes/simulate-insn.c
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -83,3 +83,115 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
return ret;
}
+
+#define auipc_rd_idx(opcode) \
+ ((opcode >> 7) & 0x1f)
+
+#define auipc_imm(opcode) \
+ ((((opcode) >> 12) & 0xfffff) << 12)
+
+#if __riscv_xlen == 64
+#define auipc_offset(opcode) sign_extend64(auipc_imm(opcode), 31)
+#elif __riscv_xlen == 32
+#define auipc_offset(opcode) auipc_imm(opcode)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * auipc instruction:
+ * 31 12 11 7 6 0
+ * | imm[31:12] | rd | opcode |
+ * 20 5 7
+ */
+
+ u32 rd_idx = auipc_rd_idx(opcode);
+ unsigned long rd_val = addr + auipc_offset(opcode);
+
+ if (!rv_insn_reg_set_val(regs, rd_idx, rd_val))
+ return false;
+
+ instruction_pointer_set(regs, addr + 4);
+
+ return true;
+}
+
+#define branch_rs1_idx(opcode) \
+ (((opcode) >> 15) & 0x1f)
+
+#define branch_rs2_idx(opcode) \
+ (((opcode) >> 20) & 0x1f)
+
+#define branch_funct3(opcode) \
+ (((opcode) >> 12) & 0x7)
+
+#define branch_imm(opcode) \
+ (((((opcode) >> 8) & 0xf ) << 1) | \
+ ((((opcode) >> 25) & 0x3f) << 5) | \
+ ((((opcode) >> 7) & 0x1 ) << 11) | \
+ ((((opcode) >> 31) & 0x1 ) << 12))
+
+#define branch_offset(opcode) \
+ sign_extend32((branch_imm(opcode)), 12)
+
+#define BRANCH_BEQ 0x0
+#define BRANCH_BNE 0x1
+#define BRANCH_BLT 0x4
+#define BRANCH_BGE 0x5
+#define BRANCH_BLTU 0x6
+#define BRANCH_BGEU 0x7
+
+bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * branch instructions:
+ * 31 30 25 24 20 19 15 14 12 11 8 7 6 0
+ * | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1] | imm[11] | opcode |
+ * 1 6 5 5 3 4 1 7
+ * imm[12|10:5] rs2 rs1 000 imm[4:1|11] 1100011 BEQ
+ * imm[12|10:5] rs2 rs1 001 imm[4:1|11] 1100011 BNE
+ * imm[12|10:5] rs2 rs1 100 imm[4:1|11] 1100011 BLT
+ * imm[12|10:5] rs2 rs1 101 imm[4:1|11] 1100011 BGE
+ * imm[12|10:5] rs2 rs1 110 imm[4:1|11] 1100011 BLTU
+ * imm[12|10:5] rs2 rs1 111 imm[4:1|11] 1100011 BGEU
+ */
+
+ s32 offset;
+ s32 offset_tmp;
+ unsigned long rs1_val;
+ unsigned long rs2_val;
+
+ if (!rv_insn_reg_get_val(regs, branch_rs1_idx(opcode), &rs1_val) ||
+ !rv_insn_reg_get_val(regs, branch_rs2_idx(opcode), &rs2_val))
+ return false;
+
+ offset_tmp = branch_offset(opcode);
+ switch (branch_funct3(opcode)) {
+ case BRANCH_BEQ:
+ offset = (rs1_val == rs2_val) ? offset_tmp : 4;
+ break;
+ case BRANCH_BNE:
+ offset = (rs1_val != rs2_val) ? offset_tmp : 4;
+ break;
+ case BRANCH_BLT:
+ offset = ((long)rs1_val < (long)rs2_val) ? offset_tmp : 4;
+ break;
+ case BRANCH_BGE:
+ offset = ((long)rs1_val >= (long)rs2_val) ? offset_tmp : 4;
+ break;
+ case BRANCH_BLTU:
+ offset = (rs1_val < rs2_val) ? offset_tmp : 4;
+ break;
+ case BRANCH_BGEU:
+ offset = (rs1_val >= rs2_val) ? offset_tmp : 4;
+ break;
+ default:
+ return false;
+ }
+
+ instruction_pointer_set(regs, addr + offset);
+
+ return true;
+}
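A hedged worked example of the new decoding, for auipc a0, 0x12345 (encoding 0x12345517) probed at address addr:

    /*
     *   auipc_rd_idx(0x12345517) == (0x12345517 >> 7) & 0x1f == 10 (a0)
     *   auipc_imm(0x12345517)    == 0x12345 << 12 == 0x12345000
     *   auipc_offset(...)        == sign_extend64(0x12345000, 31)
     *
     * simulate_auipc() thus sets a0 = addr + 0x12345000 and moves the
     * instruction pointer to addr + 4; simulate_branch() likewise adds
     * either the decoded 13-bit offset or 4 when the condition is false.
     */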
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 1a85305720e8..9c0511119bad 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
+#include <asm/switch_to.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
{
struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+ if (target == current)
+ fstate_save(current, task_pt_regs(current));
+
membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
membuf_store(&to, fstate->fcsr);
return membuf_zero(&to, 4); // explicitly pad
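The fstate_save() call above matters because, for target == current, the FP registers can still be live in hardware and thread.fstate stale; a stopped tracee already had its state spilled at context switch. A hedged sketch of what the helper does (assumption: SR_FS dirty-tracking as in switch_to.h):

    if ((regs->status & SR_FS) == SR_FS_DIRTY) {
        __fstate_save(task);    /* spill f0..f31 + fcsr into thread.fstate */
        __fstate_clean(regs);
    }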
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 18bd0e4bc36c..b9620e5f00ba 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -229,8 +229,8 @@ static void __init init_resources(void)
}
/* Clean-up any unused pre-allocated resources */
- mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
- memblock_free(__pa(mem_res), mem_res_sz);
+ if (res_idx >= 0)
+ memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
return;
error:
@@ -255,7 +255,7 @@ static void __init parse_dtb(void)
pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
- strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index ff467b98c3e3..315db3d0229b 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == current) {
+ } else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(1);
sp = (unsigned long)__builtin_frame_address(0);
pc = (unsigned long)__builtin_return_address(0);
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && !task_is_running(task)))
+ if (likely(task && task != current && !task_is_running(task))) {
+ if (!try_get_task_stack(task))
+ return 0;
walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
+ }
return pc;
}
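The try_get/put pair above closes a use-after-free window: with THREAD_INFO_IN_TASK the stack may be freed before the task_struct when the target exits mid-walk. The same pin/unpin pattern for any external walker, as a hedged sketch (consume_entry and cookie are placeholder names):

    if (!try_get_task_stack(task))
        return 0;       /* task exited; stack already gone */
    walk_stackframe(task, NULL, consume_entry, cookie);
    put_task_stack(task);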
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0a98fd0ddfe9..0daaa3e4630d 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -199,11 +199,6 @@ int is_valid_bugaddr(unsigned long pc)
}
#endif /* CONFIG_GENERIC_BUG */
-/* stvec & scratch is already set from head.S */
-void __init trap_init(void)
-{
-}
-
#ifdef CONFIG_VMAP_STACK
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 24d936c147cd..f2e065671e4d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -23,10 +23,10 @@ ifneq ($(c-gettimeofday-y),)
endif
# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-obj-y += vdso.o vdso-syms.o
+obj-y += vdso.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
# Disable -pg to prevent insert call site
@@ -36,6 +36,7 @@ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so
@@ -43,20 +44,22 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld --just-symbols we can then
-# refer to these symbols in the kernel code rather than hand-coded addresses.
-$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
- $(call if_changed,so2s)
-
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
# actual build commands
# The DSO images are built using a special linker script
# Make sure only to export the intended __vdso_xxx symbol offsets.
@@ -65,11 +68,6 @@ quiet_cmd_vdsold = VDSOLD $@
$(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
-# Extracts symbol offsets from the VDSO, converting them into an assembly file
-# that contains the same symbols at the same offsets.
-quiet_cmd_so2s = SO2S $@
- cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
-
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
diff --git a/arch/riscv/kernel/vdso/gen_vdso_offsets.sh b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..c2e5613f3495
--- /dev/null
+++ b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+LC_ALL=C
+sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define \2_offset\t0x\1/p'
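The script reads $(NM) output on vdso.so.dbg and emits one define per exported symbol; an illustrative round trip (offset value hypothetical):

    /* in :  0000000000000800 T __vdso_rt_sigreturn       */
    /* out (include/generated/vdso-offsets.h):            */
    #define __vdso_rt_sigreturn_offset	0x0800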
diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
deleted file mode 100755
index e64cb6d9440e..000000000000
--- a/arch/riscv/kernel/vdso/so2s.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0+
-# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
-
-sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
-| grep '^\.'
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index bceb0629e440..63bc691cff91 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
* t0 - end of uncopied dst
*/
add t0, a0, a2
- bgtu a0, t0, 5f
/*
* Use byte copy only if too small.
+ * SZREG holds 4 for RV32 and 8 for RV64
*/
- li a3, 8*SZREG /* size must be larger than size in word_copy */
+ li a3, 9*SZREG /* size must be larger than size in word_copy */
bltu a2, a3, .Lbyte_copy_tail
/*
- * Copy first bytes until dst is align to word boundary.
+ * Copy first bytes until dst is aligned to word boundary.
* a0 - start of dst
* t1 - start of aligned dst
*/
addi t1, a0, SZREG-1
andi t1, t1, ~(SZREG-1)
/* dst is already aligned, skip */
- beq a0, t1, .Lskip_first_bytes
+ beq a0, t1, .Lskip_align_dst
1:
/* a5 - one byte for copying data */
fixup lb a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t1, 1b /* t1 - start of aligned dst */
-.Lskip_first_bytes:
+.Lskip_align_dst:
/*
* Now dst is aligned.
* Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
*
* a0 - start of aligned dst
* a1 - start of aligned src
- * a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst
*/
- addi t0, t0, -(8*SZREG-1) /* not to over run */
+ addi t0, t0, -(8*SZREG) /* not to overrun */
2:
fixup REG_L a4, 0(a1), 10f
fixup REG_L a5, SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
addi a1, a1, 8*SZREG
bltu a0, t0, 2b
- addi t0, t0, 8*SZREG-1 /* revert to original value */
+ addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail
.Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
* For misaligned copy we still perform aligned word copy, but
* we need to use the value fetched from the previous iteration and
* do some shifts.
- * This is safe because reading less than a word size.
+ * This is safe because reading is less than a word size.
*
* a0 - start of aligned dst
* a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
*/
/* calculating aligned word boundary for dst */
andi t1, t0, ~(SZREG-1)
- /* Converting unaligned src to aligned arc */
+ /* Converting unaligned src to aligned src */
andi a1, a1, ~(SZREG-1)
/*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
* t3 - prev shift
* t4 - current shift
*/
- slli t3, a3, LGREG
+ slli t3, a3, 3 /* converting bytes in a3 to bits */
li a5, SZREG*8
sub t4, a5, t3
- /* Load the first word to combine with seceond word */
+ /* Load the first word to combine with second word */
fixup REG_L a5, 0(a1), 10f
3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
* a1 - start of remaining src
* t0 - end of remaining dst
*/
- bgeu a0, t0, 5f
+ bgeu a0, t0, .Lout_copy_user /* check if end of copy */
4:
fixup lb a5, 0(a1), 10f
addi a1, a1, 1 /* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t0, 4b /* t0 - end of dst */
-5:
+.Lout_copy_user:
/* Disable access to user memory */
csrc CSR_STATUS, t6
li a0, 0
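For readers of the .Lshift_copy block above, a hedged C model of the technique (not the kernel's code): when src and dst disagree on alignment, the loop still issues only word-sized loads and splices adjacent source words with a pair of shifts.

    #include <stddef.h>

    /*
     * off is src's misalignment in bytes, 0 < off < sizeof(long);
     * prev_shift and curr_shift correspond to t3 and t4 in the asm.
     * Little-endian: the high bytes of each destination word come
     * from the next aligned source word.
     */
    static void shift_copy(unsigned long *dst,
                           const unsigned long *src_aligned,
                           size_t words, unsigned int off)
    {
        unsigned int prev_shift = off * 8;
        unsigned int curr_shift = sizeof(long) * 8 - prev_shift;
        unsigned long prev = *src_aligned++;

        while (words--) {
            unsigned long curr = *src_aligned++;
            *dst++ = (prev >> prev_shift) | (curr << curr_shift);
            prev = curr;
        }
    }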
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 269fc648ef3d..c0cddf0fc22d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -36,6 +37,9 @@ EXPORT_SYMBOL(kernel_map);
#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
#ifdef CONFIG_XIP_KERNEL
extern char _xiprom[], _exiprom[];
#endif
@@ -127,10 +131,17 @@ void __init mem_init(void)
}
/*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
+ * whereas for 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
*/
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
static int __init early_mem(char *p)
{
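Hedged arithmetic behind the new 64-bit default, assuming the Sv39 layout this series targets (PAGE_OFFSET == 0xffffffe000000000; treat the constant as an assumption):

    /*
     *   -PAGE_OFFSET         == 0x2000000000 == 128 GiB of linear-map VA
     *   -PAGE_OFFSET - SZ_4G == 0x1f00000000 == 124 GiB memory_limit
     *
     * i.e. the top 4 GiB of virtual space stays with the modules/BPF/
     * kernel mappings instead of the linear map.
     */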
@@ -152,8 +163,8 @@ static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t max_mapped_addr = __pa(~(ulong)0);
- phys_addr_t dram_end;
+ phys_addr_t __maybe_unused max_mapped_addr;
+ phys_addr_t phys_ram_end;
#ifdef CONFIG_XIP_KERNEL
vmlinux_start = __pa_symbol(&_sdata);
@@ -174,18 +185,28 @@ static void __init setup_bootmem(void)
#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- dram_end = memblock_end_of_DRAM();
+
+ phys_ram_end = memblock_end_of_DRAM();
+#ifndef CONFIG_64BIT
+#ifndef CONFIG_XIP_KERNEL
+ phys_ram_base = memblock_start_of_DRAM();
+#endif
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory.
+ * if end of dram is equal to maximum addressable memory. For 64-bit
+ * kernel, this problem can't happen here as the end of the virtual
+ * address space is occupied by the kernel mapping, so this check must
+ * be done as soon as the kernel mapping base address is determined.
*/
- if (max_mapped_addr == (dram_end - 1))
+ max_mapped_addr = __pa(~(ulong)0);
+ if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
- min_low_pfn = PFN_UP(memblock_start_of_DRAM());
- max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+ min_low_pfn = PFN_UP(phys_ram_base);
+ max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -202,6 +223,8 @@ static void __init setup_bootmem(void)
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
+ if (IS_ENABLED(CONFIG_64BIT))
+ hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
memblock_allow_resize();
}
@@ -214,14 +237,15 @@ static struct pt_alloc_ops _pt_ops __initdata;
#define pt_ops _pt_ops
#endif
-unsigned long pfn_base __ro_after_init;
-EXPORT_SYMBOL(pfn_base);
+unsigned long riscv_pfn_base __ro_after_init;
+EXPORT_SYMBOL(riscv_pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
@@ -302,7 +326,6 @@ static void __init create_pte_mapping(pte_t *ptep,
static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
@@ -388,6 +411,7 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next fixmap_pte
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif
void __init create_pgd_mapping(pgd_t *pgdp,
@@ -495,55 +519,87 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
#endif
#ifdef CONFIG_XIP_KERNEL
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
uintptr_t va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.xiprom + (va - kernel_map.virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
- map_size, PAGE_KERNEL);
+ PMD_SIZE, PAGE_KERNEL);
}
#else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
- bool early)
+static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
uintptr_t va, end_va;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - kernel_map.virt_addr),
- map_size,
+ PMD_SIZE,
early ?
PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif
-asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+/*
+ * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
+ * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+ * entry.
+ */
+static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
{
- uintptr_t __maybe_unused pa;
- uintptr_t map_size;
-#ifndef __PAGETABLE_PMD_FOLDED
- pmd_t fix_bmap_spmd, fix_bmap_epmd;
+#ifndef CONFIG_BUILTIN_DTB
+ uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
+ PGDIR_SIZE,
+ IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+ pa, PMD_SIZE, PAGE_KERNEL);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+ pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ }
+
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
+ /*
+ * For 64-bit kernel, __va can't be used since it would return a linear
+ * mapping address whereas dtb_early_va will be used before
+ * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
+ * kernel is mapped in the linear mapping, that makes no difference.
+ */
+ dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif
+ dtb_early_pa = dtb_pa;
+}
+
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+ pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
+
kernel_map.virt_addr = KERNEL_LINK_ADDR;
#ifdef CONFIG_XIP_KERNEL
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+ phys_ram_base = CONFIG_PHYS_RAM_BASE;
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
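A hedged worked example of create_fdt_early_page_table() above, for a 64-bit boot with the FDT handed over at physical 0x87654321 (address illustrative):

    /*
     *   pa = 0x87654321 & ~(PMD_SIZE - 1) == 0x87600000
     *   PMD 0: DTB_EARLY_BASE_VA          -> 0x87600000
     *   PMD 1: DTB_EARLY_BASE_VA + 2 MiB  -> 0x87800000
     *   dtb_early_va = DTB_EARLY_BASE_VA + 0x54321
     *
     * Two consecutive PMDs guarantee coverage even when the FDT
     * straddles a 2 MiB boundary; on 32-bit a single 4 MiB PGDIR
     * entry provides the same window.
     */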
@@ -552,23 +608,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
-
kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
-#ifdef CONFIG_64BIT
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
-#endif
- pfn_base = PFN_DOWN(kernel_map.phys_addr);
-
- /*
- * Enforce boot alignment requirements of RV32 and
- * RV64 by only allowing PMD or PGD mappings.
- */
- map_size = PMD_SIZE;
+ riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
- BUG_ON((kernel_map.phys_addr % map_size) != 0);
+ BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
+
+#ifdef CONFIG_64BIT
+ /*
+ * The last 4K bytes of the addressable memory can not be mapped because
+ * of IS_ERR_VALUE macro.
+ */
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -605,50 +660,10 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size, true);
+ create_kernel_page_table(early_pg_dir, true);
-#ifndef __PAGETABLE_PMD_FOLDED
- /* Setup early PMD for DTB */
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
-#ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PMD mappings for FDT early scan */
- pa = dtb_pa & ~(PMD_SIZE - 1);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
- pa, PMD_SIZE, PAGE_KERNEL);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
- pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
- /*
- * __va can't be used since it would return a linear mapping address
- * whereas dtb_early_va will be used before setup_vm_final installs
- * the linear mapping.
- */
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
- dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
-#endif /* CONFIG_BUILTIN_DTB */
-#else
-#ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PGD mappings for FDT early scan */
- pa = dtb_pa & ~(PGDIR_SIZE - 1);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- pa, PGDIR_SIZE, PAGE_KERNEL);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
- pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
- dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
-#endif /* CONFIG_BUILTIN_DTB */
-#endif
- dtb_early_pa = dtb_pa;
+ /* Setup early mapping for FDT early scan */
+ create_fdt_early_page_table(early_pg_dir, dtb_pa);
/*
* Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
@@ -709,6 +724,8 @@ static void __init setup_vm_final(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
+ if (end >= __pa(PAGE_OFFSET) + memory_limit)
+ end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
@@ -721,7 +738,7 @@ static void __init setup_vm_final(void)
#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
+ create_kernel_page_table(swapper_pg_dir, false);
#endif
/* Clear fixmap PTE and PMD mappings */
@@ -788,38 +805,22 @@ static void __init reserve_crashkernel(void)
crash_size = PAGE_ALIGN(crash_size);
- if (crash_base == 0) {
- /*
- * Current riscv boot protocol requires 2MB alignment for
- * RV64 and 4MB alignment for RV32 (hugepage size)
- */
- crash_base = memblock_find_in_range(search_start, search_end,
- crash_size, PMD_SIZE);
-
- if (crash_base == 0) {
- pr_warn("crashkernel: couldn't allocate %lldKB\n",
- crash_size >> 10);
- return;
- }
- } else {
- /* User specifies base address explicitly. */
- if (!memblock_is_region_memory(crash_base, crash_size)) {
- pr_warn("crashkernel: requested region is not memory\n");
- return;
- }
-
- if (memblock_is_region_reserved(crash_base, crash_size)) {
- pr_warn("crashkernel: requested region is reserved\n");
- return;
- }
-
+ if (crash_base) {
+ search_start = crash_base;
+ search_end = crash_base + crash_size;
+ }
- if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
- pr_warn("crashkernel: requested region is misaligned\n");
- return;
- }
+ /*
+ * Current riscv boot protocol requires 2MB alignment for
+ * RV64 and 4MB alignment for RV32 (hugepage size)
+ */
+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+ search_start, search_end);
+ if (crash_base == 0) {
+ pr_warn("crashkernel: couldn't allocate %lldKB\n",
+ crash_size >> 10);
+ return;
}
- memblock_reserve(crash_base, crash_size);
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
@@ -829,26 +830,6 @@ static void __init reserve_crashkernel(void)
}
#endif /* CONFIG_KEXEC_CORE */
-#ifdef CONFIG_CRASH_DUMP
-/*
- * We keep track of the ELF core header of the crashed
- * kernel with a reserved-memory region with compatible
- * string "linux,elfcorehdr". Here we register a callback
- * to populate elfcorehdr_addr/size when this region is
- * present. Note that this region will be marked as
- * reserved once we call early_init_fdt_scan_reserved_mem()
- * later on.
- */
-static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
-{
- elfcorehdr_addr = rmem->base;
- elfcorehdr_size = rmem->size;
- return 0;
-}
-
-RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
-#endif
-
void __init paging_init(void)
{
setup_bootmem();
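On the crashkernel rework above: memblock_phys_alloc_range() searches and reserves in one call, so the explicit find/validate/reserve sequence collapses and a user-supplied base becomes a degenerate search window. A hedged sketch of the resulting flow:

    if (crash_base) {           /* crashkernel=size@base */
        search_start = crash_base;
        search_end = crash_base + crash_size;
    }
    /* PMD_SIZE keeps the 2 MiB (RV64) / 4 MiB (RV32) boot alignment. */
    crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
                                           search_start, search_end);
    if (!crash_base)
        return;     /* nothing found: nothing was reserved */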
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 81de865f4c7c..e6497424cbf6 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 87e3bf5b9086..3af4131c22c7 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -939,6 +939,10 @@ out_be:
emit_ld(rd, 0, RV_REG_T1, ctx);
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);