Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/include/asm/efi.h     |  4
-rw-r--r--  arch/riscv/kernel/stacktrace.c   |  6
-rw-r--r--  arch/riscv/lib/uaccess.S         | 27
-rw-r--r--  arch/riscv/mm/init.c             | 32
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c  |  4
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  |  4
6 files changed, 56 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 6d98cd999680..7b3483ba2e84 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return image_addr + SZ_256M;
+ return ULONG_MAX;
}
#define alloc_screen_info(x...) (&screen_info)
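The efi.h change removes the arbitrary 256M window above the kernel image: the EFI stub may now place the initrd anywhere the firmware can allocate it. A minimal before/after sketch, with a purely illustrative load address (not taken from the patch):

	unsigned long image_addr = 0x80200000UL;          /* hypothetical */
	unsigned long old_limit  = image_addr + SZ_256M;  /* 0x90200000 */
	unsigned long new_limit  = ULONG_MAX;             /* no cap at all */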
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index ff467b98c3e3..ac7593607fa6 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && !task_is_running(task)))
+ if (likely(task && task != current && !task_is_running(task))) {
+ if (!try_get_task_stack(task))
+ return 0;
walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
+ }
return pc;
}
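The try_get_task_stack()/put_task_stack() pair pins the target task's stack so it cannot be freed under walk_stackframe() (for instance when the task exits concurrently and its vmapped stack is released). The resulting function, assembled from the hunk above for readability:

	unsigned long get_wchan(struct task_struct *task)
	{
		unsigned long pc = 0;

		if (likely(task && task != current && !task_is_running(task))) {
			if (!try_get_task_stack(task))	/* stack already gone, bail out */
				return 0;
			walk_stackframe(task, NULL, save_wchan, &pc);
			put_task_stack(task);		/* drop the stack reference */
		}
		return pc;
	}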
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index bceb0629e440..63bc691cff91 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
* t0 - end of uncopied dst
*/
add t0, a0, a2
- bgtu a0, t0, 5f
/*
* Use byte copy only if too small.
+ * SZREG holds 4 for RV32 and 8 for RV64
*/
- li a3, 8*SZREG /* size must be larger than size in word_copy */
+ li a3, 9*SZREG /* size must be larger than the block size in word_copy */
bltu a2, a3, .Lbyte_copy_tail
/*
- * Copy first bytes until dst is align to word boundary.
+ * Copy first bytes until dst is aligned to word boundary.
* a0 - start of dst
* t1 - start of aligned dst
*/
addi t1, a0, SZREG-1
andi t1, t1, ~(SZREG-1)
/* dst is already aligned, skip */
- beq a0, t1, .Lskip_first_bytes
+ beq a0, t1, .Lskip_align_dst
1:
/* a5 - one byte for copying data */
fixup lb a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t1, 1b /* t1 - start of aligned dst */
-.Lskip_first_bytes:
+.Lskip_align_dst:
/*
* Now dst is aligned.
* Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
*
* a0 - start of aligned dst
* a1 - start of aligned src
- * a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst
*/
- addi t0, t0, -(8*SZREG-1) /* not to over run */
+ addi t0, t0, -(8*SZREG) /* not to overrun */
2:
fixup REG_L a4, 0(a1), 10f
fixup REG_L a5, SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
addi a1, a1, 8*SZREG
bltu a0, t0, 2b
- addi t0, t0, 8*SZREG-1 /* revert to original value */
+ addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail
.Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
* For misaligned copy we still perform aligned word copy, but
* we need to use the value fetched from the previous iteration and
* do some shifts.
- * This is safe because reading less than a word size.
+ * This is safe because reading is less than a word size.
*
* a0 - start of aligned dst
* a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
*/
/* calculating aligned word boundary for dst */
andi t1, t0, ~(SZREG-1)
- /* Converting unaligned src to aligned arc */
+ /* Converting unaligned src to aligned src */
andi a1, a1, ~(SZREG-1)
/*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
* t3 - prev shift
* t4 - current shift
*/
- slli t3, a3, LGREG
+ slli t3, a3, 3 /* converting bytes in a3 to bits */
li a5, SZREG*8
sub t4, a5, t3
- /* Load the first word to combine with seceond word */
+ /* Load the first word to combine with second word */
fixup REG_L a5, 0(a1), 10f
3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
* a1 - start of remaining src
* t0 - end of remaining dst
*/
- bgeu a0, t0, 5f
+ bgeu a0, t0, .Lout_copy_user /* check if end of copy */
4:
fixup lb a5, 0(a1), 10f
addi a1, a1, 1 /* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t0, 4b /* t0 - end of dst */
-5:
+.Lout_copy_user:
/* Disable access to user memory */
csrc CSR_STATUS, t6
li a0, 0
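The bound fix in word_copy is easier to see in C: the loop advances 8*SZREG bytes per iteration, so the end pointer must be pulled back by a full 8*SZREG (not 8*SZREG-1) for the continuation test to guarantee a complete block remains, and the new 9*SZREG entry threshold keeps the first iteration safe even after up to SZREG-1 dst-alignment bytes were consumed. A rough C equivalent of the aligned path (a sketch, not the kernel code):

	unsigned char *d = dst, *s = src;
	unsigned char *end  = d + len;			/* t0 before adjustment */
	unsigned char *last = end - 8 * SZREG;		/* was end - (8*SZREG - 1) */

	/* entry requires len >= 9*SZREG, so more than 8*SZREG bytes remain here */
	do {
		memcpy(d, s, 8 * SZREG);	/* stands in for the 8 REG_L/REG_S pairs */
		d += 8 * SZREG;
		s += 8 * SZREG;
	} while (d < last);			/* bltu a0, t0, 2b */
	/* the remaining end - d bytes (at most 8*SZREG) go to .Lbyte_copy_tail */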
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 269fc648ef3d..a14bf3910eec 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -127,10 +127,17 @@ void __init mem_init(void)
}
/*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
+ * whereas for 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
*/
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
static int __init early_mem(char *p)
{
@@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+ phys_addr_t __maybe_unused max_mapped_addr;
phys_addr_t dram_end;
#ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory.
+ * if end of dram is equal to maximum addressable memory. For 64-bit
+ * kernel, this problem can't happen here as the end of the virtual
+ * address space is occupied by the kernel mapping, so this check must
+ * be done in create_kernel_page_table.
*/
+ max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (dram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % map_size) != 0);
+#ifdef CONFIG_64BIT
+ /*
+ * The last 4K bytes of the addressable memory can not be mapped because
+ * of IS_ERR_VALUE macro.
+ */
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
+ if (end >= __pa(PAGE_OFFSET) + memory_limit)
+ end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
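The two 64-bit additions in init.c work together: memory_limit caps how much physical memory the linear map may cover, and setup_vm_final() clamps every memblock region to that cap so nothing gets mapped into the top of the virtual address space reserved for the kernel, modules and BPF. The arithmetic, assuming the default sv39 layout where PAGE_OFFSET is 0xffffffe000000000 (an illustrative value, not part of the patch):

	unsigned long page_offset = 0xffffffe000000000UL;	/* sv39 assumption */
	phys_addr_t memory_limit  = -page_offset - SZ_4G;
	/*  -page_offset         = 0x0000002000000000  -> 128 GiB linear map
	 *  -page_offset - SZ_4G = 0x0000001f00000000  -> 124 GiB usable,
	 *  leaving the top 4 GiB of virtual space for kernel/modules/BPF */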
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 81de865f4c7c..e6497424cbf6 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 87e3bf5b9086..3af4131c22c7 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -939,6 +939,10 @@ out_be:
emit_ld(rd, 0, RV_REG_T1, ctx);
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);
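Both JIT hunks handle BPF_ST | BPF_NOSPEC, the pseudo-instruction the verifier inserts as a speculation barrier against Spectre v4: on RISC-V no machine code is emitted for it, so the case is an explicit no-op (architectures with a suitable barrier, such as x86 with lfence, emit one here). A sketch of the instruction being matched, assuming the verifier leaves all other fields zero:

	struct bpf_insn nospec = {
		.code = BPF_ST | BPF_NOSPEC,	/* dst_reg, src_reg, off, imm all 0 */
	};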