author     Charlie Jenkins <charlie@rivosinc.com>    2024-03-13 02:53:41 +0300
committer  Palmer Dabbelt <palmer@rivosinc.com>      2024-04-18 18:10:58 +0300
commit     6b9391b581fddd8579239dad4de4f0393149e10a (patch)
tree       cc60083c637b481c0df1f5e8e426b05cf25b0534 /arch/riscv/include
parent     bebc345413f5fb4c8fafb59ff0bd8509197627e6 (diff)
riscv: Include riscv_set_icache_flush_ctx prctl
Support a new prctl with key PR_RISCV_SET_ICACHE_FLUSH_CTX to enable optimization of cross-modifying code. This prctl lets userspace use icache-flushing instructions such as fence.i with the guarantee that the icache will continue to be clean after thread migration.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20240312-fencei-v13-2-4b6bdc2bbf32@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
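The prctl key and its argument constants are defined in the uapi and documentation parts of this series, not in the headers below. Assuming those names (PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS), a minimal userspace sketch that opts a whole process in before doing cross-modifying code could look like this:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/*
	 * Ask the kernel to keep the icache effectively clean across thread
	 * migration for every thread sharing this mm, so a userspace fence.i
	 * issued after writing new instructions remains sufficient. The
	 * PR_RISCV_* constants come from the uapi half of the series, not
	 * from the headers in this diffstat.
	 */
	if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
		  PR_RISCV_SCOPE_PER_PROCESS)) {
		perror("PR_RISCV_SET_ICACHE_FLUSH_CTX");
		return 1;
	}

	/* ... write instructions to an executable buffer, fence.i, run ... */
	return 0;
}

A per-thread scope (PR_RISCV_SCOPE_PER_THREAD) is also expected, limiting the forced flush to the calling thread rather than the whole mm.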
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--   arch/riscv/include/asm/mmu.h          2
-rw-r--r--   arch/riscv/include/asm/processor.h   10
-rw-r--r--   arch/riscv/include/asm/switch_to.h   23
3 files changed, 35 insertions, 0 deletions
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 355504b37f8e..60be458e94da 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,6 +19,8 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
+ /* Force local icache flush on all migrations. */
+ bool force_icache_flush;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
unsigned long exec_fdpic_loadmap;
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31ab2..cca62013c3c0 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -69,6 +69,7 @@
#endif
#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
struct task_struct;
struct pt_regs;
@@ -123,6 +124,12 @@ struct thread_struct {
struct __riscv_v_ext_state vstate;
unsigned long align_ctl;
struct __riscv_v_ext_state kernel_vstate;
+#ifdef CONFIG_SMP
+ /* Flush the icache on migration */
+ bool force_icache_flush;
+ /* A forced icache flush is not needed if migrating to the previous cpu. */
+ unsigned int prev_cpu;
+#endif
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -184,6 +191,9 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2)
+extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
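riscv_set_icache_flush_ctx() itself is implemented in arch/riscv/mm/cacheflush.c and is outside the diffstat shown here. A minimal sketch of what the declaration above is expected to do, assuming the uapi scope constants from the series and noting that both flags only exist under CONFIG_SMP:

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/prctl.h>
#include <linux/sched.h>

/* Sketch only; the real code lives in arch/riscv/mm/cacheflush.c. */
int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
{
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			/* Force a flush on migration for every thread of this mm. */
			current->mm->context.force_icache_flush = true;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			/* Force a flush on migration for the calling thread only. */
			current->thread.force_icache_flush = true;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

Either flag is then consumed by switch_to_should_flush_icache() in the switch_to.h hunk below.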
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 7efdb0584d47..7594df37cc9f 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -8,6 +8,7 @@
#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
+#include <linux/mm_types.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
@@ -72,14 +73,36 @@ static __always_inline bool has_fpu(void) { return false; }
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
+static inline bool switch_to_should_flush_icache(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+ bool stale_mm = task->mm && task->mm->context.force_icache_flush;
+ bool stale_thread = task->thread.force_icache_flush;
+ bool thread_migrated = smp_processor_id() != task->thread.prev_cpu;
+
+ return thread_migrated && (stale_mm || stale_thread);
+#else
+ return false;
+#endif
+}
+
+#ifdef CONFIG_SMP
+#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id())
+#else
+#define __set_prev_cpu(thread)
+#endif
+
#define switch_to(prev, next, last) \
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
+ __set_prev_cpu(__prev->thread); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
if (has_vector()) \
__switch_to_vector(__prev, __next); \
+ if (switch_to_should_flush_icache(__next)) \
+ local_flush_icache_all(); \
((last) = __switch_to(__prev, __next)); \
} while (0)
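Two follow-up notes. First, __set_prev_cpu() records the hart a task last ran on so that switch_to_should_flush_icache() can skip the forced flush when the task is rescheduled on that same hart, where its own fence.i has already taken effect; only migration to a different hart requires the kernel to flush on the task's behalf. Second, the RISCV_SET_ICACHE_FLUSH_CTX macro defined in processor.h is meant to be picked up by the generic prctl dispatcher; the kernel/sys.c side is not in this diffstat, but following the usual pattern for arch-specific prctls it is expected to look roughly like this:

/* Sketch of the expected kernel/sys.c wiring (not part of this diffstat). */
#ifndef RISCV_SET_ICACHE_FLUSH_CTX
# define RISCV_SET_ICACHE_FLUSH_CTX(a, b)	(-EINVAL)
#endif

	/* ... inside sys_prctl()'s option switch ... */
	case PR_RISCV_SET_ICACHE_FLUSH_CTX:
		error = RISCV_SET_ICACHE_FLUSH_CTX(arg2, arg3);
		break;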