Diffstat (limited to 'arch/riscv/include')
63 files changed, 1237 insertions, 790 deletions
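[Reviewer note, not part of the patch] One detail worth flagging before the per-file hunks: the arch_hweight.h change below rewrites the Zbb cpop/cpopw asm from a tied read-write operand ("+r" (w), with "%0, %0") to a separate output/input pair ("=r" (w) : "r" (w), with "%0, %1"). A minimal stand-alone sketch of the two GCC inline-asm constraint styles, assuming a riscv64 toolchain; double_tied() and double_split() are hypothetical names used only for illustration, not anything in the patch:

	/* "+r": %0 is read and written through a single register. */
	static inline unsigned int double_tied(unsigned int w)
	{
		asm("add %0, %0, %0" : "+r" (w));
		return w;
	}

	/* "=r" output plus "r" input: the compiler may allocate two
	 * distinct registers for %0 and %1, so the input value stays
	 * live independently of the result. */
	static inline unsigned int double_split(unsigned int w)
	{
		unsigned int ret;

		asm("add %0, %1, %1" : "=r" (ret) : "r" (w));
		return ret;
	}

Both forms are valid GCC/Clang inline asm; the hunk switches to the split form, which decouples the cpop destination from its source operand.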
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 504f8b7e72d4..1461af12da6e 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -1,7 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += syscall_table_32.h +syscall-y += syscall_table_64.h + generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h +generic-y += mmzone.h generic-y += parport.h generic-y += spinlock.h generic-y += spinlock_types.h diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h index 7dad0cf9d701..6e13695120bc 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h @@ -61,11 +61,14 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } void acpi_init_rintc_map(void); struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); -u32 get_acpi_id_for_cpu(int cpu); +static inline u32 get_acpi_id_for_cpu(int cpu) +{ + return acpi_cpu_get_madt_rintc(cpu)->uid; +} + int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const char **isa); -static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, u32 *cboz_size, u32 *cbop_size); #else @@ -87,4 +90,10 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table, #endif /* CONFIG_ACPI */ +#ifdef CONFIG_ACPI_NUMA +void acpi_map_cpus_to_nodes(void); +#else +static inline void acpi_map_cpus_to_nodes(void) { } +#endif /* CONFIG_ACPI_NUMA */ + #endif /*_ASM_ACPI_H*/ diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h index 85b2c443823e..613769b9cdc9 100644 --- a/arch/riscv/include/asm/arch_hweight.h +++ b/arch/riscv/include/asm/arch_hweight.h @@ -26,9 +26,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w) asm (".option push\n" ".option arch,+zbb\n" - CPOPW "%0, %0\n" + CPOPW "%0, %1\n" ".option pop\n" - : "+r" (w) : :); + : "=r" (w) : "r" (w) :); return w; @@ -57,9 +57,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w) asm (".option push\n" ".option arch,+zbb\n" - "cpop %0, %0\n" + "cpop %0, %1\n" ".option pop\n" - : "+r" (w) : :); + : "=r" (w) : "r" (w) :); return w; diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 0e0522e588ca..5b96c2f61adb 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -195,22 +195,28 @@ ATOMIC_OPS(xor, xor, i) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " beq %[p], %[u], 1f\n" \ + " add %[rc], %[p], %[a]\n" \ + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : [a]"r" (_a), [u]"r" (_u) \ + : "memory"); \ +}) + /* This is required to provide a full barrier on success. 
*/ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " beq %[p], %[u], 1f\n" - " add %[rc], %[p], %[a]\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : [a]"r" (a), [u]"r" (u) - : "memory"); + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w"); + return prev; } #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless @@ -221,77 +227,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " beq %[p], %[u], 1f\n" - " add %[rc], %[p], %[a]\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : [a]"r" (a), [u]"r" (u) - : "memory"); + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d"); + return prev; } #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless #endif +#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " bltz %[p], 1f\n" \ + " addi %[rc], %[p], 1\n" \ + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) + static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " bltz %[p], 1f\n" - " addi %[rc], %[p], 1\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w"); + return !(prev < 0); } #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative +#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " bgtz %[p], 1f\n" \ + " addi %[rc], %[p], -1\n" \ + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) + static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " bgtz %[p], 1f\n" - " addi %[rc], %[p], -1\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w"); + return !(prev > 0); } #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive +#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " addi %[rc], %[p], -1\n" \ + " bltz %[rc], 1f\n" \ + " sc." 
sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) + static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " addi %[rc], %[p], -1\n" - " bltz %[rc], 1f\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_if_positive(prev, rc, v->counter, "w"); + return prev - 1; } @@ -303,17 +318,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " bltz %[p], 1f\n" - " addi %[rc], %[p], 1\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d"); + return !(prev < 0); } @@ -324,17 +330,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " bgtz %[p], 1f\n" - " addi %[rc], %[p], -1\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d"); + return !(prev > 0); } @@ -345,17 +342,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " addi %[rc], %[p], -1\n" - " bltz %[rc], 1f\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - RISCV_FULL_BARRIER - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_if_positive(prev, rc, v->counter, "d"); + return prev - 1; } diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 880b56d8480d..e1d9bf1deca6 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -11,6 +11,7 @@ #define _ASM_RISCV_BARRIER_H #ifndef __ASSEMBLY__ +#include <asm/cmpxchg.h> #include <asm/fence.h> #define nop() __asm__ __volatile__ ("nop") @@ -28,21 +29,6 @@ #define __smp_rmb() RISCV_FENCE(r, r) #define __smp_wmb() RISCV_FENCE(w, w) -#define __smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(rw, w); \ - WRITE_ONCE(*p, v); \ -} while (0) - -#define __smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(r, rw); \ - ___p1; \ -}) - /* * This is a very specific barrier: it's currently only used in two places in * the kernel, both in the scheduler. 
See include/linux/spinlock.h for the two @@ -70,6 +56,35 @@ do { \ */ #define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw) +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(rw, w); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(r, rw); \ + ___p1; \ +}) + +#ifdef CONFIG_RISCV_ISA_ZAWRS +#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + __cmpwait_relaxed(ptr, VAL); \ + } \ + (typeof(*ptr))VAL; \ +}) +#endif + #include <asm-generic/barrier.h> #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 880606b0469a..fae152ea0508 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -170,7 +170,7 @@ legacy: ({ \ typeof(x) x_ = (x); \ __builtin_constant_p(x_) ? \ - (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \ + ((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \ : \ variable_fls(x_); \ }) @@ -222,44 +222,44 @@ legacy: #define __NOT(x) (~(x)) /** - * test_and_set_bit - Set a bit and return its old value + * arch_test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation may be reordered on other architectures than x86. */ -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(or, __NOP, nr, addr); } /** - * test_and_clear_bit - Clear a bit and return its old value + * arch_test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation can be reordered on other architectures other than x86. */ -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(and, __NOT, nr, addr); } /** - * test_and_change_bit - Change a bit and return its old value + * arch_test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(xor, __NOP, nr, addr); } /** - * set_bit - Atomically set a bit in memory + * arch_set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. 
*/ -static inline void set_bit(int nr, volatile unsigned long *addr) +static inline void arch_set_bit(int nr, volatile unsigned long *addr) { __op_bit(or, __NOP, nr, addr); } /** - * clear_bit - Clears a bit in memory + * arch_clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * @@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr) * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static inline void arch_clear_bit(int nr, volatile unsigned long *addr) { __op_bit(and, __NOT, nr, addr); } /** - * change_bit - Toggle a bit in memory + * arch_change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * @@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void change_bit(int nr, volatile unsigned long *addr) +static inline void arch_change_bit(int nr, volatile unsigned long *addr) { __op_bit(xor, __NOP, nr, addr); } /** - * test_and_set_bit_lock - Set a bit and return its old value, for lock + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics. * It can be used to implement bit locks. */ -static inline int test_and_set_bit_lock( +static inline int arch_test_and_set_bit_lock( unsigned long nr, volatile unsigned long *addr) { return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq); } /** - * clear_bit_unlock - Clear a bit in memory, for unlock + * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static inline void clear_bit_unlock( +static inline void arch_clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { __op_bit_ord(and, __NOT, nr, addr, .rl); } /** - * __clear_bit_unlock - Clear a bit in memory, for unlock + * arch___clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -345,13 +345,13 @@ static inline void clear_bit_unlock( * non-atomic property here: it's a lot more instructions and we still have to * provide release semantics anyway. 
*/ -static inline void __clear_bit_unlock( +static inline void arch___clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { - clear_bit_unlock(nr, addr); + arch_clear_bit_unlock(nr, addr); } -static inline bool xor_unlock_is_negative_byte(unsigned long mask, +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, volatile unsigned long *addr) { unsigned long res; @@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, #undef __NOT #undef __AMO +#include <asm-generic/bitops/instrumented-atomic.h> +#include <asm-generic/bitops/instrumented-lock.h> + #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h index 2174fe7bac9a..570e9d8acad1 100644 --- a/arch/riscv/include/asm/cache.h +++ b/arch/riscv/include/asm/cache.h @@ -26,8 +26,8 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_RISCV_DMA_NONCOHERENT extern int dma_cache_alignment; +#ifdef CONFIG_RISCV_DMA_NONCOHERENT #define dma_get_cache_alignment dma_get_cache_alignment static inline int dma_get_cache_alignment(void) { diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index a129dac4521d..8de73f91bfa3 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -13,6 +13,12 @@ static inline void local_flush_icache_all(void) asm volatile ("fence.i" ::: "memory"); } +static inline void local_flush_icache_range(unsigned long start, + unsigned long end) +{ + local_flush_icache_all(); +} + #define PG_dcache_clean PG_arch_1 static inline void flush_dcache_folio(struct folio *folio) @@ -33,11 +39,30 @@ static inline void flush_dcache_page(struct page *page) * so instead we just flush the whole thing. */ #define flush_icache_range(start, end) flush_icache_all() -#define flush_icache_user_page(vma, pg, addr, len) \ - flush_icache_mm(vma->vm_mm, 0) +#define flush_icache_user_page(vma, pg, addr, len) \ +do { \ + if (vma->vm_flags & VM_EXEC) \ + flush_icache_mm(vma->vm_mm, 0); \ +} while (0) #ifdef CONFIG_64BIT -#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) +extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; +extern char _end[]; +#define flush_cache_vmap flush_cache_vmap +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ + if (is_vmalloc_or_module_addr((void *)start)) { + int i; + + /* + * We don't care if concurrently a cpu resets this value since + * the only place this can happen is in handle_exception() where + * an sfence.vma is emitted. 
+ */ + for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i) + new_vmalloc[i] = -1ULL; + } +} #define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end) #endif diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 2fee65cc8443..ebbce134917c 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -8,142 +8,86 @@ #include <linux/bug.h> +#include <asm/alternative-macros.h> #include <asm/fence.h> +#include <asm/hwcap.h> +#include <asm/insn-def.h> -#define __xchg_relaxed(ptr, new, size) \ +#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ ({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z4\n" \ + " or %1, %1, %z3\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + append \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" (__newx), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ }) -#define arch_xchg_relaxed(ptr, x) \ +#define __arch_xchg(sfx, prepend, append, r, p, n) \ ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ - _x_, sizeof(*(ptr))); \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" sfx " %0, %2, %1\n" \ + append \ + : "=r" (r), "+A" (*(p)) \ + : "r" (n) \ + : "memory"); \ }) -#define __xchg_acquire(ptr, new, size) \ +#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \ + sc_append, swap_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ + __typeof__(*(__ptr)) __new = (new); \ + __typeof__(*(__ptr)) __ret; \ + \ + switch (sizeof(*__ptr)) { \ + case 1: \ + case 2: \ + __arch_xchg_masked(sc_sfx, prepend, sc_append, \ + __ret, __ptr, __new); \ + break; \ case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + __arch_xchg(".w" swap_sfx, prepend, swap_append, \ + __ret, __ptr, __new); \ break; \ case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + __arch_xchg(".d" swap_sfx, prepend, swap_append, \ + __ret, __ptr, __new); \ break; \ default: \ BUILD_BUG(); \ } \ - __ret; \ + (__typeof__(*(__ptr)))__ret; \ }) -#define arch_xchg_acquire(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_acquire((ptr), \ - _x_, sizeof(*(ptr))); \ -}) +#define arch_xchg_relaxed(ptr, x) \ + _arch_xchg(ptr, x, "", "", "", "", "") -#define __xchg_release(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( 
\ - RISCV_RELEASE_BARRIER \ - " amoswap.w %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - " amoswap.d %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) +#define arch_xchg_acquire(ptr, x) \ + _arch_xchg(ptr, x, "", "", "", \ + RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) #define arch_xchg_release(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_release((ptr), \ - _x_, sizeof(*(ptr))); \ -}) - -#define __arch_xchg(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w.aqrl %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d.aqrl %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) + _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") #define arch_xchg(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \ -}) + _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "") #define xchg32(ptr, x) \ ({ \ @@ -162,190 +106,95 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ -#define __cmpxchg_relaxed(ptr, old, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) -#define arch_cmpxchg_relaxed(ptr, o, n) \ +#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \ ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __oldx = (ulong)(o) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ + " and %1, %0, %z6\n" \ + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ + "rJ" (__mask), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ }) -#define __cmpxchg_acquire(ptr, old, new, size) \ +#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \ ({ 
\ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ register unsigned int __rc; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - RISCV_ACQUIRE_BARRIER \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - RISCV_ACQUIRE_BARRIER \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) - -#define arch_cmpxchg_acquire(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr" lr_sfx " %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc" sc_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + append \ + "1:\n" \ + : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ + : "rJ" (co o), "rJ" (n) \ + : "memory"); \ }) -#define __cmpxchg_release(ptr, old, new, size) \ +#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ + __typeof__(*(__ptr)) __old = (old); \ + __typeof__(*(__ptr)) __new = (new); \ + __typeof__(*(__ptr)) __ret; \ + \ + switch (sizeof(*__ptr)) { \ + case 1: \ + case 2: \ + __arch_cmpxchg_masked(sc_sfx, prepend, append, \ + __ret, __ptr, __old, __new); \ + break; \ case 4: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ + __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \ + __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ + __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \ + __ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ } \ - __ret; \ + (__typeof__(*(__ptr)))__ret; \ }) -#define arch_cmpxchg_release(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) +#define arch_cmpxchg_relaxed(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), "", "", "") -#define __cmpxchg(ptr, old, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - RISCV_FULL_BARRIER \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - "0: lr.d %0, 
%2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - RISCV_FULL_BARRIER \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) +#define arch_cmpxchg_acquire(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) + +#define arch_cmpxchg_release(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") #define arch_cmpxchg(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) + _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") #define arch_cmpxchg_local(ptr, o, n) \ - (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) + arch_cmpxchg_relaxed((ptr), (o), (n)) #define arch_cmpxchg64(ptr, o, n) \ ({ \ @@ -359,4 +208,77 @@ arch_cmpxchg_relaxed((ptr), (o), (n)); \ }) +#define arch_cmpxchg64_relaxed(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ +}) + +#define arch_cmpxchg64_acquire(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_acquire((ptr), (o), (n)); \ +}) + +#define arch_cmpxchg64_release(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_release((ptr), (o), (n)); \ +}) + +#ifdef CONFIG_RISCV_ISA_ZAWRS +/* + * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to + * @val we expect it to still terminate within a "reasonable" amount of time + * for an implementation-specific other reason, a pending, locally-enabled + * interrupt, or because it has been configured to raise an illegal + * instruction exception. + */ +static __always_inline void __cmpwait(volatile void *ptr, + unsigned long val, + int size) +{ + unsigned long tmp; + + asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop", + 0, RISCV_ISA_EXT_ZAWRS, 1) + : : : : no_zawrs); + + switch (size) { + case 4: + asm volatile( + " lr.w %0, %1\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(u32 *)ptr) + : "r" (val)); + break; +#if __riscv_xlen == 64 + case 8: + asm volatile( + " lr.d %0, %1\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(u64 *)ptr) + : "r" (val)); + break; +#endif + default: + BUILD_BUG(); + } + + return; + +no_zawrs: + asm volatile(RISCV_PAUSE : : : "memory"); +} + +#define __cmpwait_relaxed(ptr, val) \ + __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) +#endif + #endif /* _ASM_RISCV_CMPXCHG_H */ diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 347805446151..45f9c1171a48 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -33,6 +33,31 @@ extern struct riscv_isainfo hart_isa[NR_CPUS]; void riscv_user_isa_enable(void); +#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ + .name = #_name, \ + .property = #_name, \ + .id = _id, \ + .subset_ext_ids = _subset_exts, \ + .subset_ext_size = _subset_exts_size, \ + .validate = _validate \ +} + +#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL) + +#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \ + _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate) + +/* Used to declare pure "lasso" extension (Zk for instance) */ +#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ + 
_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ + ARRAY_SIZE(_bundled_exts), NULL) + +/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ +#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL) +#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) + #if defined(CONFIG_RISCV_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); void unaligned_emulation_finish(void); @@ -70,6 +95,7 @@ struct riscv_isa_ext_data { const char *property; const unsigned int *subset_ext_ids; const unsigned int subset_ext_size; + int (*validate)(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap); }; extern const struct riscv_isa_ext_data riscv_isa_ext[]; @@ -78,59 +104,66 @@ extern bool riscv_isa_fallback; unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +#define STANDARD_EXT 0 + bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); #define riscv_isa_extension_available(isa_bitmap, ext) \ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) -static __always_inline bool -riscv_has_extension_likely(const unsigned long ext) +static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, + const unsigned long ext) { - compiletime_assert(ext < RISCV_ISA_EXT_MAX, - "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { - asm goto( - ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) - : - : [ext] "i" (ext) - : - : l_no); - } else { - if (!__riscv_isa_extension_available(NULL, ext)) - goto l_no; - } + asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_no); return true; l_no: return false; } -static __always_inline bool -riscv_has_extension_unlikely(const unsigned long ext) +static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, + const unsigned long ext) { - compiletime_assert(ext < RISCV_ISA_EXT_MAX, - "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { - asm goto( - ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) - : - : [ext] "i" (ext) - : - : l_yes); - } else { - if (__riscv_isa_extension_available(NULL, ext)) - goto l_yes; - } + asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_yes); return false; l_yes: return true; } +static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_unlikely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +static __always_inline bool riscv_has_extension_likely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_likely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) { - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext)) + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if 
(IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + __riscv_has_extension_likely(STANDARD_EXT, ext)) return true; return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); @@ -138,7 +171,10 @@ static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsign static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) { - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext)) + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + __riscv_has_extension_unlikely(STANDARD_EXT, ext)) return true; return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 2468c55933cd..25966995da04 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -168,7 +168,8 @@ #define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT) #define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \ (_AC(1, UL) << IRQ_S_TIMER) | \ - (_AC(1, UL) << IRQ_S_EXT)) + (_AC(1, UL) << IRQ_S_EXT) | \ + (_AC(1, UL) << IRQ_PMU_OVF)) /* AIA CSR bits */ #define TOPI_IID_SHIFT 16 @@ -281,7 +282,7 @@ #define CSR_HPMCOUNTER30H 0xc9e #define CSR_HPMCOUNTER31H 0xc9f -#define CSR_SSCOUNTOVF 0xda0 +#define CSR_SCOUNTOVF 0xda0 #define CSR_SSTATUS 0x100 #define CSR_SIE 0x104 diff --git a/arch/riscv/include/asm/dmi.h b/arch/riscv/include/asm/dmi.h new file mode 100644 index 000000000000..ca7cce557ef7 --- /dev/null +++ b/arch/riscv/include/asm/dmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Intel Corporation + * + * based on arch/arm64/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __ASM_DMI_H +#define __ASM_DMI_H + +#include <linux/io.h> +#include <linux/slab.h> + +#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB) +#define dmi_early_unmap(x, l) memunmap(x) +#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB) +#define dmi_unmap(x) memunmap(x) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index efd851e1b483..7c8a71a526a3 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -43,11 +43,21 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ CONFIG_ERRATA_SIFIVE_CIP_453) #else /* !__ASSEMBLY__ */ -#define ALT_FLUSH_TLB_PAGE(x) \ +#define ALT_SFENCE_VMA_ASID(asid) \ +asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (asid) : "memory") + +#define ALT_SFENCE_VMA_ADDR(addr) \ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ : : "r" (addr) : "memory") +#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid) \ +asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr), "r" (asid) : "memory") + /* * _val is marked as "will be overwritten", so need to set it to 0 * in the default case. 
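[Reviewer note, not part of the patch] The cmpxchg.h hunks above add byte and halfword support by routing 1- and 2-byte objects through the aligned 32-bit word that contains them (__arch_xchg_masked / __arch_cmpxchg_masked). A hedged user-space sketch of just the address arithmetic those macros compute (__ptr32b, __s, __mask); masked_position() is a hypothetical name and the printed values are only illustrative:

	#include <stdint.h>
	#include <stdio.h>

	static void masked_position(uintptr_t p, size_t size)
	{
		/* Aligned u32 that contains the 1- or 2-byte object. */
		uintptr_t word = p & ~(uintptr_t)0x3;
		/* Bit offset of the object inside that word (little endian). */
		unsigned int s = (p & (0x4 - size)) * 8;
		/* Mask covering the object once shifted into place. */
		uint32_t mask = (uint32_t)(((1ULL << (size * 8)) - 1) << s);

		printf("addr=%#lx word=%#lx shift=%u mask=%#x\n",
		       (unsigned long)p, (unsigned long)word, s, mask);
	}

	int main(void)
	{
		masked_position(0x1001, 1);	/* shift 8,  mask 0x0000ff00 */
		masked_position(0x1002, 2);	/* shift 16, mask 0xffff0000 */
		return 0;
	}

The LR/SC loop in the real macros then operates on the containing word, using ~__mask to preserve the neighbouring bytes while retrying only on SC failure.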
diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h new file mode 100644 index 000000000000..07d9942682e0 --- /dev/null +++ b/arch/riscv/include/asm/exec.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_EXEC_H +#define __ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* __ASM_EXEC_H */ diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h index 6bcd80325dfc..182db7930edc 100644 --- a/arch/riscv/include/asm/fence.h +++ b/arch/riscv/include/asm/fence.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _ASM_RISCV_FENCE_H #define _ASM_RISCV_FENCE_H diff --git a/arch/riscv/include/asm/fpu.h b/arch/riscv/include/asm/fpu.h new file mode 100644 index 000000000000..91c04c244e12 --- /dev/null +++ b/arch/riscv/include/asm/fpu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 SiFive + */ + +#ifndef _ASM_RISCV_FPU_H +#define _ASM_RISCV_FPU_H + +#include <asm/switch_to.h> + +#define kernel_fpu_available() has_fpu() + +void kernel_fpu_begin(void); +void kernel_fpu_end(void); + +#endif /* ! _ASM_RISCV_FPU_H */ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 1276d7d9ca8b..2cddd79ff21b 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -11,7 +11,6 @@ #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER) #define HAVE_FUNCTION_GRAPH_FP_TEST #endif -#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR #define ARCH_SUPPORTS_FTRACE_OPS 1 #ifndef __ASSEMBLY__ @@ -124,20 +123,82 @@ struct dyn_ftrace; int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +#define arch_ftrace_get_regs(regs) NULL struct ftrace_ops; -struct ftrace_regs; +struct ftrace_regs { + unsigned long epc; + unsigned long ra; + unsigned long sp; + unsigned long s0; + unsigned long t1; + union { + unsigned long args[8]; + struct { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + }; + }; +}; + +static __always_inline unsigned long ftrace_regs_get_instruction_pointer(const struct ftrace_regs + *fregs) +{ + return fregs->epc; +} + +static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long pc) +{ + fregs->epc = pc; +} + +static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs) +{ + return fregs->sp; +} + +static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs, + unsigned int n) +{ + if (n < 8) + return fregs->args[n]; + return 0; +} + +static __always_inline unsigned long ftrace_regs_get_return_value(const struct ftrace_regs *fregs) +{ + return fregs->a0; +} + +static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs, + unsigned long ret) +{ + fregs->a0 = ret; +} + +static __always_inline void ftrace_override_function_with_return(struct ftrace_regs *fregs) +{ + fregs->epc = fregs->ra; +} + +int ftrace_regs_query_register_offset(const char *name); + void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); #define ftrace_graph_func ftrace_graph_func -static inline void __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +static inline 
void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) { - regs->t1 = addr; + fregs->t1 = addr; } -#define arch_ftrace_set_direct_caller(fregs, addr) \ - __arch_ftrace_set_direct_caller(&(fregs)->regs, addr) -#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index 22deb7a2a6ec..faf3624d8057 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -5,11 +5,11 @@ #include <asm/cacheflush.h> #include <asm/page.h> -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_dcache_clean, &page->flags); + clear_bit(PG_dcache_clean, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION bool arch_hugetlb_migration_supported(struct hstate *h); @@ -44,7 +44,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t pte, int dirty); #define __HAVE_ARCH_HUGE_PTEP_GET -pte_t huge_ptep_get(pte_t *ptep); +pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep); pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); #define arch_make_huge_pte arch_make_huge_pte diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index e17d0078a651..46d9de54179e 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -80,7 +80,19 @@ #define RISCV_ISA_EXT_ZFA 71 #define RISCV_ISA_EXT_ZTSO 72 #define RISCV_ISA_EXT_ZACAS 73 -#define RISCV_ISA_EXT_XANDESPMU 74 +#define RISCV_ISA_EXT_ZVE32X 74 +#define RISCV_ISA_EXT_ZVE32F 75 +#define RISCV_ISA_EXT_ZVE64X 76 +#define RISCV_ISA_EXT_ZVE64F 77 +#define RISCV_ISA_EXT_ZVE64D 78 +#define RISCV_ISA_EXT_ZIMOP 79 +#define RISCV_ISA_EXT_ZCA 80 +#define RISCV_ISA_EXT_ZCB 81 +#define RISCV_ISA_EXT_ZCD 82 +#define RISCV_ISA_EXT_ZCF 83 +#define RISCV_ISA_EXT_ZCMOP 84 +#define RISCV_ISA_EXT_ZAWRS 85 +#define RISCV_ISA_EXT_SVVPTC 86 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index 630507dff5ea..ffb9484531af 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include <uapi/asm/hwprobe.h> -#define RISCV_HWPROBE_MAX_KEY 6 +#define RISCV_HWPROBE_MAX_KEY 9 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index e27179b26086..9a913010cdd9 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -196,4 +196,8 @@ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(4)) +#define RISCV_PAUSE ".4byte 0x100000f" +#define ZAWRS_WRS_NTO ".4byte 0x00d00073" +#define ZAWRS_WRS_STO ".4byte 0x01d00073" + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h index 06e439eeef9a..09fde95a5e8f 100644 --- a/arch/riscv/include/asm/insn.h +++ b/arch/riscv/include/asm/insn.h @@ -145,7 +145,7 @@ /* parts of opcode for RVF, RVD and RVQ */ #define RVFDQ_FL_FS_WIDTH_OFF 12 -#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0) +#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0) #define RVFDQ_FL_FS_WIDTH_W 2 #define RVFDQ_FL_FS_WIDTH_D 3 #define RVFDQ_LS_FS_WIDTH_Q 4 diff --git a/arch/riscv/include/asm/irq.h 
b/arch/riscv/include/asm/irq.h index 8e10a94430a2..7b038f3b7cb0 100644 --- a/arch/riscv/include/asm/irq.h +++ b/arch/riscv/include/asm/irq.h @@ -12,8 +12,68 @@ #include <asm-generic/irq.h> +#define INVALID_CONTEXT UINT_MAX + +#ifdef CONFIG_SMP +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +#endif + void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); struct fwnode_handle *riscv_get_intc_hwnode(void); +#ifdef CONFIG_ACPI + +enum riscv_irqchip_type { + ACPI_RISCV_IRQCHIP_INTC = 0x00, + ACPI_RISCV_IRQCHIP_IMSIC = 0x01, + ACPI_RISCV_IRQCHIP_PLIC = 0x02, + ACPI_RISCV_IRQCHIP_APLIC = 0x03, +}; + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs); +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); +unsigned long acpi_rintc_index_to_hartid(u32 index); +unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); +unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); +unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); +int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); + +#else +static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + return 0; +} + +static inline unsigned long acpi_rintc_index_to_hartid(u32 index) +{ + return INVALID_HARTID; +} + +static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, + unsigned int ctxt_idx) +{ + return INVALID_HARTID; +} + +static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) +{ + return INVALID_CONTEXT; +} + +static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + return INVALID_CONTEXT; +} + +static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) +{ + return 0; +} + +#endif /* CONFIG_ACPI */ + #endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h index 08d4d6a5b7e9..6fd8cbfcfcc7 100644 --- a/arch/riscv/include/asm/irqflags.h +++ b/arch/riscv/include/asm/irqflags.h @@ -7,7 +7,6 @@ #ifndef _ASM_RISCV_IRQFLAGS_H #define _ASM_RISCV_IRQFLAGS_H -#include <asm/processor.h> #include <asm/csr.h> /* read interrupt enabled status */ diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h index 4a35d787c019..1c768d02bd0c 100644 --- a/arch/riscv/include/asm/jump_label.h +++ b/arch/riscv/include/asm/jump_label.h @@ -12,6 +12,8 @@ #include <linux/types.h> #include <asm/asm.h> +#define HAVE_JUMP_LABEL_BATCH + #define JUMP_LABEL_NOP_SIZE 4 static __always_inline bool arch_static_branch(struct static_key * const key, @@ -44,7 +46,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke " .option push \n\t" " .option norelax \n\t" " .option norvc \n\t" - "1: jal zero, %l[label] \n\t" + "1: j %l[label] \n\t" " .option pop \n\t" " .pushsection __jump_table, \"aw\" \n\t" " .align " RISCV_LGPTR " \n\t" diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h index 0b85e363e778..e6a0071bdb56 100644 --- a/arch/riscv/include/asm/kasan.h +++ b/arch/riscv/include/asm/kasan.h @@ -6,8 +6,6 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_KASAN - /* * The following comment was copied from arm64: * KASAN_SHADOW_START: beginning of the kernel virtual addresses. 
@@ -34,6 +32,8 @@ */ #define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK) #define KASAN_SHADOW_END MODULES_LOWEST_VADDR + +#ifdef CONFIG_KASAN #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) void kasan_init(void); diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h deleted file mode 100644 index 6dd1a4809ec1..000000000000 --- a/arch/riscv/include/asm/kvm_aia_aplic.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Western Digital Corporation or its affiliates. - * Copyright (C) 2022 Ventana Micro Systems Inc. - */ -#ifndef __KVM_RISCV_AIA_IMSIC_H -#define __KVM_RISCV_AIA_IMSIC_H - -#include <linux/bitops.h> - -#define APLIC_MAX_IDC BIT(14) -#define APLIC_MAX_SOURCE 1024 - -#define APLIC_DOMAINCFG 0x0000 -#define APLIC_DOMAINCFG_RDONLY 0x80000000 -#define APLIC_DOMAINCFG_IE BIT(8) -#define APLIC_DOMAINCFG_DM BIT(2) -#define APLIC_DOMAINCFG_BE BIT(0) - -#define APLIC_SOURCECFG_BASE 0x0004 -#define APLIC_SOURCECFG_D BIT(10) -#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff -#define APLIC_SOURCECFG_SM_MASK 0x00000007 -#define APLIC_SOURCECFG_SM_INACTIVE 0x0 -#define APLIC_SOURCECFG_SM_DETACH 0x1 -#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 -#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 -#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 -#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 - -#define APLIC_IRQBITS_PER_REG 32 - -#define APLIC_SETIP_BASE 0x1c00 -#define APLIC_SETIPNUM 0x1cdc - -#define APLIC_CLRIP_BASE 0x1d00 -#define APLIC_CLRIPNUM 0x1ddc - -#define APLIC_SETIE_BASE 0x1e00 -#define APLIC_SETIENUM 0x1edc - -#define APLIC_CLRIE_BASE 0x1f00 -#define APLIC_CLRIENUM 0x1fdc - -#define APLIC_SETIPNUM_LE 0x2000 -#define APLIC_SETIPNUM_BE 0x2004 - -#define APLIC_GENMSI 0x3000 - -#define APLIC_TARGET_BASE 0x3004 -#define APLIC_TARGET_HART_IDX_SHIFT 18 -#define APLIC_TARGET_HART_IDX_MASK 0x3fff -#define APLIC_TARGET_GUEST_IDX_SHIFT 12 -#define APLIC_TARGET_GUEST_IDX_MASK 0x3f -#define APLIC_TARGET_IPRIO_MASK 0xff -#define APLIC_TARGET_EIID_MASK 0x7ff - -#endif diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h deleted file mode 100644 index da5881d2bde0..000000000000 --- a/arch/riscv/include/asm/kvm_aia_imsic.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Western Digital Corporation or its affiliates. - * Copyright (C) 2022 Ventana Micro Systems Inc. 
- */ -#ifndef __KVM_RISCV_AIA_IMSIC_H -#define __KVM_RISCV_AIA_IMSIC_H - -#include <linux/types.h> -#include <asm/csr.h> - -#define IMSIC_MMIO_PAGE_SHIFT 12 -#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT) -#define IMSIC_MMIO_PAGE_LE 0x00 -#define IMSIC_MMIO_PAGE_BE 0x04 - -#define IMSIC_MIN_ID 63 -#define IMSIC_MAX_ID 2048 - -#define IMSIC_EIDELIVERY 0x70 - -#define IMSIC_EITHRESHOLD 0x72 - -#define IMSIC_EIP0 0x80 -#define IMSIC_EIP63 0xbf -#define IMSIC_EIPx_BITS 32 - -#define IMSIC_EIE0 0xc0 -#define IMSIC_EIE63 0xff -#define IMSIC_EIEx_BITS 32 - -#define IMSIC_FIRST IMSIC_EIDELIVERY -#define IMSIC_LAST IMSIC_EIE63 - -#define IMSIC_MMIO_SETIPNUM_LE 0x00 -#define IMSIC_MMIO_SETIPNUM_BE 0x04 - -#endif diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 484d04a92fa6..2e2254fd2a2a 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -43,6 +43,17 @@ KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6) +#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \ + BIT(EXC_BREAKPOINT) | \ + BIT(EXC_SYSCALL) | \ + BIT(EXC_INST_PAGE_FAULT) | \ + BIT(EXC_LOAD_PAGE_FAULT) | \ + BIT(EXC_STORE_PAGE_FAULT)) + +#define KVM_HIDELEG_DEFAULT (BIT(IRQ_VS_SOFT) | \ + BIT(IRQ_VS_TIMER) | \ + BIT(IRQ_VS_EXT)) + enum kvm_riscv_hfence_type { KVM_RISCV_HFENCE_UNKNOWN = 0, KVM_RISCV_HFENCE_GVMA_VMID_GPA, @@ -69,6 +80,7 @@ struct kvm_vcpu_stat { struct kvm_vcpu_stat_generic generic; u64 ecall_exit_stat; u64 wfi_exit_stat; + u64 wrs_exit_stat; u64 mmio_exit_user; u64 mmio_exit_kernel; u64 csr_exit_user; @@ -169,6 +181,7 @@ struct kvm_vcpu_csr { struct kvm_vcpu_config { u64 henvcfg; u64 hstateen0; + unsigned long hedeleg; }; struct kvm_vcpu_smstateen_csr { @@ -211,6 +224,7 @@ struct kvm_vcpu_arch { /* CPU context upon Guest VCPU reset */ struct kvm_cpu_context guest_reset_context; + spinlock_t reset_cntx_lock; /* CPU CSR context upon Guest VCPU reset */ struct kvm_vcpu_csr guest_reset_csr; @@ -252,8 +266,9 @@ struct kvm_vcpu_arch { /* Cache pages needed to program page tables with spinlock held */ struct kvm_mmu_memory_cache mmu_page_cache; - /* VCPU power-off state */ - bool power_off; + /* VCPU power state */ + struct kvm_mp_state mp_state; + spinlock_t mp_state_lock; /* Don't run the VCPU (blocked) */ bool pause; @@ -272,7 +287,6 @@ struct kvm_vcpu_arch { }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 @@ -374,8 +388,11 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu); bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask); +void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); +void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu); diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index 395518a1664e..1d85b6617508 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -10,6 +10,7 @@ #define __KVM_VCPU_RISCV_PMU_H #include 
<linux/perf/riscv_pmu.h> +#include <asm/kvm_vcpu_insn.h> #include <asm/sbi.h> #ifdef CONFIG_RISCV_PMU_SBI @@ -20,7 +21,7 @@ static_assert(RISCV_KVM_MAX_COUNTERS <= 64); struct kvm_fw_event { /* Current value of the event */ - unsigned long value; + u64 value; /* Event monitoring status */ bool started; @@ -36,6 +37,7 @@ struct kvm_pmc { bool started; /* Monitoring event ID */ unsigned long event_idx; + struct kvm_vcpu *vcpu; }; /* PMU data structure per vcpu */ @@ -50,6 +52,12 @@ struct kvm_pmu { bool init_done; /* Bit map of all the virtual counter used */ DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS); + /* Bit map of all the virtual counter overflown */ + DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS); + /* The address of the counter snapshot area (guest physical address) */ + gpa_t snapshot_addr; + /* The actual data of the snapshot */ + struct riscv_pmu_snapshot_data *sdata; }; #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context) @@ -57,11 +65,11 @@ struct kvm_pmu { #if defined(CONFIG_32BIT) #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #else #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #endif int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); @@ -82,9 +90,14 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba unsigned long ctr_mask, unsigned long flags, unsigned long eidx, u64 evtdata, struct kvm_vcpu_sbi_return *retdata); -int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, +int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low, + unsigned long saddr_high, unsigned long flags, + struct kvm_vcpu_sbi_return *retdata); void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); @@ -92,8 +105,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); struct kvm_pmu { }; +static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return KVM_INSN_CONTINUE_NEXT_SEPC; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } +} + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = 0, .count = 0, .func = NULL }, +{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy }, static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 355504b37f8e..c9e03e9da3dc 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,6 +19,8 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can 
resume. */ cpumask_t icache_stale_mask; + /* Force local icache flush on all migrations. */ + bool force_icache_flush; #endif #ifdef CONFIG_BINFMT_ELF_FDPIC unsigned long exec_fdpic_loadmap; @@ -26,8 +28,11 @@ typedef struct { #endif } mm_context_t; -void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, - phys_addr_t sz, pgprot_t prot); +#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK) +#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK) + +void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, + pgprot_t prot); #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_MMU_H */ diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h deleted file mode 100644 index fa17e01d9ab2..000000000000 --- a/arch/riscv/include/asm/mmzone.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_MMZONE_H -#define __ASM_MMZONE_H - -#ifdef CONFIG_NUMA - -#include <asm/numa.h> - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[(nid)]) - -#endif /* CONFIG_NUMA */ -#endif /* __ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 115ac98b8d72..32d308a3355f 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -37,7 +37,7 @@ * define the PAGE_OFFSET value for SV48 and SV39. */ #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL) -#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL) +#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL) #else #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #endif /* CONFIG_64BIT */ @@ -112,11 +112,13 @@ struct kernel_mapping { /* Offset between linear mapping virtual address and kernel load address */ unsigned long va_pa_offset; /* Offset between kernel mapping virtual address and kernel load address */ - unsigned long va_kernel_pa_offset; - unsigned long va_kernel_xip_pa_offset; #ifdef CONFIG_XIP_KERNEL + unsigned long va_kernel_xip_text_pa_offset; + unsigned long va_kernel_xip_data_pa_offset; uintptr_t xiprom; uintptr_t xiprom_sz; +#else + unsigned long va_kernel_pa_offset; #endif }; @@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base; #else void *linear_mapping_pa_to_va(unsigned long x); #endif + +#ifdef CONFIG_XIP_KERNEL #define kernel_mapping_pa_to_va(y) ({ \ unsigned long _y = (unsigned long)(y); \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ - (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \ - (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ + (_y < phys_ram_base) ? \ + (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \ + (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset)) +#endif + #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) #ifndef CONFIG_DEBUG_VIRTUAL @@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x); #else phys_addr_t linear_mapping_va_to_pa(unsigned long x); #endif + +#ifdef CONFIG_XIP_KERNEL #define kernel_mapping_va_to_pa(y) ({ \ unsigned long _y = (unsigned long)(y); \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ - (_y - kernel_map.va_kernel_xip_pa_offset) : \ - (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ + (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? 
\ + (_y - kernel_map.va_kernel_xip_text_pa_offset) : \ + (_y - kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset) +#endif #define __va_to_pa_nodebug(x) ({ \ unsigned long _x = x; \ @@ -188,6 +201,11 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); unsigned long kaslr_offset(void); +static __always_inline void *pfn_to_kaddr(unsigned long pfn) +{ + return __va(pfn << PAGE_SHIFT); +} + #endif /* __ASSEMBLY__ */ #define virt_addr_valid(vaddr) ({ \ diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h index e88b52d39eac..7228e266b9a1 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/patch.h @@ -6,9 +6,10 @@ #ifndef _ASM_RISCV_PATCH_H #define _ASM_RISCV_PATCH_H +int patch_insn_write(void *addr, const void *insn, size_t len); int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text_set_nosync(void *addr, u8 c, size_t len); -int patch_text(void *addr, u32 *insns, int ninsns); +int patch_text(void *addr, u32 *insns, size_t len); extern int riscv_patch_in_stop_machine; diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index deaf971253a2..f52264304f77 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -8,6 +8,7 @@ #define _ASM_RISCV_PGALLOC_H #include <linux/mm.h> +#include <asm/sbi.h> #include <asm/tlb.h> #ifdef CONFIG_MMU @@ -15,6 +16,14 @@ #define __HAVE_ARCH_PUD_FREE #include <asm-generic/pgalloc.h> +static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt) +{ + if (riscv_use_sbi_for_rfence()) + tlb_remove_ptdesc(tlb, pt); + else + tlb_remove_page_ptdesc(tlb, pt); +} + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { @@ -102,10 +111,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, struct ptdesc *ptdesc = virt_to_ptdesc(pud); pagetable_pud_dtor(ptdesc); - if (riscv_use_ipi_for_rfence()) - tlb_remove_page_ptdesc(tlb, ptdesc); - else - tlb_remove_ptdesc(tlb, ptdesc); + riscv_tlb_remove_ptdesc(tlb, ptdesc); } } @@ -139,12 +145,8 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr) { - if (pgtable_l5_enabled) { - if (riscv_use_ipi_for_rfence()) - tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d)); - else - tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); - } + if (pgtable_l5_enabled) + riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); } #endif /* __PAGETABLE_PMD_FOLDED */ @@ -176,10 +178,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, struct ptdesc *ptdesc = virt_to_ptdesc(pmd); pagetable_pmd_dtor(ptdesc); - if (riscv_use_ipi_for_rfence()) - tlb_remove_page_ptdesc(tlb, ptdesc); - else - tlb_remove_ptdesc(tlb, ptdesc); + riscv_tlb_remove_ptdesc(tlb, ptdesc); } #endif /* __PAGETABLE_PMD_FOLDED */ @@ -190,10 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, struct ptdesc *ptdesc = page_ptdesc(pte); pagetable_pte_dtor(ptdesc); - if (riscv_use_ipi_for_rfence()) - tlb_remove_page_ptdesc(tlb, ptdesc); - else - tlb_remove_ptdesc(tlb, ptdesc); + riscv_tlb_remove_ptdesc(tlb, ptdesc); } #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index 221a5c1ee287..0897dd99ab8d 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -16,8 +16,6 @@ 
extern bool pgtable_l5_enabled; #define PGDIR_SHIFT_L3 30 #define PGDIR_SHIFT_L4 39 #define PGDIR_SHIFT_L5 48 -#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3) - #define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \ (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)) /* Size of region mapped by a page global directory */ @@ -400,4 +398,24 @@ static inline struct page *pgd_page(pgd_t pgd) #define p4d_offset p4d_offset p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pte_devmap(pte_t pte); +static inline pte_t pmd_pte(pmd_t pmd); + +static inline int pmd_devmap(pmd_t pmd) +{ + return pte_devmap(pmd_pte(pmd)); +} + +static inline int pud_devmap(pud_t pud) +{ + return 0; +} + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif + #endif /* _ASM_RISCV_PGTABLE_64_H */ diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 179bd4afece4..a8f5205cea54 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -19,6 +19,7 @@ #define _PAGE_SOFT (3 << 8) /* Reserved for software */ #define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */ +#define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */ #define _PAGE_TABLE _PAGE_PRESENT /* diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 6afd6bb4882e..e79f15293492 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -55,6 +55,9 @@ #define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G) #define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) #define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#else +#define MODULES_VADDR VMALLOC_START +#define MODULES_END VMALLOC_END #endif /* @@ -104,13 +107,6 @@ #endif -#ifdef CONFIG_XIP_KERNEL -#define XIP_OFFSET SZ_32M -#define XIP_OFFSET_MASK (SZ_32M - 1) -#else -#define XIP_OFFSET 0 -#endif - #ifndef __ASSEMBLY__ #include <asm/page.h> @@ -139,11 +135,14 @@ #ifdef CONFIG_XIP_KERNEL #define XIP_FIXUP(addr) ({ \ + extern char _sdata[], _start[], _end[]; \ + uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_sdata - (uintptr_t)&_start; \ + uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_end - (uintptr_t)&_start; \ uintptr_t __a = (uintptr_t)(addr); \ - (__a >= CONFIG_XIP_PHYS_ADDR && \ - __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ - __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ - __a; \ + (__a >= __rom_start_data && __a < __rom_end_data) ? 
\ + __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \ }) #else #define XIP_FIXUP(addr) (addr) @@ -162,7 +161,7 @@ struct pt_alloc_ops { #endif }; -extern struct pt_alloc_ops pt_ops __initdata; +extern struct pt_alloc_ops pt_ops __meminitdata; #ifdef CONFIG_MMU /* Number of PGD entries that a user-mode program can use */ @@ -347,6 +346,19 @@ static inline int pte_present(pte_t pte) return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } +#define pte_accessible pte_accessible +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) +{ + if (pte_val(a) & _PAGE_PRESENT) + return true; + + if ((pte_val(a) & _PAGE_PROT_NONE) && + atomic_read(&mm->tlb_flush_pending)) + return true; + + return false; +} + static inline int pte_none(pte_t pte) { return (pte_val(pte) == 0); @@ -387,6 +399,13 @@ static inline int pte_special(pte_t pte) return pte_val(pte) & _PAGE_SPECIAL; } +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t pte) +{ + return pte_val(pte) & _PAGE_DEVMAP; +} +#endif + /* static inline pte_t pte_rdprotect(pte_t pte) */ static inline pte_t pte_wrprotect(pte_t pte) @@ -428,6 +447,11 @@ static inline pte_t pte_mkspecial(pte_t pte) return __pte(pte_val(pte) | _PAGE_SPECIAL); } +static inline pte_t pte_mkdevmap(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DEVMAP); +} + static inline pte_t pte_mkhuge(pte_t pte) { return pte; @@ -473,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) + : : : : svvptc); + /* * The kernel assumes that TLBs don't cache invalid entries, but * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a @@ -482,12 +509,19 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, */ while (nr--) local_flush_tlb_page(address + nr * PAGE_SIZE); + +svvptc:; + /* + * Svvptc guarantees that the new valid pte will be visible within + * a bounded timeframe, so when the uarch does not cache invalid + * entries, we don't have to do anything. 
+ */ } #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) @@ -648,6 +682,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) #define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT) +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT); @@ -717,6 +752,11 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) return pte_pmd(pte_mkdirty(pmd_pte(pmd))); } +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + return pte_pmd(pte_mkdevmap(pmd_pte(pmd))); +} + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { @@ -876,7 +916,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) */ #ifdef CONFIG_64BIT #define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) -#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2) +#define TASK_SIZE_MAX LONG_MAX #ifdef CONFIG_COMPAT #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) @@ -888,7 +928,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) #else #define TASK_SIZE FIXADDR_START -#define TASK_SIZE_MIN TASK_SIZE #endif #else /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 0faf5f161f1e..efa1b3519b23 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -14,36 +14,14 @@ #include <asm/ptrace.h> -/* - * addr is a hint to the maximum userspace address that mmap should provide, so - * this macro needs to return the largest address space available so that - * mmap_end < addr, being mmap_end the top of that address space. - * See Documentation/arch/riscv/vm-layout.rst for more details. - */ #define arch_get_mmap_end(addr, len, flags) \ ({ \ - unsigned long mmap_end; \ - typeof(addr) _addr = (addr); \ - if ((_addr) == 0 || is_compat_task() || \ - ((_addr + len) > BIT(VA_BITS - 1))) \ - mmap_end = STACK_TOP_MAX; \ - else \ - mmap_end = (_addr + len); \ - mmap_end; \ + STACK_TOP_MAX; \ }) #define arch_get_mmap_base(addr, base) \ ({ \ - unsigned long mmap_base; \ - typeof(addr) _addr = (addr); \ - typeof(base) _base = (base); \ - unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \ - if ((_addr) == 0 || is_compat_task() || \ - ((_addr + len) > BIT(VA_BITS - 1))) \ - mmap_base = (_base); \ - else \ - mmap_base = (_addr + len) - rnd_gap; \ - mmap_base; \ + base; \ }) #ifdef CONFIG_64BIT @@ -57,6 +35,12 @@ #define STACK_TOP DEFAULT_MAP_WINDOW +#ifdef CONFIG_MMU +#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0) +#else +#define user_max_virt_addr() 0 +#endif /* CONFIG_MMU */ + /* * This decides where the kernel will search for a free chunk of vm * space during mmap's. @@ -68,6 +52,7 @@ #endif #ifndef __ASSEMBLY__ +#include <linux/cpumask.h> struct task_struct; struct pt_regs; @@ -122,6 +107,12 @@ struct thread_struct { struct __riscv_v_ext_state vstate; unsigned long align_ctl; struct __riscv_v_ext_state kernel_vstate; +#ifdef CONFIG_SMP + /* Flush the icache on migration */ + bool force_icache_flush; + /* A forced icache flush is not needed if migrating to the previous cpu. 
*/
+	unsigned int prev_cpu;
+#endif
 };
 
 /* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -183,6 +174,9 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 #define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
 #define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
 
+#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2)
+extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_PROCESSOR_H */
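riscv_set_icache_flush_ctx() is the kernel side of a new prctl() pair that lets a process opt into the force_icache_flush/prev_cpu migration bookkeeping above. A minimal userspace sketch; the PR_RISCV_* constant names are assumed to come from the matching uapi <linux/prctl.h> change, which is outside this header diff:

#include <sys/prctl.h>

static int enable_migration_icache_flush(void)
{
	/* Flush the icache whenever a thread of this process migrates to
	 * another hart, so code that writes instructions only needs a
	 * local fence.i afterwards. */
	return prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX,
		     PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
}

diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6e68f8dff76b..98f631b051db 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <linux/jump_label.h>
 
 #ifdef CONFIG_RISCV_SBI
 enum sbi_ext_id {
@@ -131,6 +132,8 @@ enum sbi_ext_pmu_fid {
 	SBI_EXT_PMU_COUNTER_START,
 	SBI_EXT_PMU_COUNTER_STOP,
 	SBI_EXT_PMU_COUNTER_FW_READ,
+	SBI_EXT_PMU_COUNTER_FW_READ_HI,
+	SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
 };
 
 union sbi_pmu_ctr_info {
@@ -147,8 +150,16 @@ union sbi_pmu_ctr_info {
 	};
 };
 
+/* Data structure to contain the pmu snapshot data */
+struct riscv_pmu_snapshot_data {
+	u64 ctr_overflow_mask;
+	u64 ctr_values[64];
+	u64 reserved[447];
+};
+
 #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
 #define RISCV_PMU_RAW_EVENT_IDX 0x20000
+#define RISCV_PLAT_FW_EVENT 0xFFFF
 
 /** General pmu event codes specified in SBI PMU extension */
 enum sbi_pmu_hw_generic_events_t {
@@ -232,20 +243,22 @@ enum sbi_pmu_ctr_type {
 #define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
 
 /* Flags defined for config matching function */
-#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
-#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
-#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2)
-#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3)
-#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4)
-#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
-#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
-#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
+#define SBI_PMU_CFG_FLAG_SKIP_MATCH BIT(0)
+#define SBI_PMU_CFG_FLAG_CLEAR_VALUE BIT(1)
+#define SBI_PMU_CFG_FLAG_AUTO_START BIT(2)
+#define SBI_PMU_CFG_FLAG_SET_VUINH BIT(3)
+#define SBI_PMU_CFG_FLAG_SET_VSINH BIT(4)
+#define SBI_PMU_CFG_FLAG_SET_UINH BIT(5)
+#define SBI_PMU_CFG_FLAG_SET_SINH BIT(6)
+#define SBI_PMU_CFG_FLAG_SET_MINH BIT(7)
 
 /* Flags defined for counter start function */
-#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
+#define SBI_PMU_START_FLAG_SET_INIT_VALUE BIT(0)
+#define SBI_PMU_START_FLAG_INIT_SNAPSHOT BIT(1)
 
 /* Flags defined for counter stop function */
-#define SBI_PMU_STOP_FLAG_RESET (1 << 0)
+#define SBI_PMU_STOP_FLAG_RESET BIT(0)
+#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT BIT(1)
 
 enum sbi_ext_dbcn_fid {
 	SBI_EXT_DBCN_CONSOLE_WRITE = 0,
@@ -266,7 +279,7 @@ struct sbi_sta_struct {
 	u8 pad[47];
 } __packed;
 
-#define SBI_STA_SHMEM_DISABLE -1
+#define SBI_SHMEM_DISABLE -1
 
 /* SBI spec version fields */
 #define SBI_SPEC_VERSION_DEFAULT 0x1
@@ -284,6 +297,7 @@ struct sbi_sta_struct {
 #define SBI_ERR_ALREADY_AVAILABLE -6
 #define SBI_ERR_ALREADY_STARTED -7
 #define SBI_ERR_ALREADY_STOPPED -8
+#define SBI_ERR_NO_SHMEM -9
 
 extern unsigned long sbi_spec_version;
 struct sbiret {
@@ -292,10 +306,13 @@ struct sbiret {
 };
 
 void sbi_init(void);
-struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
-			unsigned long arg1, unsigned long arg2,
-			unsigned long arg3, unsigned long arg4,
-			unsigned long arg5);
+long __sbi_base_ecall(int fid);
+struct sbiret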
__sbi_ecall(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + int fid, int ext); +#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5) \ + __sbi_ecall(a0, a1, a2, a3, a4, a5, f, e) #ifdef CONFIG_RISCV_SBI_V01 void sbi_console_putchar(int ch); @@ -355,11 +372,27 @@ static inline unsigned long sbi_minor_version(void) static inline unsigned long sbi_mk_version(unsigned long major, unsigned long minor) { - return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << - SBI_SPEC_VERSION_MAJOR_SHIFT) | minor; + return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT) + | (minor & SBI_SPEC_VERSION_MINOR_MASK); } -int sbi_err_map_linux_errno(int err); +static inline int sbi_err_map_linux_errno(int err) +{ + switch (err) { + case SBI_SUCCESS: + return 0; + case SBI_ERR_DENIED: + return -EPERM; + case SBI_ERR_INVALID_PARAM: + return -EINVAL; + case SBI_ERR_INVALID_ADDRESS: + return -EFAULT; + case SBI_ERR_NOT_SUPPORTED: + case SBI_ERR_FAILURE: + default: + return -ENOTSUPP; + }; +} extern bool sbi_debug_console_available; int sbi_debug_console_write(const char *bytes, unsigned int num_bytes); @@ -370,13 +403,19 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1 static inline void sbi_init(void) {} #endif /* CONFIG_RISCV_SBI */ +unsigned long riscv_get_mvendorid(void); +unsigned long riscv_get_marchid(void); unsigned long riscv_cached_mvendorid(unsigned int cpu_id); unsigned long riscv_cached_marchid(unsigned int cpu_id); unsigned long riscv_cached_mimpid(unsigned int cpu_id); #if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI) +DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence); +#define riscv_use_sbi_for_rfence() \ + static_branch_unlikely(&riscv_sbi_for_rfence) void sbi_ipi_init(void); #else +static inline bool riscv_use_sbi_for_rfence(void) { return false; } static inline void sbi_ipi_init(void) { } #endif diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index ec11001c3fe0..ab92fc84e1fc 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page); #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) #ifdef CONFIG_64BIT #define SECTION_ALIGN (1 << 21) #else diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h deleted file mode 100644 index 956ae0a01bad..000000000000 --- a/arch/riscv/include/asm/signal.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - -#ifndef __ASM_SIGNAL_H -#define __ASM_SIGNAL_H - -#include <uapi/asm/signal.h> -#include <uapi/asm/ptrace.h> - -asmlinkage __visible -void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags); - -#endif diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index 0d555847cde6..7ac80e9f2288 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -49,12 +49,7 @@ void riscv_ipi_disable(void); bool riscv_ipi_have_virq_range(void); /* Set the IPI interrupt numbers for arch (called by irqchip drivers) */ -void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence); - -/* Check if we can use IPIs for remote FENCEs */ -DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence); -#define riscv_use_ipi_for_rfence() \ - static_branch_unlikely(&riscv_ipi_for_rfence) +void riscv_ipi_set_virq_range(int virq, int nr); /* Check other CPUs 
stop or not */ bool smp_crash_stop_failed(void); @@ -104,16 +99,10 @@ static inline bool riscv_ipi_have_virq_range(void) return false; } -static inline void riscv_ipi_set_virq_range(int virq, int nr, - bool use_for_rfence) +static inline void riscv_ipi_set_virq_range(int virq, int nr) { } -static inline bool riscv_use_ipi_for_rfence(void) -{ - return false; -} - #endif /* CONFIG_SMP */ #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP) diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h index 63acaecc3374..2f901a410586 100644 --- a/arch/riscv/include/asm/sparsemem.h +++ b/arch/riscv/include/asm/sparsemem.h @@ -7,7 +7,7 @@ #ifdef CONFIG_64BIT #define MAX_PHYSMEM_BITS 56 #else -#define MAX_PHYSMEM_BITS 34 +#define MAX_PHYSMEM_BITS 32 #endif /* CONFIG_64BIT */ #define SECTION_SIZE_BITS 27 #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index a96b1fea24fe..5ba77f60bf0b 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t); extern asmlinkage void *memmove(void *, const void *, size_t); extern asmlinkage void *__memmove(void *, const void *, size_t); +#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) #define __HAVE_ARCH_STRCMP extern asmlinkage int strcmp(const char *cs, const char *ct); @@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *); #define __HAVE_ARCH_STRNCMP extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count); +#endif /* For those files which don't want to check by kasan. */ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h index 4718096fa5e3..4ffb022b097f 100644 --- a/arch/riscv/include/asm/suspend.h +++ b/arch/riscv/include/asm/suspend.h @@ -13,7 +13,6 @@ struct suspend_context { /* Saved and restored by low-level functions */ struct pt_regs regs; /* Saved and restored by high-level functions */ - unsigned long scratch; unsigned long envcfg; unsigned long tvec; unsigned long ie; diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 7efdb0584d47..7594df37cc9f 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -8,6 +8,7 @@ #include <linux/jump_label.h> #include <linux/sched/task_stack.h> +#include <linux/mm_types.h> #include <asm/vector.h> #include <asm/cpufeature.h> #include <asm/processor.h> @@ -72,14 +73,36 @@ static __always_inline bool has_fpu(void) { return false; } extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); +static inline bool switch_to_should_flush_icache(struct task_struct *task) +{ +#ifdef CONFIG_SMP + bool stale_mm = task->mm && task->mm->context.force_icache_flush; + bool stale_thread = task->thread.force_icache_flush; + bool thread_migrated = smp_processor_id() != task->thread.prev_cpu; + + return thread_migrated && (stale_mm || stale_thread); +#else + return false; +#endif +} + +#ifdef CONFIG_SMP +#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id()) +#else +#define __set_prev_cpu(thread) +#endif + #define switch_to(prev, next, last) \ do { \ struct task_struct *__prev = (prev); \ struct task_struct *__next = (next); \ + __set_prev_cpu(__prev->thread); \ if (has_fpu()) \ __switch_to_fpu(__prev, __next); \ if (has_vector()) \ __switch_to_vector(__prev, __next); \ + if 
(switch_to_should_flush_icache(__next)) \ + local_flush_icache_all(); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/include/asm/syscall_table.h b/arch/riscv/include/asm/syscall_table.h new file mode 100644 index 000000000000..0c2d61782813 --- /dev/null +++ b/arch/riscv/include/asm/syscall_table.h @@ -0,0 +1,7 @@ +#include <asm/bitsperlong.h> + +#if __BITS_PER_LONG == 64 +#include <asm/syscall_table_64.h> +#else +#include <asm/syscall_table_32.h> +#endif diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 5d473343634b..ebe52f96da34 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -10,6 +10,7 @@ #include <asm/page.h> #include <linux/const.h> +#include <linux/sizes.h> /* thread information allocation */ #define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER @@ -60,6 +61,13 @@ struct thread_info { void *scs_base; void *scs_sp; #endif +#ifdef CONFIG_64BIT + /* + * Used in handle_exception() to save a0, a1 and a2 before knowing if we + * can access the kernel stack. + */ + unsigned long a0, a1, a2; +#endif }; #ifdef CONFIG_SHADOW_CALL_STACK @@ -111,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE) -#define _TIF_WORK_MASK \ - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) - #endif /* _ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 4112cc8d1d69..72e559934952 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -15,24 +15,34 @@ #define FLUSH_TLB_NO_ASID ((unsigned long)-1) #ifdef CONFIG_MMU -extern unsigned long asid_mask; - static inline void local_flush_tlb_all(void) { __asm__ __volatile__ ("sfence.vma" : : : "memory"); } +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + if (asid != FLUSH_TLB_NO_ASID) + ALT_SFENCE_VMA_ASID(asid); + else + local_flush_tlb_all(); +} + /* Flush one page from local TLB */ static inline void local_flush_tlb_page(unsigned long addr) { - ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); + ALT_SFENCE_VMA_ADDR(addr); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + if (asid != FLUSH_TLB_NO_ASID) + ALT_SFENCE_VMA_ADDR_ASID(addr, asid); + else + local_flush_tlb_page(addr); } -#else /* CONFIG_MMU */ -#define local_flush_tlb_all() do { } while (0) -#define local_flush_tlb_page(addr) do { } while (0) -#endif /* CONFIG_MMU */ -#if defined(CONFIG_SMP) && defined(CONFIG_MMU) void flush_tlb_all(void); void flush_tlb_mm(struct mm_struct *mm); void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, @@ -55,27 +65,9 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, void arch_flush_tlb_batched_pending(struct mm_struct *mm); void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); -#else /* CONFIG_SMP && CONFIG_MMU */ - -#define flush_tlb_all() local_flush_tlb_all() -#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr) - -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - local_flush_tlb_all(); -} - -/* Flush a range of kernel pages */ -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - local_flush_tlb_all(); -} - -#define 
flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+extern unsigned long tlb_flush_all_threshold;
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
index 61183688bdd5..fe1a8bf6902d 100644
--- a/arch/riscv/include/asm/topology.h
+++ b/arch/riscv/include/asm/topology.h
@@ -4,6 +4,10 @@
 
 #include <linux/arch_topology.h>
 
+#ifdef CONFIG_NUMA
+#include <asm/numa.h>
+#endif
+
 /* Replace task scheduler's default frequency-invariant accounting */
 #define arch_scale_freq_tick		topology_scale_freq_tick
 #define arch_set_freq_scale		topology_set_freq_scale
diff --git a/arch/riscv/include/asm/trace.h b/arch/riscv/include/asm/trace.h
new file mode 100644
index 000000000000..6151cee5450c
--- /dev/null
+++ b/arch/riscv/include/asm/trace.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM riscv
+
+#if !defined(_TRACE_RISCV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RISCV_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(sbi_call,
+	TP_PROTO(int ext, int fid),
+	TP_ARGS(ext, fid),
+	TP_CONDITION(ext != SBI_EXT_HSM),
+
+	TP_STRUCT__entry(
+		__field(int, ext)
+		__field(int, fid)
+	),
+
+	TP_fast_assign(
+		__entry->ext = ext;
+		__entry->fid = fid;
+	),
+
+	TP_printk("ext=0x%x fid=%d", __entry->ext, __entry->fid)
+);
+
+TRACE_EVENT_CONDITION(sbi_return,
+	TP_PROTO(int ext, long error, long value),
+	TP_ARGS(ext, error, value),
+	TP_CONDITION(ext != SBI_EXT_HSM),
+
+	TP_STRUCT__entry(
+		__field(long, error)
+		__field(long, value)
+	),
+
+	TP_fast_assign(
+		__entry->error = error;
+		__entry->value = value;
+	),
+
+	TP_printk("error=%ld value=0x%lx", __entry->error, __entry->value)
+);
+
+#endif /* _TRACE_RISCV_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
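These two tracepoints are presumably emitted from the SBI call path itself; a hedged sketch of how __sbi_ecall() would bracket the ecall with them (the actual call site lives in arch/riscv/kernel/sbi.c, outside this diff):

#define CREATE_TRACE_POINTS
#include <asm/trace.h>

struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext)
{
	struct sbiret ret;
	register uintptr_t a0 asm("a0") = arg0;
	register uintptr_t a1 asm("a1") = arg1;
	register uintptr_t a2 asm("a2") = arg2;
	register uintptr_t a3 asm("a3") = arg3;
	register uintptr_t a4 asm("a4") = arg4;
	register uintptr_t a5 asm("a5") = arg5;
	register uintptr_t a6 asm("a6") = fid;
	register uintptr_t a7 asm("a7") = ext;

	/* HSM calls are filtered out by TP_CONDITION above. */
	trace_sbi_call(ext, fid);

	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		     : "memory");
	ret.error = a0;
	ret.value = a1;

	trace_sbi_return(ext, ret.error, ret.value);
	return ret;
}

Note how the argument order of __sbi_ecall() (fid and ext last) matches the a6/a7 register assignment of the SBI calling convention, which is what the sbi_ecall() wrapper macro in this series reorders for.

diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 221630bdbd07..e6d904fa67c5 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -3,11 +3,6 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times.
- */
-
 #define __ARCH_WANT_SYS_CLONE
 
 #ifdef CONFIG_COMPAT
@@ -21,6 +16,14 @@
 #define __ARCH_WANT_COMPAT_FADVISE64_64
 #endif
 
+#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#define __ARCH_WANT_MEMFD_SECRET
+
 #include <uapi/asm/unistd.h>
 
 #define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 96b65a5396df..8f383f05a290 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -5,6 +5,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/barrier.h>
+#include <asm/insn-def.h>
 
 static inline void cpu_relax(void)
 {
@@ -14,16 +15,11 @@ static inline void cpu_relax(void)
 	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
 #endif
 
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
 	/*
 	 * Reduce instruction retirement.
	 * This assumes the PC changes.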
*/ - __asm__ __volatile__ ("pause"); -#else - /* Encoding of the pause instruction */ - __asm__ __volatile__ (".4byte 0x100000F"); -#endif + __asm__ __volatile__ (RISCV_PAUSE); barrier(); } diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index 731dcd0ed4de..be7d309cca8a 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -37,7 +37,7 @@ static inline u32 riscv_v_flags(void) static __always_inline bool has_vector(void) { - return riscv_has_extension_unlikely(RISCV_ISA_EXT_v); + return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X); } static inline void __riscv_v_vstate_clean(struct pt_regs *regs) @@ -91,7 +91,7 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src { asm volatile ( ".option push\n\t" - ".option arch, +v\n\t" + ".option arch, +zve32x\n\t" "vsetvl x0, %2, %1\n\t" ".option pop\n\t" "csrw " __stringify(CSR_VSTART) ", %0\n\t" @@ -109,7 +109,7 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, __vstate_csr_save(save_to); asm volatile ( ".option push\n\t" - ".option arch, +v\n\t" + ".option arch, +zve32x\n\t" "vsetvli %0, x0, e8, m8, ta, ma\n\t" "vse8.v v0, (%1)\n\t" "add %1, %1, %0\n\t" @@ -131,7 +131,7 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_ riscv_v_enable(); asm volatile ( ".option push\n\t" - ".option arch, +v\n\t" + ".option arch, +zve32x\n\t" "vsetvli %0, x0, e8, m8, ta, ma\n\t" "vle8.v v0, (%1)\n\t" "add %1, %1, %0\n\t" @@ -153,7 +153,7 @@ static inline void __riscv_v_vstate_discard(void) riscv_v_enable(); asm volatile ( ".option push\n\t" - ".option arch, +v\n\t" + ".option arch, +zve32x\n\t" "vsetvli %0, x0, e8, m8, ta, ma\n\t" "vmv.v.i v0, -1\n\t" "vmv.v.i v8, -1\n\t" diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h new file mode 100644 index 000000000000..7437304a71b9 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2024 Rivos, Inc + */ + +#ifndef _ASM_VENDOR_EXTENSIONS_H +#define _ASM_VENDOR_EXTENSIONS_H + +#include <asm/cpufeature.h> + +#include <linux/array_size.h> +#include <linux/types.h> + +/* + * The extension keys of each vendor must be strictly less than this value. + */ +#define RISCV_ISA_VENDOR_EXT_MAX 32 + +struct riscv_isavendorinfo { + DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX); +}; + +struct riscv_isa_vendor_ext_data_list { + bool is_initialized; + const size_t ext_data_count; + const struct riscv_isa_ext_data *ext_data; + struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS]; + struct riscv_isavendorinfo all_harts_isa_bitmap; +}; + +extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[]; + +extern const size_t riscv_isa_vendor_ext_list_size; + +/* + * The alternatives need some way of distinguishing between vendor extensions + * and errata. Incrementing all of the vendor extension keys so they are at + * least 0x8000 accomplishes that. 
+ */
+#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE	0x8000
+
+#define VENDOR_EXT_ALL_CPUS			-1
+
+bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit);
+#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext)	\
+	__riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext)
+#define riscv_isa_vendor_extension_available(vendor, ext)	\
+	__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \
+					       RISCV_ISA_VENDOR_EXT_##ext)
+
+static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor,
+							      const unsigned long ext)
+{
+	if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+		return false;
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+		return __riscv_has_extension_likely(vendor,
+						    ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
+	return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext);
+}
+
+static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor,
+								const unsigned long ext)
+{
+	if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+		return false;
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+		return __riscv_has_extension_unlikely(vendor,
+						      ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
+	return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext);
+}
+
+static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor,
+								  int cpu, const unsigned long ext)
+{
+	if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+		return false;
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+	    __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE))
+		return true;
+
+	return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
+}
+
+static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor,
+								    int cpu,
+								    const unsigned long ext)
+{
+	if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+		return false;
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+	    __riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE))
+		return true;
+
+	return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
+}
+
+#endif /* _ASM_VENDOR_EXTENSIONS_H */
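A short usage sketch of the query API above, assuming ANDES_VENDOR_ID from <asm/vendorid_list.h> (already in the tree, not shown in this diff) and the XANDESPMU key from the Andes header that follows:

#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/andes.h>
#include <asm/vendorid_list.h>

static bool have_andes_pmu(void)
{
	/* True only if every hart advertises the XAndesPMU extension. */
	return riscv_isa_vendor_extension_available(ANDES_VENDOR_ID, XANDESPMU);
}

diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h
new file mode 100644
index 000000000000..7bb2fc43438f
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/andes.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H
+
+#include <asm/vendor_extensions.h>
+
+#include <linux/types.h>
+
+#define RISCV_ISA_VENDOR_EXT_XANDESPMU		0
+
+/*
+ * Extension keys should be strictly less than max.
+ * It is safe to increment this when necessary.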
+ */
+#define RISCV_ISA_VENDOR_EXT_MAX_ANDES		32
+
+extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes;
+
+#endif
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 51f6dfe19745..fefe94dc98e2 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 #ifndef _ASM_RISCV_VMALLOC_H
 #define _ASM_RISCV_VMALLOC_H
 
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index b65bf6306f69..f3d56299bc22 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -9,18 +9,36 @@
 
 #ifdef CONFIG_XIP_KERNEL
 .macro XIP_FIXUP_OFFSET reg
-	REG_L t0, _xip_fixup
+	/* Fix-up address in Flash into address in RAM early during boot before
+	 * MMU is up. Because generated code "thinks" data is in Flash, but it
+	 * is actually in RAM (actually data is also in Flash, but Flash is
+	 * read-only, thus we need to use the data residing in RAM).
+	 *
+	 * The start of data in Flash is _sdata and the start of data in RAM is
+	 * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this:
+	 * reg += CONFIG_PHYS_RAM_BASE - _sdata
+	 */
+	li t0, CONFIG_PHYS_RAM_BASE
 	add \reg, \reg, t0
+	la t0, _sdata
+	sub \reg, \reg, t0
 .endm
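A worked example of that fix-up arithmetic, using made-up addresses (both values below are hypothetical, chosen only for illustration):

/* Suppose CONFIG_PHYS_RAM_BASE = 0x80000000 and _sdata links (in Flash)
 * at 0x21400000. For a pointer reg = 0x21400010:
 *
 *   li  t0, CONFIG_PHYS_RAM_BASE    // t0  = 0x80000000
 *   add reg, reg, t0                // reg = 0xA1400010
 *   la  t0, _sdata                  // t0  = 0x21400000
 *   sub reg, reg, t0                // reg = 0x80000010
 *
 * i.e. reg now points at the RAM copy of the variable, 0x10 bytes into
 * the data section, exactly reg += CONFIG_PHYS_RAM_BASE - _sdata.
 */

 .macro XIP_FIXUP_FLASH_OFFSET reg
+	/* In linker script, at the transition from read-only section to
+	 * writable section, the VMA is increased while LMA remains the same.
+	 * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is
+	 * changed)
+	 *
+	 * Consequently, early during boot before MMU is up, the generated code
+	 * reads the "writable" section at wrong addresses, because VMA is used
+	 * by compiler to generate code, but the data is located in Flash using
+	 * LMA.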
+ */ + la t0, _sdata + sub \reg, \reg, t0 la t0, __data_loc - REG_L t1, _xip_phys_offset - sub \reg, \reg, t1 add \reg, \reg, t0 .endm - -_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET -_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET #else .macro XIP_FIXUP_OFFSET reg .endm diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild index f66554cd5c45..89ac01faa5ae 100644 --- a/arch/riscv/include/uapi/asm/Kbuild +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -1 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += unistd_32.h +syscall-y += unistd_64.h diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 2902f68dc913..1e153cda57db 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -59,6 +59,19 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) #define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) +#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) +#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) +#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) +#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) +#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) +#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) +#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) +#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) +#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) +#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) +#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) +#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) +#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) @@ -67,6 +80,14 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 +#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 +#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 +#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED 1 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ /* Flags */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index b1c503c2959c..e97db3296456 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -167,6 +167,14 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZFA, KVM_RISCV_ISA_EXT_ZTSO, KVM_RISCV_ISA_EXT_ZACAS, + KVM_RISCV_ISA_EXT_SSCOFPMF, + KVM_RISCV_ISA_EXT_ZIMOP, + KVM_RISCV_ISA_EXT_ZCA, + KVM_RISCV_ISA_EXT_ZCB, + KVM_RISCV_ISA_EXT_ZCD, + KVM_RISCV_ISA_EXT_ZCF, + KVM_RISCV_ISA_EXT_ZCMOP, + KVM_RISCV_ISA_EXT_ZAWRS, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h index 950ab3fd4409..81896bbbf727 100644 --- a/arch/riscv/include/uapi/asm/unistd.h +++ b/arch/riscv/include/uapi/asm/unistd.h @@ -14,41 +14,10 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. 
 */
+#include <asm/bitsperlong.h>
 
-#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#endif /* __LP64__ */
-
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
-
-/*
- * Allows the instruction cache to be flushed from userspace. Despite RISC-V
- * having a direct 'fence.i' instruction available to userspace (which we
- * can't trap!), that's not actually viable when running on Linux because the
- * kernel might schedule a process on another hart. There is no way for
- * userspace to handle this without invoking the kernel (as it doesn't know the
- * thread->hart mappings), so we've defined a RISC-V specific system call to
- * flush the instruction cache.
- *
- * __NR_riscv_flush_icache is defined to flush the instruction cache over an
- * address range, with the flush applying to either all threads or just the
- * caller. We don't currently do anything with the address range, that's just
- * in there for forwards compatibility.
- */
-#ifndef __NR_riscv_flush_icache
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-#endif
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-/*
- * Allows userspace to query the kernel for CPU architecture and
- * microarchitecture details across a given set of CPUs.
- */
-#ifndef __NR_riscv_hwprobe
-#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14)
+#if __BITS_PER_LONG == 64
+#include <asm/unistd_64.h>
+#else
+#include <asm/unistd_32.h>
 #endif
-__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe)
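With the syscall numbers now coming from the generated unistd_32.h/unistd_64.h headers, userspace still reaches riscv_hwprobe the same way; a sketch of probing the new misaligned-scalar-perf key through the raw syscall (glibc may or may not provide a wrapper):

#include <asm/hwprobe.h>
#include <asm/unistd.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
	};

	/* One key/value pair, all CPUs (NULL/0 cpuset), no flags. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	/* Prints one of the RISCV_HWPROBE_MISALIGNED_SCALAR_* values. */
	printf("misaligned scalar perf: %llu\n",
	       (unsigned long long)pair.value);
	return 0;
}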