summaryrefslogtreecommitdiff
path: root/lib/sbi
diff options
context:
space:
mode:
authorAtish Patra <atish.patra@wdc.com>2019-06-19 00:54:01 +0300
committerAnup Patel <anup.patel@wdc.com>2019-06-19 07:18:51 +0300
commit749b0b093242a4c27f7c4f66121afd7852b2de48 (patch)
tree90c46fe6e750ddf08dd57347ddd498571792353a /lib/sbi
parenta5b37bd7d275fc65d8fd0b19bd3a08edfe4e6096 (diff)
downloadopensbi-749b0b093242a4c27f7c4f66121afd7852b2de48.tar.xz
lib: Move sbi core library to lib/sbi
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Acked-by: Anup Patel <anup.patel@wdc.com>
Diffstat (limited to 'lib/sbi')
-rw-r--r--lib/sbi/objects.mk32
-rw-r--r--lib/sbi/riscv_asm.c272
-rw-r--r--lib/sbi/riscv_atomic.c222
-rw-r--r--lib/sbi/riscv_hardfp.S171
-rw-r--r--lib/sbi/riscv_locks.c45
-rw-r--r--lib/sbi/riscv_unpriv.c145
-rw-r--r--lib/sbi/sbi_console.c383
-rw-r--r--lib/sbi/sbi_ecall.c107
-rw-r--r--lib/sbi/sbi_emulate_csr.c137
-rw-r--r--lib/sbi/sbi_fifo.c184
-rw-r--r--lib/sbi/sbi_hart.c366
-rw-r--r--lib/sbi/sbi_illegal_insn.c131
-rw-r--r--lib/sbi/sbi_init.c178
-rw-r--r--lib/sbi/sbi_ipi.c153
-rw-r--r--lib/sbi/sbi_misaligned_ldst.c191
-rw-r--r--lib/sbi/sbi_scratch.c59
-rw-r--r--lib/sbi/sbi_system.c45
-rw-r--r--lib/sbi/sbi_timer.c68
-rw-r--r--lib/sbi/sbi_tlb.c227
-rw-r--r--lib/sbi/sbi_trap.c207
20 files changed, 3323 insertions, 0 deletions
diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk
new file mode 100644
index 0000000..817e805
--- /dev/null
+++ b/lib/sbi/objects.mk
@@ -0,0 +1,32 @@
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+#
+# Authors:
+# Anup Patel <anup.patel@wdc.com>
+#
+
+# Low-level RISC-V support objects: CSR helpers, atomics, hard-float
+# register save/restore, spinlocks and unprivileged memory access.
+libsbi-objs-y += riscv_asm.o
+libsbi-objs-y += riscv_atomic.o
+libsbi-objs-y += riscv_hardfp.o
+libsbi-objs-y += riscv_locks.o
+libsbi-objs-y += riscv_unpriv.o
+
+# SBI core library objects.
+libsbi-objs-y += sbi_console.o
+libsbi-objs-y += sbi_ecall.o
+libsbi-objs-y += sbi_emulate_csr.o
+libsbi-objs-y += sbi_fifo.o
+libsbi-objs-y += sbi_hart.o
+libsbi-objs-y += sbi_illegal_insn.o
+libsbi-objs-y += sbi_init.o
+libsbi-objs-y += sbi_ipi.o
+libsbi-objs-y += sbi_misaligned_ldst.o
+libsbi-objs-y += sbi_scratch.o
+libsbi-objs-y += sbi_system.o
+libsbi-objs-y += sbi_timer.o
+libsbi-objs-y += sbi_tlb.o
+libsbi-objs-y += sbi_trap.o
+
+# External Libraries to include
+PLATFORM_INCLUDE_LIBC=y
diff --git a/lib/sbi/riscv_asm.c b/lib/sbi/riscv_asm.c
new file mode 100644
index 0000000..e0c8889
--- /dev/null
+++ b/lib/sbi/riscv_asm.c
@@ -0,0 +1,272 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_error.h>
+
+/*
+ * Read a PMP CSR whose number is only known at runtime.
+ *
+ * The csr_read() primitive needs a compile-time constant CSR number,
+ * so each supported PMP CSR gets its own switch case. An unsupported
+ * csr_num silently reads as 0.
+ */
+unsigned long csr_read_num(int csr_num)
+{
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ case CSR_PMPCFG0:
+ ret = csr_read(CSR_PMPCFG0);
+ break;
+ case CSR_PMPCFG1:
+ ret = csr_read(CSR_PMPCFG1);
+ break;
+ case CSR_PMPCFG2:
+ ret = csr_read(CSR_PMPCFG2);
+ break;
+ case CSR_PMPCFG3:
+ ret = csr_read(CSR_PMPCFG3);
+ break;
+ case CSR_PMPADDR0:
+ ret = csr_read(CSR_PMPADDR0);
+ break;
+ case CSR_PMPADDR1:
+ ret = csr_read(CSR_PMPADDR1);
+ break;
+ case CSR_PMPADDR2:
+ ret = csr_read(CSR_PMPADDR2);
+ break;
+ case CSR_PMPADDR3:
+ ret = csr_read(CSR_PMPADDR3);
+ break;
+ case CSR_PMPADDR4:
+ ret = csr_read(CSR_PMPADDR4);
+ break;
+ case CSR_PMPADDR5:
+ ret = csr_read(CSR_PMPADDR5);
+ break;
+ case CSR_PMPADDR6:
+ ret = csr_read(CSR_PMPADDR6);
+ break;
+ case CSR_PMPADDR7:
+ ret = csr_read(CSR_PMPADDR7);
+ break;
+ case CSR_PMPADDR8:
+ ret = csr_read(CSR_PMPADDR8);
+ break;
+ case CSR_PMPADDR9:
+ ret = csr_read(CSR_PMPADDR9);
+ break;
+ case CSR_PMPADDR10:
+ ret = csr_read(CSR_PMPADDR10);
+ break;
+ case CSR_PMPADDR11:
+ ret = csr_read(CSR_PMPADDR11);
+ break;
+ case CSR_PMPADDR12:
+ ret = csr_read(CSR_PMPADDR12);
+ break;
+ case CSR_PMPADDR13:
+ ret = csr_read(CSR_PMPADDR13);
+ break;
+ case CSR_PMPADDR14:
+ ret = csr_read(CSR_PMPADDR14);
+ break;
+ case CSR_PMPADDR15:
+ ret = csr_read(CSR_PMPADDR15);
+ break;
+ default:
+ break;
+ };
+
+ return ret;
+}
+
+/*
+ * Write a PMP CSR whose number is only known at runtime.
+ * An unsupported csr_num is silently ignored (see csr_read_num()).
+ */
+void csr_write_num(int csr_num, unsigned long val)
+{
+ switch (csr_num) {
+ case CSR_PMPCFG0:
+ csr_write(CSR_PMPCFG0, val);
+ break;
+ case CSR_PMPCFG1:
+ csr_write(CSR_PMPCFG1, val);
+ break;
+ case CSR_PMPCFG2:
+ csr_write(CSR_PMPCFG2, val);
+ break;
+ case CSR_PMPCFG3:
+ csr_write(CSR_PMPCFG3, val);
+ break;
+ case CSR_PMPADDR0:
+ csr_write(CSR_PMPADDR0, val);
+ break;
+ case CSR_PMPADDR1:
+ csr_write(CSR_PMPADDR1, val);
+ break;
+ case CSR_PMPADDR2:
+ csr_write(CSR_PMPADDR2, val);
+ break;
+ case CSR_PMPADDR3:
+ csr_write(CSR_PMPADDR3, val);
+ break;
+ case CSR_PMPADDR4:
+ csr_write(CSR_PMPADDR4, val);
+ break;
+ case CSR_PMPADDR5:
+ csr_write(CSR_PMPADDR5, val);
+ break;
+ case CSR_PMPADDR6:
+ csr_write(CSR_PMPADDR6, val);
+ break;
+ case CSR_PMPADDR7:
+ csr_write(CSR_PMPADDR7, val);
+ break;
+ case CSR_PMPADDR8:
+ csr_write(CSR_PMPADDR8, val);
+ break;
+ case CSR_PMPADDR9:
+ csr_write(CSR_PMPADDR9, val);
+ break;
+ case CSR_PMPADDR10:
+ csr_write(CSR_PMPADDR10, val);
+ break;
+ case CSR_PMPADDR11:
+ csr_write(CSR_PMPADDR11, val);
+ break;
+ case CSR_PMPADDR12:
+ csr_write(CSR_PMPADDR12, val);
+ break;
+ case CSR_PMPADDR13:
+ csr_write(CSR_PMPADDR13, val);
+ break;
+ case CSR_PMPADDR14:
+ csr_write(CSR_PMPADDR14, val);
+ break;
+ case CSR_PMPADDR15:
+ csr_write(CSR_PMPADDR15, val);
+ break;
+ default:
+ break;
+ };
+}
+
+/*
+ * Count trailing zero bits of x.
+ * NOTE: the loop never terminates when x == 0, so callers must
+ * guarantee x != 0 (pmp_get() does, via its addr != -1UL check).
+ */
+static unsigned long ctz(unsigned long x)
+{
+ unsigned long ret = 0;
+
+ while (!(x & 1UL)) {
+ ret++;
+ x = x >> 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Program PMP entry n to cover a naturally-aligned power-of-2 region.
+ *
+ * @param n PMP entry index (0 .. PMP_COUNT-1)
+ * @param prot permission bits; the address-matching (A) field is
+ * filled in here (NA4 for a PMP_SHIFT-sized region, else NAPOT)
+ * @param addr region base; bits below log2len are masked off
+ * @param log2len log2 of region size (PMP_SHIFT .. __riscv_xlen)
+ * @return 0 on success, SBI_EINVAL on bad arguments, SBI_ENOTSUPP on
+ * an unsupported xlen
+ */
+int pmp_set(unsigned int n, unsigned long prot, unsigned long addr,
+ unsigned long log2len)
+{
+ int pmpcfg_csr, pmpcfg_shift, pmpaddr_csr;
+ unsigned long cfgmask, pmpcfg;
+ unsigned long addrmask, pmpaddr;
+
+ /* check parameters */
+ if (n >= PMP_COUNT || log2len > __riscv_xlen || log2len < PMP_SHIFT)
+ return SBI_EINVAL;
+
+ /* calculate PMP register and offset */
+#if __riscv_xlen == 32
+ pmpcfg_csr = CSR_PMPCFG0 + (n >> 2);
+ pmpcfg_shift = (n & 3) << 3;
+#elif __riscv_xlen == 64
+ /* On RV64 only even-numbered pmpcfg CSRs exist; 8 entries each. */
+ pmpcfg_csr = (CSR_PMPCFG0 + (n >> 2)) & ~1;
+ pmpcfg_shift = (n & 7) << 3;
+#else
+ pmpcfg_csr = -1;
+ pmpcfg_shift = -1;
+#endif
+ pmpaddr_csr = CSR_PMPADDR0 + n;
+ if (pmpcfg_csr < 0 || pmpcfg_shift < 0)
+ return SBI_ENOTSUPP;
+
+ /* encode PMP config */
+ prot |= (log2len == PMP_SHIFT) ? PMP_A_NA4 : PMP_A_NAPOT;
+ /* NOTE(review): 0xff is a plain int; on RV64 pmpcfg_shift can reach
+ * 56, making this shift undefined behavior — should be 0xffUL. */
+ cfgmask = ~(0xff << pmpcfg_shift);
+ pmpcfg = (csr_read_num(pmpcfg_csr) & cfgmask);
+ pmpcfg |= ((prot << pmpcfg_shift) & ~cfgmask);
+
+ /* encode PMP address (NAPOT: low bits = ones up to size, minus one) */
+ if (log2len == PMP_SHIFT) {
+ pmpaddr = (addr >> PMP_SHIFT);
+ } else {
+ if (log2len == __riscv_xlen) {
+ pmpaddr = -1UL;
+ } else {
+ addrmask = (1UL << (log2len - PMP_SHIFT)) - 1;
+ pmpaddr = ((addr >> PMP_SHIFT) & ~addrmask);
+ pmpaddr |= (addrmask >> 1);
+ }
+ }
+
+ /* write csrs */
+ csr_write_num(pmpaddr_csr, pmpaddr);
+ csr_write_num(pmpcfg_csr, pmpcfg);
+
+ return 0;
+}
+
+/*
+ * Read back PMP entry n, decoding the NAPOT address encoding.
+ * Output parameters are zeroed first. Entries whose A field is not
+ * NAPOT (OFF/TOR/NA4) are reported with log2len == PMP_SHIFT.
+ *
+ * @return 0 on success, SBI_EINVAL on bad arguments, SBI_ENOTSUPP on
+ * an unsupported xlen
+ */
+int pmp_get(unsigned int n, unsigned long *prot_out, unsigned long *addr_out,
+ unsigned long *log2len_out)
+{
+ int pmpcfg_csr, pmpcfg_shift, pmpaddr_csr;
+ unsigned long cfgmask, pmpcfg, prot;
+ unsigned long t1, addr, log2len;
+
+ /* check parameters */
+ if (n >= PMP_COUNT || !prot_out || !addr_out || !log2len_out)
+ return SBI_EINVAL;
+ *prot_out = *addr_out = *log2len_out = 0;
+
+ /* calculate PMP register and offset */
+#if __riscv_xlen == 32
+ pmpcfg_csr = CSR_PMPCFG0 + (n >> 2);
+ pmpcfg_shift = (n & 3) << 3;
+#elif __riscv_xlen == 64
+ pmpcfg_csr = (CSR_PMPCFG0 + (n >> 2)) & ~1;
+ pmpcfg_shift = (n & 7) << 3;
+#else
+ pmpcfg_csr = -1;
+ pmpcfg_shift = -1;
+#endif
+ pmpaddr_csr = CSR_PMPADDR0 + n;
+ if (pmpcfg_csr < 0 || pmpcfg_shift < 0)
+ return SBI_ENOTSUPP;
+
+ /* decode PMP config */
+ /* NOTE(review): same int-shift UB as pmp_set() — should be 0xffUL. */
+ cfgmask = (0xff << pmpcfg_shift);
+ pmpcfg = csr_read_num(pmpcfg_csr) & cfgmask;
+ prot = pmpcfg >> pmpcfg_shift;
+
+ /* decode PMP address */
+ if ((prot & PMP_A) == PMP_A_NAPOT) {
+ addr = csr_read_num(pmpaddr_csr);
+ if (addr == -1UL) {
+ /* All-ones encodes the whole address space. */
+ addr = 0;
+ log2len = __riscv_xlen;
+ } else {
+ /* Trailing ones in pmpaddr give the region size. */
+ t1 = ctz(~addr);
+ addr = (addr & ~((1UL << t1) - 1)) << PMP_SHIFT;
+ log2len = (t1 + PMP_SHIFT + 1);
+ }
+ } else {
+ addr = csr_read_num(pmpaddr_csr) << PMP_SHIFT;
+ log2len = PMP_SHIFT;
+ }
+
+ /* return details */
+ *prot_out = prot;
+ *addr_out = addr;
+ *log2len_out = log2len;
+
+ return 0;
+}
diff --git a/lib/sbi/riscv_atomic.c b/lib/sbi/riscv_atomic.c
new file mode 100644
index 0000000..34bf522
--- /dev/null
+++ b/lib/sbi/riscv_atomic.c
@@ -0,0 +1,222 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_types.h>
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/sbi_bits.h>
+
+/* Plain load of the counter; rmb() orders it before later reads. */
+long atomic_read(atomic_t *atom)
+{
+ long ret = atom->counter;
+ rmb();
+ return ret;
+}
+
+/* Plain store of the counter; wmb() orders it before later writes. */
+void atomic_write(atomic_t *atom, long value)
+{
+ atom->counter = value;
+ wmb();
+}
+
+/*
+ * Atomically add value to the counter and return the new value.
+ * NOTE(review): uses a 32-bit AMO (amoadd.w) on a long counter —
+ * confirm atomic_t's counter field is 32-bit on RV64.
+ */
+long atomic_add_return(atomic_t *atom, long value)
+{
+ long ret;
+
+ __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
+ : "+A"(atom->counter), "=r"(ret)
+ : "r"(value)
+ : "memory");
+
+ return ret + value;
+}
+
+/*
+ * Atomically subtract value from the counter and return the new
+ * value (implemented as an atomic add of -value).
+ */
+long atomic_sub_return(atomic_t *atom, long value)
+{
+ long ret;
+
+ __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
+ : "+A"(atom->counter), "=r"(ret)
+ : "r"(-value)
+ : "memory");
+
+ return ret - value;
+}
+
+/*
+ * LR/SC based exchange: store new into *ptr, returning the old value.
+ * NOTE(review): sizes other than 4/8 fall into the default case and
+ * leave __ret uninitialized.
+ */
+#define __xchg(ptr, new, size) \
+ ({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__("0: lr.w %0, %2\n" \
+ " sc.w.rl %1, %z3, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__("0: lr.d %0, %2\n" \
+ " sc.d.rl %1, %z3, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__new) \
+ : "memory"); \
+ break; \
+ default: \
+ break; \
+ } \
+ __ret; \
+ })
+
+/* Type-preserving wrapper around __xchg(). */
+#define xchg(ptr, n) \
+ ({ \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __xchg((ptr), _n_, sizeof(*(ptr))); \
+ })
+
+/*
+ * LR/SC based compare-and-exchange: if *ptr == old, store new.
+ * Always returns the value observed in *ptr (old on success).
+ * NOTE(review): same uninitialized-__ret hazard as __xchg() for
+ * unsupported sizes.
+ */
+#define __cmpxchg(ptr, old, new, size) \
+ ({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__("0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__old), "rJ"(__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__("0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__old), "rJ"(__new) \
+ : "memory"); \
+ break; \
+ default: \
+ break; \
+ } \
+ __ret; \
+ })
+
+/* Type-preserving wrapper around __cmpxchg(). */
+#define cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), _o_, _n_, sizeof(*(ptr))); \
+ })
+
+/*
+ * Compare-and-exchange on the counter; returns the observed value.
+ * Uses the GCC builtin when the A extension is available, otherwise
+ * falls back to the LR/SC macro above.
+ */
+long arch_atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
+{
+#ifdef __riscv_atomic
+ return __sync_val_compare_and_swap(&atom->counter, oldval, newval);
+#else
+ return cmpxchg(&atom->counter, oldval, newval);
+#endif
+}
+
+long arch_atomic_xchg(atomic_t *atom, long newval)
+{
+ /* Atomically set new value and return old value. */
+#ifdef __riscv_atomic
+ /*
+ * The name of GCC built-in macro __sync_lock_test_and_set()
+ * is misleading. A more appropriate name for GCC built-in
+ * macro would be __sync_val_exchange().
+ */
+ return __sync_lock_test_and_set(&atom->counter, newval);
+#else
+ return xchg(&atom->counter, newval);
+#endif
+}
+
+unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
+ unsigned int newval)
+{
+ /* Atomically set new value and return old value. */
+#ifdef __riscv_atomic
+ /*
+ * The name of GCC built-in macro __sync_lock_test_and_set()
+ * is misleading. A more appropriate name for GCC built-in
+ * macro would be __sync_val_exchange().
+ */
+ return __sync_lock_test_and_set(ptr, newval);
+#else
+ return xchg(ptr, newval);
+#endif
+}
+
+/* Pick word-sized AMO mnemonic for bit operations on unsigned long. */
+#if (BITS_PER_LONG == 64)
+#define __AMO(op) "amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+/*
+ * Apply an AMO (or/and) with mask mod(BIT_MASK(nr)) to the word
+ * holding bit nr; expands to the OLD word value, not just that bit.
+ */
+#define __atomic_op_bit_ord(op, mod, nr, addr, ord) \
+ ({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
+ : "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
+ : "r"(mod(__mask)) \
+ : "memory"); \
+ __res; \
+ })
+
+#define __atomic_op_bit(op, mod, nr, addr) \
+ __atomic_op_bit_ord(op, mod, nr, addr, .aqrl)
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+/*
+ * Atomically set bit nr. NOTE(review): the returned int is the old
+ * word truncated to int, not the old bit value — confirm callers
+ * only use it as a raw AMO result.
+ */
+inline int atomic_raw_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __atomic_op_bit(or, __NOP, nr, addr);
+}
+
+/* Atomically clear bit nr (same return-value caveat as above). */
+inline int atomic_raw_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __atomic_op_bit(and, __NOT, nr, addr);
+}
+
+inline int atomic_set_bit(int nr, atomic_t *atom)
+{
+ return atomic_raw_set_bit(nr, (unsigned long *)&atom->counter);
+}
+
+inline int atomic_clear_bit(int nr, atomic_t *atom)
+{
+ return atomic_raw_clear_bit(nr, (unsigned long *)&atom->counter);
+}
diff --git a/lib/sbi/riscv_hardfp.S b/lib/sbi/riscv_hardfp.S
new file mode 100644
index 0000000..f363908
--- /dev/null
+++ b/lib/sbi/riscv_hardfp.S
@@ -0,0 +1,171 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+/* Only built when the toolchain targets a hard-float ABI. */
+#ifdef __riscv_flen
+
+#if __riscv_flen != 64
+# error single-float only is not supported
+#endif
+
+/*
+ * Accessors to move one FP register to/from a0. Each expands to two
+ * instructions and returns via jr t0. On RV32, f64 values are
+ * transferred through memory at 0(a0) since they do not fit in a0.
+ */
+#define get_f32(which) fmv.x.s a0, which; jr t0
+#define put_f32(which) fmv.s.x which, a0; jr t0
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; jr t0
+# define put_f64(which) fmv.d.x which, a0; jr t0
+#else
+# define get_f64(which) fsd which, 0(a0); jr t0
+# define put_f64(which) fld which, 0(a0); jr t0
+#endif
+
+/*
+ * Each table below has one fixed-size slot per FP register;
+ * .option norvc keeps every slot uncompressed (8 bytes), so a caller
+ * can presumably jump to label + reg_index * 8 with the return
+ * address in t0 — confirm against the callers of *_f32_reg/*_f64_reg.
+ */
+ .text
+ .option norvc
+ .globl get_f32_reg
+ get_f32_reg:
+ get_f32(f0)
+ get_f32(f1)
+ get_f32(f2)
+ get_f32(f3)
+ get_f32(f4)
+ get_f32(f5)
+ get_f32(f6)
+ get_f32(f7)
+ get_f32(f8)
+ get_f32(f9)
+ get_f32(f10)
+ get_f32(f11)
+ get_f32(f12)
+ get_f32(f13)
+ get_f32(f14)
+ get_f32(f15)
+ get_f32(f16)
+ get_f32(f17)
+ get_f32(f18)
+ get_f32(f19)
+ get_f32(f20)
+ get_f32(f21)
+ get_f32(f22)
+ get_f32(f23)
+ get_f32(f24)
+ get_f32(f25)
+ get_f32(f26)
+ get_f32(f27)
+ get_f32(f28)
+ get_f32(f29)
+ get_f32(f30)
+ get_f32(f31)
+
+ .text
+ .globl put_f32_reg
+ put_f32_reg:
+ put_f32(f0)
+ put_f32(f1)
+ put_f32(f2)
+ put_f32(f3)
+ put_f32(f4)
+ put_f32(f5)
+ put_f32(f6)
+ put_f32(f7)
+ put_f32(f8)
+ put_f32(f9)
+ put_f32(f10)
+ put_f32(f11)
+ put_f32(f12)
+ put_f32(f13)
+ put_f32(f14)
+ put_f32(f15)
+ put_f32(f16)
+ put_f32(f17)
+ put_f32(f18)
+ put_f32(f19)
+ put_f32(f20)
+ put_f32(f21)
+ put_f32(f22)
+ put_f32(f23)
+ put_f32(f24)
+ put_f32(f25)
+ put_f32(f26)
+ put_f32(f27)
+ put_f32(f28)
+ put_f32(f29)
+ put_f32(f30)
+ put_f32(f31)
+
+ .text
+ .globl get_f64_reg
+ get_f64_reg:
+ get_f64(f0)
+ get_f64(f1)
+ get_f64(f2)
+ get_f64(f3)
+ get_f64(f4)
+ get_f64(f5)
+ get_f64(f6)
+ get_f64(f7)
+ get_f64(f8)
+ get_f64(f9)
+ get_f64(f10)
+ get_f64(f11)
+ get_f64(f12)
+ get_f64(f13)
+ get_f64(f14)
+ get_f64(f15)
+ get_f64(f16)
+ get_f64(f17)
+ get_f64(f18)
+ get_f64(f19)
+ get_f64(f20)
+ get_f64(f21)
+ get_f64(f22)
+ get_f64(f23)
+ get_f64(f24)
+ get_f64(f25)
+ get_f64(f26)
+ get_f64(f27)
+ get_f64(f28)
+ get_f64(f29)
+ get_f64(f30)
+ get_f64(f31)
+
+ .text
+ .globl put_f64_reg
+ put_f64_reg:
+ put_f64(f0)
+ put_f64(f1)
+ put_f64(f2)
+ put_f64(f3)
+ put_f64(f4)
+ put_f64(f5)
+ put_f64(f6)
+ put_f64(f7)
+ put_f64(f8)
+ put_f64(f9)
+ put_f64(f10)
+ put_f64(f11)
+ put_f64(f12)
+ put_f64(f13)
+ put_f64(f14)
+ put_f64(f15)
+ put_f64(f16)
+ put_f64(f17)
+ put_f64(f18)
+ put_f64(f19)
+ put_f64(f20)
+ put_f64(f21)
+ put_f64(f22)
+ put_f64(f23)
+ put_f64(f24)
+ put_f64(f25)
+ put_f64(f26)
+ put_f64(f27)
+ put_f64(f28)
+ put_f64(f29)
+ put_f64(f30)
+ put_f64(f31)
+
+#endif
diff --git a/lib/sbi/riscv_locks.c b/lib/sbi/riscv_locks.c
new file mode 100644
index 0000000..4d1d9c0
--- /dev/null
+++ b/lib/sbi/riscv_locks.c
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_locks.h>
+
+/*
+ * Racy snapshot of the lock state: 0 if currently unlocked, 1 if
+ * held. Only a hint — the state may change immediately after.
+ */
+int spin_lock_check(spinlock_t *lock)
+{
+ return (lock->lock == __RISCV_SPIN_UNLOCKED) ? 0 : 1;
+}
+
+/*
+ * Try to take the lock once with an atomic swap (acquire ordering).
+ * Returns non-zero on success, 0 if the lock was already held.
+ */
+int spin_trylock(spinlock_t *lock)
+{
+ int tmp = 1, busy;
+
+ __asm__ __volatile__(
+ " amoswap.w %0, %2, %1\n" RISCV_ACQUIRE_BARRIER
+ : "=r"(busy), "+A"(lock->lock)
+ : "r"(tmp)
+ : "memory");
+
+ return !busy;
+}
+
+/*
+ * Spin until the lock is acquired. Test-and-test-and-set: spin on a
+ * plain read first so the AMO is only attempted when the lock looks
+ * free, reducing cache-line contention.
+ */
+void spin_lock(spinlock_t *lock)
+{
+ while (1) {
+ if (spin_lock_check(lock))
+ continue;
+
+ if (spin_trylock(lock))
+ break;
+ }
+}
+
+/* Release the lock with a store-release. */
+void spin_unlock(spinlock_t *lock)
+{
+ __smp_store_release(&lock->lock, __RISCV_SPIN_UNLOCKED);
+}
diff --git a/lib/sbi/riscv_unpriv.c b/lib/sbi/riscv_unpriv.c
new file mode 100644
index 0000000..aa353c0
--- /dev/null
+++ b/lib/sbi/riscv_unpriv.c
@@ -0,0 +1,145 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_bits.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_scratch.h>
+
+/*
+ * Generate load_<type>(): load from addr with S/U translation by
+ * setting MSTATUS.MPRV around the access. trap info is registered in
+ * the scratch area beforehand so that a fault during the access can
+ * be recovered; trap->ilen is the byte length of the emitted access
+ * instruction (2 when a compressed form is used) so the handler can
+ * skip it. __mstatus is pinned to a fixed register — presumably so
+ * the trap handler can restore MSTATUS; confirm against the trap
+ * handler. On a fault, val stays 0 and trap->cause is set.
+ */
+#define DEFINE_UNPRIVILEGED_LOAD_FUNCTION(type, insn, insnlen) \
+ type load_##type(const type *addr, \
+ struct sbi_scratch *scratch, \
+ struct unpriv_trap *trap) \
+ { \
+ register ulong __mstatus asm("a2"); \
+ type val = 0; \
+ trap->ilen = insnlen; \
+ trap->cause = 0; \
+ trap->tval = 0; \
+ sbi_hart_set_trap_info(scratch, trap); \
+ asm volatile( \
+ "csrrs %0, " STR(CSR_MSTATUS) ", %3\n" \
+ #insn " %1, %2\n" \
+ "csrw " STR(CSR_MSTATUS) ", %0" \
+ : "+&r"(__mstatus), "=&r"(val) \
+ : "m"(*addr), "r"(MSTATUS_MPRV)); \
+ sbi_hart_set_trap_info(scratch, NULL); \
+ return val; \
+ }
+
+/* Store counterpart of the macro above; same MPRV/trap protocol. */
+#define DEFINE_UNPRIVILEGED_STORE_FUNCTION(type, insn, insnlen) \
+ void store_##type(type *addr, type val, \
+ struct sbi_scratch *scratch, \
+ struct unpriv_trap *trap) \
+ { \
+ register ulong __mstatus asm("a3"); \
+ trap->ilen = insnlen; \
+ trap->cause = 0; \
+ trap->tval = 0; \
+ sbi_hart_set_trap_info(scratch, trap); \
+ asm volatile( \
+ "csrrs %0, " STR(CSR_MSTATUS) ", %3\n" \
+ #insn " %1, %2\n" \
+ "csrw " STR(CSR_MSTATUS) ", %0" \
+ : "+&r"(__mstatus) \
+ : "r"(val), "m"(*addr), "r"(MSTATUS_MPRV)); \
+ sbi_hart_set_trap_info(scratch, NULL); \
+ }
+
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu, 4)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu, 4)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb, 4)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh, 4)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw, 2)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u8, sb, 4)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u16, sh, 4)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u32, sw, 2)
+#if __riscv_xlen == 64
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu, 4)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld, 2)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u64, sd, 2)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld, 2)
+#else
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw, 2)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw, 2)
+
+/*
+ * On RV32 a 64-bit unprivileged load is split into two 32-bit loads
+ * (low word first); returns 0 if either half faults.
+ */
+u64 load_u64(const u64 *addr,
+ struct sbi_scratch *scratch, struct unpriv_trap *trap)
+{
+ u64 ret = load_u32((u32 *)addr, scratch, trap);
+
+ if (trap->cause)
+ return 0;
+ ret |= ((u64)load_u32((u32 *)addr + 1, scratch, trap) << 32);
+ if (trap->cause)
+ return 0;
+
+ return ret;
+}
+
+/*
+ * On RV32 a 64-bit unprivileged store is split into two 32-bit
+ * stores; stops after the first faulting half (not atomic).
+ */
+void store_u64(u64 *addr, u64 val,
+ struct sbi_scratch *scratch, struct unpriv_trap *trap)
+{
+ store_u32((u32 *)addr, val, scratch, trap);
+ if (trap->cause)
+ return;
+
+ store_u32((u32 *)addr + 1, val >> 32, scratch, trap);
+ if (trap->cause)
+ return;
+}
+#endif
+
+/*
+ * Fetch the instruction at S/U-mode virtual address mepc using
+ * MPRV|MXR (execute-only pages readable). With the C extension,
+ * a halfword-aligned fetch reads 16 bits first and only fetches the
+ * upper parcel when the low parcel indicates a 32-bit instruction,
+ * avoiding a fault past a page boundary. The MSTATUS value active
+ * during the fetch is returned through *mstatus when non-NULL.
+ */
+ulong get_insn(ulong mepc, ulong *mstatus)
+{
+ register ulong __mepc asm("a2") = mepc;
+ register ulong __mstatus asm("a3");
+ ulong val;
+#ifndef __riscv_compressed
+ asm("csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n"
+#if __riscv_xlen == 64
+ STR(LWU) " %[insn], (%[addr])\n"
+#else
+ STR(LW) " %[insn], (%[addr])\n"
+#endif
+ "csrw " STR(CSR_MSTATUS) ", %[mstatus]"
+ : [mstatus] "+&r"(__mstatus), [insn] "=&r"(val)
+ : [mprv] "r"(MSTATUS_MPRV | MSTATUS_MXR), [addr] "r"(__mepc));
+#else
+ ulong rvc_mask = 3, tmp;
+ asm("csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n"
+ "and %[tmp], %[addr], 2\n"
+ "bnez %[tmp], 1f\n"
+#if __riscv_xlen == 64
+ STR(LWU) " %[insn], (%[addr])\n"
+#else
+ STR(LW) " %[insn], (%[addr])\n"
+#endif
+ "and %[tmp], %[insn], %[rvc_mask]\n"
+ "beq %[tmp], %[rvc_mask], 2f\n"
+ "sll %[insn], %[insn], %[xlen_minus_16]\n"
+ "srl %[insn], %[insn], %[xlen_minus_16]\n"
+ "j 2f\n"
+ "1:\n"
+ "lhu %[insn], (%[addr])\n"
+ "and %[tmp], %[insn], %[rvc_mask]\n"
+ "bne %[tmp], %[rvc_mask], 2f\n"
+ "lhu %[tmp], 2(%[addr])\n"
+ "sll %[tmp], %[tmp], 16\n"
+ "add %[insn], %[insn], %[tmp]\n"
+ "2: csrw " STR(CSR_MSTATUS) ", %[mstatus]"
+ : [mstatus] "+&r"(__mstatus), [insn] "=&r"(val), [tmp] "=&r"(tmp)
+ : [mprv] "r"(MSTATUS_MPRV | MSTATUS_MXR), [addr] "r"(__mepc),
+ [rvc_mask] "r"(rvc_mask), [xlen_minus_16] "i"(__riscv_xlen - 16));
+#endif
+ if (mstatus)
+ *mstatus = __mstatus;
+ return val;
+}
diff --git a/lib/sbi/sbi_console.c b/lib/sbi/sbi_console.c
new file mode 100644
index 0000000..30fb2c8
--- /dev/null
+++ b/lib/sbi/sbi_console.c
@@ -0,0 +1,383 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_console.h>
+#include <sbi/riscv_locks.h>
+
+/* Platform hooks used for console I/O; set by sbi_console_init(). */
+static const struct sbi_platform *console_plat = NULL;
+/* Serializes multi-character output (sbi_puts/sbi_printf). */
+static spinlock_t console_out_lock = SPIN_LOCK_INITIALIZER;
+
+/* True for printable ASCII (0x20..0x7e) plus \f, \r, \n, \t. */
+bool sbi_isprintable(char c)
+{
+ if (((31 < c) && (c < 127)) || (c == '\f') || (c == '\r') ||
+ (c == '\n') || (c == '\t')) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* Read one character from the platform console (may be negative). */
+int sbi_getc(void)
+{
+ return sbi_platform_console_getc(console_plat);
+}
+
+/* Write one character, expanding '\n' to "\r\n". */
+void sbi_putc(char ch)
+{
+ if (ch == '\n')
+ sbi_platform_console_putc(console_plat, '\r');
+ sbi_platform_console_putc(console_plat, ch);
+}
+
+/* Write a NUL-terminated string under the console output lock. */
+void sbi_puts(const char *str)
+{
+ spin_lock(&console_out_lock);
+ while (*str) {
+ sbi_putc(*str);
+ str++;
+ }
+ spin_unlock(&console_out_lock);
+}
+
+/*
+ * Read characters into s until endchar, a negative read, or
+ * maxwidth-1 characters; always NUL-terminates.
+ */
+void sbi_gets(char *s, int maxwidth, char endchar)
+{
+ int ch;
+ char *retval = s;
+
+ while ((ch = sbi_getc()) != endchar && ch >= 0 && maxwidth > 1) {
+ *retval = (char)ch;
+ retval++;
+ maxwidth--;
+ }
+ *retval = '\0';
+}
+
+/* printf engine flags and scratch-buffer size. */
+#define PAD_RIGHT 1
+#define PAD_ZERO 2
+#define PAD_ALTERNATE 4
+#define PRINT_BUF_LEN 64
+
+/* Freestanding varargs via compiler builtins (no libc). */
+#define va_start(v, l) __builtin_va_start((v), l)
+#define va_end __builtin_va_end
+#define va_arg __builtin_va_arg
+typedef __builtin_va_list va_list;
+
+/*
+ * Emit one character: into *out (advancing it, decrementing *out_len
+ * when tracking a size) or to the console when out is NULL.
+ * NOTE(review): when out_len is tracked and *out_len reaches 0 the
+ * else branch still stores through *out — sbi_snprintf() can write
+ * past the caller's buffer.
+ */
+static void printc(char **out, u32 *out_len, char ch)
+{
+ if (out) {
+ if (*out) {
+ if (out_len && (0 < *out_len)) {
+ **out = ch;
+ ++(*out);
+ (*out_len)--;
+ } else {
+ **out = ch;
+ ++(*out);
+ }
+ }
+ } else {
+ sbi_putc(ch);
+ }
+}
+
+/*
+ * Emit string with field-width padding (left/right, space/zero).
+ * Returns the number of characters emitted.
+ */
+static int prints(char **out, u32 *out_len, const char *string, int width,
+ int flags)
+{
+ int pc = 0;
+ char padchar = ' ';
+
+ if (width > 0) {
+ int len = 0;
+ const char *ptr;
+ for (ptr = string; *ptr; ++ptr)
+ ++len;
+ if (len >= width)
+ width = 0;
+ else
+ width -= len;
+ if (flags & PAD_ZERO)
+ padchar = '0';
+ }
+ if (!(flags & PAD_RIGHT)) {
+ for (; width > 0; --width) {
+ printc(out, out_len, padchar);
+ ++pc;
+ }
+ }
+ for (; *string; ++string) {
+ printc(out, out_len, *string);
+ ++pc;
+ }
+ for (; width > 0; --width) {
+ printc(out, out_len, padchar);
+ ++pc;
+ }
+
+ return pc;
+}
+
+/*
+ * Format integer i in base b into a local buffer, then emit via
+ * prints(). sg selects signed handling (base 10 only); letbase picks
+ * 'a'/'A' for hex digits; PAD_ALTERNATE prefixes 0x/0X.
+ * Returns characters emitted.
+ * NOTE(review): u = -i overflows for i == LLONG_MIN (UB).
+ */
+static int printi(char **out, u32 *out_len, long long i, int b, int sg,
+ int width, int flags, int letbase)
+{
+ char print_buf[PRINT_BUF_LEN];
+ char *s;
+ int neg = 0, pc = 0;
+ u64 t;
+ unsigned long long u = i;
+
+ if (sg && b == 10 && i < 0) {
+ neg = 1;
+ u = -i;
+ }
+
+ /* Build digits backwards from the end of the buffer. */
+ s = print_buf + PRINT_BUF_LEN - 1;
+ *s = '\0';
+
+ if (!u) {
+ *--s = '0';
+ } else {
+ while (u) {
+ t = u % b;
+ u = u / b;
+ if (t >= 10)
+ t += letbase - '0' - 10;
+ *--s = t + '0';
+ }
+ }
+
+ if (flags & PAD_ALTERNATE) {
+ if ((b == 16) && (letbase == 'A')) {
+ *--s = 'X';
+ } else if ((b == 16) && (letbase == 'a')) {
+ *--s = 'x';
+ }
+ *--s = '0';
+ }
+
+ if (neg) {
+ if (width && (flags & PAD_ZERO)) {
+ /* Sign precedes zero padding: "-0001" not "000-1". */
+ printc(out, out_len, '-');
+ ++pc;
+ --width;
+ } else {
+ *--s = '-';
+ }
+ }
+
+ return pc + prints(out, out_len, s, width, flags);
+}
+
+/*
+ * Core format-string interpreter. Supports %s %c %d %i %u %x %X %p
+ * %P and l/ll length modifiers, with -, #, 0 flags and field width.
+ * acnt tracks vararg bytes consumed so 'll' arguments can be aligned
+ * on 32-bit ABIs. Returns characters emitted.
+ * NOTE(review): the trailing **out = '\0' below ignores out_len, so
+ * the terminator can also land past the caller's buffer.
+ */
+static int print(char **out, u32 *out_len, const char *format, va_list args)
+{
+ int width, flags, acnt = 0;
+ int pc = 0;
+ char scr[2];
+ unsigned long long tmp;
+
+ for (; *format != 0; ++format) {
+ if (*format == '%') {
+ ++format;
+ width = flags = 0;
+ if (*format == '\0')
+ break;
+ if (*format == '%')
+ goto out;
+ /* Get flags */
+ if (*format == '-') {
+ ++format;
+ flags = PAD_RIGHT;
+ }
+ if (*format == '#') {
+ ++format;
+ flags |= PAD_ALTERNATE;
+ }
+ while (*format == '0') {
+ ++format;
+ flags |= PAD_ZERO;
+ }
+ /* Get width */
+ for (; *format >= '0' && *format <= '9'; ++format) {
+ width *= 10;
+ width += *format - '0';
+ }
+ if (*format == 's') {
+ char *s = va_arg(args, char *);
+ acnt += sizeof(char *);
+ pc += prints(out, out_len, s ? s : "(null)",
+ width, flags);
+ continue;
+ }
+ if ((*format == 'd') || (*format == 'i')) {
+ pc += printi(out, out_len, va_arg(args, int),
+ 10, 1, width, flags, '0');
+ acnt += sizeof(int);
+ continue;
+ }
+ if (*format == 'x') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 16, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'X') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 16, 0,
+ width, flags, 'A');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'u') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 10, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'p') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned long), 16, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned long);
+ continue;
+ }
+ if (*format == 'P') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned long), 16, 0,
+ width, flags, 'A');
+ acnt += sizeof(unsigned long);
+ continue;
+ }
+ if (*format == 'l' && *(format + 1) == 'l') {
+ /* Align varargs for a 64-bit argument. */
+ while (acnt &
+ (sizeof(unsigned long long) - 1)) {
+ va_arg(args, int);
+ acnt += sizeof(int);
+ }
+ if (sizeof(unsigned long long) ==
+ sizeof(unsigned long)) {
+ tmp = va_arg(args, unsigned long long);
+ acnt += sizeof(unsigned long long);
+ } else {
+ ((unsigned long *)&tmp)[0] =
+ va_arg(args, unsigned long);
+ ((unsigned long *)&tmp)[1] =
+ va_arg(args, unsigned long);
+ acnt += 2 * sizeof(unsigned long);
+ }
+ if (*(format + 2) == 'u') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 10, 0,
+ width, flags, 'a');
+ } else if (*(format + 2) == 'x') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 16, 0,
+ width, flags, 'a');
+ } else if (*(format + 2) == 'X') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 16, 0,
+ width, flags, 'A');
+ } else {
+ format += 1;
+ pc += printi(out, out_len, tmp, 10, 1,
+ width, flags, '0');
+ }
+ continue;
+ } else if (*format == 'l') {
+ if (*(format + 1) == 'u') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 10,
+ 0, width, flags, 'a');
+ } else if (*(format + 1) == 'x') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 16,
+ 0, width, flags, 'a');
+ acnt += sizeof(unsigned long);
+ } else if (*(format + 1) == 'X') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 16,
+ 0, width, flags, 'A');
+ acnt += sizeof(unsigned long);
+ } else {
+ pc += printi(out, out_len,
+ va_arg(args, long), 10, 1,
+ width, flags, '0');
+ acnt += sizeof(long);
+ }
+ }
+ if (*format == 'c') {
+ /* char are converted to int then pushed on the stack */
+ scr[0] = va_arg(args, int);
+ scr[1] = '\0';
+ pc += prints(out, out_len, scr, width, flags);
+ acnt += sizeof(int);
+ continue;
+ }
+ } else {
+ out:
+ printc(out, out_len, *format);
+ ++pc;
+ }
+ }
+ if (out)
+ **out = '\0';
+
+ return pc;
+}
+
+/* Format into out; the caller must size out adequately (no bound). */
+int sbi_sprintf(char *out, const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ va_start(args, format);
+ retval = print(&out, NULL, format, args);
+ va_end(args);
+
+ return retval;
+}
+
+/*
+ * Format at most out_sz bytes into out (see overflow NOTEs in
+ * printc()/print() above); returns characters produced.
+ */
+int sbi_snprintf(char *out, u32 out_sz, const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ va_start(args, format);
+ retval = print(&out, &out_sz, format, args);
+ va_end(args);
+
+ return retval;
+}
+
+/* Format directly to the console under the output lock. */
+int sbi_printf(const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ spin_lock(&console_out_lock);
+ va_start(args, format);
+ retval = print(NULL, NULL, format, args);
+ va_end(args);
+ spin_unlock(&console_out_lock);
+
+ return retval;
+}
+
+/* Bind the console to the platform and run its init hook. */
+int sbi_console_init(struct sbi_scratch *scratch)
+{
+ console_plat = sbi_platform_ptr(scratch);
+
+ return sbi_platform_console_init(console_plat);
+}
diff --git a/lib/sbi/sbi_ecall.c b/lib/sbi/sbi_ecall.c
new file mode 100644
index 0000000..50c05d6
--- /dev/null
+++ b/lib/sbi/sbi_ecall.c
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_trap.h>
+
+/* Implemented SBI specification version (legacy v0.1 interface). */
+#define SBI_ECALL_VERSION_MAJOR 0
+#define SBI_ECALL_VERSION_MINOR 1
+
+u16 sbi_ecall_version_major(void)
+{
+ return SBI_ECALL_VERSION_MAJOR;
+}
+
+u16 sbi_ecall_version_minor(void)
+{
+ return SBI_ECALL_VERSION_MINOR;
+}
+
+/*
+ * Dispatch an S-mode ecall. The SBI function ID is in a7; arguments
+ * are in a0-a3 and the result (where defined) is returned in a0.
+ * On success mepc is advanced past the 4-byte ECALL instruction
+ * (ECALL has no compressed form). SBI_ETRAP from an IPI send means a
+ * fault occurred while reading the caller's hart mask; it is
+ * converted into a redirected trap to S-mode.
+ * Unknown function IDs set a0 = SBI_ENOTSUPP and still advance mepc.
+ */
+int sbi_ecall_handler(u32 hartid, ulong mcause, struct sbi_trap_regs *regs,
+ struct sbi_scratch *scratch)
+{
+ int ret = SBI_ENOTSUPP;
+ struct unpriv_trap uptrap;
+ struct sbi_tlb_info tlb_info;
+
+ switch (regs->a7) {
+ case SBI_ECALL_SET_TIMER:
+#if __riscv_xlen == 32
+ /* 64-bit deadline is split across a1:a0 on RV32. */
+ sbi_timer_event_start(scratch,
+ (((u64)regs->a1 << 32) | (u64)regs->a0));
+#else
+ sbi_timer_event_start(scratch, (u64)regs->a0);
+#endif
+ ret = 0;
+ break;
+ case SBI_ECALL_CONSOLE_PUTCHAR:
+ sbi_putc(regs->a0);
+ ret = 0;
+ break;
+ case SBI_ECALL_CONSOLE_GETCHAR:
+ /* May store a negative value when no character is pending. */
+ regs->a0 = sbi_getc();
+ ret = 0;
+ break;
+ case SBI_ECALL_CLEAR_IPI:
+ sbi_ipi_clear_smode(scratch);
+ ret = 0;
+ break;
+ case SBI_ECALL_SEND_IPI:
+ /* a0 is an S-mode virtual address of the hart mask. */
+ ret = sbi_ipi_send_many(scratch, &uptrap, (ulong *)regs->a0,
+ SBI_IPI_EVENT_SOFT, NULL);
+ break;
+ case SBI_ECALL_REMOTE_FENCE_I:
+ ret = sbi_ipi_send_many(scratch, &uptrap, (ulong *)regs->a0,
+ SBI_IPI_EVENT_FENCE_I, NULL);
+ break;
+ case SBI_ECALL_REMOTE_SFENCE_VMA:
+ tlb_info.start = (unsigned long)regs->a1;
+ tlb_info.size = (unsigned long)regs->a2;
+ tlb_info.type = SBI_TLB_FLUSH_VMA;
+
+ ret = sbi_ipi_send_many(scratch, &uptrap, (ulong *)regs->a0,
+ SBI_IPI_EVENT_SFENCE_VMA, &tlb_info);
+ break;
+ case SBI_ECALL_REMOTE_SFENCE_VMA_ASID:
+ tlb_info.start = (unsigned long)regs->a1;
+ tlb_info.size = (unsigned long)regs->a2;
+ tlb_info.asid = (unsigned long)regs->a3;
+ tlb_info.type = SBI_TLB_FLUSH_VMA_ASID;
+
+ ret = sbi_ipi_send_many(scratch, &uptrap, (ulong *)regs->a0,
+ SBI_IPI_EVENT_SFENCE_VMA_ASID,
+ &tlb_info);
+ break;
+ case SBI_ECALL_SHUTDOWN:
+ sbi_system_shutdown(scratch, 0);
+ ret = 0;
+ break;
+ default:
+ regs->a0 = SBI_ENOTSUPP;
+ ret = 0;
+ break;
+ };
+
+ if (!ret) {
+ regs->mepc += 4;
+ } else if (ret == SBI_ETRAP) {
+ /* Fault while accessing caller memory: bounce it to S-mode. */
+ ret = 0;
+ sbi_trap_redirect(regs, scratch, regs->mepc,
+ uptrap.cause, uptrap.tval);
+ }
+
+ return ret;
+}
diff --git a/lib/sbi/sbi_emulate_csr.c b/lib/sbi/sbi_emulate_csr.c
new file mode 100644
index 0000000..5d6819e
--- /dev/null
+++ b/lib/sbi/sbi_emulate_csr.c
@@ -0,0 +1,137 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_bits.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_emulate_csr.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_timer.h>
+
+/**
+ * Emulate a read of a counter-related CSR on behalf of a lower mode.
+ *
+ * When the trap came from U-mode (MPP == PRV_U), access is gated by
+ * the per-counter enable bits in SCOUNTEREN; otherwise cen is all
+ * ones and no restriction applies.
+ *
+ * @param csr_num CSR number being read
+ * @param hartid HART ID of the trapping hart (for diagnostics)
+ * @param mstatus saved mstatus of the trapping context
+ * @param scratch this hart's scratch space
+ * @param csr_val output: the emulated CSR value
+ * @return 0 on success, -1 if blocked by scounteren,
+ *         SBI_ENOTSUPP for an unhandled CSR
+ */
+int sbi_emulate_csr_read(int csr_num, u32 hartid, ulong mstatus,
+			 struct sbi_scratch *scratch, ulong *csr_val)
+{
+	ulong cen = -1UL;
+
+	if (EXTRACT_FIELD(mstatus, MSTATUS_MPP) == PRV_U)
+		cen = csr_read(CSR_SCOUNTEREN);
+
+	switch (csr_num) {
+	case CSR_CYCLE:
+		if (!((cen >> (CSR_CYCLE - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MCYCLE);
+		break;
+	case CSR_TIME:
+		if (!((cen >> (CSR_TIME - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = sbi_timer_value(scratch);
+		break;
+	case CSR_INSTRET:
+		if (!((cen >> (CSR_INSTRET - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MINSTRET);
+		break;
+	case CSR_MHPMCOUNTER3:
+		/* hpmcounterN maps to scounteren bit 3 + (N - 3) */
+		if (!((cen >> (3 + CSR_MHPMCOUNTER3 - CSR_MHPMCOUNTER3)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MHPMCOUNTER3);
+		break;
+	case CSR_MHPMCOUNTER4:
+		if (!((cen >> (3 + CSR_MHPMCOUNTER4 - CSR_MHPMCOUNTER3)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MHPMCOUNTER4);
+		break;
+#if __riscv_xlen == 32
+	/* RV32: upper halves of the 64-bit counters */
+	case CSR_CYCLEH:
+		if (!((cen >> (CSR_CYCLE - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MCYCLEH);
+		break;
+	case CSR_TIMEH:
+		if (!((cen >> (CSR_TIME - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = sbi_timer_value(scratch) >> 32;
+		break;
+	case CSR_INSTRETH:
+		if (!((cen >> (CSR_INSTRET - CSR_CYCLE)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MINSTRETH);
+		break;
+	case CSR_MHPMCOUNTER3H:
+		if (!((cen >> (3 + CSR_MHPMCOUNTER3 - CSR_MHPMCOUNTER3)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MHPMCOUNTER3H);
+		break;
+	case CSR_MHPMCOUNTER4H:
+		if (!((cen >> (3 + CSR_MHPMCOUNTER4 - CSR_MHPMCOUNTER3)) & 1))
+			return -1;
+		*csr_val = csr_read(CSR_MHPMCOUNTER4H);
+		break;
+#endif
+	case CSR_MHPMEVENT3:
+		*csr_val = csr_read(CSR_MHPMEVENT3);
+		break;
+	case CSR_MHPMEVENT4:
+		*csr_val = csr_read(CSR_MHPMEVENT4);
+		break;
+	default:
+		sbi_printf("%s: hartid%d: invalid csr_num=0x%x\n", __func__,
+			   hartid, csr_num);
+		return SBI_ENOTSUPP;
+	};
+
+	return 0;
+}
+
+/**
+ * Emulate a write of a counter-related CSR on behalf of a lower mode.
+ *
+ * Note: unlike the read path, writes are not gated by scounteren here.
+ *
+ * @param csr_num CSR number being written
+ * @param hartid HART ID of the trapping hart (for diagnostics)
+ * @param mstatus saved mstatus of the trapping context (unused)
+ * @param scratch this hart's scratch space (unused)
+ * @param csr_val value to write
+ * @return 0 on success, SBI_ENOTSUPP for an unhandled CSR
+ */
+int sbi_emulate_csr_write(int csr_num, u32 hartid, ulong mstatus,
+			  struct sbi_scratch *scratch, ulong csr_val)
+{
+	switch (csr_num) {
+	case CSR_CYCLE:
+		csr_write(CSR_MCYCLE, csr_val);
+		break;
+	case CSR_INSTRET:
+		csr_write(CSR_MINSTRET, csr_val);
+		break;
+	case CSR_MHPMCOUNTER3:
+		csr_write(CSR_MHPMCOUNTER3, csr_val);
+		break;
+	case CSR_MHPMCOUNTER4:
+		csr_write(CSR_MHPMCOUNTER4, csr_val);
+		break;
+#if __riscv_xlen == 32
+	/* RV32: upper halves of the 64-bit counters */
+	case CSR_CYCLEH:
+		csr_write(CSR_MCYCLEH, csr_val);
+		break;
+	case CSR_INSTRETH:
+		csr_write(CSR_MINSTRETH, csr_val);
+		break;
+	case CSR_MHPMCOUNTER3H:
+		csr_write(CSR_MHPMCOUNTER3H, csr_val);
+		break;
+	case CSR_MHPMCOUNTER4H:
+		csr_write(CSR_MHPMCOUNTER4H, csr_val);
+		break;
+#endif
+	case CSR_MHPMEVENT3:
+		csr_write(CSR_MHPMEVENT3, csr_val);
+		break;
+	case CSR_MHPMEVENT4:
+		csr_write(CSR_MHPMEVENT4, csr_val);
+		break;
+	default:
+		sbi_printf("%s: hartid%d: invalid csr_num=0x%x\n", __func__,
+			   hartid, csr_num);
+		return SBI_ENOTSUPP;
+	};
+
+	return 0;
+}
diff --git a/lib/sbi/sbi_fifo.c b/lib/sbi/sbi_fifo.c
new file mode 100644
index 0000000..a92b46c
--- /dev/null
+++ b/lib/sbi/sbi_fifo.c
@@ -0,0 +1,184 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra<atish.patra@wdc.com>
+ *
+ */
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_fifo.h>
+#include <plat/string.h>
+
+/**
+ * Initialize a FIFO over caller-provided queue memory.
+ *
+ * @param fifo FIFO to initialize
+ * @param queue_mem backing storage of at least entries * entry_size bytes
+ * @param entries number of slots
+ * @param entry_size size of one slot in bytes
+ */
+void sbi_fifo_init(struct sbi_fifo *fifo, void *queue_mem, u16 entries,
+		   u16 entry_size)
+{
+	fifo->queue = queue_mem;
+	fifo->num_entries = entries;
+	fifo->entry_size = entry_size;
+	SPIN_LOCK_INIT(&fifo->qlock);
+	fifo->avail = fifo->tail = 0;
+	memset(fifo->queue, 0, entries * entry_size);
+}
+
+/* Full when every slot is occupied. Note: must be called with fifo->qlock held */
+static inline bool __sbi_fifo_is_full(struct sbi_fifo *fifo)
+{
+	return (fifo->avail == fifo->num_entries) ? TRUE : FALSE;
+}
+
+/** Return the number of occupied slots (0 for a NULL fifo). */
+u16 sbi_fifo_avail(struct sbi_fifo *fifo)
+{
+	u16 ret;
+
+	if (!fifo)
+		return 0;
+
+	spin_lock(&fifo->qlock);
+	ret = fifo->avail;
+	spin_unlock(&fifo->qlock);
+
+	return ret;
+}
+
+/** Locked wrapper around __sbi_fifo_is_full(). */
+bool sbi_fifo_is_full(struct sbi_fifo *fifo)
+{
+	bool ret;
+
+	spin_lock(&fifo->qlock);
+	ret = __sbi_fifo_is_full(fifo);
+	spin_unlock(&fifo->qlock);
+
+	return ret;
+}
+
+/* Empty when no slot is occupied. Note: must be called with fifo->qlock held */
+static inline bool __sbi_fifo_is_empty(struct sbi_fifo *fifo)
+{
+	return (fifo->avail == 0) ? TRUE : FALSE;
+}
+
+/** Locked wrapper around __sbi_fifo_is_empty(). */
+bool sbi_fifo_is_empty(struct sbi_fifo *fifo)
+{
+	bool ret;
+
+	spin_lock(&fifo->qlock);
+	ret = __sbi_fifo_is_empty(fifo);
+	spin_unlock(&fifo->qlock);
+
+	return ret;
+}
+
+/* Discard all entries and zero the backing store.
+ * Note: must be called with fifo->qlock held */
+static inline void __sbi_fifo_reset(struct sbi_fifo *fifo)
+{
+	fifo->avail = 0;
+	fifo->tail = 0;
+	memset(fifo->queue, 0, fifo->num_entries * fifo->entry_size);
+}
+
+/** Locked reset of the FIFO. Returns FALSE only for a NULL fifo. */
+bool sbi_fifo_reset(struct sbi_fifo *fifo)
+{
+	if (!fifo)
+		return FALSE;
+
+	spin_lock(&fifo->qlock);
+	__sbi_fifo_reset(fifo);
+	spin_unlock(&fifo->qlock);
+
+	return TRUE;
+}
+
+/**
+ * Provide a helper function to do inplace update to the fifo.
+ * Note: The callback function is called with lock being held.
+ *
+ * **Do not** invoke any other fifo function from callback. Otherwise, it will
+ * lead to deadlock.
+ *
+ * Walks entries from tail to head; stops early when the callback returns
+ * SBI_FIFO_SKIP or SBI_FIFO_UPDATED, and resets the whole FIFO when it
+ * returns SBI_FIFO_RESET.
+ *
+ * @return the last callback result, or SBI_FIFO_UNCHANGED when the FIFO
+ *         is empty or arguments are NULL
+ */
+int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
+			    int (*fptr)(void *in, void *data))
+{
+	int i, index = 0;
+	int ret = SBI_FIFO_UNCHANGED;
+	void *entry;
+
+	if (!fifo || !in)
+		return ret;
+	spin_lock(&fifo->qlock);
+	if (__sbi_fifo_is_empty(fifo)) {
+		spin_unlock(&fifo->qlock);
+		return ret;
+	}
+
+	for (i = 0; i < fifo->avail; i++) {
+		/* Logical position i, wrapped into the circular buffer */
+		index = fifo->tail + i;
+		if (index >= fifo->num_entries)
+			index = index - fifo->num_entries;
+		entry = (void *)fifo->queue + (u32)index * fifo->entry_size;
+		ret = fptr(in, entry);
+		if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED) {
+			break;
+		} else if (ret == SBI_FIFO_RESET) {
+			__sbi_fifo_reset(fifo);
+			break;
+		}
+	}
+	spin_unlock(&fifo->qlock);
+
+	return ret;
+}
+
+/**
+ * Copy one entry into the FIFO.
+ *
+ * @return 0 on success, SBI_EINVAL on NULL arguments,
+ *         SBI_ENOSPC when the FIFO is full
+ */
+int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
+{
+	u32 head;
+
+	if (!fifo || !data)
+		return SBI_EINVAL;
+
+	spin_lock(&fifo->qlock);
+
+	if (__sbi_fifo_is_full(fifo)) {
+		spin_unlock(&fifo->qlock);
+		return SBI_ENOSPC;
+	}
+
+	/* Head slot = tail + avail, wrapped into the circular buffer */
+	head = (u32)fifo->tail + fifo->avail;
+	if (head >= fifo->num_entries)
+		head = head - fifo->num_entries;
+
+	memcpy(fifo->queue + head * fifo->entry_size, data, fifo->entry_size);
+
+	fifo->avail++;
+
+	spin_unlock(&fifo->qlock);
+
+	return 0;
+}
+
+/**
+ * Copy the oldest entry out of the FIFO and free its slot.
+ *
+ * @return 0 on success, SBI_EINVAL on NULL arguments,
+ *         SBI_ENOENT when the FIFO is empty
+ */
+int sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data)
+{
+	if (!fifo || !data)
+		return SBI_EINVAL;
+
+	spin_lock(&fifo->qlock);
+
+	if (__sbi_fifo_is_empty(fifo)) {
+		spin_unlock(&fifo->qlock);
+		return SBI_ENOENT;
+	}
+
+	memcpy(data, fifo->queue + (u32)fifo->tail * fifo->entry_size,
+	       fifo->entry_size);
+
+	fifo->avail--;
+	fifo->tail++;
+	if (fifo->tail >= fifo->num_entries)
+		fifo->tail = 0;
+
+	spin_unlock(&fifo->qlock);
+
+	return 0;
+}
diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
new file mode 100644
index 0000000..187b493
--- /dev/null
+++ b/lib/sbi/sbi_hart.c
@@ -0,0 +1,366 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_fp.h>
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_bits.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_platform.h>
+
+/**
+ * Return HART ID of the caller (from the mhartid CSR).
+ *
+ * Fix: declare with (void) — in C an empty parameter list "()" declares
+ * a function with unspecified parameters, not a prototype.
+ */
+unsigned int sbi_current_hartid(void)
+{
+	return (u32)csr_read(CSR_MHARTID);
+}
+
+/**
+ * Set up mstatus and related CSRs for the current hart:
+ * enable the FPU when F/D is present, open the counters to
+ * lower modes where the platform supports it, mask all
+ * interrupts, and disable S-mode paging.
+ */
+static void mstatus_init(struct sbi_scratch *scratch, u32 hartid)
+{
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	/* Enable FPU */
+	if (misa_extension('D') || misa_extension('F'))
+		csr_write(CSR_MSTATUS, MSTATUS_FS);
+
+	/* Enable user/supervisor use of perf counters */
+	if (misa_extension('S') && sbi_platform_has_scounteren(plat))
+		csr_write(CSR_SCOUNTEREN, -1);
+	if (sbi_platform_has_mcounteren(plat))
+		csr_write(CSR_MCOUNTEREN, -1);
+
+	/* Disable all interrupts */
+	csr_write(CSR_MIE, 0);
+
+	/* Disable S-mode paging */
+	if (misa_extension('S'))
+		csr_write(CSR_SATP, 0);
+}
+
+/**
+ * Initialize floating-point state for the current hart.
+ *
+ * With FP support compiled in, zero all 32 FP registers and fcsr.
+ * Without it, attempt to turn the F/D extensions off in misa; fail
+ * with SBI_ENOTSUPP if the bits stick (hardware has FP we can't init).
+ */
+static int fp_init(u32 hartid)
+{
+#ifdef __riscv_flen
+	int i;
+#else
+	unsigned long fd_mask;
+#endif
+
+	if (!misa_extension('D') && !misa_extension('F'))
+		return 0;
+
+	/* mstatus.FS must have been enabled by mstatus_init() */
+	if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
+		return SBI_EINVAL;
+
+#ifdef __riscv_flen
+	for (i = 0; i < 32; i++)
+		init_fp_reg(i);
+	csr_write(CSR_FCSR, 0);
+#else
+	fd_mask = (1 << ('F' - 'A')) | (1 << ('D' - 'A'));
+	csr_clear(CSR_MISA, fd_mask);
+	if (csr_read(CSR_MISA) & fd_mask)
+		return SBI_ENOTSUPP;
+#endif
+
+	return 0;
+}
+
+/**
+ * Delegate S-mode interrupts and selected exceptions to S-mode.
+ *
+ * Reads back mideleg/medeleg to verify the hardware accepted the
+ * delegation; returns SBI_EFAIL on mismatch.
+ */
+static int delegate_traps(struct sbi_scratch *scratch, u32 hartid)
+{
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+	unsigned long interrupts, exceptions;
+
+	if (!misa_extension('S'))
+		/* No delegation possible as mideleg does not exist*/
+		return 0;
+
+	/* Send M-mode interrupts and most exceptions to S-mode */
+	interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
+	exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
+		     (1U << CAUSE_USER_ECALL);
+	if (sbi_platform_has_mfaults_delegation(plat))
+		exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
+			      (1U << CAUSE_LOAD_PAGE_FAULT) |
+			      (1U << CAUSE_STORE_PAGE_FAULT);
+
+	csr_write(CSR_MIDELEG, interrupts);
+	csr_write(CSR_MEDELEG, exceptions);
+
+	if (csr_read(CSR_MIDELEG) != interrupts)
+		return SBI_EFAIL;
+	if (csr_read(CSR_MEDELEG) != exceptions)
+		return SBI_EFAIL;
+
+	return 0;
+}
+
+/**
+ * Return the smallest n such that (1UL << n) >= x, capped at
+ * __riscv_xlen. Used to size naturally-aligned PMP regions.
+ */
+unsigned long log2roundup(unsigned long x)
+{
+	unsigned long ret = 0;
+
+	while (ret < __riscv_xlen) {
+		if (x <= (1UL << ret))
+			break;
+		ret++;
+	}
+
+	return ret;
+}
+
+/**
+ * Print every active PMP region of the current hart to the console
+ * (address range plus A/L/R/W/X attribute flags). No-op when the
+ * platform has no PMP support.
+ */
+void sbi_hart_pmp_dump(struct sbi_scratch *scratch)
+{
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+	unsigned long prot, addr, size, l2l;
+	unsigned int i;
+
+	if (!sbi_platform_has_pmp(plat))
+		return;
+
+	for (i = 0; i < PMP_COUNT; i++) {
+		pmp_get(i, &prot, &addr, &l2l);
+		if (!(prot & PMP_A))
+			continue;
+		/* l2l == xlen means the region covers all of memory */
+		if (l2l < __riscv_xlen)
+			size = (1UL << l2l);
+		else
+			size = 0;
+#if __riscv_xlen == 32
+		sbi_printf("PMP%d: 0x%08lx-0x%08lx (A",
+#else
+		sbi_printf("PMP%d: 0x%016lx-0x%016lx (A",
+#endif
+			   i, addr, addr + size - 1);
+		if (prot & PMP_L)
+			sbi_printf(",L");
+		if (prot & PMP_R)
+			sbi_printf(",R");
+		if (prot & PMP_W)
+			sbi_printf(",W");
+		if (prot & PMP_X)
+			sbi_printf(",X");
+		sbi_printf(")\n");
+	}
+}
+
+/**
+ * Program the hart's PMP regions: entry 0 covers the firmware image
+ * (rounded up to a power-of-two aligned region), remaining entries
+ * come from the platform's region table.
+ */
+static int pmp_init(struct sbi_scratch *scratch, u32 hartid)
+{
+	u32 i, count;
+	unsigned long fw_start, fw_size_log2;
+	ulong prot, addr, log2size;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	if (!sbi_platform_has_pmp(plat))
+		return 0;
+
+	/* Align firmware base down to its power-of-two region size */
+	fw_size_log2 = log2roundup(scratch->fw_size);
+	fw_start = scratch->fw_start & ~((1UL << fw_size_log2) - 1UL);
+
+	pmp_set(0, 0, fw_start, fw_size_log2);
+
+	/* Entry 0 is taken, so at most PMP_COUNT - 1 platform regions */
+	count = sbi_platform_pmp_region_count(plat, hartid);
+	if ((PMP_COUNT - 1) < count)
+		count = (PMP_COUNT - 1);
+
+	for (i = 0; i < count; i++) {
+		if (sbi_platform_pmp_region_info(plat, hartid, i, &prot, &addr,
+						 &log2size))
+			continue;
+		pmp_set(i + 1, prot, addr, log2size);
+	}
+
+	return 0;
+}
+
+/* Scratch-space offset of the per-hart trap-info pointer; allocated
+ * once during cold boot and shared by all harts. */
+static unsigned long trap_info_offset;
+
+/**
+ * Per-hart initialization: allocate trap-info scratch space (cold boot
+ * only), then set up mstatus, FP state, trap delegation and PMP.
+ *
+ * @return 0 on success, negative SBI error code on failure
+ */
+int sbi_hart_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
+{
+	int rc;
+
+	if (cold_boot) {
+		trap_info_offset = sbi_scratch_alloc_offset(__SIZEOF_POINTER__,
+							    "HART_TRAP_INFO");
+		if (!trap_info_offset)
+			return SBI_ENOMEM;
+	}
+
+	mstatus_init(scratch, hartid);
+
+	rc = fp_init(hartid);
+	if (rc)
+		return rc;
+
+	rc = delegate_traps(scratch, hartid);
+	if (rc)
+		return rc;
+
+	return pmp_init(scratch, hartid);
+}
+
+/**
+ * Fetch the trap-info pointer stored in this hart's scratch space,
+ * or NULL when the slot was never allocated.
+ */
+void *sbi_hart_get_trap_info(struct sbi_scratch *scratch)
+{
+	unsigned long *trap_info;
+
+	if (!trap_info_offset)
+		return NULL;
+
+	trap_info = sbi_scratch_offset_ptr(scratch, trap_info_offset);
+
+	return (void *)(*trap_info);
+}
+
+/**
+ * Store a trap-info pointer in this hart's scratch space.
+ * Silently does nothing when the slot was never allocated.
+ */
+void sbi_hart_set_trap_info(struct sbi_scratch *scratch, void *data)
+{
+	unsigned long *trap_info;
+
+	if (!trap_info_offset)
+		return;
+
+	trap_info = sbi_scratch_offset_ptr(scratch, trap_info_offset);
+	*trap_info = (unsigned long)data;
+}
+
+/** Park the current hart forever in a low-power WFI loop. */
+void __attribute__((noreturn)) sbi_hart_hang(void)
+{
+	while (1)
+		wfi();
+	__builtin_unreachable();
+}
+
+/**
+ * Leave M-mode and jump to the next booting stage via mret.
+ *
+ * Validates that the requested privilege mode exists in misa, programs
+ * mstatus.MPP/MPIE and mepc, clears the target mode's trap/paging
+ * state, and executes mret with arg0/arg1 in a0/a1. Never returns;
+ * hangs the hart on an invalid mode.
+ *
+ * @param arg0 first argument for the next stage (passed in a0)
+ * @param arg1 second argument for the next stage (passed in a1)
+ * @param next_addr entry address of the next stage
+ * @param next_mode privilege mode to enter (PRV_M/PRV_S/PRV_U)
+ */
+void __attribute__((noreturn))
+sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
+		     unsigned long next_addr, unsigned long next_mode)
+{
+	unsigned long val;
+
+	switch (next_mode) {
+	case PRV_M:
+		break;
+	case PRV_S:
+		if (!misa_extension('S'))
+			sbi_hart_hang();
+		break;
+	case PRV_U:
+		if (!misa_extension('U'))
+			sbi_hart_hang();
+		break;
+	default:
+		sbi_hart_hang();
+	}
+
+	val = csr_read(CSR_MSTATUS);
+	val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
+	val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
+
+	csr_write(CSR_MSTATUS, val);
+	csr_write(CSR_MEPC, next_addr);
+
+	/* Give the next stage a clean trap/paging environment */
+	if (next_mode == PRV_S) {
+		csr_write(CSR_STVEC, next_addr);
+		csr_write(CSR_SSCRATCH, 0);
+		csr_write(CSR_SIE, 0);
+		csr_write(CSR_SATP, 0);
+	} else if (next_mode == PRV_U) {
+		csr_write(CSR_UTVEC, next_addr);
+		csr_write(CSR_USCRATCH, 0);
+		csr_write(CSR_UIE, 0);
+	}
+
+	register unsigned long a0 asm("a0") = arg0;
+	register unsigned long a1 asm("a1") = arg1;
+	__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
+	__builtin_unreachable();
+}
+
+/* Bitmask of harts that completed boot, one bit per hartid,
+ * guarded by avail_hart_mask_lock. */
+static spinlock_t avail_hart_mask_lock = SPIN_LOCK_INITIALIZER;
+static volatile unsigned long avail_hart_mask = 0;
+
+/** Set the given hart's bit in the available-harts mask. */
+void sbi_hart_mark_available(u32 hartid)
+{
+	spin_lock(&avail_hart_mask_lock);
+	avail_hart_mask |= (1UL << hartid);
+	spin_unlock(&avail_hart_mask_lock);
+}
+
+/** Clear the given hart's bit in the available-harts mask. */
+void sbi_hart_unmark_available(u32 hartid)
+{
+	spin_lock(&avail_hart_mask_lock);
+	avail_hart_mask &= ~(1UL << hartid);
+	spin_unlock(&avail_hart_mask_lock);
+}
+
+/** Return a snapshot of the available-harts mask. */
+ulong sbi_hart_available_mask(void)
+{
+	ulong ret;
+
+	spin_lock(&avail_hart_mask_lock);
+	ret = avail_hart_mask;
+	spin_unlock(&avail_hart_mask_lock);
+
+	return ret;
+}
+
+/* Signature of the firmware-provided hartid -> scratch translator */
+typedef struct sbi_scratch *(*h2s)(ulong hartid);
+
+/**
+ * Translate a hartid into that hart's scratch pointer using the
+ * translator function whose address is stored in the scratch area.
+ */
+struct sbi_scratch *sbi_hart_id_to_scratch(struct sbi_scratch *scratch,
+					   u32 hartid)
+{
+	return ((h2s)scratch->hartid_to_scratch)(hartid);
+}
+
+#define COLDBOOT_WAIT_BITMAP_SIZE __riscv_xlen
+static spinlock_t coldboot_wait_bitmap_lock = SPIN_LOCK_INITIALIZER;
+static unsigned long coldboot_wait_bitmap = 0;
+
+/**
+ * Park a warm-boot hart until the cold-boot hart wakes it with an IPI.
+ *
+ * The hart advertises itself in coldboot_wait_bitmap, sleeps in WFI,
+ * and re-checks MIP until the machine software interrupt (MSIP) bit
+ * is actually pending. Hangs on an out-of-range hartid.
+ */
+void sbi_hart_wait_for_coldboot(struct sbi_scratch *scratch, u32 hartid)
+{
+	unsigned long mipval;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	if ((sbi_platform_hart_count(plat) <= hartid) ||
+	    (COLDBOOT_WAIT_BITMAP_SIZE <= hartid))
+		sbi_hart_hang();
+
+	/* Set MSIE bit to receive IPI */
+	csr_set(CSR_MIE, MIP_MSIP);
+
+	do {
+		spin_lock(&coldboot_wait_bitmap_lock);
+		coldboot_wait_bitmap |= (1UL << hartid);
+		spin_unlock(&coldboot_wait_bitmap_lock);
+
+		wfi();
+		mipval = csr_read(CSR_MIP);
+
+		spin_lock(&coldboot_wait_bitmap_lock);
+		coldboot_wait_bitmap &= ~(1UL << hartid);
+		spin_unlock(&coldboot_wait_bitmap_lock);
+		/*
+		 * Fix: test the MSIP bit with bitwise AND. The original
+		 * logical AND (mipval && MIP_MSIP) evaluated true whenever
+		 * ANY interrupt bit was pending, ending the wait spuriously.
+		 */
+	} while (!(mipval & MIP_MSIP));
+
+	csr_clear(CSR_MIP, MIP_MSIP);
+}
+
+/**
+ * Send an IPI from the cold-boot hart to every other hart currently
+ * parked in sbi_hart_wait_for_coldboot().
+ */
+void sbi_hart_wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid)
+{
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+	int max_hart = sbi_platform_hart_count(plat);
+
+	for (int i = 0; i < max_hart; i++) {
+		/* send an IPI to every other hart */
+		spin_lock(&coldboot_wait_bitmap_lock);
+		if ((i != hartid) && (coldboot_wait_bitmap & (1UL << i)))
+			sbi_platform_ipi_send(plat, i);
+		spin_unlock(&coldboot_wait_bitmap_lock);
+	}
+}
diff --git a/lib/sbi/sbi_illegal_insn.c b/lib/sbi/sbi_illegal_insn.c
new file mode 100644
index 0000000..5541838
--- /dev/null
+++ b/lib/sbi/sbi_illegal_insn.c
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_bits.h>
+#include <sbi/sbi_emulate_csr.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_illegal_insn.h>
+#include <sbi/sbi_trap.h>
+
+/* Handler signature used by the opcode dispatch table below */
+typedef int (*illegal_insn_func)(ulong insn, u32 hartid, ulong mcause,
+				 struct sbi_trap_regs *regs,
+				 struct sbi_scratch *scratch);
+
+/**
+ * Fallback handler: the instruction really is illegal, so redirect
+ * the trap (with the instruction as tval) to the lower mode.
+ */
+static int truly_illegal_insn(ulong insn, u32 hartid, ulong mcause,
+			      struct sbi_trap_regs *regs,
+			      struct sbi_scratch *scratch)
+{
+	return sbi_trap_redirect(regs, scratch, regs->mepc, mcause, insn);
+}
+
+/**
+ * Emulate a SYSTEM-opcode (CSR access) instruction that trapped as
+ * illegal. Decodes the funct3 field: 1-3 are the register forms
+ * (CSRRW/CSRRS/CSRRC, operand in rs1), 5-7 the immediate forms
+ * (operand is the rs1 field itself). Falls back to
+ * truly_illegal_insn() when the CSR read/write cannot be emulated.
+ */
+static int system_opcode_insn(ulong insn, u32 hartid, ulong mcause,
+			      struct sbi_trap_regs *regs,
+			      struct sbi_scratch *scratch)
+{
+	int do_write, rs1_num = (insn >> 15) & 0x1f;
+	ulong rs1_val = GET_RS1(insn, regs);
+	int csr_num = (u32)insn >> 20;
+	ulong csr_val, new_csr_val;
+
+	if (sbi_emulate_csr_read(csr_num, hartid, regs->mstatus, scratch,
+				 &csr_val))
+		return truly_illegal_insn(insn, hartid, mcause, regs, scratch);
+
+	/* CSRRS/CSRRC with rs1 == x0 read without writing */
+	do_write = rs1_num;
+	switch (GET_RM(insn)) {
+	case 1:
+		new_csr_val = rs1_val;
+		do_write = 1;
+		break;
+	case 2:
+		new_csr_val = csr_val | rs1_val;
+		break;
+	case 3:
+		new_csr_val = csr_val & ~rs1_val;
+		break;
+	case 5:
+		new_csr_val = rs1_num;
+		do_write = 1;
+		break;
+	case 6:
+		new_csr_val = csr_val | rs1_num;
+		break;
+	case 7:
+		new_csr_val = csr_val & ~rs1_num;
+		break;
+	default:
+		return truly_illegal_insn(insn, hartid, mcause, regs, scratch);
+	};
+
+	if (do_write && sbi_emulate_csr_write(csr_num, hartid, regs->mstatus,
+					      scratch, new_csr_val))
+		return truly_illegal_insn(insn, hartid, mcause, regs, scratch);
+
+	SET_RD(insn, regs, csr_val);
+
+	regs->mepc += 4;
+
+	return 0;
+}
+
+/* Dispatch table indexed by instruction bits [6:2] (the opcode field);
+ * only the SYSTEM opcode (28 = 0x1C, i.e. opcode 0x73) is emulated. */
+static illegal_insn_func illegal_insn_table[32] = {
+	truly_illegal_insn, /* 0 */
+	truly_illegal_insn, /* 1 */
+	truly_illegal_insn, /* 2 */
+	truly_illegal_insn, /* 3 */
+	truly_illegal_insn, /* 4 */
+	truly_illegal_insn, /* 5 */
+	truly_illegal_insn, /* 6 */
+	truly_illegal_insn, /* 7 */
+	truly_illegal_insn, /* 8 */
+	truly_illegal_insn, /* 9 */
+	truly_illegal_insn, /* 10 */
+	truly_illegal_insn, /* 11 */
+	truly_illegal_insn, /* 12 */
+	truly_illegal_insn, /* 13 */
+	truly_illegal_insn, /* 14 */
+	truly_illegal_insn, /* 15 */
+	truly_illegal_insn, /* 16 */
+	truly_illegal_insn, /* 17 */
+	truly_illegal_insn, /* 18 */
+	truly_illegal_insn, /* 19 */
+	truly_illegal_insn, /* 20 */
+	truly_illegal_insn, /* 21 */
+	truly_illegal_insn, /* 22 */
+	truly_illegal_insn, /* 23 */
+	truly_illegal_insn, /* 24 */
+	truly_illegal_insn, /* 25 */
+	truly_illegal_insn, /* 26 */
+	truly_illegal_insn, /* 27 */
+	system_opcode_insn, /* 28 */
+	truly_illegal_insn, /* 29 */
+	truly_illegal_insn, /* 30 */
+	truly_illegal_insn /* 31 */
+};
+
+/**
+ * Entry point for illegal-instruction traps.
+ *
+ * Reads the faulting instruction from mtval (re-fetching from mepc
+ * when the hardware reported 0), forwards compressed/non-standard
+ * encodings straight to truly_illegal_insn(), and dispatches 32-bit
+ * instructions through illegal_insn_table by opcode.
+ */
+int sbi_illegal_insn_handler(u32 hartid, ulong mcause,
+			     struct sbi_trap_regs *regs,
+			     struct sbi_scratch *scratch)
+{
+	/*
+	 * Fix: use the CSR_MTVAL symbolic name (mbadaddr is the legacy
+	 * pre-1.10 alias), consistent with the misaligned-access handlers.
+	 */
+	ulong insn = csr_read(CSR_MTVAL);
+
+	if (unlikely((insn & 3) != 3)) {
+		if (insn == 0)
+			insn = get_insn(regs->mepc, NULL);
+		if ((insn & 3) != 3)
+			return truly_illegal_insn(insn, hartid, mcause, regs,
+						  scratch);
+	}
+
+	return illegal_insn_table[(insn & 0x7c) >> 2](insn, hartid, mcause,
+						      regs, scratch);
+}
diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c
new file mode 100644
index 0000000..4f47a6c
--- /dev/null
+++ b/lib/sbi/sbi_init.c
@@ -0,0 +1,178 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_version.h>
+
+#define BANNER \
+ " ____ _____ ____ _____\n" \
+ " / __ \\ / ____| _ \\_ _|\n" \
+ " | | | |_ __ ___ _ __ | (___ | |_) || |\n" \
+ " | | | | '_ \\ / _ \\ '_ \\ \\___ \\| _ < | |\n" \
+ " | |__| | |_) | __/ | | |____) | |_) || |_\n" \
+ " \\____/| .__/ \\___|_| |_|_____/|____/_____|\n" \
+ " | |\n" \
+ " |_|\n\n"
+
+/**
+ * Print the OpenSBI banner plus platform, firmware and SBI-version
+ * details, followed by the active PMP configuration.
+ */
+static void sbi_boot_prints(struct sbi_scratch *scratch, u32 hartid)
+{
+	char str[64];
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	misa_string(str, sizeof(str));
+	sbi_printf("\nOpenSBI v%d.%d (%s %s)\n", OPENSBI_VERSION_MAJOR,
+		   OPENSBI_VERSION_MINOR, __DATE__, __TIME__);
+
+	sbi_printf(BANNER);
+
+	/* Platform details */
+	sbi_printf("Platform Name          : %s\n", sbi_platform_name(plat));
+	sbi_printf("Platform HART Features : RV%d%s\n", misa_xlen(), str);
+	sbi_printf("Platform Max HARTs     : %d\n",
+		   sbi_platform_hart_count(plat));
+	sbi_printf("Current Hart           : %u\n", hartid);
+	/* Firmware details */
+	sbi_printf("Firmware Base          : 0x%lx\n", scratch->fw_start);
+	sbi_printf("Firmware Size          : %d KB\n",
+		   (u32)(scratch->fw_size / 1024));
+	/* Generic details */
+	sbi_printf("Runtime SBI Version    : %d.%d\n",
+		   sbi_ecall_version_major(), sbi_ecall_version_minor());
+	sbi_printf("\n");
+
+	sbi_hart_pmp_dump(scratch);
+}
+
+/**
+ * Boot path for the single cold-boot hart: initialize every subsystem
+ * (system, hart, console, irqchip, IPI, timer), print the banner,
+ * wake the waiting warm-boot harts, and jump to the next stage.
+ * Any failure hangs the hart.
+ */
+static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
+{
+	int rc;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	rc = sbi_system_early_init(scratch, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_hart_init(scratch, hartid, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_console_init(scratch);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_platform_irqchip_init(plat, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_ipi_init(scratch, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_timer_init(scratch, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_system_final_init(scratch, TRUE);
+	if (rc)
+		sbi_hart_hang();
+
+	if (!(scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS))
+		sbi_boot_prints(scratch, hartid);
+
+	if (!sbi_platform_has_hart_hotplug(plat))
+		sbi_hart_wake_coldboot_harts(scratch, hartid);
+	sbi_hart_mark_available(hartid);
+	sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
+			     scratch->next_mode);
+}
+
+/**
+ * Boot path for every non-cold-boot hart: wait for the cold-boot hart
+ * to finish (unless the platform supports hotplug), run the per-hart
+ * (warm) subsystem initialization, then jump to the next stage.
+ * Any failure hangs the hart.
+ */
+static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
+{
+	int rc;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	if (!sbi_platform_has_hart_hotplug(plat))
+		sbi_hart_wait_for_coldboot(scratch, hartid);
+
+	if (sbi_platform_hart_disabled(plat, hartid))
+		sbi_hart_hang();
+
+	rc = sbi_system_early_init(scratch, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_hart_init(scratch, hartid, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_platform_irqchip_init(plat, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_ipi_init(scratch, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_timer_init(scratch, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	rc = sbi_system_final_init(scratch, FALSE);
+	if (rc)
+		sbi_hart_hang();
+
+	sbi_hart_mark_available(hartid);
+
+	if (sbi_platform_has_hart_hotplug(plat))
+		/* TODO: To be implemented in-future. */
+		sbi_hart_hang();
+	else
+		sbi_hart_switch_mode(hartid, scratch->next_arg1,
+				     scratch->next_addr, scratch->next_mode);
+}
+
+/* First hart to increment this counter wins cold boot */
+static atomic_t coldboot_lottery = ATOMIC_INITIALIZER(0);
+
+/**
+ * Initialize OpenSBI library for current HART and jump to next
+ * booting stage.
+ *
+ * The function expects following:
+ * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
+ * 2. Stack pointer (SP) is setup for current HART
+ * 3. Interrupts are disabled in MSTATUS CSR
+ * 4. All interrupts are disabled in MIE CSR
+ *
+ * @param scratch pointer to sbi_scratch of current HART
+ */
+void __noreturn sbi_init(struct sbi_scratch *scratch)
+{
+	bool coldboot = FALSE;
+	u32 hartid = sbi_current_hartid();
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	if (sbi_platform_hart_disabled(plat, hartid))
+		sbi_hart_hang();
+
+	/* Exactly one hart sees the counter transition to 1 */
+	if (atomic_add_return(&coldboot_lottery, 1) == 1)
+		coldboot = TRUE;
+
+	if (coldboot)
+		init_coldboot(scratch, hartid);
+	else
+		init_warmboot(scratch, hartid);
+}
diff --git a/lib/sbi/sbi_ipi.c b/lib/sbi/sbi_ipi.c
new file mode 100644
index 0000000..48d5b22
--- /dev/null
+++ b/lib/sbi/sbi_ipi.c
@@ -0,0 +1,153 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_tlb.h>
+#include <plat/string.h>
+
+/* Scratch-space offset of per-hart IPI state, allocated at cold boot */
+static unsigned long ipi_data_off;
+
+/**
+ * Post one IPI event to a single hart: set the event bit in the
+ * remote hart's scratch area, then trigger (and, for synchronous
+ * events, sync) the platform IPI.
+ *
+ * @return 0 on success, negative value on failure
+ */
+static int sbi_ipi_send(struct sbi_scratch *scratch, u32 hartid, u32 event,
+			void *data)
+{
+	int ret;
+	struct sbi_scratch *remote_scratch = NULL;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+	struct sbi_ipi_data *ipi_data;
+
+	if (sbi_platform_hart_disabled(plat, hartid))
+		return -1;
+
+	/*
+	 * Set IPI type on remote hart's scratch area and
+	 * trigger the interrupt
+	 */
+	remote_scratch = sbi_hart_id_to_scratch(scratch, hartid);
+	ipi_data = sbi_scratch_offset_ptr(remote_scratch, ipi_data_off);
+	if (event == SBI_IPI_EVENT_SFENCE_VMA ||
+	    event == SBI_IPI_EVENT_SFENCE_VMA_ASID) {
+		/* > 0 means the request was merged into a pending one */
+		ret = sbi_tlb_fifo_update(remote_scratch, event, data);
+		if (ret > 0)
+			goto done;
+		else if (ret < 0)
+			return ret;
+	}
+	atomic_raw_set_bit(event, &ipi_data->ipi_type);
+	mb();
+	sbi_platform_ipi_send(plat, hartid);
+	if (event != SBI_IPI_EVENT_SOFT)
+		sbi_platform_ipi_sync(plat, hartid);
+
+done:
+	return 0;
+}
+
+/**
+ * Send an IPI event to every available hart selected by the optional
+ * S-mode hart mask (*pmask). The current hart, if selected, is
+ * signalled last.
+ *
+ * @return 0 on success, SBI_ETRAP if reading *pmask trapped
+ *         (details recorded in *uptrap)
+ */
+int sbi_ipi_send_many(struct sbi_scratch *scratch, struct unpriv_trap *uptrap,
+		      ulong *pmask, u32 event, void *data)
+{
+	ulong i, m;
+	ulong mask = sbi_hart_available_mask();
+	u32 hartid = sbi_current_hartid();
+
+	if (pmask) {
+		/* Unprivileged load: may fault on a bad S-mode pointer */
+		mask &= load_ulong(pmask, scratch, uptrap);
+		if (uptrap->cause)
+			return SBI_ETRAP;
+	}
+
+	/* send IPIs to every other hart on the set */
+	for (i = 0, m = mask; m; i++, m >>= 1)
+		if ((m & 1UL) && (i != hartid))
+			sbi_ipi_send(scratch, i, event, data);
+
+	/* If the current hart is on the set, send an IPI
+	 * to it as well
+	 */
+	if (mask & (1UL << hartid))
+		sbi_ipi_send(scratch, hartid, event, data);
+
+	return 0;
+}
+
+/** Clear the pending S-mode software interrupt for the caller. */
+void sbi_ipi_clear_smode(struct sbi_scratch *scratch)
+{
+	csr_clear(CSR_MIP, MIP_SSIP);
+}
+
+/**
+ * Service all IPI events pending for the current hart.
+ *
+ * Clears the platform IPI, then drains ipi_type one event bit at a
+ * time: SOFT raises SSIP for S-mode, FENCE_I flushes the i-cache,
+ * SFENCE_VMA[_ASID] drains the TLB request FIFO, HALT parks the hart.
+ */
+void sbi_ipi_process(struct sbi_scratch *scratch)
+{
+	volatile unsigned long ipi_type;
+	unsigned int ipi_event;
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+	struct sbi_ipi_data *ipi_data =
+		sbi_scratch_offset_ptr(scratch, ipi_data_off);
+
+	u32 hartid = sbi_current_hartid();
+	sbi_platform_ipi_clear(plat, hartid);
+
+	do {
+		ipi_type = ipi_data->ipi_type;
+		rmb();
+		ipi_event = __ffs(ipi_type);
+		switch (ipi_event) {
+		case SBI_IPI_EVENT_SOFT:
+			csr_set(CSR_MIP, MIP_SSIP);
+			break;
+		case SBI_IPI_EVENT_FENCE_I:
+			/*
+			 * Fix: spell the asm qualifier __volatile__ (the
+			 * form used elsewhere in this library) instead of
+			 * the non-standard __volatile spelling.
+			 */
+			__asm__ __volatile__("fence.i");
+			break;
+		case SBI_IPI_EVENT_SFENCE_VMA:
+		case SBI_IPI_EVENT_SFENCE_VMA_ASID:
+			sbi_tlb_fifo_process(scratch, ipi_event);
+			break;
+		case SBI_IPI_EVENT_HALT:
+			sbi_hart_hang();
+			break;
+		};
+		ipi_type = atomic_raw_clear_bit(ipi_event, &ipi_data->ipi_type);
+	} while (ipi_type > 0);
+}
+
+/**
+ * Initialize the IPI subsystem for the current hart: allocate (cold
+ * boot) or look up (warm boot) the per-hart IPI state, set up the TLB
+ * request FIFO, enable MSIE, and initialize the platform IPI device.
+ *
+ * @return 0 on success, negative SBI error code on failure
+ */
+int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+	int ret;
+	struct sbi_ipi_data *ipi_data;
+
+	if (cold_boot) {
+		ipi_data_off = sbi_scratch_alloc_offset(sizeof(*ipi_data),
+							"IPI_DATA");
+		if (!ipi_data_off)
+			return SBI_ENOMEM;
+	} else {
+		if (!ipi_data_off)
+			return SBI_ENOMEM;
+	}
+
+	ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
+	ipi_data->ipi_type = 0x00;
+
+	ret = sbi_tlb_fifo_init(scratch, cold_boot);
+	if (ret)
+		return ret;
+
+	/* Enable software interrupts */
+	csr_set(CSR_MIE, MIP_MSIP);
+
+	return sbi_platform_ipi_init(sbi_platform_ptr(scratch), cold_boot);
+}
diff --git a/lib/sbi/sbi_misaligned_ldst.c b/lib/sbi/sbi_misaligned_ldst.c
new file mode 100644
index 0000000..7f911c3
--- /dev/null
+++ b/lib/sbi/sbi_misaligned_ldst.c
@@ -0,0 +1,191 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_unpriv.h>
+#include <sbi/riscv_fp.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_misaligned_ldst.h>
+#include <sbi/sbi_trap.h>
+
+/* Staging buffer for byte-wise emulated loads/stores; lets one value
+ * be accessed as raw bytes, a native word, or a 64-bit quantity. */
+union reg_data {
+	u8 data_bytes[8];
+	ulong data_ulong;
+	u64 data_u64;
+};
+
+/**
+ * Emulate a misaligned load that trapped into M-mode.
+ *
+ * Decodes the faulting instruction (standard and compressed integer
+ * and FP load encodings), reads the operand from the fault address in
+ * mtval one byte at a time via unprivileged loads, sign/zero-extends
+ * as the encoding requires, writes the destination register, and
+ * steps mepc past the instruction. A byte access that itself traps
+ * is redirected to the lower mode.
+ *
+ * @return 0 on success (including the redirect case),
+ *         SBI_EILL for an unrecognized instruction
+ */
+int sbi_misaligned_load_handler(u32 hartid, ulong mcause,
+				struct sbi_trap_regs *regs,
+				struct sbi_scratch *scratch)
+{
+	union reg_data val;
+	struct unpriv_trap uptrap;
+	ulong insn = get_insn(regs->mepc, NULL);
+	ulong addr = csr_read(CSR_MTVAL);
+	/* shift encodes sign-extension: value << shift >> shift */
+	int i, fp = 0, shift = 0, len = 0;
+
+	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+#if __riscv_xlen == 64
+	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
+		len = 4;
+#endif
+	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
+		fp = 1;
+		len = 8;
+	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
+		fp = 1;
+		len = 4;
+	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
+		len = 2;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
+		len = 2;
+#ifdef __riscv_compressed
+#if __riscv_xlen >= 64
+	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+		/* Rewrite rd into the 32-bit encoding slot for SET_RD */
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+#endif
+	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
+		fp = 1;
+		len = 8;
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
+		fp = 1;
+		len = 8;
+#if __riscv_xlen == 32
+	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
+		fp = 1;
+		len = 4;
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
+		fp = 1;
+		len = 4;
+#endif
+#endif
+	} else
+		return SBI_EILL;
+
+	val.data_u64 = 0;
+	for (i = 0; i < len; i++) {
+		val.data_bytes[i] = load_u8((void *)(addr + i),
+					    scratch, &uptrap);
+		if (uptrap.cause) {
+			sbi_trap_redirect(regs, scratch, regs->mepc,
+					  uptrap.cause, uptrap.tval);
+			return 0;
+		}
+	}
+
+	if (!fp)
+		SET_RD(insn, regs, val.data_ulong << shift >> shift);
+	else if (len == 8)
+		SET_F64_RD(insn, regs, val.data_u64);
+	else
+		SET_F32_RD(insn, regs, val.data_ulong);
+
+	regs->mepc += INSN_LEN(insn);
+
+	return 0;
+}
+
+/**
+ * Emulate a misaligned store that trapped into M-mode.
+ *
+ * Decodes the faulting instruction (standard and compressed integer
+ * and FP store encodings), fetches the source register value, writes
+ * it to the fault address in mtval one byte at a time via
+ * unprivileged stores, and steps mepc past the instruction. A byte
+ * access that itself traps is redirected to the lower mode.
+ *
+ * @return 0 on success (including the redirect case),
+ *         SBI_EILL for an unrecognized instruction
+ */
+int sbi_misaligned_store_handler(u32 hartid, ulong mcause,
+				 struct sbi_trap_regs *regs,
+				 struct sbi_scratch *scratch)
+{
+	union reg_data val;
+	struct unpriv_trap uptrap;
+	ulong insn = get_insn(regs->mepc, NULL);
+	ulong addr = csr_read(CSR_MTVAL);
+	int i, len = 0;
+
+	/* Default source; FP/compressed cases overwrite it below */
+	val.data_ulong = GET_RS2(insn, regs);
+
+	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
+		len = 4;
+#if __riscv_xlen == 64
+	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
+		len = 8;
+#endif
+	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+		len = 8;
+		val.data_u64 = GET_F64_RS2(insn, regs);
+	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+		len = 4;
+		val.data_ulong = GET_F32_RS2(insn, regs);
+	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
+		len = 2;
+#ifdef __riscv_compressed
+#if __riscv_xlen >= 64
+	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+		len = 8;
+		val.data_ulong = GET_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 8;
+		val.data_ulong = GET_RS2C(insn, regs);
+#endif
+	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+		len = 4;
+		val.data_ulong = GET_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 4;
+		val.data_ulong = GET_RS2C(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+		len = 8;
+		val.data_u64 = GET_F64_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+		len = 8;
+		val.data_u64 = GET_F64_RS2C(insn, regs);
+#if __riscv_xlen == 32
+	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+		len = 4;
+		val.data_ulong = GET_F32_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+		len = 4;
+		val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
+#endif
+	} else
+		return SBI_EILL;
+
+	for (i = 0; i < len; i++) {
+		store_u8((void *)(addr + i), val.data_bytes[i],
+			 scratch, &uptrap);
+		if (uptrap.cause) {
+			sbi_trap_redirect(regs, scratch, regs->mepc,
+					  uptrap.cause, uptrap.tval);
+			return 0;
+		}
+	}
+
+	regs->mepc += INSN_LEN(insn);
+
+	return 0;
+}
diff --git a/lib/sbi/sbi_scratch.c b/lib/sbi/sbi_scratch.c
new file mode 100644
index 0000000..0a615a2
--- /dev/null
+++ b/lib/sbi/sbi_scratch.c
@@ -0,0 +1,59 @@
+ /*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_scratch.h>
+
+/* Serializes bump-pointer allocations from the scratch extra space */
+static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
+/* Next free offset inside the per-HART scratch extra space */
+static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
+
+/**
+ * Allocate space from the extra area of every HART's sbi_scratch
+ *
+ * @param size number of bytes wanted (rounded up to pointer size)
+ * @param owner label for the allocation (currently unused by the allocator)
+ *
+ * @return scratch offset of the allocation on success, 0 on failure
+ *	   (size of 0, or the extra space is exhausted)
+ */
+unsigned long sbi_scratch_alloc_offset(unsigned long size, const char *owner)
+{
+	unsigned long ret = 0;
+
+	/*
+	 * We have a simple brain-dead allocator which never expects
+	 * anything to be free-ed hence it keeps incrementing the
+	 * next allocation offset until it runs-out of space.
+	 *
+	 * In future, we will have more sophisticated allocator which
+	 * will allow us to re-claim free-ed space.
+	 */
+
+	if (!size)
+		return 0;
+
+	/* Round size up to a multiple of the pointer size for alignment */
+	while (size & (__SIZEOF_POINTER__ - 1))
+		size++;
+
+	spin_lock(&extra_lock);
+
+	/* Fail (ret stays 0) when the request does not fit */
+	if (SBI_SCRATCH_SIZE < (extra_offset + size))
+		goto done;
+
+	ret = extra_offset;
+	extra_offset += size;
+
+done:
+	spin_unlock(&extra_lock);
+
+	return ret;
+}
+
+/**
+ * Free space previously returned by sbi_scratch_alloc_offset()
+ *
+ * @param offset scratch offset to free; offsets outside the valid extra
+ *		 space range are silently ignored
+ *
+ * Currently a no-op: the brain-dead allocator never reclaims space.
+ */
+void sbi_scratch_free_offset(unsigned long offset)
+{
+	/* Reject offsets that could not have come from the allocator */
+	if ((offset < SBI_SCRATCH_EXTRA_SPACE_OFFSET) ||
+	    (SBI_SCRATCH_SIZE <= offset))
+		return;
+
+	/*
+	 * We don't actually free-up because it's a simple
+	 * brain-dead allocator.
+	 */
+}
diff --git a/lib/sbi/sbi_system.c b/lib/sbi/sbi_system.c
new file mode 100644
index 0000000..2cb30d4
--- /dev/null
+++ b/lib/sbi/sbi_system.c
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_ipi.h>
+
+/* Early system init: delegate straight to the platform hook */
+int sbi_system_early_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+	return sbi_platform_early_init(sbi_platform_ptr(scratch), cold_boot);
+}
+
+/* Final system init: delegate straight to the platform hook */
+int sbi_system_final_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+	return sbi_platform_final_init(sbi_platform_ptr(scratch), cold_boot);
+}
+
+/*
+ * Reboot the system via the platform hook; if the hook returns
+ * (not implemented or failed), hang the current HART.
+ */
+void __attribute__((noreturn))
+sbi_system_reboot(struct sbi_scratch *scratch, u32 type)
+
+{
+	sbi_platform_system_reboot(sbi_platform_ptr(scratch), type);
+	sbi_hart_hang();
+}
+
+/* Shut the system down; when the platform cannot, halt every HART */
+void __attribute__((noreturn))
+sbi_system_shutdown(struct sbi_scratch *scratch, u32 type)
+{
+	/* First try the platform-specific method */
+	sbi_platform_system_shutdown(sbi_platform_ptr(scratch), type);
+
+	/* If that fails (or is not implemented) send an IPI on every
+	 * hart to hang and then hang the current hart */
+	sbi_ipi_send_many(scratch, NULL, NULL, SBI_IPI_EVENT_HALT, NULL);
+
+	sbi_hart_hang();
+}
diff --git a/lib/sbi/sbi_timer.c b/lib/sbi/sbi_timer.c
new file mode 100644
index 0000000..c58441d
--- /dev/null
+++ b/lib/sbi/sbi_timer.c
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_timer.h>
+
+#if __riscv_xlen == 32
+/*
+ * Read the 64-bit time counter on RV32. The high half is read before
+ * and after the low half; if they differ, a carry occurred in between
+ * and the sequence is retried, so no torn value can be returned.
+ */
+u64 get_ticks(void)
+{
+	u32 lo, hi, tmp;
+	__asm__ __volatile__("1:\n"
+			     "rdtimeh %0\n"
+			     "rdtime %1\n"
+			     "rdtimeh %2\n"
+			     "bne %0, %2, 1b"
+			     : "=&r"(hi), "=&r"(lo), "=&r"(tmp));
+	return ((u64)hi << 32) | lo;
+}
+#else
+/* RV64 (and wider): a single rdtime reads the whole counter */
+u64 get_ticks(void)
+{
+	unsigned long n;
+
+	__asm__ __volatile__("rdtime %0" : "=r"(n));
+	return n;
+}
+#endif
+
+/*
+ * Current timer value: prefer the platform-provided counter when one
+ * exists, otherwise fall back to the rdtime CSR.
+ */
+u64 sbi_timer_value(struct sbi_scratch *scratch)
+{
+	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+	if (sbi_platform_has_timer_value(plat))
+		return sbi_platform_timer_value(plat);
+	else
+		return get_ticks();
+}
+
+/* Cancel any pending timer event via the platform hook */
+void sbi_timer_event_stop(struct sbi_scratch *scratch)
+{
+	sbi_platform_timer_event_stop(sbi_platform_ptr(scratch));
+}
+
+/* Program the next timer event and re-arm the M-mode timer interrupt */
+void sbi_timer_event_start(struct sbi_scratch *scratch, u64 next_event)
+{
+	sbi_platform_timer_event_start(sbi_platform_ptr(scratch), next_event);
+	/* Clear a stale S-mode timer pending bit, unmask the M-mode timer */
+	csr_clear(CSR_MIP, MIP_STIP);
+	csr_set(CSR_MIE, MIP_MTIP);
+}
+
+/*
+ * M-mode timer interrupt handler: mask the M-mode timer and forward
+ * the event to S-mode by setting the supervisor timer pending bit.
+ */
+void sbi_timer_process(struct sbi_scratch *scratch)
+{
+	csr_clear(CSR_MIE, MIP_MTIP);
+	csr_set(CSR_MIP, MIP_STIP);
+}
+
+/* Timer subsystem init: delegate straight to the platform hook */
+int sbi_timer_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+	return sbi_platform_timer_init(sbi_platform_ptr(scratch), cold_boot);
+}
diff --git a/lib/sbi/sbi_tlb.c b/lib/sbi/sbi_tlb.c
new file mode 100644
index 0000000..814d402
--- /dev/null
+++ b/lib/sbi/sbi_tlb.c
@@ -0,0 +1,227 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_fifo.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_tlb.h>
+#include <plat/string.h>
+
+/* Scratch offsets (allocated once by the cold-boot HART) of the per-HART
+ * TLB request fifo structure and its entry storage */
+static unsigned long ipi_tlb_fifo_off;
+static unsigned long ipi_tlb_fifo_mem_off;
+
+/*
+ * Compare a queued flush request (curr) against a new one (next).
+ *
+ * @return SBI_FIFO_UPDATED when curr was widened to cover next,
+ *	   SBI_FIFO_SKIP when next lies entirely inside curr, and
+ *	   SBI_FIFO_UNCHANGED otherwise (including NULL arguments)
+ */
+static inline int __sbi_tlb_fifo_range_check(struct sbi_tlb_info *curr,
+					     struct sbi_tlb_info *next)
+{
+	unsigned long curr_end;
+	unsigned long next_end;
+	int ret = SBI_FIFO_UNCHANGED;
+
+	if (!curr || !next)
+		return ret;
+
+	next_end = next->start + next->size;
+	curr_end = curr->start + curr->size;
+	if (next->start <= curr->start && next_end > curr_end) {
+		/* next is a superset: grow the queued entry in place */
+		curr->start = next->start;
+		curr->size = next->size;
+		ret = SBI_FIFO_UPDATED;
+	} else if (next->start >= curr->start && next_end <= curr_end) {
+		/* next is a subset: the queued entry already covers it */
+		ret = SBI_FIFO_SKIP;
+	}
+
+	return ret;
+}
+
+/**
+ * Callback to decide if an inplace fifo update is required or the next
+ * entry can be skipped. Here are the different cases that are being
+ * handled:
+ *
+ * Case1:
+ *	if next flush request range lies within one of the existing entry,
+ *	skip the next entry.
+ * Case2:
+ *	if flush request range in current fifo entry lies within next flush
+ *	request, update the current entry.
+ * Case3:
+ *	if a complete vma flush is requested, then all entries can be deleted
+ *	and new request can be enqueued. This will not be done for ASID case
+ *	as that means we have to iterate again in the fifo to figure out
+ *	which entries belong to that ASID.
+ */
+static int sbi_tlb_fifo_update_cb(void *in, void *data)
+{
+	struct sbi_tlb_info *curr;
+	struct sbi_tlb_info *next;
+	int ret = SBI_FIFO_UNCHANGED;
+
+	/*
+	 * Guard both pointers. The previous check (!in && !!data) only
+	 * bailed out when in was NULL while data was non-NULL, so a NULL
+	 * data (or both NULL) was dereferenced below.
+	 */
+	if (!in || !data)
+		return ret;
+
+	curr = (struct sbi_tlb_info *)data;
+	next = (struct sbi_tlb_info *)in;
+	if (next->type == SBI_TLB_FLUSH_VMA_ASID &&
+	    curr->type == SBI_TLB_FLUSH_VMA_ASID) {
+		/* Ranges are only mergeable within the same address space */
+		if (next->asid == curr->asid)
+			ret = __sbi_tlb_fifo_range_check(curr, next);
+	} else if (next->type == SBI_TLB_FLUSH_VMA &&
+		   curr->type == SBI_TLB_FLUSH_VMA) {
+		if (next->size == SBI_TLB_FLUSH_ALL)
+			ret = SBI_FIFO_RESET;
+		else
+			ret = __sbi_tlb_fifo_range_check(curr, next);
+	}
+
+	return ret;
+}
+
+/*
+ * Enqueue (or merge) a TLB flush request into this HART's fifo.
+ *
+ * @return 1 when the request was absorbed by an existing fifo entry,
+ *	   0 when a new entry was enqueued
+ */
+int sbi_tlb_fifo_update(struct sbi_scratch *scratch, u32 event, void *data)
+{
+	int ret;
+	struct sbi_fifo *ipi_tlb_fifo;
+	struct sbi_tlb_info *tinfo = data;
+
+	ipi_tlb_fifo = sbi_scratch_offset_ptr(scratch,
+					      ipi_tlb_fifo_off);
+	/*
+	 * If address range to flush is too big then simply
+	 * upgrade it to flush all because we can only flush
+	 * 4KB at a time.
+	 */
+	if (tinfo->size >= SBI_TLB_FLUSH_MAX_SIZE) {
+		tinfo->start = 0;
+		tinfo->size = SBI_TLB_FLUSH_ALL;
+	}
+
+	ret = sbi_fifo_inplace_update(ipi_tlb_fifo, data,
+				      sbi_tlb_fifo_update_cb);
+	if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED) {
+		return 1;
+	}
+
+	while (sbi_fifo_enqueue(ipi_tlb_fifo, data) < 0) {
+		/**
+		 * For now, busy loop until there is space in the fifo.
+		 * There may be a case where the target hart is also
+		 * enqueued in the source hart's fifo. Both harts may
+		 * busy loop leading to a deadlock.
+		 * TODO: Introduce a wait/wakeup event mechanism to
+		 * handle this properly.
+		 */
+		__asm__ __volatile("nop");
+		__asm__ __volatile("nop");
+	}
+
+	return 0;
+}
+
+/* Flush the entire local TLB (all addresses, all address spaces) */
+static void sbi_tlb_flush_all(void)
+{
+	__asm__ __volatile("sfence.vma");
+}
+
+/* Execute a dequeued VMA flush request on the local HART */
+static void sbi_tlb_fifo_sfence_vma(struct sbi_tlb_info *tinfo)
+{
+	unsigned long start = tinfo->start;
+	unsigned long size = tinfo->size;
+	unsigned long i;
+
+	/* Empty request or explicit flush-all marker: flush everything */
+	if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
+		sbi_tlb_flush_all();
+		return;
+	}
+
+	/* sfence.vma flushes a single page mapping per instruction */
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		__asm__ __volatile__("sfence.vma %0"
+				     :
+				     : "r"(start + i)
+				     : "memory");
+	}
+}
+
+/* Execute a dequeued ASID-qualified VMA flush request on the local HART */
+static void sbi_tlb_fifo_sfence_vma_asid(struct sbi_tlb_info *tinfo)
+{
+	unsigned long start = tinfo->start;
+	unsigned long size = tinfo->size;
+	unsigned long asid = tinfo->asid;
+	unsigned long i;
+
+	/* Empty request: flush everything regardless of ASID */
+	if (start == 0 && size == 0) {
+		sbi_tlb_flush_all();
+		return;
+	}
+
+	/* Flush entire MM context for a given ASID */
+	if (size == SBI_TLB_FLUSH_ALL) {
+		__asm__ __volatile__("sfence.vma x0, %0"
+				     :
+				     : "r"(asid)
+				     : "memory");
+		return;
+	}
+
+	/* Flush one page at a time, restricted to the given ASID */
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		__asm__ __volatile__("sfence.vma %0, %1"
+				     :
+				     : "r"(start + i), "r"(asid)
+				     : "memory");
+	}
+}
+
+/*
+ * IPI handler: drain this HART's TLB request fifo and perform each
+ * flush locally.
+ */
+void sbi_tlb_fifo_process(struct sbi_scratch *scratch, u32 event)
+{
+	struct sbi_tlb_info tinfo;
+	struct sbi_fifo *ipi_tlb_fifo =
+		sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);
+
+	while (!sbi_fifo_dequeue(ipi_tlb_fifo, &tinfo)) {
+		if (tinfo.type == SBI_TLB_FLUSH_VMA)
+			sbi_tlb_fifo_sfence_vma(&tinfo);
+		else if (tinfo.type == SBI_TLB_FLUSH_VMA_ASID)
+			sbi_tlb_fifo_sfence_vma_asid(&tinfo);
+		/* Scrub the local copy before reusing it for the next entry */
+		memset(&tinfo, 0, SBI_TLB_INFO_SIZE);
+	}
+}
+
+/*
+ * Allocate (cold boot) or look up (warm boot) the per-HART TLB fifo
+ * in scratch space and initialize it.
+ *
+ * @return 0 on success, SBI_ENOMEM when scratch space is exhausted or
+ *	   the cold-boot HART never allocated the offsets
+ */
+int sbi_tlb_fifo_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+	void *ipi_tlb_mem;
+	struct sbi_fifo *ipi_tlb_q;
+
+	if (cold_boot) {
+		/* Offsets are global: allocated once, used by every HART */
+		ipi_tlb_fifo_off = sbi_scratch_alloc_offset(sizeof(*ipi_tlb_q),
+							    "IPI_TLB_FIFO");
+		if (!ipi_tlb_fifo_off)
+			return SBI_ENOMEM;
+		ipi_tlb_fifo_mem_off = sbi_scratch_alloc_offset(
+				SBI_TLB_FIFO_NUM_ENTRIES * SBI_TLB_INFO_SIZE,
+				"IPI_TLB_FIFO_MEM");
+		if (!ipi_tlb_fifo_mem_off) {
+			sbi_scratch_free_offset(ipi_tlb_fifo_off);
+			return SBI_ENOMEM;
+		}
+	} else {
+		/* Warm boot relies on the cold-boot HART's allocations */
+		if (!ipi_tlb_fifo_off ||
+		    !ipi_tlb_fifo_mem_off)
+			return SBI_ENOMEM;
+	}
+
+	ipi_tlb_q = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);
+	ipi_tlb_mem = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_mem_off);
+
+	sbi_fifo_init(ipi_tlb_q, ipi_tlb_mem,
+		      SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);
+
+	return 0;
+}
diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c
new file mode 100644
index 0000000..82f7b65
--- /dev/null
+++ b/lib/sbi/sbi_trap.c
@@ -0,0 +1,207 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_illegal_insn.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_misaligned_ldst.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_trap.h>
+
+/* Print a fatal trap report (cause, PC, full GPR dump) and hang the HART */
+static void __noreturn sbi_trap_error(const char *msg, int rc, u32 hartid,
+				      ulong mcause, ulong mtval,
+				      struct sbi_trap_regs *regs)
+{
+	sbi_printf("%s: hart%d: %s (error %d)\n", __func__, hartid, msg, rc);
+	sbi_printf("%s: hart%d: mcause=0x%" PRILX " mtval=0x%" PRILX "\n",
+		   __func__, hartid, mcause, mtval);
+	sbi_printf("%s: hart%d: mepc=0x%" PRILX " mstatus=0x%" PRILX "\n",
+		   __func__, hartid, regs->mepc, regs->mstatus);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "ra", regs->ra, "sp", regs->sp);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "gp", regs->gp, "tp", regs->tp);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s0", regs->s0, "s1", regs->s1);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "a0", regs->a0, "a1", regs->a1);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "a2", regs->a2, "a3", regs->a3);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "a4", regs->a4, "a5", regs->a5);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "a6", regs->a6, "a7", regs->a7);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s2", regs->s2, "s3", regs->s3);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s4", regs->s4, "s5", regs->s5);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s6", regs->s6, "s7", regs->s7);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s8", regs->s8, "s9", regs->s9);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "s10", regs->s10, "s11", regs->s11);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "t0", regs->t0, "t1", regs->t1);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "t2", regs->t2, "t3", regs->t3);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+		   hartid, "t4", regs->t4, "t5", regs->t5);
+	sbi_printf("%s: hart%d: %s=0x%" PRILX "\n", __func__, hartid, "t6",
+		   regs->t6);
+
+	sbi_hart_hang();
+}
+
+/**
+ * Redirect trap to lower privilege mode (S-mode or U-mode)
+ *
+ * @param regs pointer to register state
+ * @param scratch pointer to sbi_scratch of current HART
+ * @param epc error PC for lower privilege mode
+ * @param cause exception cause for lower privilege mode
+ * @param tval trap value for lower privilege mode
+ *
+ * @return 0 on success and negative error code on failure
+ */
+int sbi_trap_redirect(struct sbi_trap_regs *regs, struct sbi_scratch *scratch,
+		      ulong epc, ulong cause, ulong tval)
+{
+	ulong new_mstatus, prev_mode;
+
+	/* Sanity check on previous mode */
+	prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
+	if (prev_mode != PRV_S && prev_mode != PRV_U)
+		return SBI_ENOTSUPP;
+
+	/* Update S-mode exception info */
+	csr_write(CSR_STVAL, tval);
+	csr_write(CSR_SEPC, epc);
+	csr_write(CSR_SCAUSE, cause);
+
+	/* Set MEPC to S-mode exception vector base */
+	regs->mepc = csr_read(CSR_STVEC);
+
+	/* Initial value of new MSTATUS */
+	new_mstatus = regs->mstatus;
+
+	/* Clear MPP, SPP, SPIE, and SIE */
+	new_mstatus &=
+		~(MSTATUS_MPP | MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE);
+
+	/* Set SPP so S-mode sees which mode the trap came from */
+	if (prev_mode == PRV_S)
+		new_mstatus |= (1UL << MSTATUS_SPP_SHIFT);
+
+	/* Set SPIE to preserve the interrupt-enable state across the trap */
+	if (regs->mstatus & MSTATUS_SIE)
+		new_mstatus |= (1UL << MSTATUS_SPIE_SHIFT);
+
+	/* Set MPP so the eventual mret lands in S-mode */
+	new_mstatus |= (PRV_S << MSTATUS_MPP_SHIFT);
+
+	/* Set new value in MSTATUS */
+	regs->mstatus = new_mstatus;
+
+	return 0;
+}
+
+/**
+ * Handle trap/interrupt
+ *
+ * This function is called by firmware linked to OpenSBI
+ * library for handling trap/interrupt. It expects the
+ * following:
+ * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
+ * 2. The 'mcause' CSR is having exception/interrupt cause
+ * 3. The 'mtval' CSR is having additional trap information
+ * 4. Stack pointer (SP) is setup for current HART
+ * 5. Interrupts are disabled in MSTATUS CSR
+ *
+ * @param regs pointer to register state
+ * @param scratch pointer to sbi_scratch of current HART
+ */
+void sbi_trap_handler(struct sbi_trap_regs *regs, struct sbi_scratch *scratch)
+{
+	int rc = SBI_ENOTSUPP;
+	const char *msg = "trap handler failed";
+	u32 hartid = sbi_current_hartid();
+	ulong mcause = csr_read(CSR_MCAUSE);
+	ulong mtval = csr_read(CSR_MTVAL);
+	struct unpriv_trap *uptrap;
+
+	if (mcause & (1UL << (__riscv_xlen - 1))) {
+		/* Interrupt: clear the MSB so mcause holds the IRQ number */
+		mcause &= ~(1UL << (__riscv_xlen - 1));
+		switch (mcause) {
+		case IRQ_M_TIMER:
+			sbi_timer_process(scratch);
+			break;
+		case IRQ_M_SOFT:
+			sbi_ipi_process(scratch);
+			break;
+		default:
+			msg = "unhandled external interrupt";
+			goto trap_error;
+		};
+		return;
+	}
+
+	switch (mcause) {
+	case CAUSE_ILLEGAL_INSTRUCTION:
+		rc  = sbi_illegal_insn_handler(hartid, mcause, regs, scratch);
+		msg = "illegal instruction handler failed";
+		break;
+	case CAUSE_MISALIGNED_LOAD:
+		rc = sbi_misaligned_load_handler(hartid, mcause, regs, scratch);
+		msg = "misaligned load handler failed";
+		break;
+	case CAUSE_MISALIGNED_STORE:
+		rc  = sbi_misaligned_store_handler(hartid, mcause, regs,
+						   scratch);
+		msg = "misaligned store handler failed";
+		break;
+	case CAUSE_SUPERVISOR_ECALL:
+	case CAUSE_HYPERVISOR_ECALL:
+		rc  = sbi_ecall_handler(hartid, mcause, regs, scratch);
+		msg = "ecall handler failed";
+		break;
+	case CAUSE_LOAD_ACCESS:
+	case CAUSE_STORE_ACCESS:
+	case CAUSE_LOAD_PAGE_FAULT:
+	case CAUSE_STORE_PAGE_FAULT:
+		/*
+		 * A fault under MPRV comes from an emulated unprivileged
+		 * access; record it for the emulation code and skip the
+		 * faulting instruction instead of redirecting.
+		 */
+		uptrap = sbi_hart_get_trap_info(scratch);
+		if ((regs->mstatus & MSTATUS_MPRV) && uptrap) {
+			rc = 0;
+			regs->mepc += uptrap->ilen;
+			uptrap->cause = mcause;
+			uptrap->tval = mtval;
+		} else {
+			rc = sbi_trap_redirect(regs, scratch, regs->mepc,
+					       mcause, mtval);
+		}
+		msg = "page/access fault handler failed";
+		break;
+	default:
+		/* If the trap came from S or U mode, redirect it there */
+		rc = sbi_trap_redirect(regs, scratch, regs->mepc, mcause, mtval);
+		break;
+	};
+
+trap_error:
+	if (rc) {
+		/*
+		 * Report the mtval captured at entry; re-reading CSR_MTVAL
+		 * here could return a value clobbered by nested traps taken
+		 * inside the handlers above.
+		 */
+		sbi_trap_error(msg, rc, hartid, mcause, mtval, regs);
+	}
+}