summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorAtish Patra <atish.patra@wdc.com>2019-04-02 03:07:54 +0300
committerAnup Patel <anup@brainfault.org>2019-04-03 07:27:42 +0300
commitf700216cb55fe690b0eec61bb618b11a0630040e (patch)
treef004ccbba3ae179ac4e4c47555d36a5a8d9dc301 /lib
parent1eba298b0d1be5c1d9d7d4c07a8a18e9613bcb44 (diff)
downloadopensbi-f700216cb55fe690b0eec61bb618b11a0630040e.tar.xz
lib: Use a fifo to keep track of sfence related IPIs.
Currently, there is no provision for tracking multiple IPIs sent to a single hart at the same time by different harts. Use a fifo to manage the outstanding requests. While dequeueing, read all the entries once, because we have only 1 bit to track the type of IPI. Once the queue is full, busy wait until there is space available in the queue. This is not the most elegant approach. It should be changed in favor of a wakeup event once available in opensbi. Signed-off-by: Atish Patra <atish.patra@wdc.com>
Diffstat (limited to 'lib')
-rw-r--r--lib/sbi_ecall.c2
-rw-r--r--lib/sbi_fifo.c1
-rw-r--r--lib/sbi_ipi.c46
3 files changed, 34 insertions, 15 deletions
diff --git a/lib/sbi_ecall.c b/lib/sbi_ecall.c
index 94c6e77..164cf92 100644
--- a/lib/sbi_ecall.c
+++ b/lib/sbi_ecall.c
@@ -69,6 +69,7 @@ int sbi_ecall_handler(u32 hartid, ulong mcause,
case SBI_ECALL_REMOTE_SFENCE_VMA:
tlb_info.start = (unsigned long)regs->a1;
tlb_info.size = (unsigned long)regs->a2;
+ tlb_info.type = SBI_TLB_FLUSH_VMA;
ret = sbi_ipi_send_many(scratch, (ulong *)regs->a0,
SBI_IPI_EVENT_SFENCE_VMA, &tlb_info);
@@ -77,6 +78,7 @@ int sbi_ecall_handler(u32 hartid, ulong mcause,
tlb_info.start = (unsigned long)regs->a1;
tlb_info.size = (unsigned long)regs->a2;
tlb_info.asid = (unsigned long)regs->a3;
+ tlb_info.type = SBI_TLB_FLUSH_VMA_ASID;
ret = sbi_ipi_send_many(scratch, (ulong *)regs->a0,
SBI_IPI_EVENT_SFENCE_VMA_ASID, &tlb_info);
diff --git a/lib/sbi_fifo.c b/lib/sbi_fifo.c
index 9d9a5b9..1b283e8 100644
--- a/lib/sbi_fifo.c
+++ b/lib/sbi_fifo.c
@@ -42,6 +42,7 @@ int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
{
if (!fifo || !data)
return -1;
+
spin_lock(&fifo->qlock);
if (sbi_fifo_is_full(fifo)) {
spin_unlock(&fifo->qlock);
diff --git a/lib/sbi_ipi.c b/lib/sbi_ipi.c
index 471b593..4253fb1 100644
--- a/lib/sbi_ipi.c
+++ b/lib/sbi_ipi.c
@@ -12,20 +12,20 @@
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_bitops.h>
-#include <sbi/sbi_console.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_timer.h>
+#include <plat/string.h>
static int sbi_ipi_send(struct sbi_scratch *scratch, u32 hartid,
u32 event, void *data)
{
struct sbi_scratch *remote_scratch = NULL;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
- struct sbi_tlb_info *tlb_info = data;
- struct sbi_tlb_info *ipi_tlb_data;
+ struct sbi_fifo *ipi_tlb_fifo;
if (sbi_platform_hart_disabled(plat, hartid))
return -1;
@@ -36,10 +36,19 @@ static int sbi_ipi_send(struct sbi_scratch *scratch, u32 hartid,
remote_scratch = sbi_hart_id_to_scratch(scratch, hartid);
if (event == SBI_IPI_EVENT_SFENCE_VMA ||
event == SBI_IPI_EVENT_SFENCE_VMA_ASID) {
- ipi_tlb_data = sbi_tlb_info_ptr(remote_scratch);
- ipi_tlb_data->start = tlb_info->start;
- ipi_tlb_data->size = tlb_info->size;
- ipi_tlb_data->asid = tlb_info->asid;
+ ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(remote_scratch);
+ while(sbi_fifo_enqueue(ipi_tlb_fifo, data) < 0) {
+ /**
* For now, busy loop until there is space in the fifo.
* There may be a case where the target hart is also
* enqueueing into the source hart's fifo. Both harts may
* busy loop, leading to a deadlock.
* TODO: Introduce a wait/wakeup event mechanism to handle
* this properly.
*/
+ __asm__ __volatile("nop");
+ __asm__ __volatile("nop");
+ }
}
atomic_raw_set_bit(event, &sbi_ipi_data_ptr(remote_scratch)->ipi_type);
mb();
@@ -132,11 +141,13 @@ static void sbi_ipi_sfence_vma_asid(struct sbi_tlb_info *tinfo)
void sbi_ipi_process(struct sbi_scratch *scratch)
{
- const struct sbi_platform *plat = sbi_platform_ptr(scratch);
volatile unsigned long ipi_type;
+ struct sbi_tlb_info tinfo;
unsigned int ipi_event;
- u32 hartid = sbi_current_hartid();
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_fifo *ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(scratch);
+ u32 hartid = sbi_current_hartid();
sbi_platform_ipi_clear(plat, hartid);
do {
@@ -151,10 +162,14 @@ void sbi_ipi_process(struct sbi_scratch *scratch)
__asm__ __volatile("fence.i");
break;
case SBI_IPI_EVENT_SFENCE_VMA:
- sbi_ipi_sfence_vma(sbi_tlb_info_ptr(scratch));
- break;
case SBI_IPI_EVENT_SFENCE_VMA_ASID:
- sbi_ipi_sfence_vma_asid(sbi_tlb_info_ptr(scratch));
+ while(!sbi_fifo_dequeue(ipi_tlb_fifo, &tinfo)) {
+ if (tinfo.type == SBI_TLB_FLUSH_VMA)
+ sbi_ipi_sfence_vma(&tinfo);
+ else if (tinfo.type == SBI_TLB_FLUSH_VMA_ASID)
+ sbi_ipi_sfence_vma_asid(&tinfo);
+ memset(&tinfo, 0, SBI_TLB_INFO_SIZE);
+ }
break;
case SBI_IPI_EVENT_HALT:
sbi_hart_hang();
@@ -166,10 +181,11 @@ void sbi_ipi_process(struct sbi_scratch *scratch)
int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
{
+ struct sbi_fifo *tlb_info_q = sbi_tlb_fifo_head_ptr(scratch);
+
sbi_ipi_data_ptr(scratch)->ipi_type = 0x00;
- sbi_tlb_info_ptr(scratch)->start = 0x00;
- sbi_tlb_info_ptr(scratch)->size = 0x00;
- sbi_tlb_info_ptr(scratch)->asid = 0x00;
+ tlb_info_q->queue = sbi_tlb_fifo_mem_ptr(scratch);
+ sbi_fifo_init(tlb_info_q, SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);
/* Enable software interrupts */
csr_set(CSR_MIE, MIP_MSIP);