author     Atish Patra <atish.patra@wdc.com>    2019-08-15 04:02:14 +0300
committer  Anup Patel <anup@brainfault.org>     2019-08-16 06:12:55 +0300
commit     897a97a6af31174eb2c6058c6ceb9d3ccc6c6e3c (patch)
tree       e667994b5a11efdef3a2b3be8a8bf27a32b018b0 /lib/sbi/sbi_fifo.c
parent     f6e13e0dd30b164eb444bc08c70fa1b8576e0bca (diff)
lib: Fix race conditions in tlb fifo access.
The Linux kernel expects the tlb flush SBI call to be completely synchronous, i.e. the SBI call should only return once the corresponding *fence* instruction has been executed. OpenSBI manages the outstanding TLB flush requests by keeping them in a per-hart fifo. However, a few corner cases may lead to race conditions while updating the fifo.

Currently, the caller hart waits for the IPI acknowledgement via a CLINT address, which is not a good method because synchronization on MMIO may not be supported on every platform. Moreover, the waiter has no way of telling whether an incoming IPI acknowledges its specific tlb flush request or is some other IPI. This may lead to unpredictable behavior in supervisor/user space.

Fix this by waiting on the individual fifo entries rather than on an MMIO address. A relaxed loop is used because wfi would involve another MMIO write, which would be slower than the relaxed loop. To avoid deadlock, the fifo is processed every time a hart loops during a fifo enqueue or a fifo sync, so that the tlb flush requests sent by other harts are consumed.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Signed-off-by: Atish Patra <atish.patra@wdc.com>
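As an illustration of the scheme described above (a minimal sketch with hypothetical names, not the actual OpenSBI code), the requesting hart spins on its own fifo entry's completion flag with a relaxed load, and on every iteration of the loop it drains the tlb flush requests that other harts have queued for it, so that harts waiting on each other cannot deadlock:

#include <stdatomic.h>

/* Hypothetical stand-in for the fifo-processing path: consume tlb
 * flush requests that other harts have enqueued for this hart. */
extern void tlb_process_pending(void);

/* Wait on an individual fifo entry's completion flag instead of an
 * MMIO address. The load is relaxed (no wfi, hence no extra MMIO
 * write), and draining pending requests on each iteration prevents
 * deadlock between harts that are waiting on one another. */
static void tlb_wait_for_completion(atomic_uint *done)
{
	while (atomic_load_explicit(done, memory_order_relaxed) == 0)
		tlb_process_pending();
}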
Diffstat (limited to 'lib/sbi/sbi_fifo.c')
-rw-r--r--  lib/sbi/sbi_fifo.c  |  34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/lib/sbi/sbi_fifo.c b/lib/sbi/sbi_fifo.c
index 18ff0d6..8d1dbf0 100644
--- a/lib/sbi/sbi_fifo.c
+++ b/lib/sbi/sbi_fifo.c
@@ -55,6 +55,21 @@ bool sbi_fifo_is_full(struct sbi_fifo *fifo)
}
/* Note: must be called with fifo->qlock held */
+static inline void __sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
+{
+ u32 head;
+
+ head = (u32)fifo->tail + fifo->avail;
+ if (head >= fifo->num_entries)
+ head = head - fifo->num_entries;
+
+ sbi_memcpy(fifo->queue + head * fifo->entry_size, data, fifo->entry_size);
+
+ fifo->avail++;
+}
+
+
+/* Note: must be called with fifo->qlock held */
static inline bool __sbi_fifo_is_empty(struct sbi_fifo *fifo)
{
return (fifo->avail == 0) ? TRUE : FALSE;
@@ -109,7 +124,9 @@ int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
if (!fifo || !in)
return ret;
+
spin_lock(&fifo->qlock);
+
if (__sbi_fifo_is_empty(fifo)) {
spin_unlock(&fifo->qlock);
return ret;
}
@@ -120,12 +137,10 @@ int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
if (index >= fifo->num_entries)
index = index - fifo->num_entries;
entry = (void *)fifo->queue + (u32)index * fifo->entry_size;
- ret = fptr(in, entry);
+ ret = fptr(in, entry);
+
if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED) {
break;
- } else if (ret == SBI_FIFO_RESET) {
- __sbi_fifo_reset(fifo);
- break;
}
}
spin_unlock(&fifo->qlock);
@@ -135,8 +150,6 @@ int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
{
- u32 head;
-
if (!fifo || !data)
return SBI_EINVAL;

spin_lock(&fifo->qlock);
@@ -146,14 +159,7 @@ int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
if (__sbi_fifo_is_full(fifo)) {
spin_unlock(&fifo->qlock);
return SBI_ENOSPC;
}
-
- head = (u32)fifo->tail + fifo->avail;
- if (head >= fifo->num_entries)
- head = head - fifo->num_entries;
-
- sbi_memcpy(fifo->queue + head * fifo->entry_size, data, fifo->entry_size);
-
- fifo->avail++;
+ __sbi_fifo_enqueue(fifo, data);
spin_unlock(&fifo->qlock);
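For reference, the insertion index computed by the new __sbi_fifo_enqueue helper is the consumer index (tail) plus the count of occupied entries (avail), wrapped around the ring size. A self-contained sketch of that arithmetic follows (a simplified stand-in for illustration, not OpenSBI's struct sbi_fifo):

#include <stdio.h>

/* Same ring-buffer math as the helper above: the next free slot sits
 * avail entries past tail, modulo the number of entries. */
static unsigned int ring_head(unsigned int tail, unsigned int avail,
			      unsigned int num_entries)
{
	unsigned int head = tail + avail;
	if (head >= num_entries)
		head -= num_entries;
	return head;
}

int main(void)
{
	/* tail = 3, avail = 2 in a 4-entry ring: head wraps around to 1 */
	printf("head = %u\n", ring_head(3, 2, 4));
	return 0;
}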