author    Pavel Begunkov <asml.silence@gmail.com>  2023-01-16 19:48:59 +0300
committer Jens Axboe <axboe@kernel.dk>             2023-01-30 01:17:41 +0300
commit    31f084b7b0288fd51740b1e1efdb0ff61fb81e48 (patch)
tree      3cb8e1ef95f98eb8c0b70fecefd1716f8320e5d2 /io_uring
parent    89800a2dd570919bfe01ced90c80e3b472d1c723 (diff)
download  linux-31f084b7b0288fd51740b1e1efdb0ff61fb81e48.tar.xz
io_uring: simplify fallback execution
Lock the ring with uring_lock in io_fallback_req_func(), which should make
it a bit safer and easier. With that we also don't need refs pinning, as
io_ring_exit_work() will wait until uring_lock is freed.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/56170e6a0cbfc8edee2794c6613e8f6f1d76d276.1673887636.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
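[Editor's illustration, not part of the commit: a minimal user-space sketch of the
synchronization pattern the message describes, using a pthread mutex in place of
ctx->uring_lock. fallback_work() and exit_work() are hypothetical stand-ins for
io_fallback_req_func() and io_ring_exit_work(); the point is that once the exit
path acquires the lock, no fallback run can be mid-flight, so the fallback path
no longer needs to pin extra references.]

/*
 * Illustrative analogue only (hypothetical names; not kernel code).
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_reqs = 3;	/* stand-in for ctx->fallback_llist */

static void *fallback_work(void *arg)
{
	(void)arg;
	/* Do the whole fallback run under the lock, as the patch does. */
	pthread_mutex_lock(&uring_lock);
	while (pending_reqs > 0) {
		printf("fallback: completing req %d under the lock\n",
		       pending_reqs);
		pending_reqs--;
	}
	printf("fallback: flush completions, then unlock\n");
	pthread_mutex_unlock(&uring_lock);
	return NULL;
}

static void *exit_work(void *arg)
{
	(void)arg;
	/*
	 * Acquiring the lock serializes with any in-flight fallback run:
	 * the exit path can never observe a half-finished batch. (The real
	 * io_ring_exit_work() additionally loops until all refs drop.)
	 */
	pthread_mutex_lock(&uring_lock);
	printf("exit: lock held, no fallback run in flight (%d pending)\n",
	       pending_reqs);
	pthread_mutex_unlock(&uring_lock);
	return NULL;
}

int main(void)
{
	pthread_t fb, ex;

	pthread_create(&fb, NULL, fallback_work, NULL);
	pthread_create(&ex, NULL, exit_work, NULL);
	pthread_join(fb, NULL);
	pthread_join(ex, NULL);
	return 0;
}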
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f49d0036657f..c314dc111d5d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -245,17 +245,15 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 						fallback_work.work);
 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
 	struct io_kiocb *req, *tmp;
-	bool locked = false;
+	bool locked = true;
 
-	percpu_ref_get(&ctx->refs);
+	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &locked);
-
-	if (locked) {
-		io_submit_flush_completions(ctx);
-		mutex_unlock(&ctx->uring_lock);
-	}
-	percpu_ref_put(&ctx->refs);
+	if (WARN_ON_ONCE(!locked))
+		return;
+	io_submit_flush_completions(ctx);
+	mutex_unlock(&ctx->uring_lock);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)