From 31f084b7b0288fd51740b1e1efdb0ff61fb81e48 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 16 Jan 2023 16:48:59 +0000
Subject: io_uring: simplify fallback execution

Lock the ring with uring_lock in io_fallback_req_func(), which should
make it a bit safer and easier. With that we also don't need refs
pinning as io_ring_exit_work() will wait until uring_lock is freed.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/56170e6a0cbfc8edee2794c6613e8f6f1d76d276.1673887636.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 io_uring/io_uring.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f49d0036657f..c314dc111d5d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -245,17 +245,15 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 						fallback_work.work);
 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
 	struct io_kiocb *req, *tmp;
-	bool locked = false;
+	bool locked = true;
 
-	percpu_ref_get(&ctx->refs);
+	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &locked);
-
-	if (locked) {
-		io_submit_flush_completions(ctx);
-		mutex_unlock(&ctx->uring_lock);
-	}
-	percpu_ref_put(&ctx->refs);
+	if (WARN_ON_ONCE(!locked))
+		return;
+	io_submit_flush_completions(ctx);
+	mutex_unlock(&ctx->uring_lock);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
-- 
cgit v1.2.3
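
For reference, this is roughly how io_fallback_req_func() reads once the hunk above
is applied. It is reconstructed from the patch itself; the opening of the function
(the container_of() initialisation of ctx) lies above the hunk and is assumed from
its context lines rather than taken from the patch:

	/* sketch of the function after this patch; the ctx initialisation is assumed */
	static __cold void io_fallback_req_func(struct work_struct *work)
	{
		struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
							fallback_work.work);
		struct llist_node *node = llist_del_all(&ctx->fallback_llist);
		struct io_kiocb *req, *tmp;
		bool locked = true;

		/* hold uring_lock for the whole run instead of pinning ctx->refs */
		mutex_lock(&ctx->uring_lock);
		llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
			req->io_task_work.func(req, &locked);
		/* handlers are not expected to drop the lock; bail out if one did */
		if (WARN_ON_ONCE(!locked))
			return;
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
	}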