author		Jens Axboe <axboe@kernel.dk>	2022-09-03 19:09:22 +0300
committer	Jens Axboe <axboe@kernel.dk>	2022-09-21 19:30:43 +0300
commit		8ac5d85a89b48269e5aefb92b640d38367670a1b (patch)
tree		c0c504bb905bc4ab2f2474f8231188763d5f09ad /io_uring/io_uring.c
parent		a1119fb0711591c2aaf99be79d87ce8ebeb9d250 (diff)
download	linux-8ac5d85a89b48269e5aefb92b640d38367670a1b.tar.xz
io_uring: add local task_work run helper that is entered locked
We have a few spots that drop the mutex just to run local task_work, which
immediately tries to grab it again.

Add a helper that just passes in whether we're locked already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
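For illustration only (not part of the patch): a minimal sketch of the call pattern this split enables, assuming a made-up caller and have_lock flag; the two entry points match the diff below. A path that already holds ctx->uring_lock can call __io_run_local_work(ctx, true) directly, while unlocked paths keep using io_run_local_work(), which trylocks the mutex, runs the work, and drops the lock again if it took it.

/* Hypothetical caller, for illustration only -- not from this patch. */
static int example_run_local_work(struct io_ring_ctx *ctx, bool have_lock)
{
	int ret;

	if (have_lock) {
		/* Already under ctx->uring_lock: the helper flushes
		 * completions but never touches the mutex itself. */
		ret = __io_run_local_work(ctx, true);
	} else {
		/* Unlocked path: the wrapper trylocks uring_lock, runs
		 * the local task_work, and unlocks if it got the lock. */
		ret = io_run_local_work(ctx);
	}
	return ret;
}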
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c	23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 31ac87ee17b2..a1692dad52db 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1161,9 +1161,8 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 	}
 }
 
-int io_run_local_work(struct io_ring_ctx *ctx)
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
 {
-	bool locked;
 	struct llist_node *node;
 	struct llist_node fake;
 	struct llist_node *current_final = NULL;
@@ -1178,8 +1177,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 		return -EEXIST;
 	}
 
-	locked = mutex_trylock(&ctx->uring_lock);
-
 	node = io_llist_xchg(&ctx->work_llist, &fake);
 	ret = 0;
 again:
@@ -1204,12 +1201,24 @@ again:
 		goto again;
 	}
 
-	if (locked) {
+	if (locked)
 		io_submit_flush_completions(ctx);
-		mutex_unlock(&ctx->uring_lock);
-	}
 	trace_io_uring_local_work_run(ctx, ret, loops);
 	return ret;
+
+}
+
+int io_run_local_work(struct io_ring_ctx *ctx)
+{
+	bool locked;
+	int ret;
+
+	locked = mutex_trylock(&ctx->uring_lock);
+	ret = __io_run_local_work(ctx, locked);
+	if (locked)
+		mutex_unlock(&ctx->uring_lock);
+
+	return ret;
 }
 
 static void io_req_tw_post(struct io_kiocb *req, bool *locked)