author     Pavel Begunkov <asml.silence@gmail.com>  2022-10-06 23:42:33 +0300
committer  Jens Axboe <axboe@kernel.dk>             2022-10-13 01:30:56 +0300
commit     44f87745d5f24a3cdf0548bf1d84fbb7316ce229 (patch)
tree       c4de76d2b6b2ab7c696c8cc31cc87b2c088e197d /io_uring
parent     fc86f9d3bb4904117eea70347d323fde34a47c79 (diff)
download   linux-44f87745d5f24a3cdf0548bf1d84fbb7316ce229.tar.xz
io_uring: optimise locking for local tw with submit_wait
Running local task_work requires taking uring_lock; for submit + wait we can try to run it right after submit, while we still hold the lock, and save one lock/unlock pair. The optimisation was implemented in the first local tw patches but got dropped for simplicity.

Suggested-by: Dylan Yudaken <dylany@fb.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/281fc79d98b5d91fe4778c5137a17a2ab4693e5c.1665088876.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
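As a simplified illustration of the locking pattern described above (not part of the patch): the old flow paid a second lock/unlock pair on the wait side just to run deferred work, while the new flow runs it before dropping the lock taken for submission. The helpers submit_requests() and run_deferred_work() are invented names, and a pthread mutex stands in for uring_lock.

#include <pthread.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;

static void submit_requests(void)   { /* queue SQEs to the ring */ }
static void run_deferred_work(void) { /* run local (deferred) task_work */ }

/* Old flow: drop the lock after submit, take it again later to run the work. */
static void enter_old(void)
{
	pthread_mutex_lock(&uring_lock);
	submit_requests();
	pthread_mutex_unlock(&uring_lock);

	/* ...later, on the wait side... */
	pthread_mutex_lock(&uring_lock);
	run_deferred_work();
	pthread_mutex_unlock(&uring_lock);
}

/* New flow: run the deferred work while the submit-side lock is still held,
 * saving one lock/unlock pair. */
static void enter_new(void)
{
	pthread_mutex_lock(&uring_lock);
	submit_requests();
	run_deferred_work();
	pthread_mutex_unlock(&uring_lock);
}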
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  12
-rw-r--r--  io_uring/io_uring.h   7
2 files changed, 17 insertions, 2 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 12870cd7cb07..de08d9902b30 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3227,8 +3227,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			mutex_unlock(&ctx->uring_lock);
 			goto out;
 		}
-		if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
-			goto iopoll_locked;
+		if (flags & IORING_ENTER_GETEVENTS) {
+			if (ctx->syscall_iopoll)
+				goto iopoll_locked;
+			/*
+			 * Ignore errors, we'll soon call io_cqring_wait() and
+			 * it should handle ownership problems if any.
+			 */
+			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+				(void)io_run_local_work_locked(ctx);
+		}
 		mutex_unlock(&ctx->uring_lock);
 	}
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 47d4cad1e9c4..ef77d2aa3172 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -275,6 +275,13 @@ static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 	return ret;
 }
 
+static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+{
+	if (llist_empty(&ctx->work_llist))
+		return 0;
+	return __io_run_local_work(ctx, true);
+}
+
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
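Usage note (not part of the patch): the optimised path is taken when a ring created with IORING_SETUP_DEFER_TASKRUN submits and waits in a single io_uring_enter() call. A minimal sketch, assuming liburing and a kernel new enough to support DEFER_TASKRUN:

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret;

	/* IORING_SETUP_DEFER_TASKRUN requires IORING_SETUP_SINGLE_ISSUER */
	ret = io_uring_queue_init(8, &ring,
				  IORING_SETUP_SINGLE_ISSUER |
				  IORING_SETUP_DEFER_TASKRUN);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	if (sqe) {
		io_uring_prep_nop(sqe);
		/* submit + wait in one io_uring_enter() call: the path patched
		 * above runs pending local task_work while uring_lock is held */
		ret = io_uring_submit_and_wait(&ring, 1);
		if (ret >= 0 && io_uring_wait_cqe(&ring, &cqe) == 0)
			io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}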