author	Dylan Yudaken <dylany@meta.com>	2022-11-24 12:35:55 +0300
committer	Jens Axboe <axboe@kernel.dk>	2022-11-25 16:10:04 +0300
commit	9b8c54755a2b16d4f23c0ea184b75e2edf77d906 (patch)
tree	f5084117ac5bfeca0b7ce2c50b8d276411bc5f6e /io_uring
parent	931147ddfa6e9ffd814272f1c0370c4740acbe17 (diff)
download	linux-9b8c54755a2b16d4f23c0ea184b75e2edf77d906.tar.xz
io_uring: add io_aux_cqe which allows deferred completion
Use the just introduced deferred post cqe completion state when possible
in io_aux_cqe. If not possible, fall back to io_post_aux_cqe.

This introduces a complication because of allow_overflow. For deferred
completions we cannot know without locking the completion_lock if it
will overflow (and even if we locked it, another post could sneak in and
cause this cqe to be in overflow). However, since overflow protection is
mostly a best-effort defence in depth to prevent infinite loops of CQEs
for poll, just checking the overflow bit is going to be good enough and
will result in at most 16 (the array size of deferred cqes) overflows.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20221124093559.3780686-6-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
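The 16-overflow bound follows directly from the size of the deferred
array: the overflow bit is only tested when queueing into
ctx->submit_state.cqes, so a full array's worth of entries can already
be queued, and is later flushed with allow_overflow forced on, before
the check observes the bit. For illustration, a minimal sketch of the
flush helper this patch calls — __io_flush_post_cqes() was added by the
parent commit and its body below is an assumption for illustration, not
part of this diff:

static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
	__must_hold(&ctx->completion_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	unsigned int i;

	/* Post every CQE queued by io_aux_cqe(). allow_overflow is true
	 * here, which is why up to ARRAY_SIZE(state->cqes) == 16 entries
	 * can land in the overflow list before io_aux_cqe() next checks
	 * IO_CHECK_CQ_OVERFLOW_BIT.
	 */
	for (i = 0; i < state->cqes_count; i++) {
		struct io_uring_cqe *cqe = &state->cqes[i];

		io_fill_cqe_aux(ctx, cqe->user_data, cqe->res,
				cqe->flags, true);
	}
	state->cqes_count = 0;
}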
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	34
-rw-r--r--	io_uring/io_uring.h	2
-rw-r--r--	io_uring/net.c	7
-rw-r--r--	io_uring/poll.c	4
4 files changed, 42 insertions, 5 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 28635e3e578a..056aea917cd6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -830,6 +830,40 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
return filled;
}
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow)
+{
+ struct io_uring_cqe *cqe;
+ unsigned int length;
+
+ if (!defer)
+ return io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+
+ length = ARRAY_SIZE(ctx->submit_state.cqes);
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ if (ctx->submit_state.cqes_count == length) {
+ io_cq_lock(ctx);
+ __io_flush_post_cqes(ctx);
+ /* no need to flush - flush is deferred */
+ spin_unlock(&ctx->completion_lock);
+ }
+
+ /* For deferred completions this is not as strict as it is otherwise,
+ * however its main job is to prevent unbounded posted completions,
+ * and in that it works just as well.
+ */
+ if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+ return false;
+
+ cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
+ cqe->user_data = user_data;
+ cqe->res = res;
+ cqe->flags = cflags;
+ return true;
+}
+
static void __io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
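Note that the flush path above takes completion_lock via io_cq_lock()
but releases it with a bare spin_unlock() rather than a posting unlock:
per the in-code comment, the CQ-ring commit and waiter wakeup are left
to the deferred completion flush later in the cycle. For contrast, a
sketch of what the posting unlock path does — the body below is an
assumption based on the io_uring code of this era, not part of this
diff:

void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	/* publish the new CQ tail to userspace... */
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	/* ...and wake anyone waiting on the CQ ring */
	io_cqring_wake(ctx);
}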
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index a26d5aa7f3f3..dd02adf3d0df 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -36,6 +36,8 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
bool allow_overflow);
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
static inline void io_req_complete_post_tw(struct io_kiocb *req, bool *locked)
diff --git a/io_uring/net.c b/io_uring/net.c
index 0de6f78ad978..90342dcb6b1d 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -601,8 +601,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
if (!mshot_finished) {
- if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
- cflags | IORING_CQE_F_MORE, true)) {
+ if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
io_recv_prep_retry(req);
return false;
}
@@ -1320,7 +1320,8 @@ retry:
if (ret < 0)
return ret;
- if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
+ if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
goto retry;
return -ECANCELED;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 4bd43e6f5b72..922c1a366c41 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -252,8 +252,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- if (!io_post_aux_cqe(ctx, req->cqe.user_data,
- mask, IORING_CQE_F_MORE, false)) {
+ if (!io_aux_cqe(ctx, *locked, req->cqe.user_data,
+ mask, IORING_CQE_F_MORE, false)) {
io_req_set_res(req, mask, 0);
return IOU_POLL_REMOVE_POLL_USE_RES;
}
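The two converted call sites follow one convention: pass defer=true only
when the caller already holds ctx->uring_lock (issue_flags &
IO_URING_F_COMPLETE_DEFER in net.c, *locked in poll task work), since
the deferred path asserts uring_lock via lockdep. A hedged sketch of
that calling pattern for a multishot handler — io_handle_mshot_event()
and its parameters are hypothetical, for illustration only:

static int io_handle_mshot_event(struct io_kiocb *req, s32 res,
				 unsigned int issue_flags)
{
	/* defer only if this issue context holds ctx->uring_lock */
	bool defer = issue_flags & IO_URING_F_COMPLETE_DEFER;

	if (io_aux_cqe(req->ctx, defer, req->cqe.user_data, res,
		       IORING_CQE_F_MORE, true))
		return 0;	/* CQE queued or posted; stay armed */

	/* posting failed (e.g. overflow): terminate the multishot */
	io_req_set_res(req, res, 0);
	return -ECANCELED;
}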