Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  11
-rw-r--r--  io_uring/io_uring.h   3
-rw-r--r--  io_uring/net.c         8
-rw-r--r--  io_uring/poll.c        4
-rw-r--r--  io_uring/timeout.c     4
5 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7595658a5073..e57d00939ab9 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -939,15 +939,18 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
- bool allow_overflow)
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
{
struct io_ring_ctx *ctx = req->ctx;
u64 user_data = req->cqe.user_data;
struct io_uring_cqe *cqe;
if (!defer)
- return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+ return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
lockdep_assert_held(&ctx->uring_lock);
@@ -962,7 +965,7 @@ bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
* however its main job is to prevent unbounded posted completions,
* and in that it works just as well.
*/
- if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
return false;
cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
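For orientation, the callers converted below all move to the same pattern. The following sketch is illustrative only and not part of the patch: the function name io_example_post_mshot and the surrounding control flow are made up, modeled on the multishot accept path converted in net.c further down; only io_fill_cqe_req_aux(), IO_URING_F_COMPLETE_DEFER and IORING_CQE_F_MORE come from this diff, and the fragment assumes the in-tree io_uring internal headers.

/*
 * Illustration only (not from the patch): a simplified multishot handler
 * posting extra CQEs via the renamed helper. Runs from task_work with
 * IO_URING_F_MULTISHOT, like the net.c/poll.c/timeout.c callers below.
 */
static int io_example_post_mshot(struct io_kiocb *req, unsigned int issue_flags,
				 s32 res)
{
retry:
	/* ... generate the next result for this multishot request ... */

	/*
	 * Queue an additional CQE, batching it when the issue path allows
	 * deferred completion. The old allow_overflow argument is gone: the
	 * helper now bails out once the CQ is flagged as overflown.
	 */
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				res, IORING_CQE_F_MORE))
		goto retry;	/* CQE posted, keep the request armed */

	/* CQ is backed up: cancel the multishot instead of piling up CQEs */
	return -ECANCELED;
}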
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 3dc0b6fb0ef7..3e6ff3cd9a24 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -44,8 +44,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
- bool allow_overflow);
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
diff --git a/io_uring/net.c b/io_uring/net.c
index 8c419c01a5db..3d07bf79c1e0 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -641,8 +641,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
if (!mshot_finished) {
- if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
- *ret, cflags | IORING_CQE_F_MORE, false)) {
+ if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ *ret, cflags | IORING_CQE_F_MORE)) {
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1366,8 +1366,8 @@ retry:
if (ret < 0)
return ret;
- if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
- IORING_CQE_F_MORE, false))
+ if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ ret, IORING_CQE_F_MORE))
goto retry;
return -ECANCELED;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 65ec363f6377..4c360ba8793a 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- if (!io_aux_cqe(req, ts->locked, mask,
- IORING_CQE_F_MORE, false)) {
+ if (!io_fill_cqe_req_aux(req, ts->locked, mask,
+ IORING_CQE_F_MORE)) {
io_req_set_res(req, mask, 0);
return IOU_POLL_REMOVE_POLL_USE_RES;
}
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 6242130e73c6..7fd7dbb211d6 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
if (!io_timeout_finish(timeout, data)) {
bool filled;
- filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
- false);
+ filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
+ IORING_CQE_F_MORE);
if (filled) {
/* re-arm timer */
spin_lock_irq(&ctx->timeout_lock);