path: root/io_uring
author     Pavel Begunkov <asml.silence@gmail.com>  2022-12-07 06:53:29 +0300
committer  Jens Axboe <axboe@kernel.dk>             2022-12-07 16:47:13 +0300
commit     1b346e4aa8e79227391ffd6b7c6ee5acf0fa8bfc (patch)
tree       9a943ee1ad653902fe10020b01e92300d724c112 /io_uring
parent     a85381d8326d75417ae177bddf44be533d1d21be (diff)
download   linux-1b346e4aa8e79227391ffd6b7c6ee5acf0fa8bfc.tar.xz
io_uring: don't check overflow flush failures
The only way flushing overflowed CQEs can fail is when the CQ ring is completely packed. There is one place that checks for flush failures, io_cqring_wait(), but it caps the number of events to wait for at the CQ size, so a flush failure automatically means we're done waiting. Stop checking for failures: the check rarely fires, but when it does it spuriously fails CQ waiting with -EBUSY.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6b720a45c03345655517f8202cbd0bece2848fb2.1670384893.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
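To make that argument concrete, here is a minimal C sketch of the invariant the patch relies on. The struct and helpers below are simplified stand-ins, not the kernel's real io_ring_ctx or __io_cqring_events():

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for the CQ bookkeeping in struct io_ring_ctx. */
struct ring_ctx {
	unsigned cq_entries;     /* CQ ring capacity */
	unsigned cached_cq_tail; /* next CQE slot to fill */
	unsigned cq_head;        /* next CQE slot userspace reaps */
};

/* Number of posted CQEs, as __io_cqring_events() computes it. */
static unsigned cqring_events(const struct ring_ctx *ctx)
{
	return ctx->cached_cq_tail - ctx->cq_head;
}

/*
 * The flush can only fail when the CQ is fully packed, i.e. when
 * cqring_events() == cq_entries.  Since the wait path caps min_events
 * at cq_entries, a failed flush already implies enough completions.
 */
static bool wait_satisfied(const struct ring_ctx *ctx, unsigned min_events)
{
	assert(min_events <= ctx->cq_entries); /* enforced by the wait path */

	/* A packed CQ (the only flush-failure condition) satisfies any
	 * legal min_events, so -EBUSY from the wait loop was spurious. */
	return cqring_events(ctx) >= min_events;
}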
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  |  24
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ecd78898dbd3..ba5ef1e675c1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -618,13 +618,12 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
}
/* Returns true if there are no backlogged entries after the flush */
-static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
- bool all_flushed;
size_t cqe_size = sizeof(struct io_uring_cqe);
if (__io_cqring_events(ctx) == ctx->cq_entries)
- return false;
+ return;
if (ctx->flags & IORING_SETUP_CQE32)
cqe_size <<= 1;
@@ -643,30 +642,23 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
kfree(ocqe);
}
- all_flushed = list_empty(&ctx->cq_overflow_list);
- if (all_flushed) {
+ if (list_empty(&ctx->cq_overflow_list)) {
clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
}
-
io_cq_unlock_post(ctx);
- return all_flushed;
}
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
- bool ret = true;
-
if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
/* iopoll syncs against uring_lock, not completion_lock */
if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_lock(&ctx->uring_lock);
- ret = __io_cqring_overflow_flush(ctx);
+ __io_cqring_overflow_flush(ctx);
if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_unlock(&ctx->uring_lock);
}
-
- return ret;
}
void __io_put_task(struct task_struct *task, int nr)
@@ -2494,11 +2486,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events);
do {
- /* if we can't even flush overflow, don't wait for more */
- if (!io_cqring_overflow_flush(ctx)) {
- ret = -EBUSY;
- break;
- }
+ io_cqring_overflow_flush(ctx);
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
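For completeness, a hedged sketch of the caller-visible effect; wait_for_cqes() is a hypothetical helper, not part of this patch. On pre-patch kernels the spurious -EBUSY surfaces through io_uring_enter(2), and a caller hitting it can simply reap the CQ ring, since a packed CQ means the requested completions are already posted:

#include <errno.h>
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Raw io_uring_enter(2) wrapper; glibc provides no wrapper for it. */
static int sys_io_uring_enter(int ring_fd, unsigned to_submit,
			      unsigned min_complete, unsigned flags)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit,
		       min_complete, flags, NULL, 0);
}

/* Hypothetical helper: wait for 'want' completions (want <= CQ size). */
static int wait_for_cqes(int ring_fd, unsigned want)
{
	int ret = sys_io_uring_enter(ring_fd, 0, want, IORING_ENTER_GETEVENTS);

	if (ret < 0 && errno == EBUSY) {
		/* Pre-patch kernels: the CQ is packed, so the wanted
		 * completions are already in the ring; treat as success
		 * and reap directly.  Post-patch kernels no longer take
		 * this branch for a plain GETEVENTS wait. */
		return 0;
	}
	return ret;
}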