author    Pavel Begunkov <asml.silence@gmail.com>  2020-12-17 03:24:35 +0300
committer Jens Axboe <axboe@kernel.dk>             2020-12-17 18:40:45 +0300
commit    cda286f0715c82f8117e166afd42cca068876dde (patch)
tree      1b63e33470acada3fa84b45c68a1d5d888652c52 /fs/io_uring.c
parent    4bc4a912534a72f1c96f483448f0be16e5a48063 (diff)
io_uring: cancel reqs shouldn't kill overflow list
io_uring_cancel_task_requests() doesn't imply that the ring is going
away, it may continue to work well after that. The problem is that it
sets ->cq_overflow_flushed, effectively disabling the CQ overflow
feature.

Split setting cq_overflow_flushed from the flush, and do the former
only on ring exit. That's fine in terms of cancellations, because there
is an io_uring->in_idle check in __io_cqring_fill_event().

This also fixes a race on setting ->cq_overflow_flushed in
io_uring_cancel_task_requests(): the write is not atomic, and the field
is part of a bitmask shared with other flags. Though the only other
flag that's not set during init is drain_next, so it's not as bad for
sane architectures.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Fixes: 0f2122045b946 ("io_uring: don't rely on weak ->files references")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
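For context, the in_idle check the message relies on sits in the CQE
posting path. Below is a simplified sketch of that path, loosely based
on the v5.10-era __io_cqring_fill_event(); helper and field names such
as io_get_cqring() and cached_cq_overflow are recalled from that era,
not taken from this patch, so treat it as illustrative only:

/*
 * Simplified sketch (not a verbatim copy of the patched file). It
 * illustrates why task cancellation stays safe without setting
 * ->cq_overflow_flushed: an exiting task (->in_idle) already drops
 * its CQEs instead of stashing them on the overflow list.
 */
static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		/* Fast path: a CQ ring slot is free, post directly. */
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed ||
		   atomic_read(&req->task->io_uring->in_idle)) {
		/*
		 * Ring is being torn down, or the submitting task is
		 * exiting: drop the CQE and only account the loss.
		 */
		ctx->cached_cq_overflow++;
		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
	} else {
		/* Normal overflow: queue the CQE for a later flush. */
		req->result = res;
		req->compl.cflags = cflags;
		refcount_inc(&req->refs);
		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
	}
}

With the patch, only io_ring_ctx_wait_and_kill() flips
cq_overflow_flushed, so plain task cancellation leaves the CQ overflow
feature intact.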
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2d07d35e7262..0d0217281ccf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1733,10 +1733,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	/* if force is set, the ring is going away. always drop after that */
-	if (force)
-		ctx->cq_overflow_flushed = 1;
-
 	cqe = NULL;
 	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
 		if (!io_match_task(req, tsk, files))
@@ -8655,6 +8651,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
+	/* if force is set, the ring is going away. always drop after that */
+	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
 		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	mutex_unlock(&ctx->uring_lock);