author		Pavel Begunkov <asml.silence@gmail.com>		2023-12-03 18:37:53 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-12-13 20:39:18 +0300
commit		bfe5a5e2f9e96e17c05c296f70b09088ab2fe991 (patch)
tree		c04efd5ace438e8f45819d7628573ea9d7c80618 /io_uring
parent		8bba38f7a0d479603d239d922f606a54ab84d99b (diff)
download	linux-bfe5a5e2f9e96e17c05c296f70b09088ab2fe991.tar.xz
io_uring: fix mutex_unlock with unreferenced ctx
commit f7b32e785042d2357c5abc23ca6db1b92c91a070 upstream.

Callers of mutex_unlock() have to make sure that the mutex stays alive
for the whole duration of the function call. For io_uring that means
that the following pattern is not valid unless we ensure that the
context outlives the mutex_unlock() call.

mutex_lock(&ctx->uring_lock);
req_put(req); // typically via io_req_task_submit()
mutex_unlock(&ctx->uring_lock);

Most contexts are fine: io-wq pins requests, syscalls hold the file,
task works are taking ctx references and so on. However, the task work
fallback path doesn't follow the rule.

Cc: <stable@vger.kernel.org>
Fixes: 04fc6c802d ("io_uring: save ctx put/get for task_work submit")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/io-uring/CAG48ez3xSoYb+45f1RLtktROJrpiDQ1otNvdR+YLQf7m+Krj5Q@mail.gmail.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
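For illustration only (this sketch is not part of the commit), the rule above can be written out as follows. put_req_under_lock() is a hypothetical helper, and req_put() is the same placeholder the commit message uses for whatever drops the last request reference; the sketch mirrors what the patch below does in io_iopoll_try_reap_events() by pinning ctx->refs around the locked section so the context outlives mutex_unlock().

/* Sketch only, assuming a percpu-ref counted ctx as in io_uring. */
static void put_req_under_lock(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	percpu_ref_get(&ctx->refs);	/* ctx must outlive mutex_unlock() */
	mutex_lock(&ctx->uring_lock);
	req_put(req);			/* may drop the final ctx reference */
	mutex_unlock(&ctx->uring_lock);	/* safe: we still hold a ctx ref */
	percpu_ref_put(&ctx->refs);	/* ctx may be freed after this */
}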
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	9
1 file changed, 3 insertions, 6 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f413ebed81ab..35894955b454 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1377,6 +1377,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
 		return;
 
+	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
 	while (!wq_list_empty(&ctx->iopoll_list)) {
 		/* let it sleep and repeat later if can't complete a request */
@@ -1394,6 +1395,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
 		}
 	}
 	mutex_unlock(&ctx->uring_lock);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
@@ -2800,12 +2802,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 	init_completion(&exit.completion);
 	init_task_work(&exit.task_work, io_tctx_exit_cb);
 	exit.ctx = ctx;
-	/*
-	 * Some may use context even when all refs and requests have been put,
-	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see io_req_task_submit(). Apart from other work,
-	 * this lock/unlock section also waits them to finish.
-	 */
+
 	mutex_lock(&ctx->uring_lock);
 	while (!list_empty(&ctx->tctx_list)) {
 		WARN_ON_ONCE(time_after(jiffies, timeout));