author     Gabriel Krisman Bertazi <krisman@suse.de>  2024-04-16 05:10:53 +0300
committer  Jens Axboe <axboe@kernel.dk>               2024-04-17 17:20:32 +0300
commit     068c27e32e51e94e4a9eb30ae85f4097a3602980 (patch)
tree       2a780cd67afbf9a64419aff1dbce510685622348 /io_uring
parent     c4ce0ab27646f4206a9eb502d6fe45cb080e1cae (diff)
download   linux-068c27e32e51e94e4a9eb30ae85f4097a3602980.tar.xz
io-wq: write next_work before dropping acct_lock
Commit 361aee450c6e ("io-wq: add intermediate work step between pending
list and active work") closed a race between a cancellation and the work
being removed from the wq for execution. To ensure the request is always
reachable by the cancellation, we need to move it within the wq lock,
which also synchronizes the cancellation. But commit 42abc95f05bf
("io-wq: decouple work_list protection from the big wqe->lock") replaced
the wq lock here and accidentally reintroduced the race by releasing the
acct_lock too early.

In other words:

        worker                          | cancellation
        work = io_get_next_work()       |
        raw_spin_unlock(&acct->lock);   |
                                        |
                                        | io_acct_cancel_pending_work
                                        | io_wq_worker_cancel()
        worker->next_work = work        |

Using acct_lock is still enough since we synchronize on it in
io_acct_cancel_pending_work.

Fixes: 42abc95f05bf ("io-wq: decouple work_list protection from the big wqe->lock")
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20240416021054.3940-2-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
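To make the ordering concrete, below is a minimal userspace sketch of the rule
the patch restores: the worker must publish the dequeued request as its "next
work" while still holding the same lock the canceller takes, otherwise a
cancellation running between the unlock and the store can miss the request
entirely. This is illustrative pthreads code, not io-wq code; all names
(work_item, worker_ctx, queue_lock, worker_lock) are invented for the sketch.

        /*
         * Sketch only: queue_lock stands in for acct->lock, worker_lock for
         * worker->lock. Not kernel code.
         */
        #include <pthread.h>
        #include <stddef.h>

        struct work_item { struct work_item *next; int cancelled; };

        struct worker_ctx {
                pthread_mutex_t queue_lock;   /* stands in for acct->lock   */
                pthread_mutex_t worker_lock;  /* stands in for worker->lock */
                struct work_item *pending;    /* pending work list head     */
                struct work_item *next_work;  /* dequeued, not yet active   */
        };

        /* Worker side: dequeue and publish before dropping queue_lock. */
        static struct work_item *worker_get_next(struct worker_ctx *ctx)
        {
                struct work_item *work;

                pthread_mutex_lock(&ctx->queue_lock);
                work = ctx->pending;
                if (work) {
                        ctx->pending = work->next;
                        /* Publish while queue_lock is still held. */
                        pthread_mutex_lock(&ctx->worker_lock);
                        ctx->next_work = work;
                        pthread_mutex_unlock(&ctx->worker_lock);
                }
                pthread_mutex_unlock(&ctx->queue_lock);
                return work;
        }

        /*
         * Canceller side: synchronizes on the same queue_lock, so it always
         * finds the request either on the pending list or in next_work.
         */
        static int cancel_work(struct worker_ctx *ctx, struct work_item *target)
        {
                int found = 0;

                pthread_mutex_lock(&ctx->queue_lock);
                for (struct work_item *w = ctx->pending; w; w = w->next) {
                        if (w == target) {
                                w->cancelled = 1;
                                found = 1;
                                break;
                        }
                }
                if (!found) {
                        pthread_mutex_lock(&ctx->worker_lock);
                        if (ctx->next_work == target) {
                                target->cancelled = 1;
                                found = 1;
                        }
                        pthread_mutex_unlock(&ctx->worker_lock);
                }
                pthread_mutex_unlock(&ctx->queue_lock);
                return found;
        }

With the buggy ordering (unlock queue_lock before setting next_work), the
canceller could scan the pending list, find nothing, check next_work, find
nothing, and return without cancelling a request the worker was about to run.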
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io-wq.c  13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 522196dfb0ff..318ed067dbf6 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -564,10 +564,7 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
* clear the stalled flag.
*/
work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
if (work) {
- __io_worker_busy(wq, worker);
-
/*
* Make sure cancelation can find this, even before
* it becomes the active work. That avoids a window
@@ -578,9 +575,15 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
raw_spin_lock(&worker->lock);
worker->next_work = work;
raw_spin_unlock(&worker->lock);
- } else {
- break;
}
+
+ raw_spin_unlock(&acct->lock);
+
+ if (!work)
+ break;
+
+ __io_worker_busy(wq, worker);
+
io_assign_current_work(worker, work);
__set_current_state(TASK_RUNNING);
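Read together, the two hunks leave the dequeue path in io_worker_handle_work()
looking roughly as follows. This is a reconstruction from the hunks above;
code outside the hunks is elided and may differ slightly from the actual tree.

        work = io_get_next_work(acct, worker);
        if (work) {
                /*
                 * Publish as worker->next_work while acct->lock is still
                 * held, so a concurrent io_acct_cancel_pending_work() can
                 * always find the request.
                 */
                raw_spin_lock(&worker->lock);
                worker->next_work = work;
                raw_spin_unlock(&worker->lock);
        }

        raw_spin_unlock(&acct->lock);

        if (!work)
                break;

        __io_worker_busy(wq, worker);

        io_assign_current_work(worker, work);
        __set_current_state(TASK_RUNNING);

The only behavioral change is moving the acct_lock release past the
worker->next_work store; marking the worker busy and assigning the current
work still happen after the lock is dropped, as before.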