author     Pavel Begunkov <asml.silence@gmail.com>  2021-03-15 17:23:08 +0300
committer  Jens Axboe <axboe@kernel.dk>             2021-03-15 18:32:40 +0300
commit     b7f5a0bfe2061b2c7b2164de06fa4072d7373a45
tree       2ad3c494d8f86a89c2d3ec30db5852df025c1cd8 /fs/io_uring.c
parent     9b46571142e47503ed4f3ae3be5ed3968d8cb9cc
download   linux-b7f5a0bfe2061b2c7b2164de06fa4072d7373a45.tar.xz
io_uring: fix sqpoll cancellation via task_work
Running sqpoll cancellations via task_work_run() is a bad idea because it
depends on other task works being run, but those may be stuck inside the
currently running task_work_run() because of how it operates (it splices
the list and runs it in batches).

Enqueue and run them through a separate callback head, namely
struct io_sq_data::park_task_work. As a nice bonus we now precisely
control where it's run; that's much safer than guessing where it can
happen, as it was before.

Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
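For readers tracing the mechanism: the fix leans on two small helpers added
by the parent commit (9b46571142e4), io_task_work_add_head() and
io_run_task_work_head(). A rough sketch of how such a private lock-free
callback list works, reconstructed from the 5.12-era fs/io_uring.c (the
exact in-tree code may differ):

/* push a callback onto a private lock-free singly-linked list */
static void io_task_work_add_head(struct callback_head **work_head,
				  struct callback_head *task_work)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(*work_head);
		task_work->next = head;
	} while (cmpxchg(work_head, head, task_work) != head);
}

/* atomically detach the whole list and run every callback; the outer
 * loop catches work that was queued while the current batch was running */
static void io_run_task_work_head(struct callback_head **work_head)
{
	struct callback_head *work, *next;

	do {
		work = xchg(work_head, NULL);
		if (!work)
			break;
		do {
			next = work->next;
			work->func(work);
			work = next;
		} while (work);
	} while (1);
}

Unlike the generic per-task task_work list, this head is drained only at the
three well-defined points in io_sq_thread() that the diff below adds, so a
queued cancellation can never be stranded behind an in-flight batch.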
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--   fs/io_uring.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5c2de43b99f5..5f4e312111ea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -274,6 +274,7 @@ struct io_sq_data {
 	unsigned long		state;
 	struct completion	exited;
+	struct callback_head	*park_task_work;
 };
 
 #define IO_IOPOLL_BATCH			8
@@ -6727,6 +6728,7 @@ static int io_sq_thread(void *data)
 			cond_resched();
 			mutex_lock(&sqd->lock);
 			io_run_task_work();
+			io_run_task_work_head(&sqd->park_task_work);
 			timeout = jiffies + sqd->sq_thread_idle;
 			continue;
 		}
@@ -6781,6 +6783,7 @@ static int io_sq_thread(void *data)
 		}
 
 		finish_wait(&sqd->wait, &wait);
+		io_run_task_work_head(&sqd->park_task_work);
 		timeout = jiffies + sqd->sq_thread_idle;
 	}
@@ -6792,6 +6795,7 @@ static int io_sq_thread(void *data)
 	mutex_unlock(&sqd->lock);
 
 	io_run_task_work();
+	io_run_task_work_head(&sqd->park_task_work);
 	complete(&sqd->exited);
 	do_exit(0);
 }
@@ -8890,7 +8894,7 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
 	if (task) {
 		init_completion(&work.completion);
 		init_task_work(&work.task_work, io_sqpoll_cancel_cb);
-		WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL));
+		io_task_work_add_head(&sqd->park_task_work, &work.task_work);
 		wake_up_process(task);
 	}
 	io_sq_thread_unpark(sqd);
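For completeness, the callback queued above looks roughly like this in the
same era of fs/io_uring.c (a reconstruction for illustration, not part of
this patch's diff):

/* runs on the sqpoll thread when it drains sqd->park_task_work */
static void io_sqpoll_cancel_cb(struct callback_head *cb)
{
	struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit,
						 task_work);
	struct io_ring_ctx *ctx = work->ctx;
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd->thread)
		io_uring_cancel_sqpoll(ctx);
	/* wake the caller blocked in io_sqpoll_cancel_sync() */
	complete(&work->completion);
}

After queueing the work and waking the thread, io_sqpoll_cancel_sync()
unparks the sqpoll thread and blocks in
wait_for_completion(&work.completion) until the callback has run, which is
why the work must sit on a head the sqpoll thread is guaranteed to drain.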