path: root/fs/io-wq.c
author		Jens Axboe <axboe@kernel.dk>	2019-10-29 06:49:21 +0300
committer	Jens Axboe <axboe@kernel.dk>	2019-11-01 17:35:31 +0300
commit		62755e35dfb2b113c52b81cd96d01c20971c8e02 (patch)
tree		d8b0c898a5e1cf7ef8233af49773b18560198b83 /fs/io-wq.c
parent		975c99a570967dd48e917dd7853867fee3febabd (diff)
download	linux-62755e35dfb2b113c52b81cd96d01c20971c8e02.tar.xz
io_uring: support for generic async request cancel
This adds support for IORING_OP_ASYNC_CANCEL, which will attempt to cancel requests that have been punted to async context and are now in-flight. This works for regular read/write requests to files, as long as they haven't been started yet. For socket based IO (or things like accept4(2)), we can cancel work that is already running as well.

To cancel a request, the sqe must have ->addr set to the user_data of the request it wishes to cancel. If the request is cancelled successfully, the original request is completed with -ECANCELED and the cancel request is completed with a result of 0. If the request was already running, the original may or may not complete in error. The cancel request will complete with -EALREADY for that case.

And finally, if the request to cancel wasn't found, the cancel request is completed with -ENOENT.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
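For reference, a minimal userspace sketch of issuing such a cancel. It assumes liburing for ring setup and submission; the READ_TAG value and error handling are purely illustrative. The only requirement stated by this patch is that the cancel sqe's ->addr holds the user_data of the request to cancel.

/*
 * Illustrative userspace sketch (not part of this patch): cancel a
 * previously submitted request by its user_data. Assumes liburing;
 * READ_TAG is an arbitrary example tag used when the original request
 * was submitted.
 */
#include <liburing.h>
#include <string.h>
#include <errno.h>

#define READ_TAG	0x1234

static int cancel_read(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = READ_TAG;		/* user_data of the request to cancel */
	sqe->user_data = READ_TAG + 1;	/* tag for the cancel request itself */

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	/*
	 * Completion of the cancel request: res == 0 means the target was
	 * cancelled (it then completes with -ECANCELED), -EALREADY means it
	 * was already running, -ENOENT means it was not found.
	 */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}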
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--	fs/io-wq.c	85
1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 253c04a40db5..652b8bac2dbc 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -639,6 +639,91 @@ void io_wq_cancel_all(struct io_wq *wq)
 	rcu_read_unlock();
 }
+struct io_cb_cancel_data {
+	struct io_wqe *wqe;
+	work_cancel_fn *cancel;
+	void *caller_data;
+};
+
+static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
+{
+	struct io_cb_cancel_data *data = cancel_data;
+	struct io_wqe *wqe = data->wqe;
+	bool ret = false;
+
+	/*
+	 * Hold the lock to avoid ->cur_work going out of scope, caller
+	 * may dereference the passed in work.
+	 */
+	spin_lock_irq(&wqe->lock);
+	if (worker->cur_work &&
+	    data->cancel(worker->cur_work, data->caller_data)) {
+		send_sig(SIGINT, worker->task, 1);
+		ret = true;
+	}
+	spin_unlock_irq(&wqe->lock);
+
+	return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
+					       work_cancel_fn *cancel,
+					       void *cancel_data)
+{
+	struct io_cb_cancel_data data = {
+		.wqe = wqe,
+		.cancel = cancel,
+		.caller_data = cancel_data,
+	};
+	struct io_wq_work *work;
+	bool found = false;
+
+	spin_lock_irq(&wqe->lock);
+	list_for_each_entry(work, &wqe->work_list, list) {
+		if (cancel(work, cancel_data)) {
+			list_del(&work->list);
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irq(&wqe->lock);
+
+	if (found) {
+		work->flags |= IO_WQ_WORK_CANCEL;
+		work->func(&work);
+		return IO_WQ_CANCEL_OK;
+	}
+
+	rcu_read_lock();
+	found = io_wq_for_each_worker(wqe, &wqe->free_list, io_work_cancel,
+					&data);
+	if (found)
+		goto done;
+
+	found = io_wq_for_each_worker(wqe, &wqe->busy_list, io_work_cancel,
+					&data);
+done:
+	rcu_read_unlock();
+	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+				  void *data)
+{
+	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+	int i;
+
+	for (i = 0; i < wq->nr_wqes; i++) {
+		struct io_wqe *wqe = wq->wqes[i];
+
+		ret = io_wqe_cancel_cb_work(wqe, cancel, data);
+		if (ret != IO_WQ_CANCEL_NOTFOUND)
+			break;
+	}
+
+	return ret;
+}
+
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_wq_work *work = data;
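The io_uring side of this series (in fs/io_uring.c, not covered by this diffstat) supplies the work_cancel_fn that io_wq_cancel_cb() applies. A rough sketch of what such a match callback and its caller can look like; struct io_kiocb and its fields are used here as assumptions for illustration, and the return-value mapping follows the completion semantics described in the commit message above.

/*
 * Illustrative sketch (not part of this fs/io-wq.c diff): a work_cancel_fn
 * that matches a punted request against the user_data given in the cancel
 * sqe, plus a caller that maps io-wq's answer to a cqe result.
 */
static bool io_cancel_cb_match(struct io_wq_work *work, void *data)
{
	/* assumed request layout: io_kiocb embeds 'work' and carries user_data */
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}

static int io_async_cancel_example(struct io_wq *wq, __u64 sqe_addr)
{
	enum io_wq_cancel cancel_ret;

	cancel_ret = io_wq_cancel_cb(wq, io_cancel_cb_match,
					(void *) (unsigned long) sqe_addr);

	/* map io-wq's result onto the cancel request's completion value */
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		return 0;
	case IO_WQ_CANCEL_RUNNING:
		return -EALREADY;
	case IO_WQ_CANCEL_NOTFOUND:
		return -ENOENT;
	}
	return -ENOENT;
}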