author		Jens Axboe <axboe@kernel.dk>	2021-02-10 03:03:20 +0300
committer	Jens Axboe <axboe@kernel.dk>	2021-02-10 17:28:43 +0300
commit		7cbf1722d5fc5779946ee8f338e9e38b5de15856 (patch)
tree		cafc7b4a2cc122789ca7c1b2045d44ec2a36847a /include/linux/io_uring.h
parent		1b4c351f6eb7467c77fc19e0cd7e5f0083ecd847 (diff)
download	linux-7cbf1722d5fc5779946ee8f338e9e38b5de15856.tar.xz
io_uring: provide FIFO ordering for task_work
task_work is a LIFO list, due to how it's implemented as a lockless list. For long chains of task_work, this can be problematic as the first entry added is the last one processed. Similarly, we'd waste a lot of CPU cycles reversing this list.

Wrap the task_work so we have a single task_work entry per task per ctx, and use that to run it in the right order.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
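Background illustration (not part of the commit): a minimal, standalone C sketch of the problem the message describes, i.e. why a head-pushed list runs entries newest-first unless the consumer pays for a reversal. The cb_node/push/run_all_fifo names are invented for the sketch and are not kernel APIs.

#include <stddef.h>

/*
 * Illustrative sketch only (not kernel code): a head-push list is LIFO,
 * so recovering submission (FIFO) order means an O(n) reversal before
 * the entries can be run.
 */
struct cb_node {
	struct cb_node *next;
	void (*func)(struct cb_node *);
};

static struct cb_node *list_head;

static void push(struct cb_node *node)
{
	/* the real task_work push is a lockless cmpxchg loop; a plain
	 * assignment keeps this sketch single-threaded and self-contained */
	node->next = list_head;
	list_head = node;
}

static void run_all_fifo(void)
{
	struct cb_node *node = list_head, *prev = NULL, *next;

	list_head = NULL;
	while (node) {			/* reverse the chain... */
		next = node->next;
		node->next = prev;
		prev = node;
		node = next;
	}
	for (node = prev; node; node = node->next)
		node->func(node);	/* ...then run oldest-first */
}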
Diffstat (limited to 'include/linux/io_uring.h')
-rw-r--r--	include/linux/io_uring.h	14
1 file changed, 14 insertions, 0 deletions
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b2d845704d..2eb6d19de336 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -22,6 +22,15 @@ struct io_identity {
 	refcount_t		count;
 };
 
+struct io_wq_work_node {
+	struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+	struct io_wq_work_node *first;
+	struct io_wq_work_node *last;
+};
+
 struct io_uring_task {
 	/* submission side */
 	struct xarray		xa;
@@ -32,6 +41,11 @@ struct io_uring_task {
 	struct io_identity	*identity;
 	atomic_t		in_idle;
 	bool			sqpoll;
+
+	spinlock_t		task_lock;
+	struct io_wq_work_list	task_list;
+	unsigned long		task_state;
+	struct callback_head	task_work;
 };
 
 #if defined(CONFIG_IO_URING)
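For context, a hedged sketch of how the fields added above can support FIFO ordering: appends go to task_list.last under task_lock, while the single tctx->task_work callback drains from task_list.first. The helper name below is hypothetical; the actual queueing and run-side code added by this commit lives in fs/io_uring.c and is outside this path-limited diff.

#include <linux/io_uring.h>
#include <linux/spinlock.h>

/* Hypothetical helper, for illustration only (kernel build assumed). */
static void tctx_work_append_sketch(struct io_uring_task *tctx,
				    struct io_wq_work_node *node)
{
	spin_lock(&tctx->task_lock);
	node->next = NULL;
	if (tctx->task_list.last)
		tctx->task_list.last->next = node;	/* append at the tail */
	else
		tctx->task_list.first = node;		/* list was empty */
	tctx->task_list.last = node;
	spin_unlock(&tctx->task_lock);

	/*
	 * Only the single tctx->task_work callback_head is handed to
	 * task_work_add(); when it runs it walks task_list from ->first,
	 * so entries execute in the order they were appended here.
	 */
}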