path: root/include/linux/io_uring_types.h
author    Pavel Begunkov <asml.silence@gmail.com>  2023-08-25 01:53:34 +0300
committer Jens Axboe <axboe@kernel.dk>             2023-08-25 02:16:20 +0300
commit    18df385f42f0b3310ed2e4a3e39264bf5e784692 (patch)
tree      478a3aa845af730f4f4dd9089bf8c48676fb8aba /include/linux/io_uring_types.h
parent    d7f06fea5d6be78403d42c9637f67bc883870094 (diff)
download  linux-18df385f42f0b3310ed2e4a3e39264bf5e784692.tar.xz
io_uring: banish non-hot data to end of io_ring_ctx
Let's move all slow-path, setup/init and similar fields to the end of io_ring_ctx; that makes later ctx reorganisation easier. This includes the page arrays used only on teardown, the CQ overflow list, the old provided buffer caches, and the poll hashes used by io-wq.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/fc471b63925a0bf90a34943c4d36163c523cfb43.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
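To illustrate the layout idea behind the patch, here is a minimal userspace C sketch. It is not the real io_ring_ctx: the example_ctx type, its field names, and the cacheline macro are made up for illustration (the macro stands in for the kernel's ____cacheline_aligned_in_smp). The point is that fields touched on every request are grouped into cacheline-aligned sections at the front, while data used only at setup or teardown, like the IORING_SETUP_NO_MMAP page arrays, is pushed to the tail so it never shares a cache line with the fast path.

	#include <stdio.h>
	#include <stddef.h>

	#define CACHELINE_SIZE	64
	/* Stand-in for the kernel's ____cacheline_aligned_in_smp. */
	#define __cacheline_aligned	__attribute__((aligned(CACHELINE_SIZE)))

	struct example_ctx {
		/* Hot on every submission: keep together, cacheline aligned. */
		struct {
			unsigned int	cached_sq_head;
			unsigned int	sq_entries;
			void		*rings;
		} submit_state __cacheline_aligned;

		/* Hot on every completion. */
		struct {
			unsigned int	cached_cq_tail;
			unsigned int	cq_entries;
		} completion_state __cacheline_aligned;

		/* Cold: only used at ring setup or teardown, kept at the end. */
		unsigned short	n_ring_pages;
		unsigned short	n_sqe_pages;
		void		**ring_pages;
		void		**sqe_pages;
	};

	int main(void)
	{
		/* The cold fields start only after the aligned hot sections. */
		printf("submit_state     at offset %zu\n",
		       offsetof(struct example_ctx, submit_state));
		printf("completion_state at offset %zu\n",
		       offsetof(struct example_ctx, completion_state));
		printf("cold tail        at offset %zu\n",
		       offsetof(struct example_ctx, n_ring_pages));
		return 0;
	}

With this arrangement the hot submit/complete sections each own their cache lines, and the teardown-only page arrays can be moved or grown without disturbing the hot part of the structure, which is what the patch below does for io_ring_ctx.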
Diffstat (limited to 'include/linux/io_uring_types.h')
-rw-r--r--	include/linux/io_uring_types.h	37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index ad87d6074fb2..72e609752323 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -211,20 +211,11 @@ struct io_ring_ctx {
unsigned int drain_disabled: 1;
unsigned int compat: 1;
- enum task_work_notify_mode notify_method;
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
- /*
- * If IORING_SETUP_NO_MMAP is used, then the below holds
- * the gup'ed pages for the two rings, and the sqes.
- */
- unsigned short n_ring_pages;
- unsigned short n_sqe_pages;
- struct page **ring_pages;
- struct page **sqe_pages;
-
- struct io_rings *rings;
- struct task_struct *submitter_task;
- struct percpu_ref refs;
+ enum task_work_notify_mode notify_method;
} ____cacheline_aligned_in_smp;
/* submission data */
@@ -262,10 +253,8 @@ struct io_ring_ctx {
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
- struct list_head io_buffers_cache;
struct io_hash_table cancel_table_locked;
- struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
} ____cacheline_aligned_in_smp;
@@ -298,11 +287,8 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
struct io_wq_work_list iopoll_list;
- struct io_hash_table cancel_table;
struct llist_head work_llist;
-
- struct list_head io_buffers_comp;
} ____cacheline_aligned_in_smp;
/* timeouts */
@@ -318,6 +304,10 @@ struct io_ring_ctx {
struct io_wq_work_list locked_free_list;
unsigned int locked_free_nr;
+ struct list_head io_buffers_comp;
+ struct list_head cq_overflow_list;
+ struct io_hash_table cancel_table;
+
const struct cred *sq_creds; /* cred used for __io_sq_thread() */
struct io_sq_data *sq_data; /* if using sq thread polling */
@@ -332,6 +322,8 @@ struct io_ring_ctx {
struct xarray personalities;
u32 pers_next;
+ struct list_head io_buffers_cache;
+
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
@@ -375,6 +367,15 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+
+ /*
+ * If IORING_SETUP_NO_MMAP is used, then the below holds
+ * the gup'ed pages for the two rings, and the sqes.
+ */
+ unsigned short n_ring_pages;
+ unsigned short n_sqe_pages;
+ struct page **ring_pages;
+ struct page **sqe_pages;
};
struct io_tw_state {