author	Jens Axboe <axboe@kernel.dk>	2024-03-19 05:41:58 +0300
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 17:10:25 +0300
commit	d10f19dff56eac5ae44dc270336b18071a8bd51c (patch)
tree	ecc43b35af05671e2821eaeb455184791ab9efbf /io_uring/uring_cmd.h
parent	e2ea5a7069133c01fe3dbda95d77af7f193a1a52 (diff)
download	linux-d10f19dff56eac5ae44dc270336b18071a8bd51c.tar.xz
io_uring/uring_cmd: switch to always allocating async data
Basic conversion ensuring async_data is allocated off the prep path. Adds a basic alloc cache as well, as passthrough IO can be quite high in rate.

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
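For reference, a minimal sketch of how the prep path can draw from such a cache before falling back to a plain allocation. The helper name io_uring_async_get and the ctx->uring_cache field are assumptions for illustration, not necessarily the exact code added by this commit; io_alloc_cache_get() and io_alloc_async_data() are the existing io_uring helpers of this era.

	static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
	{
		struct io_ring_ctx *ctx = req->ctx;	/* assumed cache lives on the ring ctx */
		struct io_cache_entry *entry;
		struct uring_cache *cache;

		/* Fast path: reuse an entry recycled into the per-ring alloc cache */
		entry = io_alloc_cache_get(&ctx->uring_cache);
		if (entry) {
			cache = container_of(entry, struct uring_cache, cache);
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = cache;
			return cache;
		}

		/* Slow path: fresh allocation; io_alloc_async_data() returns false on success */
		if (!io_alloc_async_data(req))
			return req->async_data;
		return NULL;
	}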
Diffstat (limited to 'io_uring/uring_cmd.h')
-rw-r--r--	io_uring/uring_cmd.h | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 7356bf9aa655..b0ccff7091ee 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -1,8 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0
 
+struct uring_cache {
+	union {
+		struct io_cache_entry cache;
+		struct io_uring_sqe sqes[2];
+	};
+};
+
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_uring_cmd_prep_async(struct io_kiocb *req);
+void io_uring_cache_free(struct io_cache_entry *entry);
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-				   struct task_struct *task, bool cancel_all);
\ No newline at end of file
+				   struct task_struct *task, bool cancel_all);
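When the ring is torn down, entries still sitting in the cache need a destructor; io_uring_cache_free(), declared above, is that hook. A sketch of the free side, assuming teardown drains the cache via the existing io_alloc_cache_free() helper (the call site shown is illustrative):

	void io_uring_cache_free(struct io_cache_entry *entry)
	{
		/* The entry is embedded in the uring_cache union; free the container */
		kfree(container_of(entry, struct uring_cache, cache));
	}

	/* e.g. from ring teardown, assuming ctx->uring_cache holds the entries: */
	io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);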