From a87acfde949140946456859eafa5f15175d0f960 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 11 Sep 2021 16:04:50 -0600 Subject: io_uring: dump sqe contents if issue fails I recently had to look at a production problem where a request ended up getting the dreaded -EINVAL error on submit. The most used, and hence least useful, of error codes: it just tells you that something was wrong with your request, but nothing more than that. Let's dump the full sqe contents if we run into an issue failure; that'll allow easier diagnosing of a wide variety of issues. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d4631a55a692..5daee36c1f33 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7117,6 +7117,8 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ret = io_init_req(ctx, req, sqe); if (unlikely(ret)) { fail_req: + trace_io_uring_req_failed(sqe, ret); + /* fail even hard links since we don't submit */ if (link->head) { /* -- cgit v1.2.3 From 4b628aeb69cc49adf56b9c1fde43558143e810f5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:49 +0100 Subject: io_uring: kill off ios_left ->ios_left is only used to decide whether to plug or not; kill it to avoid this extra accounting and just use the initial submission number. There is not much difference in regard to enabling plugging; this version does it in a few more cases, but all the major ones should be covered well. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f13993bcf5b477f9a7d52881fc49f9457ea9870a.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5daee36c1f33..6917f96f0e82 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -315,6 +315,7 @@ struct io_submit_state { unsigned int free_reqs; bool plug_started; + bool need_plug; /* * Batch completion logic @@ -323,8 +324,6 @@ struct io_submit_state { unsigned int compl_nr; /* inline/task_work completion list, under ->uring_lock */ struct list_head free_list; - - unsigned int ios_left; }; struct io_ring_ctx { @@ -7090,10 +7089,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, * Plug now if we have more than 1 IO left after this, and the target * is potentially a read/write to block based storage. */ - if (!state->plug_started && state->ios_left > 1 && - io_op_defs[req->opcode].plug) { + if (state->need_plug && io_op_defs[req->opcode].plug) { blk_start_plug(&state->plug); state->plug_started = true; + state->need_plug = false; } if (io_op_defs[req->opcode].needs_file) { @@ -7102,8 +7101,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (unlikely(!req->file)) ret = -EBADF; } - - state->ios_left--; return ret; } @@ -7212,7 +7209,7 @@ static void io_submit_state_start(struct io_submit_state *state, unsigned int max_ios) { state->plug_started = false; - state->ios_left = max_ios; + state->need_plug = max_ios > 2; /* set only head, no need to init link_last in advance */ state->link.head = NULL; } -- cgit v1.2.3 From 6b639522f63f82437350038a4925633f769e4ec8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:50 +0100 Subject: io_uring: inline io_dismantle_req io_dismantle_req() is hot, and not _too_ huge. Inline it; there are 3 call sites, which hopefully will turn into 2 in the future.
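The plug decision after the ios_left removal above reduces to a one-shot choice per submission batch; a minimal standalone sketch (simplified names, with the io_op_defs lookup reduced to a boolean, not the kernel code):

	struct submit_state {
		bool plug_started;
		bool need_plug;
	};

	/* once per batch: plugging is only worth it for several IOs */
	static void submit_state_start(struct submit_state *state, unsigned int max_ios)
	{
		state->plug_started = false;
		state->need_plug = max_ios > 2;
	}

	/* per request: start the plug at most once, on the first pluggable op */
	static void maybe_start_plug(struct submit_state *state, bool op_can_plug)
	{
		if (state->need_plug && op_can_plug) {
			/* blk_start_plug(&state->plug) in the real code */
			state->plug_started = true;
			state->need_plug = false;
		}
	}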
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/bdd2dc30716cac270c2403e99bccd6286e4ae201.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 6917f96f0e82..43ed9f6821fb 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1960,7 +1960,7 @@ static inline void io_put_file(struct file *file) fput(file); } -static void io_dismantle_req(struct io_kiocb *req) +static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; -- cgit v1.2.3 From d81499bfcd471e6b920683f8fcf06ce65f84286b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:51 +0100 Subject: io_uring: inline linked part of io_req_find_next Inline the part of __io_req_find_next() that returns a request but doesn't need io_disarm_next(). It's used in just two places, but it makes links a bit faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4126d13f23d0e91b39b3558e16bd86cafa7fcef2.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 43ed9f6821fb..10dd3589f8be 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2074,40 +2074,39 @@ static bool io_disarm_next(struct io_kiocb *req) return posted; } -static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) +static void __io_req_find_next_prep(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + bool posted; + + spin_lock(&ctx->completion_lock); + posted = io_disarm_next(req); + if (posted) + io_commit_cqring(req->ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); +} + +static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { struct io_kiocb *nxt; + if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) + return NULL; /* * If LINK is set, we have dependent requests in this chain. If we * didn't fail this request, queue the first one up, moving any other * dependencies to the next request. In case of failure, fail the rest * of the chain. */ - if (req->flags & IO_DISARM_MASK) { - struct io_ring_ctx *ctx = req->ctx; - bool posted; - - spin_lock(&ctx->completion_lock); - posted = io_disarm_next(req); - if (posted) - io_commit_cqring(req->ctx); - spin_unlock(&ctx->completion_lock); - if (posted) - io_cqring_ev_posted(ctx); - } + if (unlikely(req->flags & IO_DISARM_MASK)) + __io_req_find_next_prep(req); nxt = req->link; req->link = NULL; return nxt; } -static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) -{ - if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) - return NULL; - return __io_req_find_next(req); -} - static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) { if (!ctx) -- cgit v1.2.3 From c450178d9be9dc44327f3b526f82c36e4c5d362b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:52 +0100 Subject: io_uring: dedup CQE flushing non-empty checks We don't do io_submit_flush_completions() when there are no requests enqueued, and every single caller checks for that. Move the check into the function itself, keeping it inlined. That will make it much easier to change the empty-check condition in the future.
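The resulting shape is the usual cheap inlined guard in front of a cold out-of-line body; a generic standalone sketch of the pattern (simplified types, not the kernel code):

	struct submit_state { unsigned int compl_nr; };
	struct ring_ctx { struct submit_state submit_state; };

	void __flush_completions(struct ring_ctx *ctx);	/* cold, out of line */

	static inline void flush_completions(struct ring_ctx *ctx)
	{
		/* callers no longer carry their own non-empty checks */
		if (ctx->submit_state.compl_nr)
			__flush_completions(ctx);
	}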
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d7ff8cef5da1b38e8ea648f5aad9a315ddfc7b57.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 10dd3589f8be..c4ec3672da77 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1092,7 +1092,7 @@ static void __io_queue_sqe(struct io_kiocb *req); static void io_rsrc_put_work(struct work_struct *work); static void io_req_task_queue(struct io_kiocb *req); -static void io_submit_flush_completions(struct io_ring_ctx *ctx); +static void __io_submit_flush_completions(struct io_ring_ctx *ctx); static int io_req_prep_async(struct io_kiocb *req); static int io_install_fixed_file(struct io_kiocb *req, struct file *file, @@ -1164,6 +1164,12 @@ static inline void req_ref_get(struct io_kiocb *req) atomic_inc(&req->refs); } +static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) +{ + if (ctx->submit_state.compl_nr) + __io_submit_flush_completions(ctx); +} + static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) { if (!(req->flags & REQ_F_REFCOUNT)) { @@ -1252,8 +1258,7 @@ static void io_fallback_req_func(struct work_struct *work) req->io_task_work.func(req, &locked); if (locked) { - if (ctx->submit_state.compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); } percpu_ref_put(&ctx->refs); @@ -2112,8 +2117,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) if (!ctx) return; if (*locked) { - if (ctx->submit_state.compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); *locked = false; } @@ -2130,7 +2134,7 @@ static void tctx_task_work(struct callback_head *cb) while (1) { struct io_wq_work_node *node; - if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) + if (!tctx->task_list.first && locked) io_submit_flush_completions(ctx); spin_lock_irq(&tctx->task_lock); @@ -2315,7 +2319,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, list_add(&req->inflight_entry, &state->free_list); } -static void io_submit_flush_completions(struct io_ring_ctx *ctx) +static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; @@ -7195,8 +7199,7 @@ static void io_submit_state_end(struct io_submit_state *state, { if (state->link.head) io_queue_sqe(state->link.head); - if (state->compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); if (state->plug_started) blk_finish_plug(&state->plug); } -- cgit v1.2.3 From d97ec6239ad8684e4d31b12b39cefca6782c09a3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:53 +0100 Subject: io_uring: kill extra wake_up_process in tw add TWA_SIGNAL already wakes the thread, no need in wake_up_process() after it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7e90cf643f633e857443e0c9e72471b221735c50.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index c4ec3672da77..ead85f4d3427 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2197,8 +2197,9 @@ static void io_req_task_work_add(struct io_kiocb *req) * will do the job. 
*/ notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; - if (!task_work_add(tsk, &tctx->task_work, notify)) { - wake_up_process(tsk); + if (likely(!task_work_add(tsk, &tctx->task_work, notify))) { + if (notify == TWA_NONE) + wake_up_process(tsk); return; } -- cgit v1.2.3 From 83f84356bc8f2dda9d0c9c7edb94decf71a36d26 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 13 Sep 2021 21:08:54 +0800 Subject: io_uring: add more uring info to fdinfo for debug Developers may need some uring info to help themselves debug and address issues in production. This includes sqring/cqring head/tail and the detailed sqe/cqe info, which is very useful when an application is hung on a ring. Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20210913130854.38542-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index ead85f4d3427..323ef0bb4bc1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -10056,8 +10056,48 @@ static int io_uring_show_cred(struct seq_file *m, unsigned int id, static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) { struct io_sq_data *sq = NULL; + struct io_overflow_cqe *ocqe; + struct io_rings *r = ctx->rings; + unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; + unsigned int cached_sq_head = ctx->cached_sq_head; + unsigned int cached_cq_tail = ctx->cached_cq_tail; + unsigned int sq_head = READ_ONCE(r->sq.head); + unsigned int sq_tail = READ_ONCE(r->sq.tail); + unsigned int cq_head = READ_ONCE(r->cq.head); + unsigned int cq_tail = READ_ONCE(r->cq.tail); bool has_lock; - int i; + unsigned int i; + + /* + * we may get imprecise sqe and cqe info if uring is actively running + * since we get cached_sq_head and cached_cq_tail without uring_lock + * and sq_tail and cq_head are changed by userspace. But it's ok since + * we usually use these info when it is stuck. 
+ */ + seq_printf(m, "SqHead:\t%u\n", sq_head & sq_mask); + seq_printf(m, "SqTail:\t%u\n", sq_tail & sq_mask); + seq_printf(m, "CachedSqHead:\t%u\n", cached_sq_head & sq_mask); + seq_printf(m, "CqHead:\t%u\n", cq_head & cq_mask); + seq_printf(m, "CqTail:\t%u\n", cq_tail & cq_mask); + seq_printf(m, "CachedCqTail:\t%u\n", cached_cq_tail & cq_mask); + seq_printf(m, "SQEs:\t%u\n", sq_tail - cached_sq_head); + for (i = cached_sq_head; i < sq_tail; i++) { + unsigned int sq_idx = READ_ONCE(ctx->sq_array[i & sq_mask]); + + if (likely(sq_idx <= sq_mask)) { + struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; + + seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", + sq_idx, sqe->opcode, sqe->fd, sqe->flags, sqe->user_data); + } + } + seq_printf(m, "CQEs:\t%u\n", cached_cq_tail - cq_head); + for (i = cq_head; i < cached_cq_tail; i++) { + struct io_uring_cqe *cqe = &r->cqes[i & cq_mask]; + + seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n", + i & cq_mask, cqe->user_data, cqe->res, cqe->flags); + } /* * Avoid ABBA deadlock between the seq lock and the io_uring mutex, @@ -10099,7 +10139,10 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) xa_for_each(&ctx->personalities, index, cred) io_uring_show_cred(m, index, cred); } - seq_printf(m, "PollList:\n"); + if (has_lock) + mutex_unlock(&ctx->uring_lock); + + seq_puts(m, "PollList:\n"); spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list = &ctx->cancel_hash[i]; @@ -10109,9 +10152,17 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, req->task->task_works != NULL); } + + seq_puts(m, "CqOverflowList:\n"); + list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) { + struct io_uring_cqe *cqe = &ocqe->cqe; + + seq_printf(m, " user_data=%llu, res=%d, flags=%x\n", + cqe->user_data, cqe->res, cqe->flags); + + } + spin_unlock(&ctx->completion_lock); - if (has_lock) - mutex_unlock(&ctx->uring_lock); } static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) -- cgit v1.2.3 From a3f349071eb0ae2aa18ac2da55060865d8190cf0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Sep 2021 12:04:20 +0100 Subject: io_uring: remove ctx referencing from complete_post Now that completions are done from task context, it's either the task itself, task_work, or an io-wq worker. In all those cases the ctx is kept alive by mutexing, explicit referencing, or req references held by io-wq. Remove the extra ctx pinning from io_req_complete_post().
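The pinning being removed looked roughly like this before the patch (a simplified excerpt for orientation, not the verbatim code):

	if (!percpu_ref_tryget(&ctx->refs))	/* pin ctx across the unlock */
		req = NULL;
	...
	spin_unlock(&ctx->completion_lock);
	if (req) {
		io_cqring_ev_posted(ctx);
		percpu_ref_put(&ctx->refs);
	}

Since the caller now always holds the ctx alive, the tryget/put pair can go and io_cqring_ev_posted() can be called unconditionally, as the hunk below shows.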
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/60a0e96434c16ab4fe587651448290d61ec9a113.1631703756.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 323ef0bb4bc1..027ca7a6df44 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1809,17 +1809,11 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_put_task(req->task, 1); list_add(&req->inflight_entry, &ctx->locked_free_list); ctx->locked_free_nr++; - } else { - if (!percpu_ref_tryget(&ctx->refs)) - req = NULL; + percpu_ref_put(&ctx->refs); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); - - if (req) { - io_cqring_ev_posted(ctx); - percpu_ref_put(&ctx->refs); - } + io_cqring_ev_posted(ctx); } static inline bool io_req_needs_clean(struct io_kiocb *req) -- cgit v1.2.3 From 68fe256aadc0db70cb27132a6f5583819794d867 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Sep 2021 12:03:38 +0100 Subject: io_uring: optimise io_req_init() sqe flags checks IOSQE_IO_DRAIN is quite marginal and we don't care too much about IOSQE_BUFFER_SELECT. Save two ifs by hiding both of them behind the SQE_VALID_FLAGS check. Now we first check whether the request uses a "safe" subset, i.e. without DRAIN and BUFFER_SELECT, and only if it doesn't do we test the rest of the flags. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/dccfb9ab2ab0969a2d8dc59af88fa0ce44eeb1d5.1631703764.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 027ca7a6df44..3801f2e5ea1b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -103,9 +103,11 @@ #define IORING_MAX_REG_BUFFERS (1U << 14) -#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ IOSQE_BUFFER_SELECT) +#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \ IOSQE_IO_HARDLINK | IOSQE_ASYNC) + +#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN) + #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) @@ -7059,20 +7061,21 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->fixed_rsrc_refs = NULL; req->task = current; - /* enforce forwards compatibility on users */ - if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) - return -EINVAL; if (unlikely(req->opcode >= IORING_OP_LAST)) return -EINVAL; + if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { + /* enforce forwards compatibility on users */ + if (sqe_flags & ~SQE_VALID_FLAGS) + return -EINVAL; + if ((sqe_flags & IOSQE_BUFFER_SELECT) && + !io_op_defs[req->opcode].buffer_select) + return -EOPNOTSUPP; + if (sqe_flags & IOSQE_IO_DRAIN) + ctx->drain_active = true; + } if (!io_check_restriction(ctx, req, sqe_flags)) return -EACCES; - if ((sqe_flags & IOSQE_BUFFER_SELECT) && - !io_op_defs[req->opcode].buffer_select) - return -EOPNOTSUPP; - if (unlikely(sqe_flags & IOSQE_IO_DRAIN)) - ctx->drain_active = true; - personality = READ_ONCE(sqe->personality); if (personality) { req->creds = xa_load(&ctx->personalities, personality); @@ -11017,6 +11020,8 @@ static int __init io_uring_init(void) /* should fit into one byte */ BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); + BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); +
BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST); BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int)); -- cgit v1.2.3 From 8d4af6857c6fb5b1922218e93052bee29eb540f4 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Wed, 22 Sep 2021 18:15:22 +0800 Subject: io_uring: return boolean value for io_alloc_async_data A boolean value is good enough for io_alloc_async_data. Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20210922101522.9179-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 3801f2e5ea1b..3ced6a7bdf2b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3301,7 +3301,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, } } -static inline int io_alloc_async_data(struct io_kiocb *req) +static inline bool io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); -- cgit v1.2.3 From 6878b40e7b28bd780dedb7d505c13dbf82b73290 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:41 +0100 Subject: io_uring: mark having different creds unlikely Hint the compiler that it is unlikely for a request to have creds attached that differ from current's. The current code generation is far from ideal; hopefully this can help some compilers to remove duplicated jump tables and the like. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e7815251ac4bf5a4a23d298c752f029ae19f3837.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 3ced6a7bdf2b..bda302bedeef 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6638,7 +6638,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) const struct cred *creds = NULL; int ret; - if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) creds = override_creds(req->creds); switch (req->opcode) { -- cgit v1.2.3 From 87a115fb715bf1f6765e122babbcba566ada286e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:42 +0100 Subject: io_uring: force_nonspin We don't really need to pass the number of requests to complete into io_do_iopoll(); a flag saying whether to enforce non-spin mode is enough. Should be straightforward, except maybe for io_iopoll_check(). We pass !min there because we never enter with the number of already reaped requests larger than the specified @min, apart from the first iteration, where nr_events is 0 and so the final check should be identical.
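The caller-side effect of the new flag can be restated compactly (an interface illustration distilled from the hunks below, not new code):

	/* before: spin while fewer than @min events have been reaped */
	ret = io_do_iopoll(ctx, &nr_events, min);

	/* after: @min == 0 means the caller never wanted to spin */
	ret = io_do_iopoll(ctx, &nr_events, /* force_nonspin */ !min);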
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/782b39d1d8ec584eae15bca0a1feb6f0571fe5b8.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index bda302bedeef..d60595487976 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2453,7 +2453,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, - long min) + bool force_nonspin) { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; @@ -2462,9 +2462,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, /* * Only spin for completions if we don't have multiple devices hanging - * off our complete list, and we're under the requested amount. + * off our complete list. */ - if (ctx->poll_multi_queue || *nr_events >= min) + if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { @@ -2516,7 +2516,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) while (!list_empty(&ctx->iopoll_list)) { unsigned int nr_events = 0; - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, true); /* let it sleep and repeat later if can't complete a request */ if (nr_events == 0) @@ -2578,7 +2578,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) list_empty(&ctx->iopoll_list)) break; } - ret = io_do_iopoll(ctx, &nr_events, min); + ret = io_do_iopoll(ctx, &nr_events, !min); } while (!ret && nr_events < min && !need_resched()); out: mutex_unlock(&ctx->uring_lock); @@ -7347,7 +7347,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->iopoll_list)) - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, true); /* * Don't submit if refs are dying, good for io_uring_register(), -- cgit v1.2.3 From 5ba3c874eb8acb09fd8aea78ded67e023931894a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:43 +0100 Subject: io_uring: make io_do_iopoll return number of reqs Don't pass the nr_events pointer around but return the count directly; it's less expensive than pointer increments.
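In other words, the signature changes roughly like this (abridged; the full diff follows):

	/* before: count of reaped requests returned through an out-parameter */
	static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, bool force_nonspin);

	/* after: >= 0 is the number of reaped requests, < 0 is an error */
	static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);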
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f771a8153a86f16f12ff4272524e9e549c5de40b.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d60595487976..27ca4fb733f9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2425,8 +2425,7 @@ static inline bool io_run_task_work(void) /* * Find and free completed poll iocbs */ -static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, - struct list_head *done) +static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) { struct req_batch rb; struct io_kiocb *req; @@ -2441,7 +2440,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, __io_cqring_fill_event(ctx, req->user_data, req->result, io_put_rw_kbuf(req)); - (*nr_events)++; if (req_ref_put_and_test(req)) io_req_free_batch(&rb, req, &ctx->submit_state); @@ -2452,12 +2450,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, io_req_free_batch_finish(ctx, &rb); } -static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, - bool force_nonspin) +static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; DEFINE_IO_COMP_BATCH(iob); + int nr_events = 0; LIST_HEAD(done); /* @@ -2478,6 +2476,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, */ if (READ_ONCE(req->iopoll_completed)) { list_move_tail(&req->inflight_entry, &done); + nr_events++; continue; } if (!list_empty(&done)) @@ -2491,16 +2490,18 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, /* iopoll may have completed current req */ if (!rq_list_empty(iob.req_list) || - READ_ONCE(req->iopoll_completed)) + READ_ONCE(req->iopoll_completed)) { list_move_tail(&req->inflight_entry, &done); + nr_events++; + } } if (!rq_list_empty(iob.req_list)) iob.complete(&iob); if (!list_empty(&done)) - io_iopoll_complete(ctx, nr_events, &done); + io_iopoll_complete(ctx, &done); - return 0; + return nr_events; } /* @@ -2514,12 +2515,8 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) mutex_lock(&ctx->uring_lock); while (!list_empty(&ctx->iopoll_list)) { - unsigned int nr_events = 0; - - io_do_iopoll(ctx, &nr_events, true); - /* let it sleep and repeat later if can't complete a request */ - if (nr_events == 0) + if (io_do_iopoll(ctx, true) == 0) break; /* * Ensure we allow local-to-the-cpu processing to take place, @@ -2578,8 +2575,12 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) list_empty(&ctx->iopoll_list)) break; } - ret = io_do_iopoll(ctx, &nr_events, !min); - } while (!ret && nr_events < min && !need_resched()); + ret = io_do_iopoll(ctx, !min); + if (ret < 0) + break; + nr_events += ret; + ret = 0; + } while (nr_events < min && !need_resched()); out: mutex_unlock(&ctx->uring_lock); return ret; @@ -7339,7 +7340,6 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; if (!list_empty(&ctx->iopoll_list) || to_submit) { - unsigned nr_events = 0; const struct cred *creds = NULL; if (ctx->sq_creds != current_cred()) @@ -7347,7 +7347,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->iopoll_list)) - io_do_iopoll(ctx, &nr_events, 
true); /* * Don't submit if refs are dying, good for io_uring_register(), -- cgit v1.2.3 From 6f33b0bc4ea43f5c5ce7b7c9ab66051f80837862 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:44 +0100 Subject: io_uring: use slist for completion batching Currently we collect requests for completion batching in an array. Replace it with a singly linked list. It's as fast as arrays but doesn't take as much space in ctx, and it will be used in future patches. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a666826f2854d17e9fb9417fb302edfeb750f425.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 27ca4fb733f9..0ccfd70af3e0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -322,8 +322,8 @@ struct io_submit_state { /* * Batch completion logic */ - struct io_kiocb *compl_reqs[IO_COMPL_BATCH]; - unsigned int compl_nr; + struct io_wq_work_list compl_reqs; + /* inline/task_work completion list, under ->uring_lock */ struct list_head free_list; }; @@ -882,6 +882,8 @@ struct io_kiocb { struct io_wq_work work; const struct cred *creds; + struct io_wq_work_node comp_list; + /* store used ubuf, so we can prevent reloading */ struct io_mapped_ubuf *imu; }; @@ -1168,7 +1170,7 @@ static inline void req_ref_get(struct io_kiocb *req) static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) { - if (ctx->submit_state.compl_nr) + if (!wq_list_empty(&ctx->submit_state.compl_reqs)) __io_submit_flush_completions(ctx); } @@ -1325,6 +1327,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->submit_state.free_list); INIT_LIST_HEAD(&ctx->locked_free_list); INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); + INIT_WQ_LIST(&ctx->submit_state.compl_reqs); return ctx; err: kfree(ctx->dummy_ubuf); @@ -1826,11 +1829,16 @@ static inline bool io_req_needs_clean(struct io_kiocb *req) static void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { + struct io_submit_state *state; + if (io_req_needs_clean(req)) io_clean_op(req); req->result = res; req->compl.cflags = cflags; req->flags |= REQ_F_COMPLETE_INLINE; + + state = &req->ctx->submit_state; + wq_list_add_tail(&req->comp_list, &state->compl_reqs); } static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, @@ -2319,13 +2327,14 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { + struct io_wq_work_node *node, *prev; struct io_submit_state *state = &ctx->submit_state; - int i, nr = state->compl_nr; struct req_batch rb; spin_lock(&ctx->completion_lock); - for (i = 0; i < nr; i++) { - struct io_kiocb *req = state->compl_reqs[i]; + wq_list_for_each(node, prev, &state->compl_reqs) { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); __io_cqring_fill_event(ctx, req->user_data, req->result, req->compl.cflags); @@ -2335,15 +2344,18 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) io_cqring_ev_posted(ctx); io_init_req_batch(&rb); - for (i = 0; i < nr; i++) { - struct io_kiocb *req = state->compl_reqs[i]; + node = state->compl_reqs.first; + do { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); + node =
req->comp_list.next; if (req_ref_put_and_test(req)) io_req_free_batch(&rb, req, &ctx->submit_state); - } + } while (node); io_req_free_batch_finish(ctx, &rb); - state->compl_nr = 0; + INIT_WQ_LIST(&state->compl_reqs); } /* @@ -2668,17 +2680,10 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked) unsigned int cflags = io_put_rw_kbuf(req); long res = req->result; - if (*locked) { - struct io_ring_ctx *ctx = req->ctx; - struct io_submit_state *state = &ctx->submit_state; - + if (*locked) io_req_complete_state(req, res, cflags); - state->compl_reqs[state->compl_nr++] = req; - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) - io_submit_flush_completions(ctx); - } else { + else io_req_complete_post(req, res, cflags); - } } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -6962,15 +6967,8 @@ issue_sqe: * doesn't support non-blocking read/write attempts */ if (likely(!ret)) { - if (req->flags & REQ_F_COMPLETE_INLINE) { - struct io_ring_ctx *ctx = req->ctx; - struct io_submit_state *state = &ctx->submit_state; - - state->compl_reqs[state->compl_nr++] = req; - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) - io_submit_flush_completions(ctx); + if (req->flags & REQ_F_COMPLETE_INLINE) return; - } linked_timeout = io_prep_linked_timeout(req); if (linked_timeout) -- cgit v1.2.3 From 3ab665b74e59ad467b3eab5b2b95d378cea78b22 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:45 +0100 Subject: io_uring: remove allocation cache array We have several request allocation layers; remove the last one, which is the submit->reqs array, and always use submit->free_list instead. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8547095c35f7a87bab14f6447ecd30a273ed7500.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 60 +++++++++++++++++------------------------------------------ 1 file changed, 17 insertions(+), 43 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0ccfd70af3e0..e3bfbd3f4787 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -310,12 +310,6 @@ struct io_submit_state { struct blk_plug plug; struct io_submit_link link; - /* - * io_kiocb alloc cache - */ - void *reqs[IO_REQ_CACHE_SIZE]; - unsigned int free_reqs; - bool plug_started; bool need_plug; @@ -1898,7 +1892,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; - int nr; /* * If we have more than a batch's worth of requests in our IRQ side @@ -1907,20 +1900,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) */ if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) io_flush_cached_locked_reqs(ctx, state); - - nr = state->free_reqs; - while (!list_empty(&state->free_list)) { - struct io_kiocb *req = list_first_entry(&state->free_list, - struct io_kiocb, inflight_entry); - - list_del(&req->inflight_entry); - state->reqs[nr++] = req; - if (nr == ARRAY_SIZE(state->reqs)) - break; - } - - state->free_reqs = nr; - return nr != 0; + return !list_empty(&state->free_list); } /* @@ -1934,33 +1914,36 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + void *reqs[IO_REQ_ALLOC_BATCH]; + struct io_kiocb *req; int ret, i; - BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH); - - if (likely(state->free_reqs || io_flush_cached_reqs(ctx))) + if
(likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx))) goto got_req; - ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH, - state->reqs); + ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); /* * Bulk alloc is all-or-nothing. If we fail to get a batch, * retry single alloc to be on the safe side. */ if (unlikely(ret <= 0)) { - state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); - if (!state->reqs[0]) + reqs[0] = kmem_cache_alloc(req_cachep, gfp); + if (!reqs[0]) return NULL; ret = 1; } - for (i = 0; i < ret; i++) - io_preinit_req(state->reqs[i], ctx); - state->free_reqs = ret; + for (i = 0; i < ret; i++) { + req = reqs[i]; + + io_preinit_req(req, ctx); + list_add(&req->inflight_entry, &state->free_list); + } got_req: - state->free_reqs--; - return state->reqs[state->free_reqs]; + req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry); + list_del(&req->inflight_entry); + return req; } static inline void io_put_file(struct file *file) @@ -2318,10 +2301,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, rb->task_refs++; rb->ctx_refs++; - if (state->free_reqs != ARRAY_SIZE(state->reqs)) - state->reqs[state->free_reqs++] = req; - else - list_add(&req->inflight_entry, &state->free_list); + list_add(&req->inflight_entry, &state->free_list); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) @@ -9228,12 +9208,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) struct io_submit_state *state = &ctx->submit_state; mutex_lock(&ctx->uring_lock); - - if (state->free_reqs) { - kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); - state->free_reqs = 0; - } - io_flush_cached_locked_reqs(ctx, state); io_req_cache_free(&state->free_list); mutex_unlock(&ctx->uring_lock); -- cgit v1.2.3 From c2b6c6bc4e0d3452ac4e208c198f6062e0f1d9df Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:47 +0100 Subject: io_uring: replace list with stack for req caches Replace the struct list_head free_list, which serves for caching requests, with a singly linked stack, which is faster.
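The stack semantics used for the cache are just head insertion and head removal on the existing io_wq_work_node; a freestanding sketch mirroring wq_stack_add_head()/wq_stack_extract() (not the kernel source):

	struct work_node { struct work_node *next; };
	struct work_stack { struct work_node *first; };

	static inline void stack_push(struct work_node *n, struct work_stack *s)
	{
		n->next = s->first;
		s->first = n;
	}

	static inline struct work_node *stack_pop(struct work_stack *s)
	{
		struct work_node *n = s->first;	/* caller checks non-empty */

		s->first = n->next;
		return n;
	}

A LIFO cache also tends to hand back cache-hot requests, which is part of why a stack is preferable to a FIFO list here.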
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1bc942b82422fb2624b8353bd93aca183a022846.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 51 ++++++++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index e3bfbd3f4787..90ad9cd254c5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -319,7 +319,7 @@ struct io_submit_state { struct io_wq_work_list compl_reqs; /* inline/task_work completion list, under ->uring_lock */ - struct list_head free_list; + struct io_wq_work_node free_list; }; struct io_ring_ctx { @@ -379,7 +379,7 @@ struct io_ring_ctx { } ____cacheline_aligned_in_smp; /* IRQ completion list, under ->completion_lock */ - struct list_head locked_free_list; + struct io_wq_work_list locked_free_list; unsigned int locked_free_nr; const struct cred *sq_creds; /* cred used for __io_sq_thread() */ @@ -1318,8 +1318,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work); init_llist_head(&ctx->rsrc_put_llist); INIT_LIST_HEAD(&ctx->tctx_list); - INIT_LIST_HEAD(&ctx->submit_state.free_list); - INIT_LIST_HEAD(&ctx->locked_free_list); + ctx->submit_state.free_list.next = NULL; + INIT_WQ_LIST(&ctx->locked_free_list); INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); INIT_WQ_LIST(&ctx->submit_state.compl_reqs); return ctx; @@ -1806,7 +1806,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, } io_dismantle_req(req); io_put_task(req->task, 1); - list_add(&req->inflight_entry, &ctx->locked_free_list); + wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; percpu_ref_put(&ctx->refs); } @@ -1883,7 +1883,7 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, struct io_submit_state *state) { spin_lock(&ctx->completion_lock); - list_splice_init(&ctx->locked_free_list, &state->free_list); + wq_list_splice(&ctx->locked_free_list, &state->free_list); ctx->locked_free_nr = 0; spin_unlock(&ctx->completion_lock); } @@ -1900,7 +1900,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) */ if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) io_flush_cached_locked_reqs(ctx, state); - return !list_empty(&state->free_list); + return !!state->free_list.next; } /* @@ -1915,10 +1915,11 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; void *reqs[IO_REQ_ALLOC_BATCH]; + struct io_wq_work_node *node; struct io_kiocb *req; int ret, i; - if (likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx))) + if (likely(state->free_list.next || io_flush_cached_reqs(ctx))) goto got_req; ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); @@ -1938,12 +1939,11 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) req = reqs[i]; io_preinit_req(req, ctx); - list_add(&req->inflight_entry, &state->free_list); + wq_stack_add_head(&req->comp_list, &state->free_list); } got_req: - req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry); - list_del(&req->inflight_entry); - return req; + node = wq_stack_extract(&state->free_list); + return container_of(node, struct io_kiocb, comp_list); } static inline void io_put_file(struct file *file) @@ -1976,7 +1976,7 @@ static void __io_free_req(struct io_kiocb *req) io_put_task(req->task, 1); 
spin_lock(&ctx->completion_lock); - list_add(&req->inflight_entry, &ctx->locked_free_list); + wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; spin_unlock(&ctx->completion_lock); @@ -2300,8 +2300,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, } rb->task_refs++; rb->ctx_refs++; - - list_add(&req->inflight_entry, &state->free_list); + wq_stack_add_head(&req->comp_list, &state->free_list); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) @@ -7261,7 +7260,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) } sqe = io_get_sqe(ctx); if (unlikely(!sqe)) { - list_add(&req->inflight_entry, &ctx->submit_state.free_list); + wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); break; } /* will complete beyond this point, count as submitted */ @@ -9193,23 +9192,21 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) } } -static void io_req_cache_free(struct list_head *list) -{ - struct io_kiocb *req, *nxt; - - list_for_each_entry_safe(req, nxt, list, inflight_entry) { - list_del(&req->inflight_entry); - kmem_cache_free(req_cachep, req); - } -} - static void io_req_caches_free(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; mutex_lock(&ctx->uring_lock); io_flush_cached_locked_reqs(ctx, state); - io_req_cache_free(&state->free_list); + + while (state->free_list.next) { + struct io_wq_work_node *node; + struct io_kiocb *req; + + node = wq_stack_extract(&state->free_list); + req = container_of(node, struct io_kiocb, comp_list); + kmem_cache_free(req_cachep, req); + } mutex_unlock(&ctx->uring_lock); } -- cgit v1.2.3 From e3f721e6f6d5d916edf71e5f26ac0547a4b28e1e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:48 +0100 Subject: io_uring: split iopoll loop The main loop of io_do_iopoll() iterates and does ->iopoll() until it meets the first completed request, then it continues from that position and splices requests to pass them through io_iopoll_complete(). Split the loop in two for clarity: iopolling, and reaping completed requests from the list.
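The resulting structure, in outline (control flow only, not the exact code):

	/* pass 1: issue ->iopoll() until the first already-completed request */
	list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) {
		if (READ_ONCE(req->iopoll_completed))
			break;
		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
		...
	}

	/* pass 2: reap the completed prefix, resuming where pass 1 stopped */
	list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list, inflight_entry) {
		if (!READ_ONCE(req->iopoll_completed))
			break;
		list_move_tail(&req->inflight_entry, &done);
		nr_events++;
	}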
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a7f6fd27a94845e5dc925a47a4a9765a92e514fb.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 90ad9cd254c5..0e46ed6f92ea 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2441,6 +2441,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) io_req_free_batch_finish(ctx, &rb); } +/* same as "continue" but starts from the pos, not next to it */ +#define list_for_each_entry_safe_resume(pos, n, head, member) \ + for (n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_kiocb *req, *tmp; @@ -2456,7 +2462,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; - list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { + list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; int ret; @@ -2465,12 +2471,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) * If we find a request that requires polling, break out * and complete those lists first, if we have entries there. */ - if (READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->inflight_entry, &done); - nr_events++; - continue; - } - if (!list_empty(&done)) + if (READ_ONCE(req->iopoll_completed)) break; ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags); @@ -2481,17 +2482,22 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) /* iopoll may have completed current req */ if (!rq_list_empty(iob.req_list) || - READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->inflight_entry, &done); - nr_events++; - } + READ_ONCE(req->iopoll_completed)) + break; } if (!rq_list_empty(iob.req_list)) iob.complete(&iob); - if (!list_empty(&done)) - io_iopoll_complete(ctx, &done); + list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list, + inflight_entry) { + if (!READ_ONCE(req->iopoll_completed)) + break; + list_move_tail(&req->inflight_entry, &done); + nr_events++; + } + if (nr_events) + io_iopoll_complete(ctx, &done); return nr_events; } -- cgit v1.2.3 From 5eef4e87eb0b2b8482f6a77ebcad067636a867b3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:49 +0100 Subject: io_uring: use single linked list for iopoll Use single linked lists for keeping iopoll requests, takes less space, may be faster, but mostly will be of benefit for further patches. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/314033676b100cd485518c3bc55e1b95a0dcd71f.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.h | 3 +++ fs/io_uring.c | 54 +++++++++++++++++++++++++++--------------------------- 2 files changed, 30 insertions(+), 27 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io-wq.h b/fs/io-wq.h index c870062105d1..87ba6a733630 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -32,6 +32,9 @@ struct io_wq_work_list { #define wq_list_for_each(pos, prv, head) \ for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) +#define wq_list_for_each_resume(pos, prv) \ + for (; pos; prv = pos, pos = (pos)->next) + #define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) #define INIT_WQ_LIST(list) do { \ (list)->first = NULL; \ diff --git a/fs/io_uring.c b/fs/io_uring.c index 0e46ed6f92ea..fac1d0310d66 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -412,7 +412,7 @@ struct io_ring_ctx { * For SQPOLL, only the single threaded io_sq_thread() will * manipulate the list, hence no extra locking is needed there. */ - struct list_head iopoll_list; + struct io_wq_work_list iopoll_list; struct hlist_head *cancel_hash; unsigned cancel_hash_bits; bool poll_multi_queue; @@ -1309,7 +1309,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->cq_wait); spin_lock_init(&ctx->completion_lock); spin_lock_init(&ctx->timeout_lock); - INIT_LIST_HEAD(&ctx->iopoll_list); + INIT_WQ_LIST(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); INIT_LIST_HEAD(&ctx->ltimeout_list); @@ -2441,15 +2441,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) io_req_free_batch_finish(ctx, &rb); } -/* same as "continue" but starts from the pos, not next to it */ -#define list_for_each_entry_safe_resume(pos, n, head, member) \ - for (n = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ - pos = n, n = list_next_entry(n, member)) - static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { - struct io_kiocb *req, *tmp; + struct io_wq_work_node *pos, *start, *prev; unsigned int poll_flags = BLK_POLL_NOSLEEP; DEFINE_IO_COMP_BATCH(iob); int nr_events = 0; @@ -2462,7 +2456,8 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; - list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) { + wq_list_for_each(pos, start, &ctx->iopoll_list) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); struct kiocb *kiocb = &req->rw.kiocb; int ret; @@ -2488,14 +2483,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (!rq_list_empty(iob.req_list)) iob.complete(&iob); - list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list, - inflight_entry) { + else if (!pos) + return 0; + + prev = start; + wq_list_for_each_resume(pos, prev) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); + if (!READ_ONCE(req->iopoll_completed)) break; - list_move_tail(&req->inflight_entry, &done); + list_add_tail(&req->inflight_entry, &done); nr_events++; } + wq_list_cut(&ctx->iopoll_list, prev, start); if (nr_events) io_iopoll_complete(ctx, &done); return nr_events; @@ -2511,7 +2512,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) return; mutex_lock(&ctx->uring_lock); - while (!list_empty(&ctx->iopoll_list)) { + while 
(!wq_list_empty(&ctx->iopoll_list)) { /* let it sleep and repeat later if can't complete a request */ if (io_do_iopoll(ctx, true) == 0) break; @@ -2560,7 +2561,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * forever, while the workqueue is stuck trying to acquire the * very same mutex. */ - if (list_empty(&ctx->iopoll_list)) { + if (wq_list_empty(&ctx->iopoll_list)) { u32 tail = ctx->cached_cq_tail; mutex_unlock(&ctx->uring_lock); @@ -2569,7 +2570,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) /* some requests don't go through iopoll_list */ if (tail != ctx->cached_cq_tail || - list_empty(&ctx->iopoll_list)) + wq_list_empty(&ctx->iopoll_list)) break; } ret = io_do_iopoll(ctx, !min); @@ -2729,14 +2730,13 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * how we do polling eventually, not spinning if we're on potentially * different devices. */ - if (list_empty(&ctx->iopoll_list)) { + if (wq_list_empty(&ctx->iopoll_list)) { ctx->poll_multi_queue = false; } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; - list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, - inflight_entry); - + list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, + comp_list); if (list_req->file != req->file) ctx->poll_multi_queue = true; } @@ -2746,9 +2746,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * it to the front so we find it first. */ if (READ_ONCE(req->iopoll_completed)) - list_add(&req->inflight_entry, &ctx->iopoll_list); + wq_list_add_head(&req->comp_list, &ctx->iopoll_list); else - list_add_tail(&req->inflight_entry, &ctx->iopoll_list); + wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); if (unlikely(in_async)) { /* @@ -7322,14 +7322,14 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE) to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; - if (!list_empty(&ctx->iopoll_list) || to_submit) { + if (!wq_list_empty(&ctx->iopoll_list) || to_submit) { const struct cred *creds = NULL; if (ctx->sq_creds != current_cred()) creds = override_creds(ctx->sq_creds); mutex_lock(&ctx->uring_lock); - if (!list_empty(&ctx->iopoll_list)) + if (!wq_list_empty(&ctx->iopoll_list)) io_do_iopoll(ctx, true); /* @@ -7407,7 +7407,7 @@ static int io_sq_thread(void *data) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { int ret = __io_sq_thread(ctx, cap_entries); - if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list))) + if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list))) sqt_spin = true; } if (io_run_task_work()) @@ -7428,7 +7428,7 @@ static int io_sq_thread(void *data) io_ring_set_wakeup_flag(ctx); if ((ctx->flags & IORING_SETUP_IOPOLL) && - !list_empty_careful(&ctx->iopoll_list)) { + !wq_list_empty(&ctx->iopoll_list)) { needs_sched = false; break; } @@ -9583,7 +9583,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, /* SQPOLL thread does its own polling */ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || (ctx->sq_data && ctx->sq_data->thread == current)) { - while (!list_empty_careful(&ctx->iopoll_list)) { + while (!wq_list_empty(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; } -- cgit v1.2.3 From 3aa83bfb6e5c3a89d0ec67e598ee04bfc1425b13 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:50 +0100 Subject: io_uring: add a helper for batch free Add a helper io_free_batch_list(), which takes a single linked list and puts/frees all requests from it in an efficient 
manner. It will be reused later. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4fc8306b542c6b1dd1d08e8021ef3bdb0ad15010.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index fac1d0310d66..df122cdfc85d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2303,12 +2303,31 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, wq_stack_add_head(&req->comp_list, &state->free_list); } +static void io_free_batch_list(struct io_ring_ctx *ctx, + struct io_wq_work_list *list) + __must_hold(&ctx->uring_lock) +{ + struct io_wq_work_node *node; + struct req_batch rb; + + io_init_req_batch(&rb); + node = list->first; + do { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); + + node = req->comp_list.next; + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); + } while (node); + io_req_free_batch_finish(ctx, &rb); +} + static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_wq_work_node *node, *prev; struct io_submit_state *state = &ctx->submit_state; - struct req_batch rb; spin_lock(&ctx->completion_lock); wq_list_for_each(node, prev, &state->compl_reqs) { @@ -2322,18 +2341,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) spin_unlock(&ctx->completion_lock); io_cqring_ev_posted(ctx); - io_init_req_batch(&rb); - node = state->compl_reqs.first; - do { - struct io_kiocb *req = container_of(node, struct io_kiocb, - comp_list); - - node = req->comp_list.next; - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); - } while (node); - - io_req_free_batch_finish(ctx, &rb); + io_free_batch_list(ctx, &state->compl_reqs); INIT_WQ_LIST(&state->compl_reqs); } -- cgit v1.2.3 From b3fa03fd1b17f557e2c43736440feed66fb86741 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:51 +0100 Subject: io_uring: convert iopoll_completed to store_release Convert the explicit barriers around iopoll_completed to smp_load_acquire() and smp_store_release(). It is similar on the callback side, but replaces a single smp_rmb() with a per-request smp_load_acquire(); neither implies any extra CPU ordering on x86. Use READ_ONCE as usual where it doesn't matter. Use it to move filling CQEs by iopoll earlier; that will be necessary to avoid traversing the list one extra time in the future.
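The pairing guarantees that a reader observing iopoll_completed == 1 also sees the ->result written before it; schematically (fragments from the hunks below, with fill_cqe() standing in for the CQE fill):

	/* completion side (writer): publish ->result, then the flag */
	req->result = res;
	smp_store_release(&req->iopoll_completed, 1);

	/* reaping side (reader): acquire pairs with the release above */
	if (smp_load_acquire(&req->iopoll_completed))
		fill_cqe(ctx, req->user_data, req->result);	/* result visible */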
Suggested-by: Bart Van Assche Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8bd663cb15efdc72d6247c38ee810964e744a450.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index df122cdfc85d..fb122c2b69f2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2429,17 +2429,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) struct req_batch rb; struct io_kiocb *req; - /* order with ->result store in io_complete_rw_iopoll() */ - smp_rmb(); - io_init_req_batch(&rb); while (!list_empty(done)) { req = list_first_entry(done, struct io_kiocb, inflight_entry); list_del(&req->inflight_entry); - __io_cqring_fill_event(ctx, req->user_data, req->result, - io_put_rw_kbuf(req)); - if (req_ref_put_and_test(req)) io_req_free_batch(&rb, req, &ctx->submit_state); } @@ -2498,8 +2492,12 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) wq_list_for_each_resume(pos, prev) { struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); - if (!READ_ONCE(req->iopoll_completed)) + /* order with io_complete_rw_iopoll(), e.g. ->result updates */ + if (!smp_load_acquire(&req->iopoll_completed)) break; + __io_cqring_fill_event(ctx, req->user_data, req->result, + io_put_rw_kbuf(req)); + list_add_tail(&req->inflight_entry, &done); nr_events++; } @@ -2712,10 +2710,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) } } - WRITE_ONCE(req->result, res); - /* order with io_iopoll_complete() checking ->result */ - smp_wmb(); - WRITE_ONCE(req->iopoll_completed, 1); + req->result = res; + /* order with io_iopoll_complete() checking ->iopoll_completed */ + smp_store_release(&req->iopoll_completed, 1); } /* -- cgit v1.2.3 From f5ed3bcd5b117ffe73b9dc2394bbbad26a68c086 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:52 +0100 Subject: io_uring: optimise batch completion First, convert rest of iopoll bits to single linked lists, and also replace per-request list_add_tail() with splicing a part of slist. With that, use io_free_batch_list() to put/free requests. The main advantage of it is that it's now the only user of struct req_batch and friends, and so they can be inlined. The main overhead there was per-request call to not-inlined io_req_free_batch(), which is expensive enough. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b37fc6d5954b241e025eead7ab92c6f44a42f229.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 38 ++++++++++---------------------------- 1 file changed, 10 insertions(+), 28 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index fb122c2b69f2..91077b56a564 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2421,35 +2421,13 @@ static inline bool io_run_task_work(void) return false; } -/* - * Find and free completed poll iocbs - */ -static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) -{ - struct req_batch rb; - struct io_kiocb *req; - - io_init_req_batch(&rb); - while (!list_empty(done)) { - req = list_first_entry(done, struct io_kiocb, inflight_entry); - list_del(&req->inflight_entry); - - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); - } - - io_commit_cqring(ctx); - io_cqring_ev_posted_iopoll(ctx); - io_req_free_batch_finish(ctx, &rb); -} - static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_wq_work_node *pos, *start, *prev; unsigned int poll_flags = BLK_POLL_NOSLEEP; + struct io_wq_work_list list; DEFINE_IO_COMP_BATCH(iob); int nr_events = 0; - LIST_HEAD(done); /* * Only spin for completions if we don't have multiple devices hanging @@ -2496,15 +2474,19 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (!smp_load_acquire(&req->iopoll_completed)) break; __io_cqring_fill_event(ctx, req->user_data, req->result, - io_put_rw_kbuf(req)); - - list_add_tail(&req->inflight_entry, &done); + io_put_rw_kbuf(req)); nr_events++; } + if (unlikely(!nr_events)) + return 0; + + io_commit_cqring(ctx); + io_cqring_ev_posted_iopoll(ctx); + list.first = start ? start->next : ctx->iopoll_list.first; + list.last = prev; wq_list_cut(&ctx->iopoll_list, prev, start); - if (nr_events) - io_iopoll_complete(ctx, &done); + io_free_batch_list(ctx, &list); return nr_events; } -- cgit v1.2.3 From d4b7a5ef2b9c06def90d12db9b99bd12d75758fb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:53 +0100 Subject: io_uring: inline completion batching helpers We now have a single function for batched put of requests, just inline struct req_batch and all related helpers into it. 
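The batching being inlined amounts to the accumulate-then-flush pattern sketched below; the types and put_owner_refs() are invented stand-ins (for struct task_struct and io_put_task()), so this is a shape illustration rather than the kernel code:

#include <stdio.h>

struct owner { int refs; };

static void put_owner_refs(struct owner *o, int nr)
{
	o->refs -= nr;	/* one bulk put per run instead of one per request */
}

static void free_batch(struct owner **reqs, int nr)
{
	struct owner *cur = NULL;
	int batched = 0;

	for (int i = 0; i < nr; i++) {
		if (reqs[i] != cur) {
			/* owner changed: flush the accumulated puts */
			if (cur)
				put_owner_refs(cur, batched);
			cur = reqs[i];
			batched = 0;
		}
		batched++;	/* defer the put */
	}
	if (cur)
		put_owner_refs(cur, batched);
}

int main(void)
{
	struct owner t1 = { .refs = 3 }, t2 = { .refs = 2 };
	struct owner *reqs[] = { &t1, &t1, &t1, &t2, &t2 };

	free_batch(reqs, 5);
	printf("t1.refs=%d t2.refs=%d\n", t1.refs, t2.refs);	/* 0 0 */
	return 0;
}

Grouping consecutive requests that share an owner turns N reference drops into one per run, which is the cost the commit avoids paying through an out-of-line helper.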
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/595a2917f80dd94288cd7203052c7934f5446580.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 66 ++++++++++++++++++++--------------------------------------- 1 file changed, 22 insertions(+), 44 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 91077b56a564..7229bf74c9e1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2264,63 +2264,41 @@ static void io_free_req_work(struct io_kiocb *req, bool *locked) io_free_req(req); } -struct req_batch { - struct task_struct *task; - int task_refs; - int ctx_refs; -}; - -static inline void io_init_req_batch(struct req_batch *rb) -{ - rb->task_refs = 0; - rb->ctx_refs = 0; - rb->task = NULL; -} - -static void io_req_free_batch_finish(struct io_ring_ctx *ctx, - struct req_batch *rb) -{ - if (rb->ctx_refs) - percpu_ref_put_many(&ctx->refs, rb->ctx_refs); - if (rb->task) - io_put_task(rb->task, rb->task_refs); -} - -static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, - struct io_submit_state *state) -{ - io_queue_next(req); - io_dismantle_req(req); - - if (req->task != rb->task) { - if (rb->task) - io_put_task(rb->task, rb->task_refs); - rb->task = req->task; - rb->task_refs = 0; - } - rb->task_refs++; - rb->ctx_refs++; - wq_stack_add_head(&req->comp_list, &state->free_list); -} - static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_list *list) __must_hold(&ctx->uring_lock) { struct io_wq_work_node *node; - struct req_batch rb; + struct task_struct *task = NULL; + int task_refs = 0, ctx_refs = 0; - io_init_req_batch(&rb); node = list->first; do { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); node = req->comp_list.next; - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); + if (!req_ref_put_and_test(req)) + continue; + + io_queue_next(req); + io_dismantle_req(req); + + if (req->task != task) { + if (task) + io_put_task(task, task_refs); + task = req->task; + task_refs = 0; + } + task_refs++; + ctx_refs++; + wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); - io_req_free_batch_finish(ctx, &rb); + + if (ctx_refs) + percpu_ref_put_many(&ctx->refs, ctx_refs); + if (task) + io_put_task(task, task_refs); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) -- cgit v1.2.3 From 1cce17aca621c38c657410dc278a48cda982dd2e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:54 +0100 Subject: io_uring: don't pass tail into io_free_batch_list io_free_batch_list() iterates all requests in the passed in list, so we don't really need to know the tail but can keep iterating until meet NULL. Just pass the first node into it and it will be enough. 
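In sketch form (hypothetical names, not the kernel's), the resulting walk needs nothing but the first node of a properly NULL-terminated run:

#include <stdio.h>

struct node { struct node *next; int id; };

/* Walk a NULL-terminated run: the caller supplies only the first node. */
static void visit_all(struct node *node)
{
	do {
		struct node *next = node->next;	/* fetch before the node may be recycled */

		printf("req %d\n", node->id);
		node = next;
	} while (node);
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

	visit_all(&a);
	return 0;
}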
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/4a12c84b6d887d980e05f417ba4172d04c64acae.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7229bf74c9e1..693ad31082ae 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2265,14 +2265,12 @@ static void io_free_req_work(struct io_kiocb *req, bool *locked)
 }
 
 static void io_free_batch_list(struct io_ring_ctx *ctx,
-				struct io_wq_work_list *list)
+				struct io_wq_work_node *node)
 	__must_hold(&ctx->uring_lock)
 {
-	struct io_wq_work_node *node;
 	struct task_struct *task = NULL;
 	int task_refs = 0, ctx_refs = 0;
 
-	node = list->first;
 	do {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    comp_list);
@@ -2319,7 +2317,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	spin_unlock(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
 
-	io_free_batch_list(ctx, &state->compl_reqs);
+	io_free_batch_list(ctx, state->compl_reqs.first);
 	INIT_WQ_LIST(&state->compl_reqs);
 }
 
@@ -2403,7 +2401,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
 	struct io_wq_work_node *pos, *start, *prev;
 	unsigned int poll_flags = BLK_POLL_NOSLEEP;
-	struct io_wq_work_list list;
 	DEFINE_IO_COMP_BATCH(iob);
 	int nr_events = 0;
 
@@ -2461,10 +2458,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 
-	list.first = start ? start->next : ctx->iopoll_list.first;
-	list.last = prev;
+	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);
-	io_free_batch_list(ctx, &list);
+	io_free_batch_list(ctx, pos);
 
 	return nr_events;
 }
-- cgit v1.2.3


From 553deffd0920fc52e8c91e7f8a42d1186a75760a Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 21:59:55 +0100
Subject: io_uring: don't pass state to io_submit_state_end

Submission state and ctx are coupled together, so there is no need to
pass the state separately.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/e22d77a5786ef77e0c49b933ad74bae55cfb6ca6.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 693ad31082ae..5088b47d1bc4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7137,11 +7137,13 @@ fail_req:
 /*
  * Batched submission is done, ensure local IO is flushed out.
 */
-static void io_submit_state_end(struct io_submit_state *state,
-				struct io_ring_ctx *ctx)
+static void io_submit_state_end(struct io_ring_ctx *ctx)
 {
+	struct io_submit_state *state = &ctx->submit_state;
+
 	if (state->link.head)
 		io_queue_sqe(state->link.head);
+	/* flush only after queuing links as they can generate completions */
 	io_submit_flush_completions(ctx);
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
@@ -7244,7 +7246,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		percpu_ref_put_many(&ctx->refs, unused);
 	}
 
-	io_submit_state_end(&ctx->submit_state, ctx);
+	io_submit_state_end(ctx);
 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
 	io_commit_sqring(ctx);
-- cgit v1.2.3


From f15a3431775a598ed89028acee334200160cc2d6 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 21:59:56 +0100
Subject: io_uring: deduplicate io_queue_sqe() call sites

There are two call sites of io_queue_sqe() in io_submit_sqe(); combine
them into one. io_queue_sqe() is inline, so duplicating it bloats the
binary, and it will only grow bigger:

text    data   bss  dec     hex    filename
92126   13986  8    106120  19e88  ./fs/io_uring.o
91966   13986  8    105960  19de8  ./fs/io_uring.o

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/506124b8e767f0a4576f7a459f6aea3d13fb4dda.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5088b47d1bc4..9b99ec46da25 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7117,20 +7117,18 @@ fail_req:
 		link->last->link = req;
 		link->last = req;
 
+		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
+			return 0;
 		/* last request of a link, enqueue the link */
-		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
-			link->head = NULL;
-			io_queue_sqe(head);
-		}
-	} else {
-		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
-			link->head = req;
-			link->last = req;
-		} else {
-			io_queue_sqe(req);
-		}
+		link->head = NULL;
+		req = head;
+	} else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+		link->head = req;
+		link->last = req;
+		return 0;
 	}
 
+	io_queue_sqe(req);
 	return 0;
 }
-- cgit v1.2.3


From 2a56a9bd64dbdff4fe5cbe00b20014da07694a78 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 21:59:57 +0100
Subject: io_uring: remove drain_active check from hot path

Checking req->ctx->drain_active is a bit too expensive, partially
because of two dereferences. Do a trick: if we see it set in
io_init_req(), set REQ_F_FORCE_ASYNC, and the request automatically
goes through a slower path where we can catch it. It's nearly free to
do in io_init_req() because there is already a ->restricted check, and
it's in the same byte of a bitmask.
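The "same byte of a bitmask" trick generalises: fold every rare condition into flag bits that share one word, so the hot path pays a single test and branch. A toy sketch with invented flag names (not the real REQ_F_* values):

#include <stdio.h>

enum {
	F_FAIL        = 1 << 0,	/* request already marked failed */
	F_FORCE_ASYNC = 1 << 1,	/* must take the slow, punted path */
};

static void queue_fallback(unsigned int flags)
{
	printf("slow path, flags=%#x\n", flags);
}

static void queue_fast(void)
{
	printf("fast path\n");
}

static inline void queue(unsigned int flags)
{
	/* one cheap test covers all rare cases at once */
	if (flags & (F_FAIL | F_FORCE_ASYNC))
		queue_fallback(flags);
	else
		queue_fast();
}

int main(void)
{
	queue(0);		/* common case: single predictable branch */
	queue(F_FORCE_ASYNC);	/* rare case: sorted out in the fallback */
	return 0;
}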
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d7e7ddc63c15e8a300833132abb3eb8fd3918aef.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 53 +++++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 24 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 9b99ec46da25..f0fb458e81f3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6438,23 +6438,15 @@ static bool io_drain_req(struct io_kiocb *req) int ret; u32 seq; - if (req->flags & REQ_F_FAIL) { - io_req_complete_fail_submit(req); - return true; - } - - /* - * If we need to drain a request in the middle of a link, drain the - * head request and the next request/link after the current link. - * Considering sequential execution of links, IOSQE_IO_DRAIN will be - * maintained for every request of our link. - */ - if (ctx->drain_next) { - req->flags |= REQ_F_IO_DRAIN; - ctx->drain_next = false; - } /* not interested in head, start from the first linked */ io_for_each_link(pos, req->link) { + /* + * If we need to drain a request in the middle of a link, drain + * the head request and the next request/link after the current + * link. Considering sequential execution of links, + * IOSQE_IO_DRAIN will be maintained for every request of our + * link. + */ if (pos->flags & REQ_F_IO_DRAIN) { ctx->drain_next = true; req->flags |= REQ_F_IO_DRAIN; @@ -6946,13 +6938,12 @@ issue_sqe: static inline void io_queue_sqe(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { - if (unlikely(req->ctx->drain_active) && io_drain_req(req)) - return; - if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { __io_queue_sqe(req); } else if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); + } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { + return; } else { int ret = io_req_prep_async(req); @@ -6972,9 +6963,6 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags) { - if (likely(!ctx->restricted)) - return true; - if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) return false; @@ -7015,11 +7003,28 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if ((sqe_flags & IOSQE_BUFFER_SELECT) && !io_op_defs[req->opcode].buffer_select) return -EOPNOTSUPP; - if (sqe_flags & IOSQE_IO_DRAIN) + if (sqe_flags & IOSQE_IO_DRAIN) { + struct io_submit_link *link = &ctx->submit_state.link; + ctx->drain_active = true; + req->flags |= REQ_F_FORCE_ASYNC; + if (link->head) + link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; + } + } + if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { + if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) + return -EACCES; + /* knock it to the slow queue path, will be drained there */ + if (ctx->drain_active) + req->flags |= REQ_F_FORCE_ASYNC; + /* if there is no link, we're at "next" request and need to drain */ + if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { + ctx->drain_next = false; + ctx->drain_active = true; + req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN; + } } - if (!io_check_restriction(ctx, req, sqe_flags)) - return -EACCES; personality = READ_ONCE(sqe->personality); if (personality) { -- cgit v1.2.3 From 4652fe3f10e57abc3fc6ac11c431b2ef39f78c03 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:58 +0100 Subject: io_uring: split slow path from io_queue_sqe We don't want the slow path of io_queue_sqe to be inlined, so 
extract a function from it. text data bss dec hex filename 91950 13986 8 105944 19dd8 ./fs/io_uring.o 91758 13986 8 105752 19d18 ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/fb01253911f8fb374268f65b1ba939b54ca6583f.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index f0fb458e81f3..e5a414c208bd 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6935,12 +6935,10 @@ issue_sqe: } } -static inline void io_queue_sqe(struct io_kiocb *req) +static void io_queue_sqe_fallback(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { - if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { - __io_queue_sqe(req); - } else if (req->flags & REQ_F_FAIL) { + if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { return; @@ -6954,6 +6952,15 @@ static inline void io_queue_sqe(struct io_kiocb *req) } } +static inline void io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) +{ + if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) + __io_queue_sqe(req); + else + io_queue_sqe_fallback(req); +} + /* * Check SQE restrictions (opcode and flags). * -- cgit v1.2.3 From d475a9a6226c86b7febe3863b900b820a0e6b71c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:59 +0100 Subject: io_uring: inline hot path of __io_queue_sqe() Extract slow paths from __io_queue_sqe() into a function and inline the hot path. With that we have everything completely inlined on the submission path up until io_issue_sqe(). -> io_submit_sqes() -> io_submit_sqe() (inlined) -> io_queue_sqe() (inlined) -> __io_queue_sqe() (inlined) -> io_issue_sqe() Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f1606864d95d7f26dc28c7eec3dc6ed6ec32618a.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 48 ++++++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 20 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index e5a414c208bd..d4f8abb451c4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6891,13 +6891,38 @@ static void io_queue_linked_timeout(struct io_kiocb *req) io_put_req(req); } -static void __io_queue_sqe(struct io_kiocb *req) +static void io_queue_sqe_arm_apoll(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) +{ + struct io_kiocb *linked_timeout = io_prep_linked_timeout(req); + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + if (linked_timeout) { + io_unprep_linked_timeout(req); + linked_timeout = NULL; + } + io_req_task_queue(req); + break; + case IO_APOLL_ABORTED: + /* + * Queued up for async execution, worker will release + * submit reference when the iocb is actually submitted. 
+ */ + io_queue_async_work(req, NULL); + break; + } + + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); +} + +static inline void __io_queue_sqe(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { struct io_kiocb *linked_timeout; int ret; -issue_sqe: ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); /* @@ -6912,24 +6937,7 @@ issue_sqe: if (linked_timeout) io_queue_linked_timeout(linked_timeout); } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { - linked_timeout = io_prep_linked_timeout(req); - - switch (io_arm_poll_handler(req)) { - case IO_APOLL_READY: - if (linked_timeout) - io_unprep_linked_timeout(req); - goto issue_sqe; - case IO_APOLL_ABORTED: - /* - * Queued up for async execution, worker will release - * submit reference when the iocb is actually submitted. - */ - io_queue_async_work(req, NULL); - break; - } - - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); + io_queue_sqe_arm_apoll(req); } else { io_req_complete_failed(req, ret); } -- cgit v1.2.3 From d9f9d2842c9156470b3f1d3dafe5684a3c036366 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:00 +0100 Subject: io_uring: reshuffle queue_sqe completion handling If a request completed inline the result should only be zero, it's a grave error otherwise. So, when we see REQ_F_COMPLETE_INLINE it's not even necessary to check the return code, and the flag check can be moved earlier. It's one "if" less for inline completions, and same two checks for it normally completing (ret == 0). Those are two cases we care about the most. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ebd4e397a9c26d96c99b24447acc309741041a83.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d4f8abb451c4..bbb2a262c272 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6925,14 +6925,13 @@ static inline void __io_queue_sqe(struct io_kiocb *req) ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); + if (req->flags & REQ_F_COMPLETE_INLINE) + return; /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ if (likely(!ret)) { - if (req->flags & REQ_F_COMPLETE_INLINE) - return; - linked_timeout = io_prep_linked_timeout(req); if (linked_timeout) io_queue_linked_timeout(linked_timeout); -- cgit v1.2.3 From 6962980947e2b967ab26bfd34004b6b573597513 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:01 +0100 Subject: io_uring: restructure submit sqes to_submit checks Put an explicit check for number of requests to submit. First, we can turn while into do-while and it generates better code, and second that if can be cheaper, e.g. by using CPU flags after sub in io_sqring_entries(). 
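The while-to-do-while point can be seen in isolation: once the count is known to be non-zero, the loop body needs no entry test, so the compiler emits one less branch on the way in. A tiny standalone example, not io_uring code:

#include <stdio.h>

static int sum_first(const int *v, unsigned int n)
{
	int sum = 0;

	if (!n)		/* one explicit check up front... */
		return 0;
	do {		/* ...so the loop itself has no entry guard */
		sum += *v++;
	} while (--n);
	return sum;
}

int main(void)
{
	int v[] = { 1, 2, 3 };

	printf("%d\n", sum_first(v, 3));
	return 0;
}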
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/5926baadd20c28feab7a5e1725fedf32e4553ff7.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index bbb2a262c272..b8f60cf36156 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7225,16 +7225,19 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	__must_hold(&ctx->uring_lock)
 {
+	unsigned int entries = io_sqring_entries(ctx);
 	int submitted = 0;
 
+	if (!entries)
+		return 0;
 	/* make sure SQ entry isn't read before tail */
-	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
+	nr = min3(nr, ctx->sq_entries, entries);
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 	io_get_task_refs(nr);
 
 	io_submit_state_start(&ctx->submit_state, nr);
-	while (submitted < nr) {
+	do {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
 
@@ -7253,7 +7256,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		submitted++;
 		if (io_submit_sqe(ctx, req, sqe))
 			break;
-	}
+	} while (submitted < nr);
 
 	if (unlikely(submitted != nr)) {
 		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
-- cgit v1.2.3


From ef05d9ebcc927260f700a94436e7c9347657bbef Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 22:00:02 +0100
Subject: io_uring: kill off ->inflight_entry field

->inflight_entry is not used anymore after converting everything to
single linked lists; remove it. Also adjust the io_kiocb layout, so all
hot bits are in the first 3 cachelines.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/fd8d68087ede26c4e1707ce6b175aa1eb2381f2b.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b8f60cf36156..eff24d8eb399 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -868,18 +868,15 @@ struct io_kiocb {
 	struct percpu_ref	*fixed_rsrc_refs;
 
 	/* used with ctx->iopoll_list with reads/writes */
-	struct list_head	inflight_entry;
+	struct io_wq_work_node	comp_list;
 	struct io_task_work	io_task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
 	struct hlist_node	hash_node;
 	struct async_poll	*apoll;
-	struct io_wq_work	work;
-	const struct cred	*creds;
-
-	struct io_wq_work_node	comp_list;
-
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf	*imu;
+	struct io_wq_work	work;
+	const struct cred	*creds;
 };
 
 struct io_tctx_node {
-- cgit v1.2.3


From a1cdbb4cb5f7190c429b1127892f756b7cd32db4 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 22:00:03 +0100
Subject: io_uring: comment why inline complete calls io_clean_op()

io_req_complete_state() calls io_clean_op(), and it may not be entirely
obvious; leave a comment.
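The aliasing that the new comment documents is the usual union hazard: state owned through one member must be cleaned up before another member is written. A reduced illustration with invented types, not the real io_kiocb:

#include <stdlib.h>

struct completion_state { int res; int cflags; };

struct request {
	int needs_clean;	/* set while ->buf below owns an allocation */
	union {
		char *buf;			/* per-opcode data... */
		struct completion_state compl;	/* ...aliased by completion state */
	};
};

static void complete(struct request *req, int res, int cflags)
{
	/* free the per-opcode space first: writing ->compl would leak ->buf */
	if (req->needs_clean) {
		free(req->buf);
		req->needs_clean = 0;
	}
	req->compl.res = res;
	req->compl.cflags = cflags;
}

int main(void)
{
	struct request req = { .needs_clean = 1, .buf = malloc(16) };

	complete(&req, 0, 0);
	return 0;
}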
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/21806f862151e223fdf439e5e8ed7178a8d66979.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index eff24d8eb399..5a06063d5d81 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1822,6 +1822,7 @@ static void io_req_complete_state(struct io_kiocb *req, long res,
 {
 	struct io_submit_state *state;
 
+	/* clean per-opcode space, because req->compl is aliased with it */
 	if (io_req_needs_clean(req))
 		io_clean_op(req);
 	req->result = res;
-- cgit v1.2.3


From 5e371265ea1d3e0cd02236b1a6d79fe322523ae8 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 24 Sep 2021 22:00:04 +0100
Subject: io_uring: disable draining earlier

Clear ->drain_active in two more cases where we check for a need of
draining. It's not a bug, but it still may lead to some extra requests
being punted to io-wq, and that may not be desirable.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/d20b265f77bb4e8860b15b9987252c7c711dfcba.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5a06063d5d81..fd3dc6432a34 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6461,8 +6461,10 @@ static bool io_drain_req(struct io_kiocb *req)
 	seq = io_get_sequence(req);
 	/* Still a chance to pass the sequence check */
-	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
+	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
+		ctx->drain_active = false;
 		return false;
+	}
 
 	ret = io_req_prep_async(req);
 	if (ret)
@@ -6481,6 +6483,7 @@ fail:
 		spin_unlock(&ctx->completion_lock);
 		kfree(de);
 		io_queue_async_work(req, NULL);
+		ctx->drain_active = false;
 		return true;
 	}
-- cgit v1.2.3


From 22b2ca310afcea319c72e051df0371f668192b10 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 1 Oct 2021 18:07:00 +0100
Subject: io_uring: extract a helper for drain init

Add a helper io_init_req_drain for initialising requests with
IOSQE_IO_DRAIN set. Also move bits from the preamble of io_drain_req()
in there, because we already modify all the bits needed inside the
helper.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/dcb412825b35b1cb8891245a387d7d69f8d14cef.1633107393.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 48 ++++++++++++++++++++++--------------------------
 1 file changed, 22 insertions(+), 26 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index fd3dc6432a34..fb073915fa5c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6430,28 +6430,11 @@ static u32 io_get_sequence(struct io_kiocb *req)
 
 static bool io_drain_req(struct io_kiocb *req)
 {
-	struct io_kiocb *pos;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
 	int ret;
 	u32 seq;
 
-	/* not interested in head, start from the first linked */
-	io_for_each_link(pos, req->link) {
-		/*
-		 * If we need to drain a request in the middle of a link, drain
-		 * the head request and the next request/link after the current
-		 * link. Considering sequential execution of links,
-		 * IOSQE_IO_DRAIN will be maintained for every request of our
-		 * link.
-		 */
-		if (pos->flags & REQ_F_IO_DRAIN) {
-			ctx->drain_next = true;
-			req->flags |= REQ_F_IO_DRAIN;
-			break;
-		}
-	}
-
 	/* Still need defer if there is pending req in defer list.
*/ if (likely(list_empty_careful(&ctx->defer_list) && !(req->flags & REQ_F_IO_DRAIN))) { @@ -6992,6 +6975,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, return true; } +static void io_init_req_drain(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *head = ctx->submit_state.link.head; + + ctx->drain_active = true; + if (head) { + /* + * If we need to drain a request in the middle of a link, drain + * the head request and the next request/link after the current + * link. Considering sequential execution of links, + * IOSQE_IO_DRAIN will be maintained for every request of our + * link. + */ + head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; + ctx->drain_next = true; + } +} + static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) __must_hold(&ctx->uring_lock) @@ -7018,14 +7020,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if ((sqe_flags & IOSQE_BUFFER_SELECT) && !io_op_defs[req->opcode].buffer_select) return -EOPNOTSUPP; - if (sqe_flags & IOSQE_IO_DRAIN) { - struct io_submit_link *link = &ctx->submit_state.link; - - ctx->drain_active = true; - req->flags |= REQ_F_FORCE_ASYNC; - if (link->head) - link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; - } + if (sqe_flags & IOSQE_IO_DRAIN) + io_init_req_drain(req); } if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) @@ -7037,7 +7033,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { ctx->drain_next = false; ctx->drain_active = true; - req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN; + req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; } } -- cgit v1.2.3 From e0eb71dcfc4b862261d99f7f90169142867beb0a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:01 +0100 Subject: io_uring: don't return from io_drain_req() Never return from io_drain_req() but punt to tw if we've got there but it's a false positive and we shouldn't actually drain. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/93583cee51b8783706b76c73196c155b28d9e762.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index fb073915fa5c..b0b4c5b00f37 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req) return seq; } -static bool io_drain_req(struct io_kiocb *req) +static void io_drain_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; int ret; - u32 seq; + u32 seq = io_get_sequence(req); /* Still need defer if there is pending req in defer list. 
*/ - if (likely(list_empty_careful(&ctx->defer_list) && - !(req->flags & REQ_F_IO_DRAIN))) { - ctx->drain_active = false; - return false; - } - - seq = io_get_sequence(req); - /* Still a chance to pass the sequence check */ if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { +queue: ctx->drain_active = false; - return false; + io_req_task_queue(req); + return; } ret = io_req_prep_async(req); - if (ret) - goto fail; + if (ret) { +fail: + io_req_complete_failed(req, ret); + return; + } io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); if (!de) { ret = -ENOMEM; -fail: - io_req_complete_failed(req, ret); - return true; + goto fail; } spin_lock(&ctx->completion_lock); if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { spin_unlock(&ctx->completion_lock); kfree(de); - io_queue_async_work(req, NULL); - ctx->drain_active = false; - return true; + goto queue; } trace_io_uring_defer(ctx, req, req->user_data); @@ -6475,7 +6468,6 @@ fail: de->seq = seq; list_add_tail(&de->list, &ctx->defer_list); spin_unlock(&ctx->completion_lock); - return true; } static void io_clean_op(struct io_kiocb *req) @@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) { if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); - } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { - return; + } else if (unlikely(req->ctx->drain_active)) { + io_drain_req(req); } else { int ret = io_req_prep_async(req); -- cgit v1.2.3 From fc0ae0244bbbedf309a835afb278e39a3fc9bb94 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:02 +0100 Subject: io_uring: init opcode in io_init_req() Move io_req_prep() call inside of io_init_req(), it simplifies a bit error handling for callers. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a0f59291fd52da4672c323542fd56fd899e23f8f.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index b0b4c5b00f37..81292041378f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6992,7 +6992,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, { struct io_submit_state *state; unsigned int sqe_flags; - int personality, ret = 0; + int personality; /* req is partially pre-initialised, see io_preinit_req() */ req->opcode = READ_ONCE(sqe->opcode); @@ -7053,9 +7053,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), (sqe_flags & IOSQE_FIXED_FILE)); if (unlikely(!req->file)) - ret = -EBADF; + return -EBADF; } - return ret; + + return io_req_prep(req, sqe); } static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, @@ -7067,7 +7068,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ret = io_init_req(ctx, req, sqe); if (unlikely(ret)) { -fail_req: trace_io_uring_req_failed(sqe, ret); /* fail even hard links since we don't submit */ @@ -7092,10 +7092,6 @@ fail_req: return ret; } req_fail_link_node(req, ret); - } else { - ret = io_req_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; } /* don't need @sqe from now on */ -- cgit v1.2.3 From 30d51dd4ad2040d4c90497287b69635af7c67502 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:03 +0100 Subject: io_uring: clean up buffer select Hiding a pointer to a struct io_buffer in rw.addr is error prone. 
We have some place in io_kiocb, so keep kbuf's in a separate field without aliasing and risks of it being misused. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/3e63a6a953b04cad81d9ea827b12344dd57b37b4.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 46 ++++++++++++---------------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 81292041378f..5b3b52815b88 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -573,7 +573,6 @@ struct io_sr_msg { int msg_flags; int bgid; size_t len; - struct io_buffer *kbuf; }; struct io_open { @@ -877,6 +876,7 @@ struct io_kiocb { struct io_mapped_ubuf *imu; struct io_wq_work work; const struct cred *creds; + struct io_buffer *kbuf; }; struct io_tctx_node { @@ -2376,12 +2376,9 @@ static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) { - struct io_buffer *kbuf; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) return 0; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; - return io_put_kbuf(req, kbuf); + return io_put_kbuf(req, req->kbuf); } static inline bool io_run_task_work(void) @@ -3000,9 +2997,9 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) } static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, - int bgid, struct io_buffer *kbuf, - bool needs_lock) + int bgid, bool needs_lock) { + struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; if (req->flags & REQ_F_BUFFER_SELECTED) @@ -3024,12 +3021,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, } if (*len > kbuf->len) *len = kbuf->len; + req->flags |= REQ_F_BUFFER_SELECTED; + req->kbuf = kbuf; } else { kbuf = ERR_PTR(-ENOBUFS); } io_ring_submit_unlock(req->ctx, needs_lock); - return kbuf; } @@ -3039,13 +3037,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer *kbuf; u16 bgid; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; bgid = req->buf_index; - kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); + kbuf = io_buffer_select(req, len, bgid, needs_lock); if (IS_ERR(kbuf)) return kbuf; - req->rw.addr = (u64) (unsigned long) kbuf; - req->flags |= REQ_F_BUFFER_SELECTED; return u64_to_user_ptr(kbuf->addr); } @@ -3101,9 +3096,8 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, bool needs_lock) { if (req->flags & REQ_F_BUFFER_SELECTED) { - struct io_buffer *kbuf; + struct io_buffer *kbuf = req->kbuf; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; iov[0].iov_base = u64_to_user_ptr(kbuf->addr); iov[0].iov_len = kbuf->len; return 0; @@ -4869,20 +4863,13 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, bool needs_lock) { struct io_sr_msg *sr = &req->sr_msg; - struct io_buffer *kbuf; - - kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); - if (IS_ERR(kbuf)) - return kbuf; - sr->kbuf = kbuf; - req->flags |= REQ_F_BUFFER_SELECTED; - return kbuf; + return io_buffer_select(req, &sr->len, sr->bgid, needs_lock); } static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) { - return io_put_kbuf(req, req->sr_msg.kbuf); + return io_put_kbuf(req, req->kbuf); } static int io_recvmsg_prep_async(struct io_kiocb *req) @@ -6473,17 +6460,8 @@ fail: static void io_clean_op(struct io_kiocb *req) { if (req->flags & REQ_F_BUFFER_SELECTED) { - switch 
(req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - kfree((void *)(unsigned long)req->rw.addr); - break; - case IORING_OP_RECVMSG: - case IORING_OP_RECV: - kfree(req->sr_msg.kbuf); - break; - } + kfree(req->kbuf); + req->kbuf = NULL; } if (req->flags & REQ_F_NEED_CLEANUP) { -- cgit v1.2.3 From 6224590d242fc8c6b26941328e02a40b4384949b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 2 Oct 2021 19:36:14 +0100 Subject: io_uring: add flag to not fail link after timeout For some reason non-off IORING_OP_TIMEOUT always fails links, it's pretty inconvenient and unnecessary limits chaining after it to hard linking, which is far from ideal, e.g. doesn't pair well with timeout cancellation. Add a flag forcing it to not fail links on -ETIME. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/17c7ec0fb7a6113cc6be8cdaedcada0ba836ac0e.1633199723.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- include/uapi/linux/io_uring.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5b3b52815b88..5b3b4f8f9659 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5857,7 +5857,10 @@ err: static void io_req_task_timeout(struct io_kiocb *req, bool *locked) { - req_set_fail(req); + struct io_timeout_data *data = req->async_data; + + if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS)) + req_set_fail(req); io_req_complete_post(req, -ETIME, 0); } @@ -6063,7 +6066,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); - if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) + if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK | + IORING_TIMEOUT_ETIME_SUCCESS)) return -EINVAL; /* more than one clock specified is invalid, obviously */ if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b270a07b285e..c45b5e9a9387 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -158,6 +158,7 @@ enum { #define IORING_TIMEOUT_BOOTTIME (1U << 2) #define IORING_TIMEOUT_REALTIME (1U << 3) #define IORING_LINK_TIMEOUT_UPDATE (1U << 4) +#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) #define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) /* -- cgit v1.2.3 From 7e3709d57651feab9c77d7a5a8024041f73f69f7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:46 +0100 Subject: io_uring: optimise kiocb layout We want ->comp_list in the second cacheline, which is hotter comparing to the 3rd. Swap the field with ->link, which is not as hot and controlled by flags and so not accessed unless there is a link. By the way add a couple of comments for io_kiocb fields. 
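Layout intentions like "hot bits in the first cachelines" can be pinned down at compile time; below is a small sketch where the struct, the field names and the 64-byte line size are all assumptions for illustration, not the real io_kiocb:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct toy_req {
	/* hot: touched on every submission/completion */
	void *file;
	unsigned int flags;
	long result;
	void *comp_list;	/* wants to live in an early cacheline */
	/* cold: links, polling, credentials and other rare state */
	char cold[200];
};

/* fail the build if the hot field ever drifts past the second line */
static_assert(offsetof(struct toy_req, comp_list) < 2 * CACHELINE,
	      "comp_list fell out of the hot cachelines");

int main(void)
{
	printf("comp_list at offset %zu\n",
	       offsetof(struct toy_req, comp_list));
	return 0;
}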
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9d9dde31f8f62279a5f48c575bbc27b8290edc0c.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5b3b4f8f9659..ab07a0813f20 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -863,19 +863,22 @@ struct io_kiocb { struct task_struct *task; u64 user_data; - struct io_kiocb *link; struct percpu_ref *fixed_rsrc_refs; - /* used with ctx->iopoll_list with reads/writes */ + /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; + struct io_kiocb *link; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; + /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; /* store used ubuf, so we can prevent reloading */ struct io_mapped_ubuf *imu; struct io_wq_work work; + /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; }; -- cgit v1.2.3 From 51d48dab62ed9e44e0ccc723411b0d95286a821c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:47 +0100 Subject: io_uring: add more likely/unlikely() annotations Add two extra unlikely() in io_submit_sqes() and one around io_req_needs_clean() to help the compiler to avoid extra jumps in hot paths. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/88e087afe657e7660194353aada9b00f11d480f9.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index ab07a0813f20..bc87ed85f206 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1957,7 +1957,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; - if (io_req_needs_clean(req)) + if (unlikely(io_req_needs_clean(req))) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); @@ -7198,11 +7198,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) unsigned int entries = io_sqring_entries(ctx); int submitted = 0; - if (!entries) + if (unlikely(!entries)) return 0; /* make sure SQ entry isn't read before tail */ nr = min3(nr, ctx->sq_entries, entries); - if (!percpu_ref_tryget_many(&ctx->refs, nr)) + if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr))) return -EAGAIN; io_get_task_refs(nr); -- cgit v1.2.3 From fff4e40e3094972b119e38c8f0de4e1ca9fec654 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:48 +0100 Subject: io_uring: delay req queueing into compl-batch list io_req_complete_state() is inlined and used in lots of places, so we want to keep it concise. Move adding a request into a completion batch list from io_req_complete_state() into the consumer, i.e. __io_queue_sqe(). 
before vs after
text    data   bss  dec     hex    filename
91894   14002  8    105904  19db0  ./fs/io_uring.o
91046   14002  8    105056  19a60  ./fs/io_uring.o

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/4afca4e11abfd4cc8e99777fdcaf4d34cf4d022d.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index bc87ed85f206..045474b7b9c2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1446,6 +1446,13 @@ static void io_prep_async_link(struct io_kiocb *req)
 	}
 }
 
+static inline void io_req_add_compl_list(struct io_kiocb *req)
+{
+	struct io_submit_state *state = &req->ctx->submit_state;
+
+	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
+}
+
 static void io_queue_async_work(struct io_kiocb *req, bool *locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1820,20 +1827,15 @@ static inline bool io_req_needs_clean(struct io_kiocb *req)
 	return req->flags & IO_REQ_CLEAN_FLAGS;
 }
 
-static void io_req_complete_state(struct io_kiocb *req, long res,
-				  unsigned int cflags)
+static inline void io_req_complete_state(struct io_kiocb *req, long res,
+					 unsigned int cflags)
 {
-	struct io_submit_state *state;
-
 	/* clean per-opcode space, because req->compl is aliased with it */
 	if (io_req_needs_clean(req))
 		io_clean_op(req);
 	req->result = res;
 	req->compl.cflags = cflags;
 	req->flags |= REQ_F_COMPLETE_INLINE;
-
-	state = &req->ctx->submit_state;
-	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
 }
 
 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
@@ -2626,10 +2628,12 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 	unsigned int cflags = io_put_rw_kbuf(req);
 	long res = req->result;
 
-	if (*locked)
+	if (*locked) {
 		io_req_complete_state(req, res, cflags);
-	else
+		io_req_add_compl_list(req);
+	} else {
 		io_req_complete_post(req, res, cflags);
+	}
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
@@ -6886,8 +6890,10 @@ static inline void __io_queue_sqe(struct io_kiocb *req)
 
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
-	if (req->flags & REQ_F_COMPLETE_INLINE)
+	if (req->flags & REQ_F_COMPLETE_INLINE) {
+		io_req_add_compl_list(req);
 		return;
+	}
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
-- cgit v1.2.3


From a33ae9ce16a8ca62c5dffbe8909d185c6c5b4d77 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 4 Oct 2021 20:02:49 +0100
Subject: io_uring: optimise request allocation

Even after fully inlining io_alloc_req(), my compiler does a NULL check
in the path of successful allocation, and no hacks like an empty
dereference help it. Restructure io_alloc_req() by splitting out the
refilling part, so the compiler generates a slightly better binary.
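In miniature, the restructuring separates a tiny "cannot fail" fast path from an out-of-line "may fail" refill; it is the split itself that lets the compiler drop the NULL check from the success path. The sketch below uses made-up names, a plain array as the cache, and assumes GCC/Clang attribute support:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 8

static void *cache[BATCH];
static int cached;

/* slow path: out of line (and cold) so the fast path stays tiny */
static __attribute__((cold, noinline)) bool cache_refill(void)
{
	for (cached = 0; cached < BATCH; cached++) {
		cache[cached] = malloc(64);
		if (!cache[cached])
			break;
	}
	return cached != 0;
}

static inline bool cache_ensure(void)
{
	if (cached)	/* common case: nothing to check afterwards */
		return true;
	return cache_refill();
}

static inline void *cache_get(void)
{
	return cache[--cached];	/* success guaranteed by cache_ensure() */
}

int main(void)
{
	if (!cache_ensure())
		return 1;
	printf("req at %p\n", cache_get());
	return 0;
}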
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/eda17571bdc7248d8e617b23e7132a5416e4680b.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 045474b7b9c2..614c982c2697 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1912,18 +1912,17 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) * Because of that, io_alloc_req() should be called only under ->uring_lock * and with extra caution to not get a request that is still worked on. */ -static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) +static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; void *reqs[IO_REQ_ALLOC_BATCH]; - struct io_wq_work_node *node; struct io_kiocb *req; int ret, i; if (likely(state->free_list.next || io_flush_cached_reqs(ctx))) - goto got_req; + return true; ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); @@ -1934,7 +1933,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) if (unlikely(ret <= 0)) { reqs[0] = kmem_cache_alloc(req_cachep, gfp); if (!reqs[0]) - return NULL; + return false; ret = 1; } @@ -1944,8 +1943,21 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) io_preinit_req(req, ctx); wq_stack_add_head(&req->comp_list, &state->free_list); } -got_req: - node = wq_stack_extract(&state->free_list); + return true; +} + +static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx) +{ + if (unlikely(!ctx->submit_state.free_list.next)) + return __io_alloc_req_refill(ctx); + return true; +} + +static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) +{ + struct io_wq_work_node *node; + + node = wq_stack_extract(&ctx->submit_state.free_list); return container_of(node, struct io_kiocb, comp_list); } @@ -7217,12 +7229,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) const struct io_uring_sqe *sqe; struct io_kiocb *req; - req = io_alloc_req(ctx); - if (unlikely(!req)) { + if (unlikely(!io_alloc_req_refill(ctx))) { if (!submitted) submitted = -EAGAIN; break; } + req = io_alloc_req(ctx); sqe = io_get_sqe(ctx); if (unlikely(!sqe)) { wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); -- cgit v1.2.3 From aede728aae355d4c1d38dd02747d415af011eea7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:51 +0100 Subject: io_uring: don't wake sqpoll in io_cqring_ev_posted io_cqring_ev_posted() doesn't need to wake SQPOLL, it's either done by userspace or with task_work, but no action is required on request completion. Rip off bits waking it up in io_cqring_ev_posted(). 
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/b49dab27b64cf11f4c50f2f90dcaac123430e05d.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 614c982c2697..af2cbe633345 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1619,8 +1619,6 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
-		wake_up(&ctx->sq_data->wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
 	if (waitqueue_active(&ctx->poll_wait))
-- cgit v1.2.3


From d60aa65ba221f038404b98d8484f562f72bb807b Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 4 Oct 2021 20:02:52 +0100
Subject: io_uring: merge CQ and poll waitqueues

->cq_wait and ->poll_wait are woken up in the same manner; use a single
waitqueue for both of them. CQ waiters are queued exclusively, so wake
up should first go over all pollers, and that's what we need.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/00fe603e50000365774cf8435ef5fe03f049c1c9.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

(limited to 'fs/io_uring.c')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index af2cbe633345..c9e6f4b68718 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -394,7 +394,6 @@ struct io_ring_ctx {
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
 		struct eventfd_ctx	*cq_ev_fd;
-		struct wait_queue_head	poll_wait;
 		struct wait_queue_head	cq_wait;
 		unsigned		cq_extra;
 		atomic_t		cq_timeouts;
@@ -1300,7 +1299,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	ctx->flags = p->flags;
 	init_waitqueue_head(&ctx->sqo_sq_wait);
 	INIT_LIST_HEAD(&ctx->sqd_list);
-	init_waitqueue_head(&ctx->poll_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
@@ -1621,8 +1619,6 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 		wake_up_all(&ctx->cq_wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1636,8 +1632,6 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 	}
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -9253,7 +9247,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	struct io_ring_ctx *ctx = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &ctx->poll_wait, wait);
+	poll_wait(file, &ctx->cq_wait, wait);
 	/*
 	 * synchronizes with barrier from wq_has_sleeper call in
 	 * io_commit_cqring
-- cgit v1.2.3


From 37f0e767e177af328fcaf74d3772495235e361a8 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 4 Oct 2021 20:02:53 +0100
Subject: io_uring: optimise ctx referencing by requests

Currently, we allocate one ctx reference per request at submission time
and put them at free. It's batched and not so expensive, but it still
bloats the kernel, adds 2 function calls for rcu and adds some overhead
for request counting in io_free_batch_list().
Always keep one reference with a request, even when it's freed and in io_uring request caches. There is extra work at ring exit / quiesce paths, which now need to put all cached requests. io_ring_exit_work() is already looping, so it's not a problem. Add hybrid-busy waiting to io_ctx_quiesce() as well for now. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/99613fbe396e80777228cde39bbda1aa8938554e.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index c9e6f4b68718..9ec86e35668f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1807,7 +1807,6 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_put_task(req->task, 1); wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; - percpu_ref_put(&ctx->refs); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); @@ -1929,6 +1928,7 @@ static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) ret = 1; } + percpu_ref_get_many(&ctx->refs, ret); for (i = 0; i < ret; i++) { req = reqs[i]; @@ -1986,8 +1986,6 @@ static void __io_free_req(struct io_kiocb *req) wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; spin_unlock(&ctx->completion_lock); - - percpu_ref_put(&ctx->refs); } static inline void io_remove_next_linked(struct io_kiocb *req) @@ -2276,7 +2274,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, __must_hold(&ctx->uring_lock) { struct task_struct *task = NULL; - int task_refs = 0, ctx_refs = 0; + int task_refs = 0; do { struct io_kiocb *req = container_of(node, struct io_kiocb, @@ -2296,12 +2294,9 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, task_refs = 0; } task_refs++; - ctx_refs++; wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); - if (ctx_refs) - percpu_ref_put_many(&ctx->refs, ctx_refs); if (task) io_put_task(task, task_refs); } @@ -7212,8 +7207,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) return 0; /* make sure SQ entry isn't read before tail */ nr = min3(nr, ctx->sq_entries, entries); - if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr))) - return -EAGAIN; io_get_task_refs(nr); io_submit_state_start(&ctx->submit_state, nr); @@ -7243,7 +7236,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) int unused = nr - ref_used; current->io_uring->cached_refs += unused; - percpu_ref_put_many(&ctx->refs, unused); } io_submit_state_end(ctx); @@ -9164,6 +9156,7 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) static void io_req_caches_free(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; + int nr = 0; mutex_lock(&ctx->uring_lock); io_flush_cached_locked_reqs(ctx, state); @@ -9175,7 +9168,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) node = wq_stack_extract(&state->free_list); req = container_of(node, struct io_kiocb, comp_list); kmem_cache_free(req_cachep, req); + nr++; } + if (nr) + percpu_ref_put_many(&ctx->refs, nr); mutex_unlock(&ctx->uring_lock); } @@ -9345,6 +9341,8 @@ static void io_ring_exit_work(struct work_struct *work) io_sq_thread_unpark(sqd); } + io_req_caches_free(ctx); + if (WARN_ON_ONCE(time_after(jiffies, timeout))) { /* there is little hope left, don't run it too often */ interval = HZ * 60; @@ -10724,10 +10722,14 @@ static int io_ctx_quiesce(struct io_ring_ctx *ctx) */ 
mutex_unlock(&ctx->uring_lock); do { - ret = wait_for_completion_interruptible(&ctx->ref_comp); - if (!ret) + ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ); + if (ret) { + ret = min(0L, ret); break; + } + ret = io_run_task_work_sig(); + io_req_caches_free(ctx); } while (ret >= 0); mutex_lock(&ctx->uring_lock); -- cgit v1.2.3 From c072481ded14bceb9a4163ac81ce73d907a43855 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:54 +0100 Subject: io_uring: mark cold functions Attribute cold functions so compilers can optimise them for size. It shrinks the binary by 2.5-3% text data bss dec hex filename 90670 14002 8 104680 198e8 ./fs/io_uring.o 88053 14002 8 102063 18eaf ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b53d385f91dca45170b67d7f11c7abd787e821f6.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 123 ++++++++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 59 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 9ec86e35668f..e404c98e846a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1230,7 +1230,7 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res) req->result = res; } -static void io_ring_ctx_ref_free(struct percpu_ref *ref) +static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); @@ -1242,7 +1242,7 @@ static inline bool io_is_timeout_noseq(struct io_kiocb *req) return !req->timeout.off; } -static void io_fallback_req_func(struct work_struct *work) +static __cold void io_fallback_req_func(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, fallback_work.work); @@ -1262,7 +1262,7 @@ static void io_fallback_req_func(struct work_struct *work) } -static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) +static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; int hash_bits; @@ -1361,7 +1361,7 @@ static inline bool io_req_ffs_set(struct io_kiocb *req) return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); } -static void io_req_track_inflight(struct io_kiocb *req) +static inline void io_req_track_inflight(struct io_kiocb *req) { if (!(req->flags & REQ_F_INFLIGHT)) { req->flags |= REQ_F_INFLIGHT; @@ -1500,7 +1500,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status) } } -static void io_queue_deferred(struct io_ring_ctx *ctx) +static __cold void io_queue_deferred(struct io_ring_ctx *ctx) { while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, @@ -1514,7 +1514,7 @@ static void io_queue_deferred(struct io_ring_ctx *ctx) } } -static void io_flush_timeouts(struct io_ring_ctx *ctx) +static __cold void io_flush_timeouts(struct io_ring_ctx *ctx) __must_hold(&ctx->completion_lock) { u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); @@ -1547,7 +1547,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) spin_unlock_irq(&ctx->timeout_lock); } -static void __io_commit_cqring_flush(struct io_ring_ctx *ctx) +static __cold void __io_commit_cqring_flush(struct io_ring_ctx *ctx) { if (ctx->off_timeout_used) io_flush_timeouts(ctx); @@ -1903,7 +1903,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) * Because of that, io_alloc_req() should be called only under ->uring_lock * and with extra caution to not get a request 
that is still worked on. */ -static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) +static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; @@ -1975,7 +1975,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) } } -static void __io_free_req(struct io_kiocb *req) +static __cold void __io_free_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; @@ -2467,7 +2467,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) * We can't just wait for polled events to come to us, we have to actively * find and complete them. */ -static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) +static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) { if (!(ctx->flags & IORING_SETUP_IOPOLL)) return; @@ -5632,8 +5632,8 @@ static bool io_poll_remove_one(struct io_kiocb *req) /* * Returns true if we found and killed one or more poll requests */ -static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, - bool cancel_all) +static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, + struct task_struct *tsk, bool cancel_all) { struct hlist_node *tmp; struct io_kiocb *req; @@ -6425,7 +6425,7 @@ static u32 io_get_sequence(struct io_kiocb *req) return seq; } -static void io_drain_req(struct io_kiocb *req) +static __cold void io_drain_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; @@ -7305,7 +7305,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) return ret; } -static void io_sqd_update_thread_idle(struct io_sq_data *sqd) +static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) { struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; @@ -7559,7 +7559,7 @@ static void io_free_page_table(void **table, size_t size) kfree(table); } -static void **io_alloc_page_table(size_t size) +static __cold void **io_alloc_page_table(size_t size) { unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); size_t init_size = size; @@ -7588,7 +7588,7 @@ static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) kfree(ref_node); } -static void io_rsrc_node_ref_zero(struct percpu_ref *ref) +static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref) { struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); struct io_ring_ctx *ctx = node->rsrc_data->ctx; @@ -7665,7 +7665,8 @@ static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx) return ctx->rsrc_backup_node ? 
0 : -ENOMEM; } -static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx) +static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data, + struct io_ring_ctx *ctx) { int ret; @@ -7721,9 +7722,9 @@ static void io_rsrc_data_free(struct io_rsrc_data *data) kfree(data); } -static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, - u64 __user *utags, unsigned nr, - struct io_rsrc_data **pdata) +static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, + u64 __user *utags, unsigned nr, + struct io_rsrc_data **pdata) { struct io_rsrc_data *data; int ret = -ENOMEM; @@ -8493,8 +8494,8 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, return io_wq_create(concurrency, &data); } -static int io_uring_alloc_task_context(struct task_struct *task, - struct io_ring_ctx *ctx) +static __cold int io_uring_alloc_task_context(struct task_struct *task, + struct io_ring_ctx *ctx) { struct io_uring_task *tctx; int ret; @@ -8541,8 +8542,8 @@ void __io_uring_free(struct task_struct *tsk) tsk->io_uring = NULL; } -static int io_sq_offload_create(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static __cold int io_sq_offload_create(struct io_ring_ctx *ctx, + struct io_uring_params *p) { int ret; @@ -9181,7 +9182,7 @@ static void io_wait_rsrc_data(struct io_rsrc_data *data) wait_for_completion(&data->done); } -static void io_ring_ctx_free(struct io_ring_ctx *ctx) +static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) { io_sq_thread_finish(ctx); @@ -9290,7 +9291,7 @@ struct io_tctx_exit { struct io_ring_ctx *ctx; }; -static void io_tctx_exit_cb(struct callback_head *cb) +static __cold void io_tctx_exit_cb(struct callback_head *cb) { struct io_uring_task *tctx = current->io_uring; struct io_tctx_exit *work; @@ -9305,14 +9306,14 @@ static void io_tctx_exit_cb(struct callback_head *cb) complete(&work->completion); } -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) +static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); return req->ctx == data; } -static void io_ring_exit_work(struct work_struct *work) +static __cold void io_ring_exit_work(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); unsigned long timeout = jiffies + HZ * 60 * 5; @@ -9383,8 +9384,8 @@ static void io_ring_exit_work(struct work_struct *work) } /* Returns true if we found and killed one or more timeouts */ -static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, - bool cancel_all) +static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, + struct task_struct *tsk, bool cancel_all) { struct io_kiocb *req, *tmp; int canceled = 0; @@ -9406,7 +9407,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, return canceled != 0; } -static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) +static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { unsigned long index; struct creds *creds; @@ -9468,8 +9469,9 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) return ret; } -static bool io_cancel_defer_files(struct io_ring_ctx *ctx, - struct task_struct *task, bool cancel_all) +static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { struct io_defer_entry *de; LIST_HEAD(list); @@ -9494,7 +9496,7 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx, 
return true; } -static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) +static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) { struct io_tctx_node *node; enum io_wq_cancel cret; @@ -9518,9 +9520,9 @@ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) return ret; } -static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, - struct task_struct *task, - bool cancel_all) +static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; struct io_uring_task *tctx = task ? task->io_uring : NULL; @@ -9610,7 +9612,7 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) /* * Remove this io_uring_file -> task mapping. */ -static void io_uring_del_tctx_node(unsigned long index) +static __cold void io_uring_del_tctx_node(unsigned long index) { struct io_uring_task *tctx = current->io_uring; struct io_tctx_node *node; @@ -9633,7 +9635,7 @@ static void io_uring_del_tctx_node(unsigned long index) kfree(node); } -static void io_uring_clean_tctx(struct io_uring_task *tctx) +static __cold void io_uring_clean_tctx(struct io_uring_task *tctx) { struct io_wq *wq = tctx->io_wq; struct io_tctx_node *node; @@ -9660,7 +9662,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) return percpu_counter_sum(&tctx->inflight); } -static void io_uring_drop_tctx_refs(struct task_struct *task) +static __cold void io_uring_drop_tctx_refs(struct task_struct *task) { struct io_uring_task *tctx = task->io_uring; unsigned int refs = tctx->cached_refs; @@ -9676,7 +9678,8 @@ static void io_uring_drop_tctx_refs(struct task_struct *task) * Find any io_uring ctx that this task has registered or done IO on, and cancel * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation. 
*/ -static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) +static __cold void io_uring_cancel_generic(bool cancel_all, + struct io_sq_data *sqd) { struct io_uring_task *tctx = current->io_uring; struct io_ring_ctx *ctx; @@ -9769,7 +9772,7 @@ static void *io_uring_validate_mmap_request(struct file *file, #ifdef CONFIG_MMU -static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) +static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) { size_t sz = vma->vm_end - vma->vm_start; unsigned long pfn; @@ -9954,7 +9957,7 @@ out_fput: } #ifdef CONFIG_PROC_FS -static int io_uring_show_cred(struct seq_file *m, unsigned int id, +static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id, const struct cred *cred) { struct user_namespace *uns = seq_user_ns(m); @@ -9986,7 +9989,8 @@ static int io_uring_show_cred(struct seq_file *m, unsigned int id, return 0; } -static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) +static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, + struct seq_file *m) { struct io_sq_data *sq = NULL; struct io_overflow_cqe *ocqe; @@ -10098,7 +10102,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) spin_unlock(&ctx->completion_lock); } -static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) +static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f) { struct io_ring_ctx *ctx = f->private_data; @@ -10122,8 +10126,8 @@ static const struct file_operations io_uring_fops = { #endif }; -static int io_allocate_scq_urings(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, + struct io_uring_params *p) { struct io_rings *rings; size_t size, sq_array_offset; @@ -10212,8 +10216,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) return file; } -static int io_uring_create(unsigned entries, struct io_uring_params *p, - struct io_uring_params __user *params) +static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, + struct io_uring_params __user *params) { struct io_ring_ctx *ctx; struct file *file; @@ -10371,7 +10375,8 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries, return io_uring_setup(entries, params); } -static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args) +static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg, + unsigned nr_args) { struct io_uring_probe *p; size_t size; @@ -10427,8 +10432,8 @@ static int io_register_personality(struct io_ring_ctx *ctx) return id; } -static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg, - unsigned int nr_args) +static __cold int io_register_restrictions(struct io_ring_ctx *ctx, + void __user *arg, unsigned int nr_args) { struct io_uring_restriction *res; size_t size; @@ -10562,7 +10567,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, return __io_register_rsrc_update(ctx, type, &up, up.nr); } -static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, +static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, unsigned int size, unsigned int type) { struct io_uring_rsrc_register rr; @@ -10588,8 +10593,8 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, return -EINVAL; } -static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, - unsigned len) +static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx, 
+ void __user *arg, unsigned len) { struct io_uring_task *tctx = current->io_uring; cpumask_var_t new_mask; @@ -10615,7 +10620,7 @@ static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, return ret; } -static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) +static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx) { struct io_uring_task *tctx = current->io_uring; @@ -10625,8 +10630,8 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) return io_wq_cpu_affinity(tctx->io_wq, NULL); } -static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, - void __user *arg) +static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, + void __user *arg) { struct io_uring_task *tctx = NULL; struct io_sq_data *sqd = NULL; @@ -10707,7 +10712,7 @@ static bool io_register_op_must_quiesce(int op) } } -static int io_ctx_quiesce(struct io_ring_ctx *ctx) +static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx) { long ret; -- cgit v1.2.3 From c1e53a6988b9c83dd8bbc759414bc0f13ff1fe0c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:55 +0100 Subject: io_uring: optimise io_free_batch_list() Delay reading the next node in io_free_batch_list(); this lets the compiler load the value a bit later, improving register spilling in some cases. With gcc 11.1 it helped to move the @task_refs variable from the stack to a register and optimised out a couple of per-request instructions. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/cc9fdfb6f72a4e8bc9918a5e9f2d97869a263ae4.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index e404c98e846a..1b007f77fe52 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2280,9 +2280,10 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - node = req->comp_list.next; - if (!req_ref_put_and_test(req)) + if (!req_ref_put_and_test(req)) { + node = req->comp_list.next; continue; + } io_queue_next(req); io_dismantle_req(req); @@ -2294,6 +2295,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, task_refs = 0; } task_refs++; + node = req->comp_list.next; wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); -- cgit v1.2.3 From d886e185a128a4f350c4df6471c0f403c2982ae8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:56 +0100 Subject: io_uring: control ->async_data with a REQ_F flag ->async_data is a slow path, so it won't matter much if we do the cleanup inside io_clean_op(). Moreover, in many cases it's allocated together with setting one or more of the IO_REQ_CLEAN_FLAGS flags, so it'd go through io_clean_op() anyway. Control ->async_data allocation with a new flag, REQ_F_ASYNC_DATA, so we can do all the maintenance under the io_req_needs_clean() fast check.
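[Editorial illustration: the flag-mirroring pattern this patch introduces can be sketched in plain userspace C. Everything below is hypothetical, not the kernel code: an "allocation present" bit is kept in the request's flags word and folded into a cleanup mask, so the free fast path tests one word instead of several pointers.]

#include <stdio.h>
#include <stdlib.h>

#define F_BUFFER_SELECTED (1u << 0)
#define F_NEED_CLEANUP    (1u << 1)
#define F_ASYNC_DATA      (1u << 2)  /* mirrors async_data != NULL */
#define F_CLEAN_MASK      (F_BUFFER_SELECTED | F_NEED_CLEANUP | F_ASYNC_DATA)

struct request {
	unsigned int flags;
	void *async_data;
};

static int alloc_async_data(struct request *req, size_t size)
{
	req->async_data = malloc(size);
	if (!req->async_data)
		return -1;
	req->flags |= F_ASYNC_DATA;  /* keep the flag in sync with the pointer */
	return 0;
}

/* cold path: only entered when some cleanup bit is actually set */
static void clean_request(struct request *req)
{
	if (req->flags & F_ASYNC_DATA) {
		free(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~F_CLEAN_MASK;
}

/* hot path: one mask test instead of several pointer checks */
static void free_request(struct request *req)
{
	if (req->flags & F_CLEAN_MASK)
		clean_request(req);
}

int main(void)
{
	struct request req = { 0, NULL };

	if (alloc_async_data(&req, 64) == 0)
		free_request(&req);
	printf("flags after free: %#x\n", req.flags);
	return 0;
}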
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6892cf5883c459f36bda26f30ceb16742b20b84b.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 72 ++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 26 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 1b007f77fe52..aee8ecc09168 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -109,7 +109,8 @@ #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN) #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ - REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) + REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \ + REQ_F_ASYNC_DATA) #define IO_TCTX_REFS_CACHE_NR (1U << 10) @@ -732,6 +733,7 @@ enum { REQ_F_CREDS_BIT, REQ_F_REFCOUNT_BIT, REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_ASYNC_DATA_BIT, /* keep async read/write and isreg together and in order */ REQ_F_NOWAIT_READ_BIT, REQ_F_NOWAIT_WRITE_BIT, @@ -787,6 +789,8 @@ enum { REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), /* there is a linked timeout that has to be armed */ REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* ->async_data allocated */ + REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), }; struct async_poll { @@ -847,8 +851,6 @@ struct io_kiocb { struct io_completion compl; }; - /* opcode allocated if it needs to store data for async defer */ - void *async_data; u8 opcode; /* polled IO has completed */ u8 iopoll_completed; @@ -863,6 +865,8 @@ struct io_kiocb { u64 user_data; struct percpu_ref *fixed_rsrc_refs; + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; @@ -872,8 +876,9 @@ struct io_kiocb { struct hlist_node hash_node; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; - /* store used ubuf, so we can prevent reloading */ - struct io_mapped_ubuf *imu; + + /* opcode allocated if it needs to store data for async defer */ + void *async_data; struct io_wq_work work; /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; @@ -1219,6 +1224,11 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task, return false; } +static inline bool req_has_async_data(struct io_kiocb *req) +{ + return req->flags & REQ_F_ASYNC_DATA; +} + static inline void req_set_fail(struct io_kiocb *req) { req->flags |= REQ_F_FAIL; @@ -1969,10 +1979,6 @@ static inline void io_dismantle_req(struct io_kiocb *req) io_put_file(req->file); if (req->fixed_rsrc_refs) percpu_ref_put(req->fixed_rsrc_refs); - if (req->async_data) { - kfree(req->async_data); - req->async_data = NULL; - } } static __cold void __io_free_req(struct io_kiocb *req) @@ -2566,7 +2572,7 @@ static bool io_resubmit_prep(struct io_kiocb *req) { struct io_async_rw *rw = req->async_data; - if (!rw) + if (!req_has_async_data(req)) return !io_req_prep_async(req); iov_iter_restore(&rw->iter, &rw->iter_state); return true; @@ -2878,7 +2884,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_async_rw *io = req->async_data; /* add previously done IO, if any */ - if (io && io->bytes_done > 0) { + if (req_has_async_data(req) && io->bytes_done > 0) { if (ret < 0) ret = io->bytes_done; else @@ -3257,7 +3263,11 @@ static inline bool io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); - return 
req->async_data == NULL; + if (req->async_data) { + req->flags |= REQ_F_ASYNC_DATA; + return false; + } + return true; } static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, @@ -3266,7 +3276,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, { if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; - if (!req->async_data) { + if (!req_has_async_data(req)) { struct io_async_rw *iorw; if (io_alloc_async_data(req)) { @@ -3399,12 +3409,13 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct io_async_rw *rw = req->async_data; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct iov_iter_state __state, *state; + struct io_async_rw *rw; ssize_t ret, ret2; - if (rw) { + if (req_has_async_data(req)) { + rw = req->async_data; iter = &rw->iter; state = &rw->iter_state; /* @@ -3534,12 +3545,13 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct io_async_rw *rw = req->async_data; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct iov_iter_state __state, *state; + struct io_async_rw *rw; ssize_t ret, ret2; - if (rw) { + if (req_has_async_data(req)) { + rw = req->async_data; iter = &rw->iter; state = &rw->iter_state; iov_iter_restore(iter, state); @@ -4708,8 +4720,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(!sock)) return -ENOTSOCK; - kmsg = req->async_data; - if (!kmsg) { + if (req_has_async_data(req)) { + kmsg = req->async_data; + } else { ret = io_sendmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -4925,8 +4938,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(!sock)) return -ENOTSOCK; - kmsg = req->async_data; - if (!kmsg) { + if (req_has_async_data(req)) { + kmsg = req->async_data; + } else { ret = io_recvmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -5117,7 +5131,7 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags) int ret; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - if (req->async_data) { + if (req_has_async_data(req)) { io = req->async_data; } else { ret = move_addr_to_kernel(req->connect.addr, @@ -5133,7 +5147,7 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags) ret = __sys_connect_file(req->file, &io->address, req->connect.addr_len, file_flags); if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { - if (req->async_data) + if (req_has_async_data(req)) return -EAGAIN; if (io_alloc_async_data(req)) { ret = -ENOMEM; @@ -5424,7 +5438,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); req_ref_get(req); poll->wait.private = req; + *poll_ptr = poll; + if (req->opcode == IORING_OP_POLL_ADD) + req->flags |= REQ_F_ASYNC_DATA; } pt->nr_entries++; @@ -6086,7 +6103,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(off && !req->ctx->off_timeout_used)) req->ctx->off_timeout_used = true; - if (!req->async_data && io_alloc_async_data(req)) + if (!req_has_async_data(req) && io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; @@ -6395,7 +6412,7 @@ static int 
io_req_prep_async(struct io_kiocb *req) { if (!io_op_defs[req->opcode].needs_async_setup) return 0; - if (WARN_ON_ONCE(req->async_data)) + if (WARN_ON_ONCE(req_has_async_data(req))) return -EFAULT; if (io_alloc_async_data(req)) return -EAGAIN; @@ -6538,7 +6555,10 @@ static void io_clean_op(struct io_kiocb *req) } if (req->flags & REQ_F_CREDS) put_cred(req->creds); - + if (req->flags & REQ_F_ASYNC_DATA) { + kfree(req->async_data); + req->async_data = NULL; + } req->flags &= ~IO_REQ_CLEAN_FLAGS; } -- cgit v1.2.3 From d17e56eb4907c72054e63c71a2123d32b04ebd67 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:57 +0100 Subject: io_uring: remove struct io_completion We keep struct io_completion only as temporary storage for cflags. Place it in io_kiocb instead; it's cleaner, removes extra bits, and might even be used for future optimisations. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5299bd5c223204065464bd87a515d0e405316086.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index aee8ecc09168..c7dea155f43a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -684,11 +684,6 @@ struct io_hardlink { int flags; }; -struct io_completion { - struct file *file; - u32 cflags; -}; - struct io_async_connect { struct sockaddr_storage address; }; @@ -847,22 +842,20 @@ struct io_kiocb { struct io_mkdir mkdir; struct io_symlink symlink; struct io_hardlink hardlink; - /* use only after cleaning per-op data, see io_clean_op() */ - struct io_completion compl; }; u8 opcode; /* polled IO has completed */ u8 iopoll_completed; - u16 buf_index; + unsigned int flags; + + u64 user_data; u32 result; + u32 cflags; struct io_ring_ctx *ctx; - unsigned int flags; - atomic_t refs; struct task_struct *task; - u64 user_data; struct percpu_ref *fixed_rsrc_refs; /* store used ubuf, so we can prevent reloading */ @@ -870,13 +863,13 @@ struct io_kiocb { /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; + atomic_t refs; struct io_kiocb *link; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; - /* opcode allocated if it needs to store data for async defer */ void *async_data; struct io_wq_work work; @@ -1831,11 +1824,8 @@ static inline bool io_req_needs_clean(struct io_kiocb *req) static inline void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { - /* clean per-opcode space, because req->compl is aliased with it */ - if (io_req_needs_clean(req)) - io_clean_op(req); req->result = res; - req->compl.cflags = cflags; + req->cflags = cflags; req->flags |= REQ_F_COMPLETE_INLINE; } @@ -2321,7 +2311,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) comp_list); __io_cqring_fill_event(ctx, req->user_data, req->result, - req->compl.cflags); + req->cflags); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); -- cgit v1.2.3 From 867f8fa5aeb7fa51290bf3567ce2bbc45580a469 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:58 +0100 Subject: io_uring: inline io_req_needs_clean() There is only a single user of io_req_needs_clean(), so inline it.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6111d0221ef4b439cad401e135dd6a5f990a0501.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index c7dea155f43a..dc83effa8317 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1816,11 +1816,6 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_cqring_ev_posted(ctx); } -static inline bool io_req_needs_clean(struct io_kiocb *req) -{ - return req->flags & IO_REQ_CLEAN_FLAGS; -} - static inline void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { @@ -1963,7 +1958,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; - if (unlikely(io_req_needs_clean(req))) + if (unlikely(flags & IO_REQ_CLEAN_FLAGS)) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); -- cgit v1.2.3 From eb6e6f0690c846f7de46181bab3954c12c96e11e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:59 +0100 Subject: io_uring: inline io_poll_complete Inline io_poll_complete(), it's simple and doesn't have any particular purpose. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/933d7ee3e4450749a2d892235462c8f18d030293.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index dc83effa8317..815a7ec68dbc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5292,16 +5292,6 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask) return !(flags & IORING_CQE_F_MORE); } -static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask) - __must_hold(&req->ctx->completion_lock) -{ - bool done; - - done = __io_poll_complete(req, mask); - io_commit_cqring(req->ctx); - return done; -} - static void io_poll_task_func(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; @@ -5791,7 +5781,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) if (mask) { /* no async, we'd stolen it */ ipt.error = 0; - done = io_poll_complete(req, mask); + done = __io_poll_complete(req, mask); + io_commit_cqring(req->ctx); } spin_unlock(&ctx->completion_lock); -- cgit v1.2.3 From 54daa9b2d80ab35824464b35a99f716e1cdf2ccb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:03:00 +0100 Subject: io_uring: correct fill events helpers types CQE result is a 32-bit integer, so the functions generating CQEs are better to accept not long but ints. Convert io_cqring_fill_event() and other helpers. 
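[Editorial illustration: why the narrower type is the right one. A hedged userspace sketch with hypothetical structs, assuming a 64-bit long as on x86-64 Linux: the CQE result field is 32 bits, so a long-typed helper silently truncates at the store, while an s32-typed helper keeps the types matched end to end.]

#include <stdint.h>
#include <stdio.h>

struct cqe {
	uint64_t user_data;
	int32_t res;      /* the result is 32 bits in the ring ABI */
	uint32_t flags;
};

/* old style: a 64-bit long truncates silently at the store */
static void fill_cqe_long(struct cqe *cqe, long res)
{
	cqe->res = (int32_t)res;
}

/* new style: the parameter already matches the field */
static void fill_cqe_s32(struct cqe *cqe, int32_t res)
{
	cqe->res = res;
}

int main(void)
{
	struct cqe c = { 0 };

	fill_cqe_long(&c, 0x100000001LL); /* 2^32 + 1 becomes 1 */
	printf("long helper stored %d\n", c.res);
	fill_cqe_s32(&c, -22);            /* e.g. -EINVAL */
	printf("s32 helper stored %d\n", c.res);
	return 0;
}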
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7ca6f15255e9117eae28adcac272744cae29b113.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 815a7ec68dbc..44946711fe62 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1072,7 +1072,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags); + s32 res, u32 cflags); static void io_put_req(struct io_kiocb *req); static void io_put_req_deferred(struct io_kiocb *req); static void io_dismantle_req(struct io_kiocb *req); @@ -1730,7 +1730,7 @@ static inline void io_get_task_refs(int nr) } static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { struct io_overflow_cqe *ocqe; @@ -1758,7 +1758,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, } static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { struct io_uring_cqe *cqe; @@ -1781,13 +1781,13 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data /* not as hot to bloat with inlining */ static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { return __io_cqring_fill_event(ctx, user_data, res, cflags); } -static void io_req_complete_post(struct io_kiocb *req, long res, - unsigned int cflags) +static void io_req_complete_post(struct io_kiocb *req, s32 res, + u32 cflags) { struct io_ring_ctx *ctx = req->ctx; @@ -1816,8 +1816,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_cqring_ev_posted(ctx); } -static inline void io_req_complete_state(struct io_kiocb *req, long res, - unsigned int cflags) +static inline void io_req_complete_state(struct io_kiocb *req, s32 res, + u32 cflags) { req->result = res; req->cflags = cflags; @@ -1825,7 +1825,7 @@ static inline void io_req_complete_state(struct io_kiocb *req, long res, } static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, - long res, unsigned cflags) + s32 res, u32 cflags) { if (issue_flags & IO_URING_F_COMPLETE_DEFER) io_req_complete_state(req, res, cflags); @@ -1833,12 +1833,12 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, io_req_complete_post(req, res, cflags); } -static inline void io_req_complete(struct io_kiocb *req, long res) +static inline void io_req_complete(struct io_kiocb *req, s32 res) { __io_req_complete(req, 0, res, 0); } -static void io_req_complete_failed(struct io_kiocb *req, long res) +static void io_req_complete_failed(struct io_kiocb *req, s32 res) { req_set_fail(req); io_req_complete_post(req, res, 0); @@ -2618,7 +2618,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) static void io_req_task_complete(struct io_kiocb *req, bool *locked) { unsigned int cflags = io_put_rw_kbuf(req); - long res = req->result; + int res = req->result; if (*locked) { io_req_complete_state(req, res, cflags); -- cgit v1.2.3 From 6d63416dc57eb27a3d35e7b7526e9915479d7eff Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:46 +0100 
Subject: io_uring: optimise plugging Plugging is only needed with requests that also need a file, so hide plugging under a ->needs_file check. Also, place the ->needs_file and ->plug bits into the same byte of io_op_defs; it may matter to compilers, e.g. only with this change did a tested compiler optimise two memory testb instructions into a mov with two register testb. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1600d1287bb7d16451d4ef3343252787a5314927.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 44946711fe62..7759fabf9ae0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -894,12 +894,12 @@ struct io_defer_entry { struct io_op_def { /* needs req->file assigned */ unsigned needs_file : 1; + /* should block plug */ + unsigned plug : 1; /* hash wq insertion if file is a regular file */ unsigned hash_reg_file : 1; /* unbound wq insertion if file is a non-regular file */ unsigned unbound_nonreg_file : 1; - /* opcode is not supported by this kernel */ - unsigned not_supported : 1; /* set if opcode supports polled "wait" */ unsigned pollin : 1; unsigned pollout : 1; @@ -907,8 +907,8 @@ struct io_op_def { unsigned buffer_select : 1; /* do prep async if is going to be punted */ unsigned needs_async_setup : 1; - /* should block plug */ - unsigned plug : 1; + /* opcode is not supported by this kernel */ + unsigned not_supported : 1; /* size of async data needed, if any */ unsigned short async_size; }; @@ -6978,7 +6978,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) __must_hold(&ctx->uring_lock) { - struct io_submit_state *state; unsigned int sqe_flags; int personality; @@ -7025,19 +7024,20 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, get_cred(req->creds); req->flags |= REQ_F_CREDS; } - state = &ctx->submit_state; - - /* - * Plug now if we have more than 1 IO left after this, and the target - * is potentially a read/write to block based storage. - */ - if (state->need_plug && io_op_defs[req->opcode].plug) { - blk_start_plug(&state->plug); - state->plug_started = true; - state->need_plug = false; - } if (io_op_defs[req->opcode].needs_file) { + struct io_submit_state *state = &ctx->submit_state; + + /* + * Plug now if we have more than 2 IO left after this, and the + * target is potentially a read/write to block based storage. + */ + if (state->need_plug && io_op_defs[req->opcode].plug) { + state->plug_started = true; + state->need_plug = false; + blk_start_plug(&state->plug); + } + req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), (sqe_flags & IOSQE_FIXED_FILE)); if (unlikely(!req->file)) -- cgit v1.2.3 From 756ab7c0ec71e5e1e8e6ea11af53324b23d5efd2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:47 +0100 Subject: io_uring: safer fallback_work free Add extra wq flushing for fallback_work; it's not strictly necessary, but safer if the invariants of io_fallback_req_func() change.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/24179419d6748516299600bc914f50b9e0b02275.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 7759fabf9ae0..90bef813556e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1262,7 +1262,6 @@ static __cold void io_fallback_req_func(struct work_struct *work) mutex_unlock(&ctx->uring_lock); } percpu_ref_put(&ctx->refs); - } static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) @@ -9212,6 +9211,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) if (ctx->rsrc_backup_node) io_rsrc_node_destroy(ctx->rsrc_backup_node); flush_delayed_work(&ctx->rsrc_put_work); + flush_delayed_work(&ctx->fallback_work); WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); -- cgit v1.2.3 From 5a158c6b0d033893cc80c28b182e1207253768a5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:48 +0100 Subject: io_uring: reshuffle io_submit_state bits struct io_submit_state's ->free_list and ->link are hotter and smaller than ->plug, so place them first. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6ad3c15849f50b27ad012c042c73e6e069d22df7.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 90bef813556e..04d6e35ea0df 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,19 +308,15 @@ struct io_submit_link { }; struct io_submit_state { - struct blk_plug plug; + /* inline/task_work completion list, under ->uring_lock */ + struct io_wq_work_node free_list; + /* batch completion logic */ + struct io_wq_work_list compl_reqs; struct io_submit_link link; bool plug_started; bool need_plug; - - /* - * Batch completion logic - */ - struct io_wq_work_list compl_reqs; - - /* inline/task_work completion list, under ->uring_lock */ - struct io_wq_work_node free_list; + struct blk_plug plug; }; struct io_ring_ctx { -- cgit v1.2.3 From 4a04d1d14831d31f2cd0e31eb1568cc9c1be0095 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:49 +0100 Subject: io_uring: optimise out req->opcode reloading Looking at the assembly, the compiler decided to reload req->opcode for io_op_defs[opcode].needs_file instead of using the copy it already had in a register, so store it in a temp variable and let the reload be optimised out. Also move the personality block later; that's better for spilling etc., as it only depends on @sqe, which we're keeping anyway. By the way, zero req->opcode if it is over IORING_OP_LAST; not a problem at the moment, but safer.
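[Editorial illustration: the reload problem generalises beyond io_uring. A small userspace C sketch with hypothetical types: after a byte store the compiler can no longer prove the field unchanged and must re-read it, while a local copy stays in a register.]

#include <stdint.h>
#include <stdio.h>

struct op_def { uint8_t needs_file; uint8_t plug; };
struct req { uint8_t opcode; };

static const struct op_def op_defs[4] = {
	[2] = { .needs_file = 1, .plug = 1 },
};

/* Each byte store below may alias r->opcode as far as the compiler
 * can prove, so r->opcode is re-loaded from memory for every use. */
static int init_reloading(struct req *r, uint8_t *out)
{
	out[0] = op_defs[r->opcode].plug;
	out[1] = op_defs[r->opcode].needs_file;
	return out[0] + out[1];
}

/* One load up front: "opcode" then lives in a register. */
static int init_cached(struct req *r, uint8_t *out)
{
	uint8_t opcode = r->opcode;

	out[0] = op_defs[opcode].plug;
	out[1] = op_defs[opcode].needs_file;
	return out[0] + out[1];
}

int main(void)
{
	struct req r = { .opcode = 2 };
	uint8_t scratch[2];

	printf("%d %d\n", init_reloading(&r, scratch),
	       init_cached(&r, scratch));
	return 0;
}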
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6ba869f5f8b7b0f991c87fdf089f0abf87cbe06b.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 04d6e35ea0df..7918a320104d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6975,9 +6975,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, { unsigned int sqe_flags; int personality; + u8 opcode; /* req is partially pre-initialised, see io_preinit_req() */ - req->opcode = READ_ONCE(sqe->opcode); + req->opcode = opcode = READ_ONCE(sqe->opcode); /* same numerical values with corresponding REQ_F_*, safe to copy */ req->flags = sqe_flags = READ_ONCE(sqe->flags); req->user_data = READ_ONCE(sqe->user_data); @@ -6985,14 +6986,16 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->fixed_rsrc_refs = NULL; req->task = current; - if (unlikely(req->opcode >= IORING_OP_LAST)) + if (unlikely(opcode >= IORING_OP_LAST)) { + req->opcode = 0; return -EINVAL; + } if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { /* enforce forwards compatibility on users */ if (sqe_flags & ~SQE_VALID_FLAGS) return -EINVAL; if ((sqe_flags & IOSQE_BUFFER_SELECT) && - !io_op_defs[req->opcode].buffer_select) + !io_op_defs[opcode].buffer_select) return -EOPNOTSUPP; if (sqe_flags & IOSQE_IO_DRAIN) io_init_req_drain(req); @@ -7011,23 +7014,14 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } } - personality = READ_ONCE(sqe->personality); - if (personality) { - req->creds = xa_load(&ctx->personalities, personality); - if (!req->creds) - return -EINVAL; - get_cred(req->creds); - req->flags |= REQ_F_CREDS; - } - - if (io_op_defs[req->opcode].needs_file) { + if (io_op_defs[opcode].needs_file) { struct io_submit_state *state = &ctx->submit_state; /* * Plug now if we have more than 2 IO left after this, and the * target is potentially a read/write to block based storage. */ - if (state->need_plug && io_op_defs[req->opcode].plug) { + if (state->need_plug && io_op_defs[opcode].plug) { state->plug_started = true; state->need_plug = false; blk_start_plug(&state->plug); @@ -7039,6 +7033,15 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, return -EBADF; } + personality = READ_ONCE(sqe->personality); + if (personality) { + req->creds = xa_load(&ctx->personalities, personality); + if (!req->creds) + return -EINVAL; + get_cred(req->creds); + req->flags |= REQ_F_CREDS; + } + return io_req_prep(req, sqe); } -- cgit v1.2.3 From 0cd3e3ddb4f60062c401929b2005eb7ff6399a4d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:50 +0100 Subject: io_uring: remove extra io_ring_exit_work wake up task_work_add() takes care of waking up the thread, remove useless wake_up_process(). 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/de9a71ee255112dcaed3b5d426be24934e74722c.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 7918a320104d..383b8f61359d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9367,7 +9367,6 @@ static __cold void io_ring_exit_work(struct work_struct *work) ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); if (WARN_ON_ONCE(ret)) continue; - wake_up_process(node->task); mutex_unlock(&ctx->uring_lock); wait_for_completion(&exit.completion); -- cgit v1.2.3 From def77acf4396a850544b41623d7720ea3a2674a9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 12 Oct 2021 15:02:14 +0100 Subject: io_uring: fix io_free_batch_list races [ 158.514382] WARNING: CPU: 5 PID: 15251 at fs/io_uring.c:1141 io_free_batch_list+0x269/0x360 [ 158.514426] RIP: 0010:io_free_batch_list+0x269/0x360 [ 158.514437] Call Trace: [ 158.514440] __io_submit_flush_completions+0xde/0x180 [ 158.514444] tctx_task_work+0x14a/0x220 [ 158.514447] task_work_run+0x64/0xa0 [ 158.514448] __do_sys_io_uring_enter+0x7c/0x970 [ 158.514450] __x64_sys_io_uring_enter+0x22/0x30 [ 158.514451] do_syscall_64+0x43/0x90 [ 158.514453] entry_SYSCALL_64_after_hwframe+0x44/0xae We should not touch request internals, including req->comp_list.next, after putting our ref if it's not final, e.g. we can start freeing requests from the free cache. Fixes: 62ca9cb93e7f8 ("io_uring: optimise io_free_batch_list()") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b1f4df38fbb8f111f52911a02fd418d0283a4e6f.1634047298.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 383b8f61359d..141bfe720ce2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2266,9 +2266,10 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - if (!req_ref_put_and_test(req)) { + if (unlikely(req->flags & REQ_F_REFCOUNT)) { node = req->comp_list.next; - continue; + if (!req_ref_put_and_test(req)) + continue; } io_queue_next(req); -- cgit v1.2.3 From a46be971edb69fe4b1dcc4359c3ddf9127629dab Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 23:14:40 +0100 Subject: io_uring: optimise io_req_set_rsrc_node() io_req_set_rsrc_node() reloads req->ctx; however, it's already in registers in all use cases, so it's better to pass it as a parameter.
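[Editorial illustration: the rule the race fix enforces is the classic reference-counted list walk. A minimal userspace sketch with hypothetical names and plain C11 atomics, showing the safe ordering: read the next pointer while our reference still pins the node.]

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	atomic_int refs;
};

static void put_all(struct node *n)
{
	while (n) {
		struct node *next = n->next;  /* read while our ref pins n */

		if (atomic_fetch_sub(&n->refs, 1) == 1)
			free(n);
		/* n may already be freed or recycled at this point;
		 * only the previously saved "next" is safe to use */
		n = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		atomic_init(&n->refs, 1);
		head = n;
	}
	put_all(head);
	puts("done");
	return 0;
}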
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/67a25557b8a51e90bfd578447a6f1671911b05ae.1633817310.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 141bfe720ce2..6486ebf31866 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1174,10 +1174,9 @@ static inline void io_req_set_refcount(struct io_kiocb *req) __io_req_set_refcount(req, 1); } -static inline void io_req_set_rsrc_node(struct io_kiocb *req) +static inline void io_req_set_rsrc_node(struct io_kiocb *req, + struct io_ring_ctx *ctx) { - struct io_ring_ctx *ctx = req->ctx; - if (!req->fixed_rsrc_refs) { req->fixed_rsrc_refs = &ctx->rsrc_node->refs; percpu_ref_get(req->fixed_rsrc_refs); @@ -2828,7 +2827,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (req->opcode == IORING_OP_READ_FIXED || req->opcode == IORING_OP_WRITE_FIXED) { req->imu = NULL; - io_req_set_rsrc_node(req); + io_req_set_rsrc_node(req, ctx); } req->rw.addr = READ_ONCE(sqe->addr); @@ -6757,7 +6756,7 @@ static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, file_ptr &= ~FFS_MASK; /* mask in overlapping REQ_F and FFS bits */ req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); - io_req_set_rsrc_node(req); + io_req_set_rsrc_node(req, ctx); return file; } -- cgit v1.2.3 From ab409402478462b5da007bfc46d165587c3adfc3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 23:14:41 +0100 Subject: io_uring: optimise rsrc referencing Apparently, percpu_ref_put/get() are expensive enough if done per request; get them in a batch and cache on the submission side to avoid doing it over and over again. Also, if we're completing under uring_lock, return refs back into the cache instead of percpu_ref_put(). Pretty similar to how we do tctx->cached_refs accounting, but fall back to normal putting when we already changed a rsrc node by the time of free.
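[Editorial illustration: the batching idea is independent of percpu refs. Roughly, it looks like the following userspace sketch; all names are hypothetical, a plain atomic stands in for the percpu ref, and cached_refs is assumed to be protected by the submission-side lock.]

#include <stdatomic.h>
#include <stdio.h>

#define REF_BATCH 100

struct ctx {
	atomic_long refs;   /* stand-in for the percpu ref */
	long cached_refs;   /* submission-side cache, lock-protected */
};

static void refs_refill(struct ctx *ctx)
{
	ctx->cached_refs += REF_BATCH;
	atomic_fetch_add(&ctx->refs, REF_BATCH);  /* one atomic per batch */
}

static void req_ref_get(struct ctx *ctx)
{
	if (--ctx->cached_refs < 0)  /* common case: plain decrement */
		refs_refill(ctx);
}

static void req_ref_put_locked(struct ctx *ctx)
{
	ctx->cached_refs++;          /* return to the cache, no atomic */
}

static void refs_drop_cache(struct ctx *ctx)
{
	if (ctx->cached_refs) {
		atomic_fetch_sub(&ctx->refs, ctx->cached_refs);
		ctx->cached_refs = 0;
	}
}

int main(void)
{
	struct ctx ctx = { .cached_refs = 0 };

	atomic_init(&ctx.refs, 1);
	for (int i = 0; i < 5; i++)
		req_ref_get(&ctx);
	for (int i = 0; i < 5; i++)
		req_ref_put_locked(&ctx);
	refs_drop_cache(&ctx);
	printf("refs back to %ld\n", atomic_load(&ctx.refs));
	return 0;
}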
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b40d8c5bc77d3c9550df8a319117a374ac85f8f4.1633817310.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 6486ebf31866..6859dae218f9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -360,6 +360,7 @@ struct io_ring_ctx { * uring_lock, and updated through io_uring_register(2) */ struct io_rsrc_node *rsrc_node; + int rsrc_cached_refs; struct io_file_table file_table; unsigned nr_user_files; unsigned nr_user_bufs; @@ -1174,12 +1175,52 @@ static inline void io_req_set_refcount(struct io_kiocb *req) __io_req_set_refcount(req, 1); } +#define IO_RSRC_REF_BATCH 100 + +static inline void io_req_put_rsrc_locked(struct io_kiocb *req, + struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + struct percpu_ref *ref = req->fixed_rsrc_refs; + + if (ref) { + if (ref == &ctx->rsrc_node->refs) + ctx->rsrc_cached_refs++; + else + percpu_ref_put(ref); + } +} + +static inline void io_req_put_rsrc(struct io_kiocb *req, struct io_ring_ctx *ctx) +{ + if (req->fixed_rsrc_refs) + percpu_ref_put(req->fixed_rsrc_refs); +} + +static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + if (ctx->rsrc_cached_refs) { + percpu_ref_put_many(&ctx->rsrc_node->refs, ctx->rsrc_cached_refs); + ctx->rsrc_cached_refs = 0; + } +} + +static void io_rsrc_refs_refill(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH; + percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH); +} + static inline void io_req_set_rsrc_node(struct io_kiocb *req, struct io_ring_ctx *ctx) { if (!req->fixed_rsrc_refs) { req->fixed_rsrc_refs = &ctx->rsrc_node->refs; - percpu_ref_get(req->fixed_rsrc_refs); + ctx->rsrc_cached_refs--; + if (unlikely(ctx->rsrc_cached_refs < 0)) + io_rsrc_refs_refill(ctx); } } @@ -1800,6 +1841,7 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, req->link = NULL; } } + io_req_put_rsrc(req, ctx); io_dismantle_req(req); io_put_task(req->task, 1); wq_list_add_head(&req->comp_list, &ctx->locked_free_list); @@ -1956,14 +1998,13 @@ static inline void io_dismantle_req(struct io_kiocb *req) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); - if (req->fixed_rsrc_refs) - percpu_ref_put(req->fixed_rsrc_refs); } static __cold void __io_free_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + io_req_put_rsrc(req, ctx); io_dismantle_req(req); io_put_task(req->task, 1); @@ -2271,6 +2312,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, continue; } + io_req_put_rsrc_locked(req, ctx); io_queue_next(req); io_dismantle_req(req); @@ -7630,10 +7672,13 @@ static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) static void io_rsrc_node_switch(struct io_ring_ctx *ctx, struct io_rsrc_data *data_to_kill) + __must_hold(&ctx->uring_lock) { WARN_ON_ONCE(!ctx->rsrc_backup_node); WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node); + io_rsrc_refs_drop(ctx); + if (data_to_kill) { struct io_rsrc_node *rsrc_node = ctx->rsrc_node; @@ -9187,6 +9232,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) ctx->mm_account = NULL; } + io_rsrc_refs_drop(ctx); /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ io_wait_rsrc_data(ctx->buf_data); io_wait_rsrc_data(ctx->file_data); -- cgit v1.2.3 From 
04f34081c5de35749274cf3bea8d8eb1c79b6ad1 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:12 +0100 Subject: io_uring: consistent typing for issue_flags Some of the functions keep issue_flags as an int; change those to unsigned. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/04ad43797783bc9cc7567f287ab545518f8e8cf2.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 6859dae218f9..094cd5f49214 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3786,7 +3786,7 @@ static int io_mkdirat_prep(struct io_kiocb *req, return 0; } -static int io_mkdirat(struct io_kiocb *req, int issue_flags) +static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) { struct io_mkdir *mkd = &req->mkdir; int ret; @@ -3835,7 +3835,7 @@ static int io_symlinkat_prep(struct io_kiocb *req, return 0; } -static int io_symlinkat(struct io_kiocb *req, int issue_flags) +static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) { struct io_symlink *sl = &req->symlink; int ret; @@ -3885,7 +3885,7 @@ static int io_linkat_prep(struct io_kiocb *req, return 0; } -static int io_linkat(struct io_kiocb *req, int issue_flags) +static int io_linkat(struct io_kiocb *req, unsigned int issue_flags) { struct io_hardlink *lnk = &req->hardlink; int ret; -- cgit v1.2.3 From f80a50a632d6f5c64729563892b0deb95d563f6d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:13 +0100 Subject: io_uring: prioritise read success path over fails Rearrange io_read() return handling so that first we expect it to complete successfully, and only then check for errors, which is the colder path. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c91c7c2da11815ec8b04b5d872f60dc4cde662c5.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 094cd5f49214..86ef803db27a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3487,7 +3487,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) ret = 0; } else if (ret == -EIOCBQUEUED) { goto out_free; - } else if (ret <= 0 || ret == req->result || !force_nonblock || + } else if (ret == req->result || ret <= 0 || !force_nonblock || (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { /* read all, failed, already did sync or don't want to retry */ goto done; -- cgit v1.2.3 From 258f3a7f84d11ce1d6cb1f60fe524bb37f7c1019 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:14 +0100 Subject: io_uring: optimise rw completion handlers Don't override req->result in io_complete_rw_iopoll() when it's already of the same value; we have an if just above it, so move the assignment there. Also, add one simple unlikely() in __io_complete_rw_common().
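[Editorial illustration: in the kernel, unlikely() expands to the GCC/Clang __builtin_expect() hint, which keeps the cold path out of the hot path's straight-line code. A tiny hedged userspace sketch with hypothetical names:]

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int complete_rw(long res, long expected)
{
	if (unlikely(res != expected)) {
		/* cold: short transfer, error handling lives here */
		return -1;
	}
	/* hot: full result; no taken branch on the common path */
	return 0;
}

int main(void)
{
	printf("%d %d\n", complete_rw(4096, 4096), complete_rw(10, 4096));
	return 0;
}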
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8dfeb4f84026a20172bcf82c05010abe955874ae.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 86ef803db27a..18a91d268fb6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2640,7 +2640,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) { if (req->rw.kiocb.ki_flags & IOCB_WRITE) kiocb_end_write(req); - if (res != req->result) { + if (unlikely(res != req->result)) { if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { req->flags |= REQ_F_REISSUE; @@ -2695,9 +2695,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) req->flags |= REQ_F_REISSUE; return; } + req->result = res; } - req->result = res; /* order with io_iopoll_complete() checking ->iopoll_completed */ smp_store_release(&req->iopoll_completed, 1); } -- cgit v1.2.3 From 538941e2681c76ba6d6a7dd28ce691a9e02a9bdf Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:15 +0100 Subject: io_uring: encapsulate rw state Add a new struct io_rw_state storing all iov related bits: fast iov, iterator and iterator state. Not much changes here, simply convert struct io_async_rw to use it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e8245ffcb568b228a009ec1eb79c993c813679f1.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 18a91d268fb6..47365610de5d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -694,11 +694,15 @@ struct io_async_msghdr { struct sockaddr_storage addr; }; -struct io_async_rw { +struct io_rw_state { struct iovec fast_iov[UIO_FASTIOV]; - const struct iovec *free_iovec; struct iov_iter iter; struct iov_iter_state iter_state; +}; + +struct io_async_rw { + struct io_rw_state s; + const struct iovec *free_iovec; size_t bytes_done; struct wait_page_queue wpq; }; @@ -2596,7 +2600,7 @@ static bool io_resubmit_prep(struct io_kiocb *req) if (!req_has_async_data(req)) return !io_req_prep_async(req); - iov_iter_restore(&rw->iter, &rw->iter_state); + iov_iter_restore(&rw->s.iter, &rw->s.iter_state); return true; } @@ -3259,7 +3263,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, { struct io_async_rw *rw = req->async_data; - memcpy(&rw->iter, iter, sizeof(*iter)); + memcpy(&rw->s.iter, iter, sizeof(*iter)); rw->free_iovec = iovec; rw->bytes_done = 0; /* can only be fixed buffers, no need to do anything */ @@ -3268,13 +3272,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, if (!iovec) { unsigned iov_off = 0; - rw->iter.iov = rw->fast_iov; + rw->s.iter.iov = rw->s.fast_iov; if (iter->iov != fast_iov) { iov_off = iter->iov - fast_iov; - rw->iter.iov += iov_off; + rw->s.iter.iov += iov_off; } - if (rw->fast_iov != fast_iov) - memcpy(rw->fast_iov + iov_off, fast_iov + iov_off, + if (rw->s.fast_iov != fast_iov) + memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off, sizeof(struct iovec) * iter->nr_segs); } else { req->flags |= REQ_F_NEED_CLEANUP; @@ -3309,7 +3313,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, io_req_map_rw(req, iovec, fast_iov, iter); iorw = req->async_data; /* we've copied and mapped the iter, ensure 
state is saved */ - iov_iter_save_state(&iorw->iter, &iorw->iter_state); + iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); } return 0; } @@ -3317,10 +3321,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; - struct iovec *iov = iorw->fast_iov; + struct iovec *iov = iorw->s.fast_iov; int ret; - ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); + ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false); if (unlikely(ret < 0)) return ret; @@ -3328,7 +3332,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; - iov_iter_save_state(&iorw->iter, &iorw->iter_state); + iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); return 0; } @@ -3438,8 +3442,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->iter; - state = &rw->iter_state; + iter = &rw->s.iter; + state = &rw->s.iter_state; /* * We come here from an earlier attempt, restore our state to * match in case it doesn't. It's cheap enough that we don't @@ -3510,9 +3514,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * Now use our persistent iterator and state, if we aren't already. * We've restored and mapped the iter to match. */ - if (iter != &rw->iter) { - iter = &rw->iter; - state = &rw->iter_state; + if (iter != &rw->s.iter) { + iter = &rw->s.iter; + state = &rw->s.iter_state; } do { @@ -3574,8 +3578,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->iter; - state = &rw->iter_state; + iter = &rw->s.iter; + state = &rw->s.iter_state; iov_iter_restore(iter, state); iovec = NULL; } else { -- cgit v1.2.3 From c88598a92a587109d915b4d97831bcea774c8b6f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:16 +0100 Subject: io_uring: optimise read/write iov state storing Currently io_read() and io_write() keep separate pointers to an iter and to struct iov_iter_state, which is not great for register spilling and requires more on-stack copies. They are both either on-stack or in req->async_data at the same time, so use struct io_rw_state and keep a pointer only to it, so having all the state with just one pointer. 
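[Editorial illustration: the consolidation can be pictured with a small userspace sketch using hypothetical stand-ins for iov_iter and iov_iter_state. Grouping the iterator and its saved state lets a single pointer select either the on-stack copy or the persistent async copy.]

#include <stdio.h>
#include <string.h>

/* hypothetical stand-ins for iov_iter and iov_iter_state */
struct rw_state {
	const char *pos;     /* "iterator" */
	size_t left;
	size_t saved_left;   /* "saved state" for restarts */
};

struct async_data {
	struct rw_state s;   /* persistent copy used on retry */
};

static size_t sketch_read(struct rw_state *s, char *dst, size_t n)
{
	if (n > s->left)
		n = s->left;
	memcpy(dst, s->pos, n);
	s->pos += n;
	s->left -= n;
	return n;
}

int main(void)
{
	struct rw_state on_stack = { "hello, ring", 11, 11 };
	struct async_data async;
	struct rw_state *s = &on_stack;   /* one pointer carries all state */
	char buf[16];
	size_t got;

	/* "punt": move the whole state with one assignment and keep
	 * working through the same single pointer */
	async.s = *s;
	s = &async.s;

	got = sketch_read(s, buf, sizeof(buf) - 1);
	buf[got] = '\0';
	printf("%s (%zu bytes)\n", buf, got);
	return 0;
}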
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5c5e7ffd7dc25fc35075c70411ba99df72f237fa.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 79 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 42 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 47365610de5d..bb5cd8715f61 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -695,9 +695,9 @@ struct io_async_msghdr { }; struct io_rw_state { - struct iovec fast_iov[UIO_FASTIOV]; struct iov_iter iter; struct iov_iter_state iter_state; + struct iovec fast_iov[UIO_FASTIOV]; }; struct io_async_rw { @@ -3297,8 +3297,7 @@ static inline bool io_alloc_async_data(struct io_kiocb *req) } static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, - const struct iovec *fast_iov, - struct iov_iter *iter, bool force) + struct io_rw_state *s, bool force) { if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; @@ -3310,7 +3309,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, return -ENOMEM; } - io_req_map_rw(req, iovec, fast_iov, iter); + io_req_map_rw(req, iovec, s->fast_iov, &s->iter); iorw = req->async_data; /* we've copied and mapped the iter, ensure state is saved */ iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); @@ -3432,33 +3431,33 @@ static bool need_read_all(struct io_kiocb *req) static int io_read(struct io_kiocb *req, unsigned int issue_flags) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct io_rw_state __s, *s; + struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter __iter, *iter = &__iter; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - struct iov_iter_state __state, *state; struct io_async_rw *rw; ssize_t ret, ret2; if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->s.iter; - state = &rw->s.iter_state; + s = &rw->s; /* * We come here from an earlier attempt, restore our state to * match in case it doesn't. It's cheap enough that we don't * need to make this conditional. */ - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; } else { - ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); + s = &__s; + iovec = s->fast_iov; + ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock); if (ret < 0) return ret; - state = &__state; - iov_iter_save_state(iter, state); + + iov_iter_save_state(&s->iter, &s->iter_state); } - req->result = iov_iter_count(iter); + req->result = iov_iter_count(&s->iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3468,7 +3467,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) /* If the file doesn't support async, just async punt */ if (force_nonblock && !io_file_supports_nowait(req, READ)) { - ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + ret = io_setup_async_rw(req, iovec, s, true); return ret ?: -EAGAIN; } @@ -3478,7 +3477,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) return ret; } - ret = io_iter_do_read(req, iter); + ret = io_iter_do_read(req, &s->iter); if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { req->flags &= ~REQ_F_REISSUE; @@ -3502,22 +3501,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * untouched in case of error. Restore it and we'll advance it * manually if we need to. 
*/ - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); - ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + ret2 = io_setup_async_rw(req, iovec, s, true); if (ret2) return ret2; iovec = NULL; rw = req->async_data; + s = &rw->s; /* * Now use our persistent iterator and state, if we aren't already. * We've restored and mapped the iter to match. */ - if (iter != &rw->s.iter) { - iter = &rw->s.iter; - state = &rw->s.iter_state; - } do { /* @@ -3525,11 +3521,11 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * above or inside this loop. Advance the iter by the bytes * that were consumed. */ - iov_iter_advance(iter, ret); - if (!iov_iter_count(iter)) + iov_iter_advance(&s->iter, ret); + if (!iov_iter_count(&s->iter)) break; rw->bytes_done += ret; - iov_iter_save_state(iter, state); + iov_iter_save_state(&s->iter, &s->iter_state); /* if we can retry, do so with the callbacks armed */ if (!io_rw_should_retry(req)) { @@ -3543,12 +3539,12 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * desired page gets unlocked. We can also get a partial read * here, and if we do, then just retry at the new offset. */ - ret = io_iter_do_read(req, iter); + ret = io_iter_do_read(req, &s->iter); if (ret == -EIOCBQUEUED) return 0; /* we got some bytes, but not all. retry. */ kiocb->ki_flags &= ~IOCB_WAITQ; - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); } while (ret > 0); done: kiocb_done(kiocb, ret, issue_flags); @@ -3568,28 +3564,27 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_write(struct io_kiocb *req, unsigned int issue_flags) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct io_rw_state __s, *s; + struct io_async_rw *rw; + struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter __iter, *iter = &__iter; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - struct iov_iter_state __state, *state; - struct io_async_rw *rw; ssize_t ret, ret2; if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->s.iter; - state = &rw->s.iter_state; - iov_iter_restore(iter, state); + s = &rw->s; + iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; } else { - ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); + s = &__s; + iovec = s->fast_iov; + ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock); if (ret < 0) return ret; - state = &__state; - iov_iter_save_state(iter, state); + iov_iter_save_state(&s->iter, &s->iter_state); } - req->result = iov_iter_count(iter); + req->result = iov_iter_count(&s->iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3625,9 +3620,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) kiocb->ki_flags |= IOCB_WRITE; if (req->file->f_op->write_iter) - ret2 = call_write_iter(req->file, kiocb, iter); + ret2 = call_write_iter(req->file, kiocb, &s->iter); else if (req->file->f_op->write) - ret2 = loop_rw_iter(WRITE, req, iter); + ret2 = loop_rw_iter(WRITE, req, &s->iter); else ret2 = -EINVAL; @@ -3653,8 +3648,8 @@ done: kiocb_done(kiocb, ret2, issue_flags); } else { copy_iov: - iov_iter_restore(iter, state); - ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); + iov_iter_restore(&s->iter, &s->iter_state); + ret = io_setup_async_rw(req, iovec, s, false); return ret ?: -EAGAIN; } out_free: -- cgit v1.2.3 From 51aac424aef980a6238d4907a5bb5a411fa926eb Mon Sep 17 00:00:00 2001 From: Pavel 
Begunkov Date: Thu, 14 Oct 2021 16:10:17 +0100 Subject: io_uring: optimise io_import_iovec nonblock passing First, change IO_URING_F_NONBLOCK to take sign bit of the int, so checking for it can be turned into test + sign-based-jump, makes the binary smaller and may be faster. Then, instead of passing need_lock boolean into io_import_iovec() just give it issue_flags, which is already stored somewhere. Saves some space on stack, a couple of test + cmov operations and other conversions. note: we still leave force_nonblock = issue_flags & IO_URING_F_NONBLOCK variable, but it's optimised out by the compiler into testing issue_flags directly. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ee96547e692f6c975c229cd82fc721679571a734.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index bb5cd8715f61..0a0f84b28eba 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -198,8 +198,9 @@ struct io_rings { }; enum io_uring_cmd_flags { - IO_URING_F_NONBLOCK = 1, - IO_URING_F_COMPLETE_DEFER = 2, + IO_URING_F_COMPLETE_DEFER = 1, + /* int's last bit, sign checks are usually faster than a bit test */ + IO_URING_F_NONBLOCK = INT_MIN, }; struct io_mapped_ubuf { @@ -3037,10 +3038,11 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) } static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, - int bgid, bool needs_lock) + int bgid, unsigned int issue_flags) { struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; + bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK); if (req->flags & REQ_F_BUFFER_SELECTED) return kbuf; @@ -3072,13 +3074,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, } static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, - bool needs_lock) + unsigned int issue_flags) { struct io_buffer *kbuf; u16 bgid; bgid = req->buf_index; - kbuf = io_buffer_select(req, len, bgid, needs_lock); + kbuf = io_buffer_select(req, len, bgid, issue_flags); if (IS_ERR(kbuf)) return kbuf; return u64_to_user_ptr(kbuf->addr); @@ -3086,7 +3088,7 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, #ifdef CONFIG_COMPAT static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { struct compat_iovec __user *uiov; compat_ssize_t clen; @@ -3102,7 +3104,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, return -EINVAL; len = clen; - buf = io_rw_buffer_select(req, &len, needs_lock); + buf = io_rw_buffer_select(req, &len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); iov[0].iov_base = buf; @@ -3112,7 +3114,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, #endif static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); void __user *buf; @@ -3124,7 +3126,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, len = iov[0].iov_len; if (len < 0) return -EINVAL; - buf = io_rw_buffer_select(req, &len, needs_lock); + buf = io_rw_buffer_select(req, &len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); iov[0].iov_base = buf; @@ -3133,7 +3135,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct 
iovec *iov, } static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { if (req->flags & REQ_F_BUFFER_SELECTED) { struct io_buffer *kbuf = req->kbuf; @@ -3147,14 +3149,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, #ifdef CONFIG_COMPAT if (req->ctx->compat) - return io_compat_import(req, iov, needs_lock); + return io_compat_import(req, iov, issue_flags); #endif - return __io_iov_buffer_select(req, iov, needs_lock); + return __io_iov_buffer_select(req, iov, issue_flags); } static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, bool needs_lock) + struct iov_iter *iter, unsigned int issue_flags) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; @@ -3172,7 +3174,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { - buf = io_rw_buffer_select(req, &sqe_len, needs_lock); + buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); req->rw.len = sqe_len; @@ -3184,7 +3186,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, } if (req->flags & REQ_F_BUFFER_SELECT) { - ret = io_iov_buffer_select(req, *iovec, needs_lock); + ret = io_iov_buffer_select(req, *iovec, issue_flags); if (!ret) iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); *iovec = NULL; @@ -3323,7 +3325,8 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) struct iovec *iov = iorw->s.fast_iov; int ret; - ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false); + /* submission path, ->uring_lock should already be taken */ + ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK); if (unlikely(ret < 0)) return ret; @@ -3451,7 +3454,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) } else { s = &__s; iovec = s->fast_iov; - ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock); + ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags); if (ret < 0) return ret; @@ -3579,7 +3582,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) } else { s = &__s; iovec = s->fast_iov; - ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock); + ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags); if (ret < 0) return ret; iov_iter_save_state(&s->iter, &s->iter_state); @@ -4902,11 +4905,11 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, } static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, - bool needs_lock) + unsigned int issue_flags) { struct io_sr_msg *sr = &req->sr_msg; - return io_buffer_select(req, &sr->len, sr->bgid, needs_lock); + return io_buffer_select(req, &sr->len, sr->bgid, issue_flags); } static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) @@ -4969,7 +4972,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) } if (req->flags & REQ_F_BUFFER_SELECT) { - kbuf = io_recv_buffer_select(req, !force_nonblock); + kbuf = io_recv_buffer_select(req, issue_flags); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); @@ -5021,7 +5024,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) return -ENOTSOCK; if (req->flags & REQ_F_BUFFER_SELECT) { - kbuf = io_recv_buffer_select(req, !force_nonblock); + kbuf = io_recv_buffer_select(req, 
issue_flags); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); buf = u64_to_user_ptr(kbuf->addr); -- cgit v1.2.3 From 5e49c973fc39ffce287ab0763cb6f1e607962689 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:18 +0100 Subject: io_uring: clean up io_import_iovec Make io_import_iovec() take struct io_rw_state instead of an iter pointer. First, it takes care of initialising the iovec pointer, which can otherwise be forgotten. Better still, we can now skip initialising it when it isn't needed, e.g. for IORING_OP_READ_FIXED or IORING_OP_READ. Also hide the iter_state saving inside it by splitting out an inline wrapper, avoiding extra ifs. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b1bbc213a95e5272d4da5867bb977d9acb6f2109.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0a0f84b28eba..cf208d713f49 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3155,9 +3155,10 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, issue_flags); } -static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, unsigned int issue_flags) +static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, + struct io_rw_state *s, unsigned int issue_flags) { + struct iov_iter *iter = &s->iter; void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; u8 opcode = req->opcode; @@ -3180,11 +3181,13 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, req->rw.len = sqe_len; } - ret = import_single_range(rw, buf, sqe_len, *iovec, iter); + ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter); *iovec = NULL; return ret; } + *iovec = s->fast_iov; + if (req->flags & REQ_F_BUFFER_SELECT) { ret = io_iov_buffer_select(req, *iovec, issue_flags); if (!ret) @@ -3197,6 +3200,19 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, req->ctx->compat); } +static inline int io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct io_rw_state *s, + unsigned int issue_flags) +{ + int ret; + + ret = __io_import_iovec(rw, req, iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + iov_iter_save_state(&s->iter, &s->iter_state); + return ret; +} + static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) { return (kiocb->ki_filp->f_mode & FMODE_STREAM) ?
NULL : &kiocb->ki_pos; @@ -3322,11 +3338,11 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; - struct iovec *iov = iorw->s.fast_iov; + struct iovec *iov; int ret; /* submission path, ->uring_lock should already be taken */ - ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK); + ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK); if (unlikely(ret < 0)) return ret; @@ -3334,7 +3350,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; - iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); return 0; } @@ -3453,12 +3468,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) iovec = NULL; } else { s = &__s; - iovec = s->fast_iov; - ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags); - if (ret < 0) + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) return ret; - - iov_iter_save_state(&s->iter, &s->iter_state); } req->result = iov_iter_count(&s->iter); @@ -3581,11 +3593,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) iovec = NULL; } else { s = &__s; - iovec = s->fast_iov; - ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags); - if (ret < 0) + ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) return ret; - iov_iter_save_state(&s->iter, &s->iter_state); } req->result = iov_iter_count(&s->iter); -- cgit v1.2.3 From 607b6fb8017a684f3f1567ccf776bdc0fe6d120e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:19 +0100 Subject: io_uring: rearrange io_read()/write() Combine the force_nonblock branches (which the compiler already optimises), flip branches so the hottest/most common path comes first, e.g. the non on-stack iov setup, and add extra likely/unlikely annotations for error paths.
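The rule being applied generalises: lay the common case out as the first, fall-through branch and hint the compiler about the cold ones. A hedged userspace sketch of the shape (the likely/unlikely wrappers are the conventional __builtin_expect definitions; the rest is illustrative):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int do_import(void)
{
	return 0;	/* pretend the iovec import succeeded */
}

/* hot path first: most requests arrive without async data */
static int issue(int has_async_data)
{
	if (!has_async_data) {
		int ret = do_import();

		if (unlikely(ret < 0))	/* cold error path, hinted as such */
			return ret;
	} else {
		/* restore the previously saved iterator state here */
	}
	return 0;
}

int main(void)
{
	return issue(0);
}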
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/2c2536c5896d70994de76e387ea09a0402173a3f.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 75 +++++++++++++++++++++++++++++------------------------------ 1 file changed, 37 insertions(+), 38 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index cf208d713f49..a2f989e84583 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3433,7 +3433,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) { - if (req->file->f_op->read_iter) + if (likely(req->file->f_op->read_iter)) return call_read_iter(req->file, &req->rw.kiocb, iter); else if (req->file->f_op->read) return loop_rw_iter(READ, req, iter); @@ -3449,14 +3449,18 @@ static bool need_read_all(struct io_kiocb *req) static int io_read(struct io_kiocb *req, unsigned int issue_flags) { - struct io_rw_state __s, *s; + struct io_rw_state __s, *s = &__s; struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct io_async_rw *rw; ssize_t ret, ret2; - if (req_has_async_data(req)) { + if (!req_has_async_data(req)) { + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + } else { rw = req->async_data; s = &rw->s; /* @@ -3466,24 +3470,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) */ iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; - } else { - s = &__s; - ret = io_import_iovec(READ, req, &iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; } req->result = iov_iter_count(&s->iter); - /* Ensure we clear previously set non-block flag */ - if (!force_nonblock) - kiocb->ki_flags &= ~IOCB_NOWAIT; - else + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if (unlikely(!io_file_supports_nowait(req, READ))) { + ret = io_setup_async_rw(req, iovec, s, true); + return ret ?: -EAGAIN; + } kiocb->ki_flags |= IOCB_NOWAIT; - - /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_nowait(req, READ)) { - ret = io_setup_async_rw(req, iovec, s, true); - return ret ?: -EAGAIN; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; } ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); @@ -3579,40 +3578,40 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_write(struct io_kiocb *req, unsigned int issue_flags) { - struct io_rw_state __s, *s; - struct io_async_rw *rw; + struct io_rw_state __s, *s = &__s; struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; ssize_t ret, ret2; - if (req_has_async_data(req)) { - rw = req->async_data; - s = &rw->s; - iov_iter_restore(&s->iter, &s->iter_state); - iovec = NULL; - } else { - s = &__s; + if (!req_has_async_data(req)) { ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); if (unlikely(ret < 0)) return ret; + } else { + struct io_async_rw *rw = req->async_data; + + s = &rw->s; + iov_iter_restore(&s->iter, &s->iter_state); + iovec = NULL; } req->result = iov_iter_count(&s->iter); - /* Ensure we clear previously set non-block flag */ - if (!force_nonblock) - kiocb->ki_flags &= ~IOCB_NOWAIT; - else - kiocb->ki_flags |= IOCB_NOWAIT; + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if 
(unlikely(!io_file_supports_nowait(req, WRITE))) + goto copy_iov; - /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_nowait(req, WRITE)) - goto copy_iov; + /* file path doesn't support NOWAIT for non-direct_IO */ + if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) && + (req->flags & REQ_F_ISREG)) + goto copy_iov; - /* file path doesn't support NOWAIT for non-direct_IO */ - if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) && - (req->flags & REQ_F_ISREG)) - goto copy_iov; + kiocb->ki_flags |= IOCB_NOWAIT; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; + } ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); if (unlikely(ret)) -- cgit v1.2.3 From 9983028e7660a2cc5e58403d8ce29569dbf3162d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:11 +0100 Subject: io_uring: optimise req->ctx reloads Don't load req->ctx in advance, it takes an extra register and the field stays valid even after opcode handlers. It also optimises out req->ctx load in io_iopoll_req_issued() once it's inlined. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1e45ff671c44be0eb904f2e448a211734893fa0b.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index a2f989e84583..5a4d13ad1277 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6588,7 +6588,6 @@ static void io_clean_op(struct io_kiocb *req) static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) { - struct io_ring_ctx *ctx = req->ctx; const struct cred *creds = NULL; int ret; @@ -6715,7 +6714,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) if (ret) return ret; /* If the op doesn't have a file, we're not polling for it */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file) io_iopoll_req_issued(req); return 0; -- cgit v1.2.3 From 9882131cd9de525d484de117644e5008c6557ac7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:12 +0100 Subject: io_uring: kill io_wq_current_is_worker() in iopoll Don't decide about locking based on io_wq_current_is_worker(), it's not consistent with all other code and is expensive, use issue_flags. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7546d5a58efa4360173541c6fe02ee6b8c7b4ea7.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5a4d13ad1277..348e587524cf 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2713,13 +2713,13 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) * find it from a io_do_iopoll() thread before the issuer is done * accessing the kiocb cookie. 
*/ -static void io_iopoll_req_issued(struct io_kiocb *req) +static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - const bool in_async = io_wq_current_is_worker(); + const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK); /* workqueue context doesn't hold uring_lock, grab it now */ - if (unlikely(in_async)) + if (unlikely(need_lock)) mutex_lock(&ctx->uring_lock); /* @@ -2747,7 +2747,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req) else wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); - if (unlikely(in_async)) { + if (unlikely(need_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle * in sq thread task context or in io worker task context. If @@ -6715,7 +6715,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) return ret; /* If the op doesn't have a file, we're not polling for it */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file) - io_iopoll_req_issued(req); + io_iopoll_req_issued(req, issue_flags); return 0; } -- cgit v1.2.3 From d1d681b0846af8585904be562610bdfc70bf21aa Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:13 +0100 Subject: io_uring: optimise io_import_iovec fixed path Delay loading req->rw.{addr,len} in io_import_iovec until they're really needed, removing extra loads on the fixed path, which doesn't use them. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/3cc48dd0c4f1a37c4ce9aab5784281a2d83ad8be.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 348e587524cf..00267e799ac2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3159,9 +3159,9 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct io_rw_state *s, unsigned int issue_flags) { struct iov_iter *iter = &s->iter; - void __user *buf = u64_to_user_ptr(req->rw.addr); - size_t sqe_len = req->rw.len; u8 opcode = req->opcode; + void __user *buf; + size_t sqe_len; ssize_t ret; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { @@ -3170,9 +3170,12 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, /* buffer index only valid with fixed read/write, or buffer select */ - if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) + if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))) return -EINVAL; + buf = u64_to_user_ptr(req->rw.addr); + sqe_len = req->rw.len; + if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); -- cgit v1.2.3 From caa8fe6e86fd32e26d05f724e07ec826807c6ad4 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:14 +0100 Subject: io_uring: return iovec from __io_import_iovec We pass an iovec** into __io_import_iovec(), which is expected to keep it initialised and update it accordingly. That's expensive; instead, return the iovec directly from __io_import_iovec(), encoding errors with ERR_PTR if needed. io_import_iovec() keeps the old interface, but it's inline, so everything is optimised out nicely.
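The ERR_PTR scheme works because the top 4095 values of the address space never correspond to valid kernel pointers, so a small negative errno can ride in the pointer itself and the out-parameter disappears. A simplified, self-contained rendition of the helpers (modelled on include/linux/err.h, not copied from this patch):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static char buffer[64];

/* return the buffer directly; errors travel in the same pointer */
static void *import(int fail)
{
	if (fail)
		return ERR_PTR(-22);	/* -EINVAL */
	return buffer;
}

int main(void)
{
	void *p = import(1);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));
	return 0;
}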
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6230e9769982f03a8f86fa58df24666088c44d3e.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 00267e799ac2..d97545e43a16 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3155,23 +3155,25 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, issue_flags); } -static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct io_rw_state *s, unsigned int issue_flags) +static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, + struct io_rw_state *s, + unsigned int issue_flags) { struct iov_iter *iter = &s->iter; u8 opcode = req->opcode; + struct iovec *iovec; void __user *buf; size_t sqe_len; ssize_t ret; - if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { - *iovec = NULL; - return io_import_fixed(req, rw, iter); - } + BUILD_BUG_ON(ERR_PTR(0) != NULL); + + if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) + return ERR_PTR(io_import_fixed(req, rw, iter)); /* buffer index only valid with fixed read/write, or buffer select */ if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))) - return -EINVAL; + return ERR_PTR(-EINVAL); buf = u64_to_user_ptr(req->rw.addr); sqe_len = req->rw.len; @@ -3180,40 +3182,39 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) - return PTR_ERR(buf); + return ERR_PTR(PTR_ERR(buf)); req->rw.len = sqe_len; } ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter); - *iovec = NULL; - return ret; + return ERR_PTR(ret); } - *iovec = s->fast_iov; - + iovec = s->fast_iov; if (req->flags & REQ_F_BUFFER_SELECT) { - ret = io_iov_buffer_select(req, *iovec, issue_flags); + ret = io_iov_buffer_select(req, iovec, issue_flags); if (!ret) - iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); - *iovec = NULL; - return ret; + iov_iter_init(iter, rw, iovec, 1, iovec->iov_len); + return ERR_PTR(ret); } - return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter, + ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter, req->ctx->compat); + if (unlikely(ret < 0)) + return ERR_PTR(ret); + return iovec; } static inline int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct io_rw_state *s, unsigned int issue_flags) { - int ret; + *iovec = __io_import_iovec(rw, req, s, issue_flags); + if (unlikely(IS_ERR(*iovec))) + return PTR_ERR(*iovec); - ret = __io_import_iovec(rw, req, iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; iov_iter_save_state(&s->iter, &s->iter_state); - return ret; + return 0; } static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) -- cgit v1.2.3 From 578c0ee234e51d9746e526a26bdb2c1b7ad81dba Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:15 +0100 Subject: io_uring: optimise fixed rw rsrc node setting Move fixed rw io_req_set_rsrc_node() from rw prep into io_import_fixed(), if we're using fixed buffers it will always be called during submission as we save the state in advance, Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/68c06f66d5aa9661f1e4b88d08c52d23528297ec.1634314022.git.asml.silence@gmail.com 
Signed-off-by: Jens Axboe --- fs/io_uring.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d97545e43a16..014363abc2b2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2871,12 +2871,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, kiocb->ki_complete = io_complete_rw; } - if (req->opcode == IORING_OP_READ_FIXED || - req->opcode == IORING_OP_WRITE_FIXED) { - req->imu = NULL; - io_req_set_rsrc_node(req, ctx); - } - + req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); req->buf_index = READ_ONCE(sqe->buf_index); @@ -3005,13 +3000,15 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) { - struct io_ring_ctx *ctx = req->ctx; struct io_mapped_ubuf *imu = req->imu; u16 index, buf_index = req->buf_index; if (likely(!imu)) { + struct io_ring_ctx *ctx = req->ctx; + if (unlikely(buf_index >= ctx->nr_user_bufs)) return -EFAULT; + io_req_set_rsrc_node(req, ctx); index = array_index_nospec(buf_index, ctx->nr_user_bufs); imu = READ_ONCE(ctx->user_bufs[index]); req->imu = imu; -- cgit v1.2.3 From 5cb03d63420bcf533111d30690141a5b28c172fa Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:16 +0100 Subject: io_uring: clean io_prep_rw() We already store req->file in a variable in io_prep_rw(), just use it instead of the couple of remaining references to kiocb->ki_filp. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/2f5889fc7ab670daefd5ccaedd99416d8355f0ad.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 014363abc2b2..d5d06e034959 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2832,8 +2832,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, req->flags |= REQ_F_CUR_POS; kiocb->ki_pos = file->f_pos; } - kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); - kiocb->ki_flags = iocb_flags(kiocb->ki_filp); + kiocb->ki_hint = ki_hint_validate(file_write_hint(file)); + kiocb->ki_flags = iocb_flags(file); ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); if (unlikely(ret)) return ret; @@ -2858,8 +2858,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, kiocb->ki_ioprio = get_current_ioprio(); if (ctx->flags & IORING_SETUP_IOPOLL) { - if (!(kiocb->ki_flags & IOCB_DIRECT) || - !kiocb->ki_filp->f_op->iopoll) + if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) return -EOPNOTSUPP; kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; -- cgit v1.2.3 From b10841c98c8980ee50cc1e90640b04c935a24285 Mon Sep 17 00:00:00 2001 From: Noah Goldstein Date: Sat, 16 Oct 2021 20:32:29 -0500 Subject: fs/io_uring: Prioritise checking faster conditions first in io_write This commit reorders the conditions in a branch in io_write. It checks 'ret2 == -EAGAIN' first, as evaluating '(req->ctx->flags & IORING_SETUP_IOPOLL)' is likely more expensive due to two memory dereferences.
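The same guideline applies to any short-circuit chain: put the operand that is cheapest, and ideally already in a register, on the left, so && skips the expensive loads in the common case. A small sketch with stand-in structs (not the io_uring ones):

struct ctx { unsigned int flags; };
struct req { struct ctx *ctx; };

#define SETUP_IOPOLL	0x1u

/*
 * ret2 already sits in a register; req->ctx->flags costs two
 * dependent loads, so it is tested only when ret2 == -11 (-EAGAIN).
 */
static int should_copy_iov(int ret2, struct req *req)
{
	return ret2 == -11 && (req->ctx->flags & SETUP_IOPOLL);
}

int main(void)
{
	struct ctx c = { .flags = SETUP_IOPOLL };
	struct req r = { .ctx = &c };

	return should_copy_iov(-11, &r) ? 0 : 1;
}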
Signed-off-by: Noah Goldstein Link: https://lore.kernel.org/r/20211017013229.4124279-1-goldstein.w.n@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d5d06e034959..11bd0dd1425a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3654,7 +3654,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) goto done; if (!force_nonblock || ret2 != -EAGAIN) { /* IOPOLL retry should happen for io-wq threads */ - if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) goto copy_iov; done: kiocb_done(kiocb, ret2, issue_flags); -- cgit v1.2.3 From e74ead135bc4459f7d40b1f8edab1333a28b54e8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:08 +0100 Subject: io_uring: arm poll for non-nowait files Don't check whether we can do nowait before arming apoll; there are several reasons for that. First, we don't care much about files that don't support nowait. Second, it may be useful: we don't want to take extra workers away from io-wq when the request can go async some other way. Even if it ends up going through io-wq eventually, it makes a difference in the number of workers actually used. And lastly, it's needed to clean up nowait handling in future commits. [kernel test robot: fix unused-var] Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9d06f3cb2c8b686d970269a87986f154edb83043.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 11bd0dd1425a..8a5b20eafccf 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5588,7 +5588,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI; - int rw; if (!req->file || !file_can_poll(req->file)) return IO_APOLL_ABORTED; @@ -5598,7 +5597,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) return IO_APOLL_ABORTED; if (def->pollin) { - rw = READ; mask |= POLLIN | POLLRDNORM; /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ @@ -5606,14 +5604,9 @@ static int io_arm_poll_handler(struct io_kiocb *req) (req->sr_msg.msg_flags & MSG_ERRQUEUE)) mask &= ~POLLIN; } else { - rw = WRITE; mask |= POLLOUT | POLLWRNORM; } - /* if we can't nonblock try, then no point in arming a poll handler */ - if (!io_file_supports_nowait(req, rw)) - return IO_APOLL_ABORTED; - apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) return IO_APOLL_ABORTED; -- cgit v1.2.3 From 35645ac3c1853fbb54d8acd50fd12184f7905d5f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:09 +0100 Subject: io_uring: combine REQ_F_NOWAIT_{READ,WRITE} flags Merge REQ_F_NOWAIT_READ and REQ_F_NOWAIT_WRITE into one flag, REQ_F_SUPPORT_NOWAIT. It gets rid of the dependence on CONFIG_64BIT and also simplifies the code. One thing to consider is when we don't have ->{read,write}_iter and go through loop_rw_iter(): just fail with -EAGAIN if we expect nowait behaviour but aren't sure whether the file supports it.
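To picture the merge, the before/after bit layouts look roughly like this (bit positions are illustrative, not the kernel's exact values):

#include <stdbool.h>

/* before: one capability bit per direction */
enum {
	REQ_F_NOWAIT_READ_OLD	= 1u << 0,
	REQ_F_NOWAIT_WRITE_OLD	= 1u << 1,
};

/* after: a single bit covers both directions */
enum {
	REQ_F_SUPPORT_NOWAIT	= 1u << 0,
	REQ_F_ISREG		= 1u << 1,
};

static inline bool supports_nowait(unsigned int flags)
{
	return flags & REQ_F_SUPPORT_NOWAIT;
}

int main(void)
{
	return supports_nowait(REQ_F_SUPPORT_NOWAIT | REQ_F_ISREG) ? 0 : 1;
}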
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f832a20e5186c2e79c6519280c238f559a1d2bbc.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 61 ++++++++++++++++++++--------------------------------------- 1 file changed, 21 insertions(+), 40 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 8a5b20eafccf..d7bf2002ab0c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -732,8 +732,7 @@ enum { REQ_F_ARM_LTIMEOUT_BIT, REQ_F_ASYNC_DATA_BIT, /* keep async read/write and isreg together and in order */ - REQ_F_NOWAIT_READ_BIT, - REQ_F_NOWAIT_WRITE_BIT, + REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, /* not a real bit, just to check we're not overflowing the space */ @@ -774,10 +773,8 @@ enum { REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), /* caller should reissue async */ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), - /* supports async reads */ - REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), - /* supports async writes */ - REQ_F_NOWAIT_WRITE = BIT(REQ_F_NOWAIT_WRITE_BIT), + /* supports async reads/writes */ + REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), /* regular file */ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), /* has creds assigned */ @@ -1390,18 +1387,13 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq) return false; } -#define FFS_ASYNC_READ 0x1UL -#define FFS_ASYNC_WRITE 0x2UL -#ifdef CONFIG_64BIT -#define FFS_ISREG 0x4UL -#else -#define FFS_ISREG 0x0UL -#endif -#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG) +#define FFS_NOWAIT 0x1UL +#define FFS_ISREG 0x2UL +#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG) static inline bool io_req_ffs_set(struct io_kiocb *req) { - return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); + return req->flags & REQ_F_FIXED_FILE; } static inline void io_req_track_inflight(struct io_kiocb *req) @@ -2772,7 +2764,7 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline. */ -static bool __io_file_supports_nowait(struct file *file, int rw) +static bool __io_file_supports_nowait(struct file *file) { umode_t mode = file_inode(file)->i_mode; @@ -2795,24 +2787,14 @@ static bool __io_file_supports_nowait(struct file *file, int rw) /* any ->read/write should understand O_NONBLOCK */ if (file->f_flags & O_NONBLOCK) return true; - - if (!(file->f_mode & FMODE_NOWAIT)) - return false; - - if (rw == READ) - return file->f_op->read_iter != NULL; - - return file->f_op->write_iter != NULL; + return file->f_mode & FMODE_NOWAIT; } -static bool io_file_supports_nowait(struct io_kiocb *req, int rw) +static inline bool io_file_supports_nowait(struct io_kiocb *req) { - if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) + if (likely(req->flags & REQ_F_SUPPORT_NOWAIT)) return true; - else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) - return true; - - return __io_file_supports_nowait(req->file, rw); + return __io_file_supports_nowait(req->file); } static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -2844,7 +2826,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, * reliably. If not, or it IOCB_NOWAIT is set, don't retry. 
*/ if ((kiocb->ki_flags & IOCB_NOWAIT) || - ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) req->flags |= REQ_F_NOWAIT; ioprio = READ_ONCE(sqe->ioprio); @@ -3235,7 +3217,8 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) */ if (kiocb->ki_flags & IOCB_HIPRI) return -EOPNOTSUPP; - if (kiocb->ki_flags & IOCB_NOWAIT) + if ((kiocb->ki_flags & IOCB_NOWAIT) && + !(kiocb->ki_filp->f_flags & O_NONBLOCK)) return -EAGAIN; while (iov_iter_count(iter)) { @@ -3475,7 +3458,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) if (force_nonblock) { /* If the file doesn't support async, just async punt */ - if (unlikely(!io_file_supports_nowait(req, READ))) { + if (unlikely(!io_file_supports_nowait(req))) { ret = io_setup_async_rw(req, iovec, s, true); return ret ?: -EAGAIN; } @@ -3599,7 +3582,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) if (force_nonblock) { /* If the file doesn't support async, just async punt */ - if (unlikely(!io_file_supports_nowait(req, WRITE))) + if (unlikely(!io_file_supports_nowait(req))) goto copy_iov; /* file path doesn't support NOWAIT for non-direct_IO */ @@ -3631,7 +3614,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) } kiocb->ki_flags |= IOCB_WRITE; - if (req->file->f_op->write_iter) + if (likely(req->file->f_op->write_iter)) ret2 = call_write_iter(req->file, kiocb, &s->iter); else if (req->file->f_op->write) ret2 = loop_rw_iter(WRITE, req, &s->iter); @@ -6778,10 +6761,8 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file { unsigned long file_ptr = (unsigned long) file; - if (__io_file_supports_nowait(file, READ)) - file_ptr |= FFS_ASYNC_READ; - if (__io_file_supports_nowait(file, WRITE)) - file_ptr |= FFS_ASYNC_WRITE; + if (__io_file_supports_nowait(file)) + file_ptr |= FFS_NOWAIT; if (S_ISREG(file_inode(file)->i_mode)) file_ptr |= FFS_ISREG; file_slot->file_ptr = file_ptr; @@ -6800,7 +6781,7 @@ static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, file = (struct file *) (file_ptr & FFS_MASK); file_ptr &= ~FFS_MASK; /* mask in overlapping REQ_F and FFS bits */ - req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); + req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT); io_req_set_rsrc_node(req, ctx); return file; } -- cgit v1.2.3 From 88459b50b42a4bd58e528006663afabd0b8652f2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:10 +0100 Subject: io_uring: simplify io_file_supports_nowait() Make sure that REQ_F_SUPPORT_NOWAIT is always set io_prep_rw(), and so we can stop caring about setting it down the line simplifying io_file_supports_nowait(). Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/60c8f1f5e2cb45e00f4897b2cec10c5b3669da91.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d7bf2002ab0c..b834cf2778f6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2764,10 +2764,8 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline. 
*/ -static bool __io_file_supports_nowait(struct file *file) +static bool __io_file_supports_nowait(struct file *file, umode_t mode) { - umode_t mode = file_inode(file)->i_mode; - if (S_ISBLK(mode)) { if (IS_ENABLED(CONFIG_BLOCK) && io_bdev_nowait(I_BDEV(file->f_mapping->host))) @@ -2790,11 +2788,26 @@ static bool __io_file_supports_nowait(struct file *file) return file->f_mode & FMODE_NOWAIT; } +/* + * If we tracked the file through the SCM inflight mechanism, we could support + * any file. For now, just ensure that anything potentially problematic is done + * inline. + */ +static unsigned int io_file_get_flags(struct file *file) +{ + umode_t mode = file_inode(file)->i_mode; + unsigned int res = 0; + + if (S_ISREG(mode)) + res |= FFS_ISREG; + if (__io_file_supports_nowait(file, mode)) + res |= FFS_NOWAIT; + return res; +} + static inline bool io_file_supports_nowait(struct io_kiocb *req) { - if (likely(req->flags & REQ_F_SUPPORT_NOWAIT)) - return true; - return __io_file_supports_nowait(req->file); + return req->flags & REQ_F_SUPPORT_NOWAIT; } static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -2806,8 +2819,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, unsigned ioprio; int ret; - if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) - req->flags |= REQ_F_ISREG; + if (!io_req_ffs_set(req)) + req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; kiocb->ki_pos = READ_ONCE(sqe->off); if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) { @@ -6761,10 +6774,7 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file { unsigned long file_ptr = (unsigned long) file; - if (__io_file_supports_nowait(file)) - file_ptr |= FFS_NOWAIT; - if (S_ISREG(file_inode(file)->i_mode)) - file_ptr |= FFS_ISREG; + file_ptr |= io_file_get_flags(file); file_slot->file_ptr = file_ptr; } -- cgit v1.2.3 From 5ca7a8b3f698b111729ed8e133b14e30f961de0f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 11:01:42 -0600 Subject: io_uring: inform block layer of how many requests we are submitting The block layer can use this knowledge to make smarter decisions on how to handle the request, if it knows that N more may be coming. Switch to using blk_start_plug_nr_ios() to pass in that information. 
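The pattern is a capacity hint handed down when the plug is opened, so the lower layer can size its batching up front instead of guessing. A userspace sketch of the shape of the interface (the plug struct and helpers here are invented for illustration):

#include <stdio.h>

struct plug {
	unsigned short expected;	/* hint: how many IOs are coming */
	unsigned short queued;
};

/* mirrors the shape of blk_start_plug_nr_ios() */
static void plug_start_nr_ios(struct plug *p, unsigned short nr_ios)
{
	p->expected = nr_ios;
	p->queued = 0;
}

static void submit_batch(struct plug *p, unsigned short nr)
{
	plug_start_nr_ios(p, nr);	/* tell the lower layer up front */
	for (unsigned short i = 0; i < nr; i++)
		p->queued++;		/* stand-in for queuing one request */
	printf("expected %u, queued %u\n", p->expected, p->queued);
}

int main(void)
{
	struct plug p;

	submit_batch(&p, 8);
	return 0;
}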
Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index b834cf2778f6..240222da2989 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -317,6 +317,7 @@ struct io_submit_state { bool plug_started; bool need_plug; + unsigned short submit_nr; struct blk_plug plug; }; @@ -7060,7 +7061,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (state->need_plug && io_op_defs[opcode].plug) { state->plug_started = true; state->need_plug = false; - blk_start_plug(&state->plug); + blk_start_plug_nr_ios(&state->plug, state->submit_nr); } req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), @@ -7181,6 +7182,7 @@ static void io_submit_state_start(struct io_submit_state *state, { state->plug_started = false; state->need_plug = max_ios > 2; + state->submit_nr = max_ios; /* set only head, no need to init link_last in advance */ state->link.head = NULL; } -- cgit v1.2.3 From 00169246e6981752e53266c62d0ab0c827493634 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 19 Oct 2021 17:34:53 +0200 Subject: io_uring: warning about unused-but-set parameter When enabling -Wunused warnings by building with W=1, I get an instance of the -Wunused-but-set-parameter warning in the io_uring code: fs/io_uring.c: In function 'io_queue_async_work': fs/io_uring.c:1445:61: error: parameter 'locked' set but not used [-Werror=unused-but-set-parameter] 1445 | static void io_queue_async_work(struct io_kiocb *req, bool *locked) | ~~~~~~^~~~~~ There are very few warnings of this type, so it would be nice to enable this by default and fix all the existing instances. As the assignment serves no purpose by itself other than to prevent developers from using the variable, an easy workaround is to remove the assignment and just rename the argument to "dont_use". Fixes: f237c30a5610 ("io_uring: batch task work locking") Link: https://lore.kernel.org/lkml/20210920121352.93063-1-arnd@kernel.org/ Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20211019153507.348480-1-arnd@kernel.org Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 240222da2989..df07ce2fa6bc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1487,15 +1487,12 @@ static inline void io_req_add_compl_list(struct io_kiocb *req) wq_list_add_tail(&req->comp_list, &state->compl_reqs); } -static void io_queue_async_work(struct io_kiocb *req, bool *locked) +static void io_queue_async_work(struct io_kiocb *req, bool *dont_use) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->task->io_uring; - /* must not take the lock, NULL it as a precaution */ - locked = NULL; - BUG_ON(!tctx); BUG_ON(!tctx->io_wq); -- cgit v1.2.3 From 3b44b3712c5b19b0af11c25cd978abdc3680d5e7 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 18 Oct 2021 21:34:31 +0800 Subject: io_uring: split logic of force_nonblock Currently force_nonblock carries two meanings:
- nowait or not
- in an io-worker or not (i.e. whether ->uring_lock is held)
Let's split the logic into two flags, IO_URING_F_NONBLOCK and IO_URING_F_UNLOCKED, for the convenience of the next patch.
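After the split, each caller decodes only the bit it cares about; a minimal sketch using the flag values from the patch (the issue() helper is illustrative):

#include <limits.h>
#include <stdbool.h>

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,
};

static void issue(unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	(void)force_nonblock;	/* the nowait question */
	(void)needs_lock;	/* the locking question, now independent */
}

int main(void)
{
	issue(IO_URING_F_UNLOCKED);
	return 0;
}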
Suggested-by: Pavel Begunkov Signed-off-by: Hao Xu Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20211018133431.103298-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 48 ++++++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 22 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index df07ce2fa6bc..c87931d8b503 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -199,6 +199,7 @@ struct io_rings { enum io_uring_cmd_flags { IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, /* int's last bit, sign checks are usually faster than a bit test */ IO_URING_F_NONBLOCK = INT_MIN, }; @@ -2706,10 +2707,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK); + const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; /* workqueue context doesn't hold uring_lock, grab it now */ - if (unlikely(need_lock)) + if (unlikely(needs_lock)) mutex_lock(&ctx->uring_lock); /* @@ -2737,7 +2738,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) else wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); - if (unlikely(need_lock)) { + if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle * in sq thread task context or in io worker task context. If @@ -2921,7 +2922,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_ring_ctx *ctx = req->ctx; req_set_fail(req); - if (!(issue_flags & IO_URING_F_NONBLOCK)) { + if (issue_flags & IO_URING_F_UNLOCKED) { mutex_lock(&ctx->uring_lock); __io_req_complete(req, issue_flags, ret, cflags); mutex_unlock(&ctx->uring_lock); @@ -3031,7 +3032,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, { struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; - bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK); + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; if (req->flags & REQ_F_BUFFER_SELECTED) return kbuf; @@ -3336,7 +3337,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) int ret; /* submission path, ->uring_lock should already be taken */ - ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK); + ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); if (unlikely(ret < 0)) return ret; @@ -4309,9 +4310,9 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head; int ret = 0; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; - io_ring_submit_lock(ctx, !force_nonblock); + io_ring_submit_lock(ctx, needs_lock); lockdep_assert_held(&ctx->uring_lock); @@ -4324,7 +4325,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) /* complete before unlock, IOPOLL may need the lock */ __io_req_complete(req, issue_flags, ret, 0); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); return 0; } @@ -4396,9 +4397,9 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head, *list; int ret = 0; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; - io_ring_submit_lock(ctx, !force_nonblock); + 
io_ring_submit_lock(ctx, needs_lock); lockdep_assert_held(&ctx->uring_lock); @@ -4414,7 +4415,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(req); /* complete before unlock, IOPOLL may need the lock */ __io_req_complete(req, issue_flags, ret, 0); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); return 0; } @@ -6271,6 +6272,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; u64 sqe_addr = req->cancel.addr; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_tctx_node *node; int ret; @@ -6279,7 +6281,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) goto done; /* slow path, try all io-wq's */ - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = -ENOENT; list_for_each_entry(node, &ctx->tctx_list, ctx_node) { struct io_uring_task *tctx = node->task->io_uring; @@ -6288,7 +6290,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) if (ret != -ENOENT) break; } - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); done: if (ret < 0) req_set_fail(req); @@ -6315,6 +6317,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req, static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_uring_rsrc_update2 up; int ret; @@ -6324,10 +6327,10 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) up.tags = 0; up.resv = 0; - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, req->rsrc_update.nr_args); - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); if (ret < 0) req_set_fail(req); @@ -6737,7 +6740,7 @@ static void io_wq_submit_work(struct io_wq_work *work) if (!ret) { do { - ret = io_issue_sqe(req, 0); + ret = io_issue_sqe(req, IO_URING_F_UNLOCKED); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -8326,12 +8329,12 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file, unsigned int issue_flags, u32 slot_index) { struct io_ring_ctx *ctx = req->ctx; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; bool needs_switch = false; struct io_fixed_file *file_slot; int ret = -EBADF; - io_ring_submit_lock(ctx, !force_nonblock); + io_ring_submit_lock(ctx, needs_lock); if (file->f_op == &io_uring_fops) goto err; ret = -ENXIO; @@ -8372,7 +8375,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file, err: if (needs_switch) io_rsrc_node_switch(ctx, ctx->file_data); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); if (ret) fput(file); return ret; @@ -8382,11 +8385,12 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) { unsigned int offset = req->close.file_slot - 1; struct io_ring_ctx *ctx = req->ctx; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_fixed_file *file_slot; struct file *file; int ret, i; - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = -ENXIO; if 
(unlikely(!ctx->file_data)) goto out; @@ -8412,7 +8416,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) io_rsrc_node_switch(ctx, ctx->file_data); ret = 0; out: - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); return ret; } -- cgit v1.2.3 From 898df2447b9ee8d759e85d33087505d3905bf2f0 Mon Sep 17 00:00:00 2001 From: Changcheng Deng Date: Wed, 20 Oct 2021 08:49:48 +0000 Subject: io_uring: Use ERR_CAST() instead of ERR_PTR(PTR_ERR()) Use ERR_CAST() instead of ERR_PTR(PTR_ERR()). This makes the code more readable and also fixes this warning detected by err_cast.cocci: ./fs/io_uring.c: WARNING: 3208: 11-18: ERR_CAST can be used with buf Reported-by: Zeal Robot Signed-off-by: Changcheng Deng Link: https://lore.kernel.org/r/20211020084948.1038420-1-deng.changcheng@zte.com.cn Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index c87931d8b503..88c5ee4dc242 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3172,7 +3172,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) - return ERR_PTR(PTR_ERR(buf)); + return ERR_CAST(buf); req->rw.len = sqe_len; } -- cgit v1.2.3 From 90fa02883f063b971ebfd9f5b2184b38b83b7ee3 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 18 Oct 2021 21:34:45 +0800 Subject: io_uring: implement async hybrid mode for pollable requests The current logic for requests with IOSQE_ASYNC is to first queue them to an io-worker, then execute them synchronously. For unbound work like pollable requests (e.g. reads/writes on a socket fd), the io-worker may get stuck waiting for events for a long time, and thus other work items wait in the list for a long time too. Let's introduce a new mode for unbound work (currently pollable requests): a request is still queued to an io-worker first, but is then issued with a nonblocking attempt rather than synchronously. If that fails, the worker arms the poll machinery and can move on to other work. The detailed flow for this kind of request is: step 1: original context: queue it to an io-worker step 2: io-worker context: nonblocking try (the old logic does a synchronous try here) | |--fail--> arm poll | |--(fail/ready)--> synchronous issue | |--(succeed)--> worker finishes its job, task work takes over the req This works much better than the old IOSQE_ASYNC logic in cases where unbound max_worker is relatively small. In that case the number of io-workers easily climbs to max_worker, no new workers can be created, and the running workers are stuck handling old work in IOSQE_ASYNC mode.
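For illustration only, and not part of the patch: a minimal userspace sketch, assuming liburing is available, of the kind of submission this mode targets, namely an IOSQE_ASYNC recv on a pollable socket fd. Error handling is trimmed for brevity.

/*
 * Illustrative sketch, assuming liburing. An IOSQE_ASYNC recv goes to
 * io-wq, where this patch makes the worker do a nonblocking try first
 * and arm poll on failure instead of blocking.
 */
#include <liburing.h>

static int queue_async_recv(struct io_uring *ring, int sockfd,
			    void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_recv(sqe, sockfd, buf, len, 0);
	/* force io-wq execution, the case this patch optimizes */
	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
	return io_uring_submit(ring);
}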
On my 64-core machine, with unbound max_worker set to 20, an echo-server run gives: (arguments: register_file, connection number is 1000, message size is 12 bytes) original IOSQE_ASYNC: 76664.151 tps after this patch: 166934.985 tps Suggested-by: Jens Axboe Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20211018133445.103438-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 88c5ee4dc242..736d456e7913 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6739,8 +6739,18 @@ static void io_wq_submit_work(struct io_wq_work *work) ret = -ECANCELED; if (!ret) { + bool needs_poll = false; + unsigned int issue_flags = IO_URING_F_UNLOCKED; + + if (req->flags & REQ_F_FORCE_ASYNC) { + needs_poll = req->file && file_can_poll(req->file); + if (needs_poll) + issue_flags |= IO_URING_F_NONBLOCK; + } + do { - ret = io_issue_sqe(req, IO_URING_F_UNLOCKED); +issue_sqe: + ret = io_issue_sqe(req, issue_flags); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -6748,6 +6758,30 @@ static void io_wq_submit_work(struct io_wq_work *work) */ if (ret != -EAGAIN) break; + if (needs_poll) { + bool armed = false; + + ret = 0; + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + goto issue_sqe; + case IO_APOLL_ABORTED: + /* + * somehow we failed to arm the poll infra, + * fallback it to a normal async worker try. + */ + break; + case IO_APOLL_OK: + armed = true; + break; + } + + if (armed) + break; + } cond_resched(); } while (1); } -- cgit v1.2.3 From 255657d237042fd51673aef6f22463f662f9933f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:56 +0100 Subject: io_uring: clean io_wq_submit_work()'s main loop Do a bit of cleaning for the main loop of io_wq_submit_work(). Get rid of the switch and replace it with a single if, since we're retrying in both of the other cases. Kill the issue_sqe label, get rid of the needs_poll nesting, and disambiguate the comment a bit. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ed12ce0c64e051f9a6b8a37a24f8ea554d299c29.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 40 ++++++++++++---------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 736d456e7913..7f92523c1282 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6749,40 +6749,24 @@ static void io_wq_submit_work(struct io_wq_work *work) do { -issue_sqe: ret = io_issue_sqe(req, issue_flags); + if (ret != -EAGAIN) + break; /* - * We can get EAGAIN for polled IO even though we're + * We can get EAGAIN for iopolled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. */ - if (ret != -EAGAIN) - break; - if (needs_poll) { - bool armed = false; - - ret = 0; - needs_poll = false; - issue_flags &= ~IO_URING_F_NONBLOCK; - - switch (io_arm_poll_handler(req)) { - case IO_APOLL_READY: - goto issue_sqe; - case IO_APOLL_ABORTED: - /* - * somehow we failed to arm the poll infra, - * fallback it to a normal async worker try.
- */ - break; - case IO_APOLL_OK: - armed = true; - break; - } - - if (armed) - break; + if (!needs_poll) { + cond_resched(); + continue; } - cond_resched(); + + if (io_arm_poll_handler(req) == IO_APOLL_OK) + return; + /* aborted or ready, in either case retry blocking */ + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; } while (1); } -- cgit v1.2.3 From d01905db14eb6223dd1c375001f4daa26cb15c1f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:57 +0100 Subject: io_uring: clean iowq submit work cancellation If we've got IO_WQ_WORK_CANCEL in io_wq_submit_work(), handle the error on the same lines as the check instead of having a weird code flow. The main loop doesn't change but moves one indentation level to the left. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ff4a09cf41f7a22bbb294b6f1faea721e21fe615.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 59 +++++++++++++++++++++++++++++------------------------------ 1 file changed, 29 insertions(+), 30 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 7f92523c1282..58cb3a14d58e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6721,6 +6721,8 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work) static void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); + unsigned int issue_flags = IO_URING_F_UNLOCKED; + bool needs_poll = false; struct io_kiocb *timeout; int ret = 0; @@ -6735,40 +6737,37 @@ static void io_wq_submit_work(struct io_wq_work *work) io_queue_linked_timeout(timeout); /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ - if (work->flags & IO_WQ_WORK_CANCEL) - ret = -ECANCELED; + if (work->flags & IO_WQ_WORK_CANCEL) { + io_req_task_queue_fail(req, -ECANCELED); + return; + } - if (!ret) { - bool needs_poll = false; - unsigned int issue_flags = IO_URING_F_UNLOCKED; + if (req->flags & REQ_F_FORCE_ASYNC) { + needs_poll = req->file && file_can_poll(req->file); + if (needs_poll) + issue_flags |= IO_URING_F_NONBLOCK; + } - if (req->flags & REQ_F_FORCE_ASYNC) { - needs_poll = req->file && file_can_poll(req->file); - if (needs_poll) - issue_flags |= IO_URING_F_NONBLOCK; + do { + ret = io_issue_sqe(req, issue_flags); + if (ret != -EAGAIN) + break; + /* + * We can get EAGAIN for iopolled IO even though we're + * forcing a sync submission from here, since we can't + * wait for request slots on the block side. + */ + if (!needs_poll) { + cond_resched(); + continue; } - do { - ret = io_issue_sqe(req, issue_flags); - if (ret != -EAGAIN) - break; - /* - * We can get EAGAIN for iopolled IO even though we're - * forcing a sync submission from here, since we can't - * wait for request slots on the block side.
- */ - if (!needs_poll) { - cond_resched(); - continue; - } - - if (io_arm_poll_handler(req) == IO_APOLL_OK) - return; - /* aborted or ready, in either case retry blocking */ - needs_poll = false; - issue_flags &= ~IO_URING_F_NONBLOCK; - } while (1); - } + if (io_arm_poll_handler(req) == IO_APOLL_OK) + return; + /* aborted or ready, in either case retry blocking */ + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; } while (1); /* avoid locking problems by failing it from a clean context */ if (ret) -- cgit v1.2.3 From 658d0a401637ffbc733b0f03723e9e1255289429 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:58 +0100 Subject: io_uring: check if opcode needs poll first on arming ->pollout or ->pollin are set only for opcodes that need a file, so if io_arm_poll_handler() tests them first we can be sure that the request has a file set and the ->file check can be removed. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9adfe4f543d984875e516fce6da35348aab48668.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 58cb3a14d58e..bff911f951ed 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5584,12 +5584,10 @@ static int io_arm_poll_handler(struct io_kiocb *req) struct io_poll_table ipt; __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI; - if (!req->file || !file_can_poll(req->file)) - return IO_APOLL_ABORTED; - if (req->flags & REQ_F_POLLED) - return IO_APOLL_ABORTED; + if (!def->pollin && !def->pollout) + return IO_APOLL_ABORTED; + if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED)) + return IO_APOLL_ABORTED; if (def->pollin) { mask |= POLLIN | POLLRDNORM; -- cgit v1.2.3 From afb7f56fc624fb6ade8fde70a67eda4d831b4ed0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:59 +0100 Subject: io_uring: don't try io-wq polling if not supported If an opcode doesn't support polling, just let it be executed synchronously in iowq, otherwise it will do a nonblock attempt just to fail in io_arm_poll_handler() and fall back to blocking execution. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6401256db01b88f448f15fcd241439cb76f5b940.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index bff911f951ed..c6f32fcf387b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6741,9 +6741,13 @@ static void io_wq_submit_work(struct io_wq_work *work) } if (req->flags & REQ_F_FORCE_ASYNC) { - needs_poll = req->file && file_can_poll(req->file); - if (needs_poll) + const struct io_op_def *def = &io_op_defs[req->opcode]; + bool opcode_poll = def->pollin || def->pollout; + + if (opcode_poll && file_can_poll(req->file)) { + needs_poll = true; issue_flags |= IO_URING_F_NONBLOCK; + } } do { -- cgit v1.2.3 From d6a644a795451d5fd063a5c08d6bb3a91d021887 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:00 +0100 Subject: io_uring: clean up timeout async_data allocation Opcode prep functions are among the first things called; we can't have ->async_data allocated at that point, and if we do, it's certainly a bug. Reflect this assumption in io_timeout_prep() and add a WARN_ON_ONCE just in case.
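To illustrate the invariant-check pattern described above, here is a hypothetical sketch; the sketch_* names are invented for illustration and this is not the io_uring code itself.

#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/types.h>

/* hypothetical types, for illustration only */
struct sketch_timeout_data {
	u64 target_seq;
};

struct sketch_req {
	void *async_data;
};

static int sketch_timeout_prep(struct sketch_req *req)
{
	/* prep runs before any async setup; data already here is a bug */
	if (WARN_ON_ONCE(req->async_data))
		return -EFAULT;
	req->async_data = kzalloc(sizeof(struct sketch_timeout_data),
				  GFP_KERNEL);
	if (!req->async_data)
		return -ENOMEM;
	return 0;
}

The design point is that a should-never-happen state gets a loud, one-time warning plus a clean error return, rather than being silently tolerated by an allocate-if-missing check.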
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/75a28ca7dbcc5af8b6cd9092819e8384c24dedd4.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index c6f32fcf387b..e775529a36d8 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6113,7 +6113,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(off && !req->ctx->off_timeout_used)) req->ctx->off_timeout_used = true; - if (!req_has_async_data(req) && io_alloc_async_data(req)) + if (WARN_ON_ONCE(req_has_async_data(req))) + return -EFAULT; + if (io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; -- cgit v1.2.3 From b9a6b8f92f6feebf40609e4f5f22a3c0404afb60 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:01 +0100 Subject: io_uring: kill unused param from io_file_supports_nowait io_file_supports_nowait() doesn't use the rw argument anymore, so remove it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4bd6709fc573d70c866ea656cb7a7dbe94be8026.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index e775529a36d8..7042ed870b52 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2809,8 +2809,7 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req) return req->flags & REQ_F_SUPPORT_NOWAIT; } -static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, - int rw) +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw.kiocb; @@ -3352,7 +3351,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - return io_prep_rw(req, sqe, READ); + return io_prep_rw(req, sqe); } /* @@ -3568,7 +3567,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - return io_prep_rw(req, sqe, WRITE); + return io_prep_rw(req, sqe); } static int io_write(struct io_kiocb *req, unsigned int issue_flags) -- cgit v1.2.3 From fb27274a90eac5a687fe73229775ad36df737d8b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:02 +0100 Subject: io_uring: clusterise ki_flags access in rw_prep ioprio setup doesn't depend on other fields that are modified in io_prep_rw(), so we can move it down in the function without worrying about performance. It's useful because it brings the iocb->ki_flags accesses/modifications closer together, making it more likely that the compiler will cache the value in a register and avoid extra reloads.
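As a rough illustration of that locality argument, a hypothetical sketch (the sketch_* names are invented; sketch_check_prio() stands in for an opaque call like ioprio_check_cap(); this is not the kernel code):

/* an opaque external call the compiler cannot see through */
int sketch_check_prio(int ioprio);

struct sketch_kiocb {
	unsigned int ki_flags;
	int ki_ioprio;
};

/*
 * scattered: the opaque call between the two ki_flags updates forces
 * the compiler to store ki_flags to memory and reload it afterwards
 */
static void sketch_setup_scattered(struct sketch_kiocb *k, unsigned int f1,
				   unsigned int f2, int ioprio)
{
	k->ki_flags |= f1;
	if (sketch_check_prio(ioprio) == 0)
		k->ki_ioprio = ioprio;
	k->ki_flags |= f2;
}

/*
 * clustered: both ki_flags updates are adjacent, so the value can stay
 * in a register for a single read-modify-write
 */
static void sketch_setup_clustered(struct sketch_kiocb *k, unsigned int f1,
				   unsigned int f2, int ioprio)
{
	k->ki_flags |= f1 | f2;
	if (sketch_check_prio(ioprio) == 0)
		k->ki_ioprio = ioprio;
}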
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8ee98779c06f1b59f6039b1e292db4332efd664b.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index 7042ed870b52..bba2f77ae7e7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2840,16 +2840,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) req->flags |= REQ_F_NOWAIT; - ioprio = READ_ONCE(sqe->ioprio); - if (ioprio) { - ret = ioprio_check_cap(ioprio); - if (ret) - return ret; - - kiocb->ki_ioprio = ioprio; - } else - kiocb->ki_ioprio = get_current_ioprio(); - if (ctx->flags & IORING_SETUP_IOPOLL) { if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) return -EOPNOTSUPP; @@ -2863,6 +2853,17 @@ kiocb->ki_complete = io_complete_rw; } + ioprio = READ_ONCE(sqe->ioprio); + if (ioprio) { + ret = ioprio_check_cap(ioprio); + if (ret) + return ret; + + kiocb->ki_ioprio = ioprio; + } else { + kiocb->ki_ioprio = get_current_ioprio(); + } + req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); -- cgit v1.2.3 From 3884b83dff245e41def99ceacca8ed2056baf0a8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Oct 2021 13:45:12 -0600 Subject: io_uring: don't assign write hint in the read path Move this out of the generic read/write prep path, and place it in the write-specific kiocb setup instead. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index bba2f77ae7e7..d001cd7a6c51 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2825,7 +2825,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->flags |= REQ_F_CUR_POS; kiocb->ki_pos = file->f_pos; } - kiocb->ki_hint = ki_hint_validate(file_write_hint(file)); kiocb->ki_flags = iocb_flags(file); ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); if (unlikely(ret)) @@ -3568,6 +3567,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; + req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file)); return io_prep_rw(req, sqe); } -- cgit v1.2.3 From f75d118349be055d47407b4ba4ceb98e6437e472 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 29 Oct 2021 06:36:45 -0600 Subject: io_uring: harder fdinfo sq/cq ring iterating The ring iteration is racy, which isn't necessarily a problem except that it can cause us to iterate the whole thing. That isn't desired or ideal, and it can lead to excessive runtimes when reading fdinfo. Cap the iteration at tail - head or the ring size, whichever is smaller. While in there, clean up the ring masking and just dump the raw values along with the masks. That provides more useful debug info.
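To see why capping at min(tail - head, ring entries) is safe even with racing, wrapping counters, a small standalone sketch (illustrative only, not the kernel code):

#include <stdio.h>

/*
 * Wrap-safe: for unsigned counters, tail - head is the number of queued
 * entries even after the 32-bit values wrap, and the min() bounds a
 * torn or racy snapshot at one full ring's worth of iteration.
 */
static unsigned int ring_used(unsigned int head, unsigned int tail,
			      unsigned int ring_entries)
{
	unsigned int used = tail - head;

	return used < ring_entries ? used : ring_entries;
}

int main(void)
{
	/* tail wrapped past UINT_MAX, head not yet: still 12 entries */
	printf("%u\n", ring_used(4294967290u, 6u, 4096));
	/* torn snapshot with head > tail: capped at one full ring */
	printf("%u\n", ring_used(100u, 50u, 4096));
	return 0;
}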
Fixes: 83f84356bc8f ("io_uring: add more uring info to fdinfo for debug") Reported-by: Eric Dumazet Signed-off-by: Jens Axboe --- fs/io_uring.c | 51 +++++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 22 deletions(-) (limited to 'fs/io_uring.c') diff --git a/fs/io_uring.c b/fs/io_uring.c index d001cd7a6c51..c887e4e19e9e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -10056,12 +10056,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct io_overflow_cqe *ocqe; struct io_rings *r = ctx->rings; unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; - unsigned int cached_sq_head = ctx->cached_sq_head; - unsigned int cached_cq_tail = ctx->cached_cq_tail; unsigned int sq_head = READ_ONCE(r->sq.head); unsigned int sq_tail = READ_ONCE(r->sq.tail); unsigned int cq_head = READ_ONCE(r->cq.head); unsigned int cq_tail = READ_ONCE(r->cq.tail); + unsigned int sq_entries, cq_entries; bool has_lock; unsigned int i; @@ -10071,29 +10070,37 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, * and sq_tail and cq_head are changed by userspace. But it's ok since * we usually use these info when it is stuck. */ - seq_printf(m, "SqHead:\t%u\n", sq_head & sq_mask); - seq_printf(m, "SqTail:\t%u\n", sq_tail & sq_mask); - seq_printf(m, "CachedSqHead:\t%u\n", cached_sq_head & sq_mask); - seq_printf(m, "CqHead:\t%u\n", cq_head & cq_mask); - seq_printf(m, "CqTail:\t%u\n", cq_tail & cq_mask); - seq_printf(m, "CachedCqTail:\t%u\n", cached_cq_tail & cq_mask); - seq_printf(m, "SQEs:\t%u\n", sq_tail - cached_sq_head); - for (i = cached_sq_head; i < sq_tail; i++) { - unsigned int sq_idx = READ_ONCE(ctx->sq_array[i & sq_mask]); - - if (likely(sq_idx <= sq_mask)) { - struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; - - seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", - sq_idx, sqe->opcode, sqe->fd, sqe->flags, sqe->user_data); - } + seq_printf(m, "SqMask:\t\t0x%x\n", sq_mask); + seq_printf(m, "SqHead:\t%u\n", sq_head); + seq_printf(m, "SqTail:\t%u\n", sq_tail); + seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); + seq_printf(m, "CqMask:\t0x%x\n", cq_mask); + seq_printf(m, "CqHead:\t%u\n", cq_head); + seq_printf(m, "CqTail:\t%u\n", cq_tail); + seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); + seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head); + sq_entries = min(sq_tail - sq_head, ctx->sq_entries); + for (i = 0; i < sq_entries; i++) { + unsigned int entry = i + sq_head; + unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]); + struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; + + if (sq_idx > sq_mask) + continue; + sqe = &ctx->sq_sqes[sq_idx]; + seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", + sq_idx, sqe->opcode, sqe->fd, sqe->flags, + sqe->user_data); } - seq_printf(m, "CQEs:\t%u\n", cached_cq_tail - cq_head); - for (i = cq_head; i < cached_cq_tail; i++) { - struct io_uring_cqe *cqe = &r->cqes[i & cq_mask]; + seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head); + cq_entries = min(cq_tail - cq_head, ctx->cq_entries); + for (i = 0; i < cq_entries; i++) { + unsigned int entry = i + cq_head; + struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask]; seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n", - i & cq_mask, cqe->user_data, cqe->res, cqe->flags); + entry & cq_mask, cqe->user_data, cqe->res, + cqe->flags); } /* -- cgit v1.2.3
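The SqMask/SqHead/CqTail lines the patch above emits can be inspected from userspace through procfs; a small sketch, for illustration only, assuming a Linux /proc and a known pid plus io_uring fd number:

#include <stdio.h>

/* dump /proc/<pid>/fdinfo/<fd> for an io_uring fd, line by line */
static void dump_uring_fdinfo(int pid, int fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d", pid, fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* SqMask, SqHead, SQEs, CQEs, ... */
	fclose(f);
}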