From bf34e697931f64b21c82232e98b3d1f566214e40 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 17 May 2023 12:15:00 -0600
Subject: io_uring/net: initialize struct msghdr more sanely for io_recv()

We only need to clear the input fields on the first invocation, not when
potentially doing a retry.

Signed-off-by: Jens Axboe
---
 io_uring/net.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 89e839013837..08fe42673b75 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -860,6 +860,14 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         if (unlikely(!sock))
                 return -ENOTSOCK;
 
+        msg.msg_name = NULL;
+        msg.msg_namelen = 0;
+        msg.msg_control = NULL;
+        msg.msg_get_inq = 1;
+        msg.msg_controllen = 0;
+        msg.msg_iocb = NULL;
+        msg.msg_ubuf = NULL;
+
 retry_multishot:
         if (io_do_buffer_select(req)) {
                 void __user *buf;
@@ -874,14 +882,7 @@ retry_multishot:
         if (unlikely(ret))
                 goto out_free;
 
-        msg.msg_name = NULL;
-        msg.msg_namelen = 0;
-        msg.msg_control = NULL;
-        msg.msg_get_inq = 1;
         msg.msg_flags = 0;
-        msg.msg_controllen = 0;
-        msg.msg_iocb = NULL;
-        msg.msg_ubuf = NULL;
 
         flags = sr->msg_flags;
         if (force_nonblock)
--
cgit v1.2.3

From 88fc8b8463b024df556d5c4245f2c273f22d83a1 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 17 May 2023 12:18:13 -0600
Subject: io_uring/net: initialize msghdr->msg_inq to known value

We can't currently tell if ->msg_inq was set when we ask for msg_get_inq.
Initialize it to -1U so we can tell apart whether it was set with no data
left, or whether it just wasn't set at all by the protocol.

Signed-off-by: Jens Axboe
---
 io_uring/net.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 08fe42673b75..45f9c3046d67 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -785,6 +785,7 @@ retry_multishot:
                 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
         kmsg->msg.msg_get_inq = 1;
+        kmsg->msg.msg_inq = -1U;
         if (req->flags & REQ_F_APOLL_MULTISHOT)
                 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
                                            &mshot_finished);
@@ -821,7 +822,7 @@ retry_multishot:
                 io_kbuf_recycle(req, issue_flags);
 
         cflags = io_put_kbuf(req, issue_flags);
-        if (kmsg->msg.msg_inq)
+        if (kmsg->msg.msg_inq && kmsg->msg.msg_inq != -1U)
                 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
         if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
@@ -882,6 +883,7 @@ retry_multishot:
         if (unlikely(ret))
                 goto out_free;
 
+        msg.msg_inq = -1U;
         msg.msg_flags = 0;
 
         flags = sr->msg_flags;
@@ -923,7 +925,7 @@ out_free:
                 io_kbuf_recycle(req, issue_flags);
 
         cflags = io_put_kbuf(req, issue_flags);
-        if (msg.msg_inq)
+        if (msg.msg_inq && msg.msg_inq != -1U)
                 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
         if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
--
cgit v1.2.3
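
Taken together, the two patches above boil down to a general C pattern: set the invariant fields of a struct once, outside the retry loop, and prime any callee-reported field with an impossible sentinel so that "not reported" and "reported as zero" stay distinguishable. Below is a minimal userspace sketch of that pattern against a connected TCP socket; drain_nonblock() and the use of the SIOCINQ ioctl (the userspace analogue of msg_inq for TCP) are illustrative assumptions, not code from the patches.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/sockios.h>

/* Drain whatever is currently queued on connected TCP socket 'fd'. */
static ssize_t drain_nonblock(int fd, char *buf, size_t len)
{
        struct msghdr msg;
        struct iovec iov;
        ssize_t total = 0;

        /* Invariant fields: cleared once, not on every retry. */
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        for (;;) {
                int inq = -1;   /* sentinel: "not reported" */
                ssize_t ret;

                /* Only the per-attempt state is reset inside the loop. */
                iov.iov_base = buf;
                iov.iov_len = len;
                msg.msg_flags = 0;

                ret = recvmsg(fd, &msg, MSG_DONTWAIT);
                if (ret <= 0)
                        return total ? total : ret;
                total += ret;

                /* 0 is "known empty"; a failed ioctl leaves the sentinel,
                 * i.e. "unknown", and we simply try the receive again. */
                if (ioctl(fd, SIOCINQ, &inq) == 0 && inq == 0)
                        return total;
        }
}
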
From 7d41bcb7f32fbeac05d6fab553821a228af18bee Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 17 May 2023 12:20:44 -0600
Subject: io_uring/net: push IORING_CQE_F_SOCK_NONEMPTY into io_recv_finish()

Rather than have this logic in both io_recv() and io_recvmsg_multishot(),
push it into the handler they both call when finishing a receive
operation.

Signed-off-by: Jens Axboe
---
 io_uring/net.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 45f9c3046d67..9e0034771dbb 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -616,9 +616,15 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
  * again (for multishot).
  */
 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
-                                  unsigned int cflags, bool mshot_finished,
+                                  struct msghdr *msg, bool mshot_finished,
                                   unsigned issue_flags)
 {
+        unsigned int cflags;
+
+        cflags = io_put_kbuf(req, issue_flags);
+        if (msg->msg_inq && msg->msg_inq != -1U)
+                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+
         if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                 io_req_set_res(req, *ret, cflags);
                 *ret = IOU_OK;
@@ -732,7 +738,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
         struct io_async_msghdr iomsg, *kmsg;
         struct socket *sock;
-        unsigned int cflags;
         unsigned flags;
         int ret, min_ret = 0;
         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -821,11 +826,7 @@ retry_multishot:
         else
                 io_kbuf_recycle(req, issue_flags);
 
-        cflags = io_put_kbuf(req, issue_flags);
-        if (kmsg->msg.msg_inq && kmsg->msg.msg_inq != -1U)
-                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
-
-        if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
+        if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
                 goto retry_multishot;
 
         if (mshot_finished) {
@@ -844,7 +845,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
         struct msghdr msg;
         struct socket *sock;
-        unsigned int cflags;
         unsigned flags;
         int ret, min_ret = 0;
         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -924,11 +924,7 @@ out_free:
         else
                 io_kbuf_recycle(req, issue_flags);
 
-        cflags = io_put_kbuf(req, issue_flags);
-        if (msg.msg_inq && msg.msg_inq != -1U)
-                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
-
-        if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
+        if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
                 goto retry_multishot;
 
         return ret;
--
cgit v1.2.3
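
The shape of this refactor is the classic one: two call sites computed the same completion flags, so the computation moves into the one helper both already call. A hedged sketch of the resulting single point of truth, using stand-in names (msg_state, CQE_F_SOCK_NONEMPTY) rather than the kernel's types:

#define CQE_F_SOCK_NONEMPTY     (1U << 0)       /* stand-in flag bit */

struct msg_state {
        unsigned int msg_inq;   /* -1U: unknown, 0: empty, >0: bytes queued */
};

/* The one place that turns receive state into completion flags. */
static unsigned int finish_cflags(const struct msg_state *msg)
{
        unsigned int cflags = 0;

        /* Only a real, non-zero count means data is still queued;
         * -1U is the "never reported" sentinel from the prior patch. */
        if (msg->msg_inq && msg->msg_inq != -1U)
                cflags |= CQE_F_SOCK_NONEMPTY;
        return cflags;
}
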
From a2741c58ac677e5de35bba7dec6376579dd513cd Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 17 May 2023 12:23:41 -0600
Subject: io_uring/net: don't retry recvmsg() unnecessarily

If we're doing multishot receives, then we always end up doing two trips
through sock_recvmsg(). For protocols that sanely set msghdr->msg_inq, we
don't need to waste time picking a new buffer and attempting a new
receive if there's nothing there.

Signed-off-by: Jens Axboe
---
 io_uring/net.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 9e0034771dbb..0795f3783013 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -635,7 +635,15 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                 if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
                                req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
                         io_recv_prep_retry(req);
-                        return false;
+                        /* Known not-empty or unknown state, retry */
+                        if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+                            msg->msg_inq == -1U)
+                                return false;
+                        if (issue_flags & IO_URING_F_MULTISHOT)
+                                *ret = IOU_ISSUE_SKIP_COMPLETE;
+                        else
+                                *ret = -EAGAIN;
+                        return true;
                 }
                 /* Otherwise stop multishot but use the current result. */
         }
--
cgit v1.2.3
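
The new three-way decision reads naturally as a small predicate. The sketch below mirrors the logic of the hunk above with illustrative names (should_rearm_now(), CQE_F_SOCK_NONEMPTY); in the real code the "retry" answer is expressed by returning false from io_recv_finish() so that the caller jumps back to retry_multishot.

#include <stdbool.h>

#define CQE_F_SOCK_NONEMPTY     (1U << 0)       /* stand-in for IORING_CQE_F_SOCK_NONEMPTY */

/*
 * Retry the receive right away only if data is known or might be
 * pending; a known-empty socket means the poll machinery should
 * re-arm and wake us when something actually arrives.
 */
static bool should_rearm_now(unsigned int msg_inq, unsigned int cflags)
{
        /* Known not-empty, or the protocol never filled in msg_inq. */
        if ((cflags & CQE_F_SOCK_NONEMPTY) || msg_inq == -1U)
                return true;
        /* Known empty: skip the buffer pick and the sock_recvmsg()
         * round trip that could only return -EAGAIN. */
        return false;
}
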
From d86eaed185e9c6052d1ee2ca538f1936ff255887 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 7 Jun 2023 14:41:20 -0600
Subject: io_uring: cleanup io_aux_cqe() API

Everybody is passing in the request, so get rid of the io_ring_ctx and
explicit user_data pass-in. Both the ctx and user_data can be deduced
from the request at hand.

Signed-off-by: Jens Axboe
---
 io_uring/io_uring.c | 4 +++-
 io_uring/io_uring.h | 2 +-
 io_uring/net.c      | 9 ++++-----
 io_uring/poll.c     | 4 ++--
 io_uring/timeout.c  | 4 ++--
 5 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fc511cb6761d..08574a86da72 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -935,9 +935,11 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
         return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
 }
 
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
                 bool allow_overflow)
 {
+        struct io_ring_ctx *ctx = req->ctx;
+        u64 user_data = req->cqe.user_data;
         struct io_uring_cqe *cqe;
         unsigned int length;
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9b8dfb3bb2b4..a937b4b75aee 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -47,7 +47,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
                 bool allow_overflow);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
diff --git a/io_uring/net.c b/io_uring/net.c
index 0795f3783013..369167e45fa8 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -632,8 +632,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
         }
 
         if (!mshot_finished) {
-                if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
-                               req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
+                if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+                               *ret, cflags | IORING_CQE_F_MORE, true)) {
                         io_recv_prep_retry(req);
                         /* Known not-empty or unknown state, retry */
                         if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1304,7 +1304,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 {
-        struct io_ring_ctx *ctx = req->ctx;
         struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
         unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1354,8 +1353,8 @@ retry:
         if (ret < 0)
                 return ret;
 
-        if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
-                       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
+        if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+                       IORING_CQE_F_MORE, true))
                 goto retry;
 
         return -ECANCELED;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 9689806d3c16..6b9179e8228e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
                         __poll_t mask = mangle_poll(req->cqe.res &
                                                     req->apoll_events);
 
-                        if (!io_aux_cqe(req->ctx, ts->locked, req->cqe.user_data,
-                                        mask, IORING_CQE_F_MORE, false)) {
+                        if (!io_aux_cqe(req, ts->locked, mask,
+                                        IORING_CQE_F_MORE, false)) {
                                 io_req_set_res(req, mask, 0);
                                 return IOU_POLL_REMOVE_POLL_USE_RES;
                         }
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 350eb830b485..fb0547b35dcd 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 
         if (!io_timeout_finish(timeout, data)) {
                 bool filled;
-                filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
-                                    IORING_CQE_F_MORE, false);
+                filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
+                                    false);
                 if (filled) {
                         /* re-arm timer */
                         spin_lock_irq(&ctx->timeout_lock);
--
cgit v1.2.3
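
The same API-narrowing move in miniature: when every caller passes fields it pulled out of one object, pass the object instead and deduce the fields once inside. The types below (ring_ctx, request) are stand-ins for illustration, not the kernel's io_ring_ctx and io_kiocb.

#include <stdbool.h>

struct ring_ctx;                        /* opaque stand-in for io_ring_ctx */

struct request {                        /* stand-in for io_kiocb */
        struct ring_ctx *ctx;
        unsigned long long user_data;
};

/*
 * Old shape: post_aux_cqe(ctx, user_data, res, cflags) forced every
 * caller to unpack the request.  New shape: hand over the request and
 * unpack exactly once, here.
 */
static bool post_aux_cqe(const struct request *req, int res,
                         unsigned int cflags)
{
        struct ring_ctx *ctx = req->ctx;
        unsigned long long user_data = req->user_data;

        /* ... emit (user_data, res, cflags) into ctx's CQ ring ... */
        (void)ctx; (void)user_data; (void)res; (void)cflags;
        return true;
}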