author     Linus Torvalds <torvalds@linux-foundation.org>  2023-06-26 22:30:26 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-06-26 22:30:26 +0300
commit     0aa69d53ac7c30f6184f88f2e310d808b32b35a5 (patch)
tree       26a666a128578dd1c46f39b432543411caa0d6eb /io_uring/net.c
parent     3eccc0c886b1796f95a289c9d127c8ca1a254bd5 (diff)
parent     c98c81a4ac37b651be7eb9d16f562fc4acc5f867 (diff)
Merge tag 'for-6.5/io_uring-2023-06-23' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
 "Nothing major in this release, just a bunch of cleanups and some
  optimizations, mostly around networking.

   - clean up file request flags handling (Christoph)

   - clean up request freeing and CQ locking (Pavel)

   - support for pre-registering the io_uring fd at setup time (Josh)

   - add support for user allocated ring memory, rather than having the
     kernel allocate it. Mostly for packing rings into a huge page (me)

   - avoid an unnecessary double retry on receive (me)

   - maintain ordering for task_work, which also improves performance (me)

   - misc cleanups/fixes (Pavel, me)"

* tag 'for-6.5/io_uring-2023-06-23' of git://git.kernel.dk/linux: (39 commits)
  io_uring: merge conditional unlock flush helpers
  io_uring: make io_cq_unlock_post static
  io_uring: inline __io_cq_unlock
  io_uring: fix acquire/release annotations
  io_uring: kill io_cq_unlock()
  io_uring: remove IOU_F_TWQ_FORCE_NORMAL
  io_uring: don't batch task put on reqs free
  io_uring: move io_clean_op()
  io_uring: inline io_dismantle_req()
  io_uring: remove io_free_req_tw
  io_uring: open code io_put_req_find_next
  io_uring: add helpers to decode the fixed file file_ptr
  io_uring: use io_file_from_index in io_msg_grab_file
  io_uring: use io_file_from_index in __io_sync_cancel
  io_uring: return REQ_F_ flags from io_file_get_flags
  io_uring: remove io_req_ffs_set
  io_uring: remove a confusing comment above io_file_get_flags
  io_uring: remove the mode variable in io_file_get_flags
  io_uring: remove __io_file_supports_nowait
  io_uring: wait interruptibly for request completions on exit
  ...
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--  io_uring/net.c  58
1 file changed, 32 insertions(+), 26 deletions(-)
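The hunks below rework how io_recv_finish() decides whether to retry a multishot receive: the raw msghdr is now passed in, so the kernel can use msg_inq (with -1U meaning "unknown") both to set IORING_CQE_F_SOCK_NONEMPTY and to skip an immediate retry that would only hit -EAGAIN. From userspace this is visible only through the CQE flags. As a point of reference, here is a minimal sketch (not part of this commit) of consuming multishot receive completions with liburing; it assumes an initialized ring and a provided-buffer ring already registered under the hypothetical group id BGID (setup not shown), and recv_multishot_loop/sockfd are illustrative names.

/*
 * Minimal sketch: consume multishot recv CQEs.  Buffer-ring setup
 * and error handling are omitted; BGID is a hypothetical group id.
 */
#include <liburing.h>
#include <stdio.h>

#define BGID 0	/* hypothetical buffer group id */

static void recv_multishot_loop(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* One SQE keeps posting CQEs until the kernel clears F_MORE */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	for (;;) {
		struct io_uring_cqe *cqe;

		if (io_uring_wait_cqe(ring, &cqe))
			break;

		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			/*
			 * cqe->res bytes landed in provided buffer `bid`;
			 * SOCK_NONEMPTY hints that more data is queued.
			 */
			printf("%d bytes in buffer %u%s\n", cqe->res, bid,
			       (cqe->flags & IORING_CQE_F_SOCK_NONEMPTY) ?
			       " (socket not yet drained)" : "");
		}

		/* No F_MORE: the multishot request terminated; re-arm or bail. */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}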
diff --git a/io_uring/net.c b/io_uring/net.c
index c8a4b2ac00f7..a8e303796f16 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -624,9 +624,15 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
* again (for multishot).
*/
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
- unsigned int cflags, bool mshot_finished,
+ struct msghdr *msg, bool mshot_finished,
unsigned issue_flags)
{
+ unsigned int cflags;
+
+ cflags = io_put_kbuf(req, issue_flags);
+ if (msg->msg_inq && msg->msg_inq != -1U)
+ cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, *ret, cflags);
*ret = IOU_OK;
@@ -634,10 +640,18 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
if (!mshot_finished) {
- if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ *ret, cflags | IORING_CQE_F_MORE, true)) {
io_recv_prep_retry(req);
- return false;
+ /* Known not-empty or unknown state, retry */
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+ msg->msg_inq == -1U)
+ return false;
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ *ret = IOU_ISSUE_SKIP_COMPLETE;
+ else
+ *ret = -EAGAIN;
+ return true;
}
/* Otherwise stop multishot but use the current result. */
}
@@ -740,7 +754,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
- unsigned int cflags;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -791,6 +804,7 @@ retry_multishot:
flags |= MSG_DONTWAIT;
kmsg->msg.msg_get_inq = 1;
+ kmsg->msg.msg_inq = -1U;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
&mshot_finished);
@@ -831,11 +845,7 @@ retry_multishot:
else
io_kbuf_recycle(req, issue_flags);
- cflags = io_put_kbuf(req, issue_flags);
- if (kmsg->msg.msg_inq)
- cflags |= IORING_CQE_F_SOCK_NONEMPTY;
-
- if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
+ if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished) {
@@ -854,7 +864,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct socket *sock;
- unsigned int cflags;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -871,6 +880,14 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_get_inq = 1;
+ msg.msg_controllen = 0;
+ msg.msg_iocb = NULL;
+ msg.msg_ubuf = NULL;
+
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
@@ -885,14 +902,8 @@ retry_multishot:
if (unlikely(ret))
goto out_free;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_get_inq = 1;
+ msg.msg_inq = -1U;
msg.msg_flags = 0;
- msg.msg_controllen = 0;
- msg.msg_iocb = NULL;
- msg.msg_ubuf = NULL;
flags = sr->msg_flags;
if (force_nonblock)
@@ -932,11 +943,7 @@ out_free:
else
io_kbuf_recycle(req, issue_flags);
- cflags = io_put_kbuf(req, issue_flags);
- if (msg.msg_inq)
- cflags |= IORING_CQE_F_SOCK_NONEMPTY;
-
- if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
+ if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
goto retry_multishot;
return ret;
@@ -1308,7 +1315,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1358,8 +1364,8 @@ retry:
if (ret < 0)
return ret;
- if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+ IORING_CQE_F_MORE, true))
goto retry;
return -ECANCELED;