author    Linus Torvalds <torvalds@linux-foundation.org>  2023-11-10 22:25:58 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-11-10 22:25:58 +0300
commit    b712075e03cf95d9009c99230775dc41195bde8a (patch)
tree      84352bcd4dce58a0b240a26a9d9f6a5fab3ef1aa
parent    4b803784178d1721ba26cadd8aef13b2c7456730 (diff)
parent    e53759298a7d7e98c3e5c2440d395d19cea7d6bf (diff)
download  linux-b712075e03cf95d9009c99230775dc41195bde8a.tar.xz
Merge tag 'io_uring-6.7-2023-11-10' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "Mostly just a few fixes and cleanups caused by the read multishot
  support. Outside of that, a stable fix for how a connect retry is
  done"

* tag 'io_uring-6.7-2023-11-10' of git://git.kernel.dk/linux:
  io_uring: do not clamp read length for multishot read
  io_uring: do not allow multishot read to set addr or len
  io_uring: indicate if io_kbuf_recycle did recycle anything
  io_uring/rw: add separate prep handler for fixed read/write
  io_uring/rw: add separate prep handler for readv/writev
  io_uring/net: ensure socket is marked connected on connect retry
  io_uring/rw: don't attempt to allocate async data if opcode doesn't need it
-rw-r--r--io_uring/kbuf.c6
-rw-r--r--io_uring/kbuf.h13
-rw-r--r--io_uring/net.c24
-rw-r--r--io_uring/opdef.c8
-rw-r--r--io_uring/rw.c72
-rw-r--r--io_uring/rw.h2
6 files changed, 80 insertions, 45 deletions
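
For context on what the multishot read fixes above change in practice, here is a minimal userspace sketch (not part of this merge). It assumes liburing 2.5 or newer, which provides io_uring_setup_buf_ring() and io_uring_prep_read_multishot(); the fd, buffer group id and buffer sizes are placeholders. The request carries no addr/len of its own (the kernel now rejects them), and each completion is sized by the provided buffer it selected rather than being clamped to an earlier buffer's length.

/* Hedged sketch: multishot read from a provided-buffer group, liburing 2.5+ assumed. */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

#define BUFS		8
#define BUF_SIZE	4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *bufs;
	int i, ret;

	io_uring_queue_init(8, &ring, 0);
	br = io_uring_setup_buf_ring(&ring, BUFS, 0, 0, &ret);
	if (!br)
		return 1;

	/* Provide BUFS buffers to group 0; completions will pick from these. */
	bufs = malloc(BUFS * BUF_SIZE);
	for (i = 0; i < BUFS; i++)
		io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(BUFS), i);
	io_uring_buf_ring_advance(br, BUFS);

	sqe = io_uring_get_sqe(&ring);
	/* nbytes is 0: the selected buffer's size is used and is not clamped */
	io_uring_prep_read_multishot(sqe, 0, 0, 0, 0);
	io_uring_submit(&ring);

	while (!io_uring_wait_cqe(&ring, &cqe)) {
		unsigned int flags = cqe->flags;
		int res = cqe->res;
		int bid = (flags & IORING_CQE_F_BUFFER) ?
			  (int)(flags >> IORING_CQE_BUFFER_SHIFT) : -1;

		printf("res=%d buffer=%d more=%d\n", res, bid,
		       !!(flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
		if (res <= 0 || !(flags & IORING_CQE_F_MORE))
			break;
	}
	io_uring_queue_exit(&ring);
	return 0;
}
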
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index fea06810b43d..a1e4239c7d75 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -52,7 +52,7 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
@@ -65,7 +65,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
* multiple use.
*/
if (req->flags & REQ_F_PARTIAL_IO)
- return;
+ return false;
io_ring_submit_lock(ctx, issue_flags);
@@ -76,7 +76,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
req->buf_index = buf->bgid;
io_ring_submit_unlock(ctx, issue_flags);
- return;
+ return true;
}
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index d14345ef61fc..f2d615236b2c 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -53,11 +53,11 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
-static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
/*
* We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
@@ -80,8 +80,10 @@ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
} else {
req->buf_index = req->buf_list->bgid;
req->flags &= ~REQ_F_BUFFER_RING;
+ return true;
}
}
+ return false;
}
static inline bool io_do_buffer_select(struct io_kiocb *req)
@@ -91,12 +93,13 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
-static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
if (req->flags & REQ_F_BUFFER_SELECTED)
- io_kbuf_recycle_legacy(req, issue_flags);
+ return io_kbuf_recycle_legacy(req, issue_flags);
if (req->flags & REQ_F_BUFFER_RING)
- io_kbuf_recycle_ring(req);
+ return io_kbuf_recycle_ring(req);
+ return false;
}
static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
diff --git a/io_uring/net.c b/io_uring/net.c
index 7a8e298af81b..75d494dad7e2 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- if (connect->in_progress) {
- struct socket *socket;
-
- ret = -ENOTSOCK;
- socket = sock_from_file(req->file);
- if (socket)
- ret = sock_error(socket->sk);
- goto out;
- }
-
if (req_has_async_data(req)) {
io = req->async_data;
} else {
@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
&& force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
- return -EAGAIN;
- }
- if (ret == -ECONNABORTED) {
+ } else if (ret == -ECONNABORTED) {
if (connect->seen_econnaborted)
goto out;
connect->seen_econnaborted = true;
@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
+ if (connect->in_progress) {
+ /*
+ * At least bluetooth will return -EBADFD on a re-connect
+ * attempt, and it's (supposedly) also valid to get -EISCONN
+ * which means the previous result is good. For both of these,
+ * grab the sock_error() and use that for the completion.
+ */
+ if (ret == -EBADFD || ret == -EISCONN)
+ ret = sock_error(sock_from_file(req->file)->sk);
+ }
if (ret == -ERESTARTSYS)
ret = -EINTR;
out:
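
For reference, a minimal userspace sketch (not part of this merge) of the path the connect fix affects, assuming liburing and a placeholder loopback address and port. When the initial connect cannot complete inline, io_connect() retries it internally; with this fix the retried attempt's -EISCONN (or bluetooth's -EBADFD) is translated via sock_error(), so a successful handshake completes with res == 0 instead of an error.

/* Hedged sketch: connect through io_uring; address and port are placeholders. */
#include <liburing.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr;
	int fd;

	io_uring_queue_init(4, &ring, 0);
	fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(80);			/* placeholder port */
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_connect(sqe, fd, (struct sockaddr *)&addr, sizeof(addr));
	io_uring_submit(&ring);

	/*
	 * If the connect cannot complete inline, the kernel retries it; the
	 * CQE result now comes from sock_error(), so a connected socket
	 * completes with res == 0 rather than -EISCONN or -EBADFD.
	 */
	io_uring_wait_cqe(&ring, &cqe);
	printf("connect res=%d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
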
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 25a3515a177c..799db44283c7 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -66,7 +66,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rwv,
.issue = io_read,
},
[IORING_OP_WRITEV] = {
@@ -80,7 +80,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rwv,
.issue = io_write,
},
[IORING_OP_FSYNC] = {
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_read,
},
[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_write,
},
[IORING_OP_POLL_ADD] = {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 3398e1d944c2..64390d4e20c1 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
/* used for fixed read/write too - just read unconditionally */
req->buf_index = READ_ONCE(sqe->buf_index);
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
- }
-
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
ret = ioprio_check_cap(ioprio);
@@ -110,16 +98,42 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len);
rw->flags = READ_ONCE(sqe->rw_flags);
+ return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ int ret;
+
+ ret = io_prep_rw(req, sqe);
+ if (unlikely(ret))
+ return ret;
- /* Have to do this validation here, as this is in io_read() rw->len might
- * have chanaged due to buffer selection
+ /*
+ * Have to do this validation here, as this is in io_read() rw->len
+ * might have changed due to buffer selection
*/
- if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
- ret = io_iov_buffer_select_prep(req);
- if (ret)
- return ret;
- }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return io_iov_buffer_select_prep(req);
+
+ return 0;
+}
+
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ u16 index;
+ int ret;
+
+ ret = io_prep_rw(req, sqe);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ return -EFAULT;
+ index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+ req->imu = ctx->user_bufs[index];
+ io_req_set_rsrc_node(req, ctx, 0);
return 0;
}
@@ -129,12 +143,20 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
*/
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
int ret;
+ /* must be used with provided buffers */
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+
ret = io_prep_rw(req, sqe);
if (unlikely(ret))
return ret;
+ if (rw->addr || rw->len)
+ return -EINVAL;
+
req->flags |= REQ_F_APOLL_MULTISHOT;
return 0;
}
@@ -542,6 +564,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
{
if (!force && !io_cold_defs[req->opcode].prep_async)
return 0;
+ /* opcode type doesn't need async data */
+ if (!io_cold_defs[req->opcode].async_size)
+ return 0;
if (!req_has_async_data(req)) {
struct io_async_rw *iorw;
@@ -887,6 +912,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned int cflags = 0;
int ret;
@@ -903,7 +929,12 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* handling arm it.
*/
if (ret == -EAGAIN) {
- io_kbuf_recycle(req, issue_flags);
+ /*
+ * Reset rw->len to 0 again to avoid clamping future mshot
+ * reads, in case the buffer size varies.
+ */
+ if (io_kbuf_recycle(req, issue_flags))
+ rw->len = 0;
return -EAGAIN;
}
@@ -916,6 +947,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* jump to the termination path. This request is then done.
*/
cflags = io_put_kbuf(req, issue_flags);
+ rw->len = 0; /* similarly to above, reset len to 0 */
if (io_fill_cqe_req_aux(req,
issue_flags & IO_URING_F_COMPLETE_DEFER,
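
The rw->len reset above matters when the provided-buffer group mixes buffer sizes. A hedged userspace helper sketch follows (the function name is illustrative, not liburing API): re-providing a consumed buffer id with a different backing size is fine because the next multishot read is sized by the newly selected buffer instead of being clamped to the length of the previous one.

#include <liburing.h>

/*
 * Hedged helper sketch: put a buffer id back into the ring with a new,
 * possibly larger backing buffer. Since io_read_mshot() now resets rw->len
 * whenever a buffer is put or recycled, the next completion uses this
 * buffer's full length.
 */
static void reprovide_buffer(struct io_uring_buf_ring *br, unsigned int entries,
			     void *buf, unsigned int len, unsigned short bid)
{
	io_uring_buf_ring_add(br, buf, len, bid,
			      io_uring_buf_ring_mask(entries), 0);
	io_uring_buf_ring_advance(br, 1);
}
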
diff --git a/io_uring/rw.h b/io_uring/rw.h
index c5aed03d42a4..f9e89b4fe4da 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -16,6 +16,8 @@ struct io_async_rw {
};
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read(struct io_kiocb *req, unsigned int issue_flags);
int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags);