From 2ad9bd8332ac1bc072cc7d785e7cb09d6ad36f4c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 5 Jan 2023 11:07:30 -0800
Subject: iov: add import_ubuf()

Like import_single_range(), but for ITER_UBUF.

Signed-off-by: Jens Axboe
Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
---
 include/linux/uio.h |  1 +
 lib/iov_iter.c      | 11 +++++++++++
 2 files changed, 12 insertions(+)

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9f158238edba..73b1d5d1e4f1 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -346,6 +346,7 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
 		struct iov_iter *i, bool compat);
 int import_single_range(int type, void __user *buf, size_t len,
 		struct iovec *iov, struct iov_iter *i);
+int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
 
 static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
 			void __user *buf, size_t count)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f9a3ff37ecd1..f036760b66e2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1877,6 +1877,17 @@ int import_single_range(int rw, void __user *buf, size_t len,
 }
 EXPORT_SYMBOL(import_single_range);
 
+int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
+{
+	if (len > MAX_RW_COUNT)
+		len = MAX_RW_COUNT;
+	if (unlikely(!access_ok(buf, len)))
+		return -EFAULT;
+
+	iov_iter_ubuf(i, rw, buf, len);
+	return 0;
+}
+
 /**
  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
  *			iov_iter_save_state() was called.
--
cgit v1.2.3

From 4b61152e107a95bc0a73d84072dbd75cb97689e3 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 5 Jan 2023 11:07:31 -0800
Subject: io_uring: switch network send/recv to ITER_UBUF

This is more efficient than iter_iov.

Signed-off-by: Jens Axboe
[merged to 6.2]
Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
---
 io_uring/net.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7c2743..0fb9dcab20b7 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -181,7 +181,7 @@ static int io_setup_async_msg(struct io_kiocb *req,
 	if (async_msg->msg.msg_name)
 		async_msg->msg.msg_name = &async_msg->addr;
 	/* if were using fast_iov, set it to the new one */
-	if (!kmsg->free_iov) {
+	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
 		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
 		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
 	}
@@ -344,7 +344,6 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	struct sockaddr_storage __address;
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct msghdr msg;
-	struct iovec iov;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -378,7 +377,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
+	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
 	if (unlikely(ret))
 		return ret;
 
@@ -764,10 +763,7 @@ retry_multishot:
 			}
 		}
 
-		kmsg->fast_iov[0].iov_base = buf;
-		kmsg->fast_iov[0].iov_len = len;
-		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
-				len);
+		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
 	}
 
 	flags = sr->msg_flags;
@@ -835,7 +831,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct msghdr msg;
 	struct socket *sock;
-	struct iovec iov;
 	unsigned int cflags;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -863,7 +858,7 @@ retry_multishot:
 		sr->buf = buf;
 	}
 
-	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
+	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
 	if (unlikely(ret))
 		goto out_free;
 
@@ -1074,7 +1069,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	struct sockaddr_storage __address;
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct msghdr msg;
-	struct iovec iov;
 	struct socket *sock;
 	unsigned msg_flags;
 	int ret, min_ret = 0;
@@ -1116,8 +1110,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 		msg.sg_from_iter = io_sg_from_iter;
 	} else {
 		io_notif_set_extended(zc->notif);
-		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
-					  &msg.msg_iter);
+		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
 		if (unlikely(ret))
 			return ret;
 		ret = io_notif_account_mem(zc->notif, zc->len);
--
cgit v1.2.3
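A note on the new helper before the remaining conversions: a caller that
previously needed an on-stack struct iovec to import a single user range
can now hand the user pointer to the iterator directly. The sketch below
is illustrative only (example_write() is a made-up caller, not part of
these patches); it relies only on the import_ubuf() semantics visible in
the first patch, i.e. the length is clamped to MAX_RW_COUNT and
access_ok() is checked internally.

/* Hypothetical caller; needs <linux/uio.h> for the iter helpers. */
static ssize_t example_write(void __user *ubuf, size_t len)
{
	struct iov_iter iter;
	int ret;

	/* import_ubuf() clamps len to MAX_RW_COUNT and checks access_ok() */
	ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
	if (unlikely(ret))
		return ret;

	/* ... hand &iter to the actual I/O path here ... */
	return iov_iter_count(&iter);
}

Unlike import_single_range(), there is no iovec whose lifetime must
match the iterator's, which is what lets the later patches delete the
on-stack "struct iovec iov" variables.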
From 1e23db450cff5f0410480137041181d1514bda2a Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 5 Jan 2023 11:07:32 -0800
Subject: io_uring: use iter_ubuf for single range imports

This is more efficient than iter_iov.

Signed-off-by: Jens Axboe
[merge to 6.2, minor fixes]
Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
---
 io_uring/rw.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 8227af2e1c0f..436a2e064df6 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -391,7 +391,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 			rw->len = sqe_len;
 		}
 
-		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
+		ret = import_ubuf(ddir, buf, sqe_len, iter);
 		if (ret)
 			return ERR_PTR(ret);
 		return NULL;
@@ -450,7 +450,10 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 		struct iovec iovec;
 		ssize_t nr;
 
-		if (!iov_iter_is_bvec(iter)) {
+		if (iter_is_ubuf(iter)) {
+			iovec.iov_base = iter->ubuf + iter->iov_offset;
+			iovec.iov_len = iov_iter_count(iter);
+		} else if (!iov_iter_is_bvec(iter)) {
 			iovec = iov_iter_iovec(iter);
 		} else {
 			iovec.iov_base = u64_to_user_ptr(rw->addr);
@@ -495,7 +498,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 	io->free_iovec = iovec;
 	io->bytes_done = 0;
 	/* can only be fixed buffers, no need to do anything */
-	if (iov_iter_is_bvec(iter))
+	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
 		return;
 	if (!iovec) {
 		unsigned iov_off = 0;
--
cgit v1.2.3

From 4397a17c1dc53f436285f372432dd1aea44e7953 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 5 Jan 2023 11:07:33 -0800
Subject: iov_iter: move iter_ubuf check inside restore WARN

io_uring is using iter_ubuf types for single vector requests. We expect
state restore may happen for this type now, and it is already handled
correctly, so suppress the warning.

Signed-off-by: Keith Busch
Signed-off-by: Jens Axboe
Reviewed-by: Christoph Hellwig
---
 lib/iov_iter.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f036760b66e2..d9b3332c8405 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1902,8 +1902,8 @@ int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
  */
 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
 {
-	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
-	    !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
+	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+			 !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
 		return;
 	i->iov_offset = state->iov_offset;
 	i->count = state->count;
--
cgit v1.2.3
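For context on why that WARN condition matters: iov_iter_restore() pairs
with iov_iter_save_state(), which io_uring uses to roll an iterator back
after a partial or -EAGAIN'd attempt. A minimal sketch of the pattern
follows; do_attempt() is hypothetical, and the point is only that, with
the patch above, the rollback no longer warns when the iterator is
ITER_UBUF.

static int example_retry(struct iov_iter *iter)
{
	struct iov_iter_state state;
	int ret;

	iov_iter_save_state(iter, &state);	/* snapshot count/offset */

	ret = do_attempt(iter);			/* hypothetical; may advance iter */
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);	/* warning-free for ITER_UBUF */
	return ret;
}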
From d46aa786fa53cbc92593089374e49c94fd9063ae Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 5 Jan 2023 11:07:34 -0800
Subject: block: use iter_ubuf for single range

This is more efficient than iter_iov.

Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
[axboe: fold in iovec assumption fix]
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 19940c978c73..f2135e6ee8f6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -31,7 +31,8 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
 		return NULL;
 	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
 	bmd->iter = *data;
-	bmd->iter.iov = bmd->iov;
+	if (iter_is_iovec(data))
+		bmd->iter.iov = bmd->iov;
 	return bmd;
 }
 
@@ -641,7 +642,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		copy = true;
 	else if (iov_iter_is_bvec(iter))
 		map_bvec = true;
-	else if (!iter_is_iovec(iter))
+	else if (!user_backed_iter(iter))
 		copy = true;
 	else if (queue_virt_boundary(q))
 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
@@ -682,9 +683,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		struct rq_map_data *map_data, void __user *ubuf,
 		unsigned long len, gfp_t gfp_mask)
 {
-	struct iovec iov;
 	struct iov_iter i;
-	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
+	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
 
 	if (unlikely(ret < 0))
 		return ret;
--
cgit v1.2.3
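To illustrate the caller-visible side of this last patch:
blk_rq_map_user() keeps its signature, so drivers need no changes, but
the single-range import now produces an ITER_UBUF iterator, and the
copy/bounce decision accepts it through user_backed_iter(), which covers
both ITER_IOVEC and ITER_UBUF. A hypothetical driver path (example_map()
is made up, not from the patch) might look like:

static int example_map(struct request_queue *q, struct request *rq,
		       void __user *ubuf, unsigned long len)
{
	/*
	 * NULL map_data: the block layer decides internally whether to
	 * map the user pages directly or bounce through a copy.
	 */
	return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}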