author    Linus Torvalds <torvalds@linux-foundation.org>  2024-06-01 01:22:58 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-06-01 01:22:58 +0300
commit    6d541d6672eeaf526d67b67b5407f48fe0522c6d (patch)
tree      c77aacebb645fc3c9a69ddd025504071adcf84ab
parent    b050496579632f86ee1ef7e7501906db579f3457 (diff)
parent    18414a4a2eabb0281d12d374c92874327e0e3fe3 (diff)
download  linux-6d541d6672eeaf526d67b67b5407f48fe0522c6d.tar.xz
Merge tag 'io_uring-6.10-20240530' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "A couple of minor fixes for issues introduced in the 6.10 merge
  window:

   - Ensure that all read/write ops have an appropriate cleanup handler
     set (Breno)

   - Regression for applications still doing multiple mmaps even if
     FEAT_SINGLE_MMAP is set (me)

   - Move kmsg inquiry setting above any potential failure point,
     avoiding a spurious NONEMPTY flag setting on early error (me)"

* tag 'io_uring-6.10-20240530' of git://git.kernel.dk/linux:
  io_uring/net: assign kmsg inq/flags before buffer selection
  io_uring/rw: Free iovec before cleaning async data
  io_uring: don't attempt to mmap larger than what the user asks for
-rw-r--r--  io_uring/memmap.c  5
-rw-r--r--  io_uring/net.c     6
-rw-r--r--  io_uring/opdef.c   5
3 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 4785d6af5fee..a0f32a255fd1 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -244,6 +244,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	struct io_ring_ctx *ctx = file->private_data;
 	size_t sz = vma->vm_end - vma->vm_start;
 	long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned int npages;
 	void *ptr;
 
 	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
@@ -253,8 +254,8 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	switch (offset & IORING_OFF_MMAP_MASK) {
 	case IORING_OFF_SQ_RING:
 	case IORING_OFF_CQ_RING:
-		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages,
-						ctx->n_ring_pages);
+		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
+		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
 	case IORING_OFF_SQES:
 		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
 						ctx->n_sqe_pages);
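The hunk above caps the SQ/CQ ring mapping at the number of pages the caller's
mmap() length actually covers, instead of always mapping all of
ctx->n_ring_pages: applications that still issue separate short ring mmaps even
when IORING_FEAT_SINGLE_MMAP is advertised were otherwise handed a larger
mapping than they asked for. A minimal standalone sketch of the same
round-up-and-clamp arithmetic (the PAGE_SHIFT value and the helper name are
illustrative, not kernel API):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages for this sketch */
#define PAGE_SIZE	((size_t)1 << PAGE_SHIFT)

/* Round the requested byte length up to whole pages, then cap it at the
 * number of pages actually backing the ring, mirroring the
 * min(ctx->n_ring_pages, ...) clamp added in io_uring_mmap(). */
static size_t clamp_mmap_pages(size_t requested_len, size_t n_ring_pages)
{
	size_t want = (requested_len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return want < n_ring_pages ? want : n_ring_pages;
}

int main(void)
{
	printf("%zu\n", clamp_mmap_pages(1, 4));		/* 1: short mmap maps one page   */
	printf("%zu\n", clamp_mmap_pages(64 * PAGE_SIZE, 4));	/* 4: oversized request is capped */
	return 0;
}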
diff --git a/io_uring/net.c b/io_uring/net.c
index 0a48596429d9..7c98c4d50946 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1127,6 +1127,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		flags |= MSG_DONTWAIT;
 
 retry_multishot:
+	kmsg->msg.msg_inq = -1;
+	kmsg->msg.msg_flags = 0;
+
 	if (io_do_buffer_select(req)) {
 		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
 		if (unlikely(ret))
@@ -1134,9 +1137,6 @@ retry_multishot:
 		sr->buf = NULL;
 	}
 
-	kmsg->msg.msg_inq = -1;
-	kmsg->msg.msg_flags = 0;
-
 	if (flags & MSG_WAITALL)
 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
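This change moves the msg_inq/msg_flags reset above io_do_buffer_select(), so a
failure during buffer selection (e.g. no provided buffers left on a multishot
retry) can no longer complete the request with a stale msg_inq left over from a
previous iteration, which is what produced the spurious
IORING_CQE_F_SOCK_NONEMPTY flag on early error. A minimal sketch of the
ordering rule the patch enforces (struct and function names here are
illustrative, not the kernel's):

#include <errno.h>

struct recv_report {
	int inq;		/* queued-bytes hint; -1 means "unknown" */
	unsigned int flags;	/* flags later reflected into the CQE */
};

/* Reset the reported state to its defaults *before* any early-exit path,
 * so a buffer-selection failure cannot leak values left over from an
 * earlier retry of the same multishot request. */
static int recv_once(struct recv_report *rep, int (*select_buf)(void))
{
	rep->inq = -1;
	rep->flags = 0;

	if (select_buf() < 0)	/* early failure: state is already clean */
		return -ENOBUFS;

	/* ... a real receive would now fill rep->inq / rep->flags ... */
	return 0;
}

static int no_buffers(void)
{
	return -1;		/* simulate buffer selection failing */
}

int main(void)
{
	struct recv_report rep = { .inq = 123, .flags = 7 };	/* stale state */

	(void)recv_once(&rep, no_buffers);
	return (rep.inq == -1 && rep.flags == 0) ? 0 : 1;
}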
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 2de5cca9504e..2e3b7b16effb 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -516,10 +516,12 @@ const struct io_cold_def io_cold_defs[] = {
 	},
 	[IORING_OP_READ_FIXED] = {
 		.name			= "READ_FIXED",
+		.cleanup		= io_readv_writev_cleanup,
 		.fail			= io_rw_fail,
 	},
 	[IORING_OP_WRITE_FIXED] = {
 		.name			= "WRITE_FIXED",
+		.cleanup		= io_readv_writev_cleanup,
 		.fail			= io_rw_fail,
 	},
 	[IORING_OP_POLL_ADD] = {
@@ -582,10 +584,12 @@ const struct io_cold_def io_cold_defs[] = {
 	},
 	[IORING_OP_READ] = {
 		.name			= "READ",
+		.cleanup		= io_readv_writev_cleanup,
 		.fail			= io_rw_fail,
 	},
 	[IORING_OP_WRITE] = {
 		.name			= "WRITE",
+		.cleanup		= io_readv_writev_cleanup,
 		.fail			= io_rw_fail,
 	},
 	[IORING_OP_FADVISE] = {
@@ -692,6 +696,7 @@ const struct io_cold_def io_cold_defs[] = {
 	},
 	[IORING_OP_READ_MULTISHOT] = {
 		.name			= "READ_MULTISHOT",
+		.cleanup		= io_readv_writev_cleanup,
 	},
 	[IORING_OP_WAITID] = {
 		.name			= "WAITID",