Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c |  4
-rw-r--r--  io_uring/net.c      | 14
-rw-r--r--  io_uring/rw.c       | 10
-rw-r--r--  io_uring/xattr.c    |  8
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d367dbe1284f..b521186efa5c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2794,8 +2794,10 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
/*
* When @in_idle, we're in cancellation and it's racy to remove the
* node. It'll be removed by the end of cancellation, just ignore it.
+ * tctx can be NULL if the queueing of this task_work raced with
+ * work cancelation off the exec path.
*/
- if (!atomic_read(&tctx->in_idle))
+ if (tctx && !atomic_read(&tctx->in_idle))
io_uring_del_tctx_node((unsigned long)work->ctx);
complete(&work->completion);
}
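For context, a sketch of io_tctx_exit_cb() with the new guard applied. Only the lines shown in the hunk above are taken from the commit; the surrounding lines are an approximation of how the callback reads in this kernel version.

static __cold void io_tctx_exit_cb(struct callback_head *cb)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_exit *work;

	work = container_of(cb, struct io_tctx_exit, task_work);
	/*
	 * When @in_idle, we're in cancellation and it's racy to remove the
	 * node. It'll be removed by the end of cancellation, just ignore it.
	 * tctx can be NULL if the queueing of this task_work raced with
	 * work cancelation off the exec path.
	 */
	if (tctx && !atomic_read(&tctx->in_idle))
		io_uring_del_tctx_node((unsigned long)work->ctx);
	complete(&work->completion);
}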
diff --git a/io_uring/net.c b/io_uring/net.c
index f276f6dd5b09..5229976cb582 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -378,7 +378,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
+ ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
if (unlikely(ret))
return ret;
@@ -464,7 +464,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
}
} else {
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+ ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter,
false);
if (ret > 0)
@@ -516,7 +516,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
}
} else {
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
+ ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
if (ret < 0)
@@ -765,7 +765,7 @@ retry_multishot:
kmsg->fast_iov[0].iov_base = buf;
kmsg->fast_iov[0].iov_len = len;
- iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
len);
}
@@ -862,7 +862,7 @@ retry_multishot:
sr->buf = buf;
}
- ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
+ ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
if (unlikely(ret))
goto out_free;
@@ -1108,14 +1108,14 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
- ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
+ ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
if (unlikely(ret))
return ret;
msg.sg_from_iter = io_sg_from_iter;
} else {
io_notif_set_extended(zc->notif);
- ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
+ ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
&msg.msg_iter);
if (unlikely(ret))
return ret;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1ce065709724..77576835a848 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -554,12 +554,12 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
int io_readv_prep_async(struct io_kiocb *req)
{
- return io_rw_prep_async(req, READ);
+ return io_rw_prep_async(req, ITER_DEST);
}
int io_writev_prep_async(struct io_kiocb *req)
{
- return io_rw_prep_async(req, WRITE);
+ return io_rw_prep_async(req, ITER_SOURCE);
}
/*
@@ -710,7 +710,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
loff_t *ppos;
if (!req_has_async_data(req)) {
- ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
+ ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
} else {
@@ -722,7 +722,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
* buffers, as we dropped the selected one before retry.
*/
if (io_do_buffer_select(req)) {
- ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
+ ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
}
@@ -857,7 +857,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
loff_t *ppos;
if (!req_has_async_data(req)) {
- ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
+ ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
} else {
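The net.c and rw.c hunks above are a mechanical rename of the iov_iter direction argument: READ becomes ITER_DEST (the iterator's buffers are the destination of the transfer) and WRITE becomes ITER_SOURCE (the buffers supply the data). A minimal sketch of the new constants with the same style of import helpers; fill_iters_example is a made-up function, not part of this commit.

#include <linux/uio.h>

static int fill_iters_example(void __user *ubuf, size_t len,
			      struct iovec *iov, struct iov_iter *to,
			      struct kvec *kv, struct iov_iter *from)
{
	int ret;

	/* Receiving into a user buffer: the iterator is the data destination. */
	ret = import_single_range(ITER_DEST, ubuf, len, iov, to);
	if (ret)
		return ret;

	/* Sending from a kernel buffer: the iterator is the data source. */
	iov_iter_kvec(from, ITER_SOURCE, kv, 1, kv->iov_len);
	return 0;
}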
diff --git a/io_uring/xattr.c b/io_uring/xattr.c
index 99df641594d7..6201a9f442c6 100644
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -112,7 +112,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
- ret = do_getxattr(mnt_user_ns(req->file->f_path.mnt),
+ ret = do_getxattr(mnt_idmap(req->file->f_path.mnt),
req->file->f_path.dentry,
&ix->ctx);
@@ -133,9 +133,7 @@ int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
retry:
ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
if (!ret) {
- ret = do_getxattr(mnt_user_ns(path.mnt),
- path.dentry,
- &ix->ctx);
+ ret = do_getxattr(mnt_idmap(path.mnt), path.dentry, &ix->ctx);
path_put(&path);
if (retry_estale(ret, lookup_flags)) {
@@ -213,7 +211,7 @@ static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
ret = mnt_want_write(path->mnt);
if (!ret) {
- ret = do_setxattr(mnt_user_ns(path->mnt), path->dentry, &ix->ctx);
+ ret = do_setxattr(mnt_idmap(path->mnt), path->dentry, &ix->ctx);
mnt_drop_write(path->mnt);
}
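The xattr.c hunks switch from passing the mount's user namespace (mnt_user_ns()) to passing its idmapping (mnt_idmap()) into do_getxattr()/do_setxattr(). A small sketch of the same calling convention against the exported vfs_getxattr() helper; read_user_xattr is a hypothetical wrapper, not kernel code.

#include <linux/mount.h>
#include <linux/path.h>
#include <linux/xattr.h>

static ssize_t read_user_xattr(const struct path *path, const char *name,
			       void *value, size_t size)
{
	/* mnt_idmap() replaces mnt_user_ns() as the source of the mapping. */
	return vfs_getxattr(mnt_idmap(path->mnt), path->dentry, name,
			    value, size);
}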