path: root/io_uring/io_uring.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 06:11:33 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 06:11:33 +0300
commit    c1b7fcf3f6d94c2c3528bf77054bf174a5ef63d7
tree      cc7655b6b42a386436dcdb4d50f89c7ad9bf1db1 /io_uring/io_uring.c
parent    adfd671676c922bada16477eb68b5eb5f065addc
parent    644c4a7a721fb90356cdd42219c9928a3c386230
download  linux-c1b7fcf3f6d94c2c3528bf77054bf174a5ef63d7.tar.xz
Merge tag 'for-6.6/io_uring-2023-08-28' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
 "Fairly quiet round in terms of features, mostly just improvements all
  over the map for existing code. In detail:

   - Initial support for socket operations through io_uring. Latter
     half of this will likely land with the 6.7 kernel, then allowing
     things like get/setsockopt (Breno)

   - Cleanup of the cancel code, and then adding support for canceling
     requests with the opcode as the key (me)

   - Improvements for the io-wq locking (me)

   - Fix affinity setting for SQPOLL based io-wq (me)

   - Remove the io_uring userspace code. These were added initially as
     copies from liburing, but all of them have since bitrotted and are
     way out of date at this point. Rather than attempt to keep them in
     sync, just get rid of them. People will have liburing available
     anyway for these examples. (Pavel)

   - Series improving the CQ/SQ ring caching (Pavel)

   - Misc fixes and cleanups (Pavel, Yue, me)"

* tag 'for-6.6/io_uring-2023-08-28' of git://git.kernel.dk/linux: (47 commits)
  io_uring: move iopoll ctx fields around
  io_uring: move multishot cqe cache in ctx
  io_uring: separate task_work/waiting cache line
  io_uring: banish non-hot data to end of io_ring_ctx
  io_uring: move non aligned field to the end
  io_uring: add option to remove SQ indirection
  io_uring: compact SQ/CQ heads/tails
  io_uring: force inline io_fill_cqe_req
  io_uring: merge iopoll and normal completion paths
  io_uring: reorder cqring_flush and wakeups
  io_uring: optimise extra io_get_cqe null check
  io_uring: refactor __io_get_cqe()
  io_uring: simplify big_cqe handling
  io_uring: cqe init hardening
  io_uring: improve cqe !tracing hot path
  io_uring/rsrc: Annotate struct io_mapped_ubuf with __counted_by
  io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
  io_uring: simplify io_run_task_work_sig return
  io_uring/rsrc: keep one global dummy_ubuf
  io_uring: never overflow io_aux_cqe
  ...
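One of the series above adds opcode-keyed cancelation to IORING_OP_ASYNC_CANCEL. As a rough userspace sketch only (IORING_ASYNC_CANCEL_OP is the flag merged in this cycle, but the assumption that the target opcode is carried in sqe->len should be verified against the final include/uapi/linux/io_uring.h), an SQE that cancels all pending requests of one opcode could be built like this:

/*
 * Hedged sketch, not liburing API: prepare an IORING_OP_ASYNC_CANCEL SQE
 * that cancels every pending request of a given opcode. Assumes the new
 * IORING_ASYNC_CANCEL_OP flag and that the opcode rides in sqe->len.
 */
#include <string.h>
#include <linux/io_uring.h>

static void prep_cancel_by_opcode(struct io_uring_sqe *sqe, __u8 op)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->fd = -1;
	/* match by opcode; CANCEL_ALL removes every match, not just the first */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ALL;
	sqe->len = op;               /* e.g. IORING_OP_READ (assumed field) */
	sqe->user_data = 0x1234;     /* tag for the cancel request's own CQE */
}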
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c | 225
1 file changed, 123 insertions(+), 102 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index bb201503f0db..e7675355048d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -147,8 +147,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
bool cancel_all);
static void io_queue_sqe(struct io_kiocb *req);
-static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
struct kmem_cache *req_cachep;
@@ -229,7 +227,6 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
- kasan_poison_object_data(req_cachep, req);
}
static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
@@ -292,13 +289,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
goto err;
if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
goto err;
-
- ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
- if (!ctx->dummy_ubuf)
- goto err;
- /* set invalid range, so io_import_fixed() fails meeting it */
- ctx->dummy_ubuf->ubuf = -1UL;
-
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
0, GFP_KERNEL))
goto err;
@@ -337,7 +327,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
return ctx;
err:
- kfree(ctx->dummy_ubuf);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
kfree(ctx->io_bl);
@@ -626,7 +615,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
static inline void __io_cq_lock(struct io_ring_ctx *ctx)
{
- if (!ctx->task_complete)
+ if (!ctx->lockless_cq)
spin_lock(&ctx->completion_lock);
}
@@ -639,19 +628,14 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
io_commit_cqring(ctx);
-
- if (ctx->task_complete) {
- /*
- * ->task_complete implies that only current might be waiting
- * for CQEs, and obviously, we currently don't. No one is
- * waiting, wakeups are futile, skip them.
- */
- io_commit_cqring_flush(ctx);
- } else {
- spin_unlock(&ctx->completion_lock);
- io_commit_cqring_flush(ctx);
- io_cqring_wake(ctx);
+ if (!ctx->task_complete) {
+ if (!ctx->lockless_cq)
+ spin_unlock(&ctx->completion_lock);
+ /* IOPOLL rings only need to wake up if it's also SQPOLL */
+ if (!ctx->syscall_iopoll)
+ io_cqring_wake(ctx);
}
+ io_commit_cqring_flush(ctx);
}
static void io_cq_unlock_post(struct io_ring_ctx *ctx)
@@ -659,8 +643,8 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
{
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- io_commit_cqring_flush(ctx);
io_cqring_wake(ctx);
+ io_commit_cqring_flush(ctx);
}
/* Returns true if there are no backlogged entries after the flush */
@@ -693,10 +677,10 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
io_cq_lock(ctx);
while (!list_empty(&ctx->cq_overflow_list)) {
- struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
+ struct io_uring_cqe *cqe;
struct io_overflow_cqe *ocqe;
- if (!cqe)
+ if (!io_get_cqe_overflow(ctx, &cqe, true))
break;
ocqe = list_first_entry(&ctx->cq_overflow_list,
struct io_overflow_cqe, list);
@@ -815,15 +799,12 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
return true;
}
-bool io_req_cqe_overflow(struct io_kiocb *req)
+void io_req_cqe_overflow(struct io_kiocb *req)
{
- if (!(req->flags & REQ_F_CQE32_INIT)) {
- req->extra1 = 0;
- req->extra2 = 0;
- }
- return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- req->extra1, req->extra2);
+ io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ req->big_cqe.extra1, req->big_cqe.extra2);
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
/*
@@ -831,7 +812,7 @@ bool io_req_cqe_overflow(struct io_kiocb *req)
* control dependency is enough as we're using WRITE_ONCE to
* fill the cq entry
*/
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
{
struct io_rings *rings = ctx->rings;
unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
@@ -843,7 +824,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
* Force overflow the completion.
*/
if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
- return NULL;
+ return false;
/* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -851,7 +832,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
/* we need a contiguous range, limit based on the current array offset */
len = min(free, ctx->cq_entries - off);
if (!len)
- return NULL;
+ return false;
if (ctx->flags & IORING_SETUP_CQE32) {
off <<= 1;
@@ -860,12 +841,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
ctx->cqe_cached = &rings->cqes[off];
ctx->cqe_sentinel = ctx->cqe_cached + len;
-
- ctx->cached_cq_tail++;
- ctx->cqe_cached++;
- if (ctx->flags & IORING_SETUP_CQE32)
- ctx->cqe_cached++;
- return &rings->cqes[off];
+ return true;
}
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
@@ -880,8 +856,7 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
+ if (likely(io_get_cqe(ctx, &cqe))) {
trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
WRITE_ONCE(cqe->user_data, user_data);
@@ -905,7 +880,7 @@ static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
lockdep_assert_held(&ctx->uring_lock);
for (i = 0; i < state->cqes_count; i++) {
- struct io_uring_cqe *cqe = &state->cqes[i];
+ struct io_uring_cqe *cqe = &ctx->completion_cqes[i];
if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
if (ctx->task_complete) {
@@ -941,19 +916,22 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
- bool allow_overflow)
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
{
struct io_ring_ctx *ctx = req->ctx;
u64 user_data = req->cqe.user_data;
struct io_uring_cqe *cqe;
if (!defer)
- return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+ return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
lockdep_assert_held(&ctx->uring_lock);
- if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->submit_state.cqes)) {
+ if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) {
__io_cq_lock(ctx);
__io_flush_post_cqes(ctx);
/* no need to flush - flush is deferred */
@@ -964,10 +942,10 @@ bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
* however it's main job is to prevent unbounded posted completions,
* and in that it works just as well.
*/
- if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
return false;
- cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
+ cqe = &ctx->completion_cqes[ctx->submit_state.cqes_count++];
cqe->user_data = user_data;
cqe->res = res;
cqe->flags = cflags;
@@ -980,8 +958,10 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
struct io_rsrc_node *rsrc_node = NULL;
io_cq_lock(ctx);
- if (!(req->flags & REQ_F_CQE_SKIP))
- io_fill_cqe_req(ctx, req);
+ if (!(req->flags & REQ_F_CQE_SKIP)) {
+ if (!io_fill_cqe_req(ctx, req))
+ io_req_cqe_overflow(req);
+ }
/*
* If we're the last reference to this request, add to our locked
@@ -999,8 +979,7 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
io_put_kbuf_comp(req);
if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
io_clean_op(req);
- if (!(req->flags & REQ_F_FIXED_FILE))
- io_put_file(req->file);
+ io_put_file(req);
rsrc_node = req->rsrc_node;
/*
@@ -1062,7 +1041,8 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
req->link = NULL;
req->async_data = NULL;
/* not necessary, but safer to zero */
- req->cqe.res = 0;
+ memset(&req->cqe, 0, sizeof(req->cqe));
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
@@ -1507,7 +1487,8 @@ void io_queue_next(struct io_kiocb *req)
io_req_task_queue(nxt);
}
-void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
+static void io_free_batch_list(struct io_ring_ctx *ctx,
+ struct io_wq_work_node *node)
__must_hold(&ctx->uring_lock)
{
do {
@@ -1534,8 +1515,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
io_clean_op(req);
}
- if (!(req->flags & REQ_F_FIXED_FILE))
- io_put_file(req->file);
+ io_put_file(req);
io_req_put_rsrc_locked(req, ctx);
@@ -1545,7 +1525,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
} while (node);
}
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
+void __io_submit_flush_completions(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_submit_state *state = &ctx->submit_state;
@@ -1560,7 +1540,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
comp_list);
if (!(req->flags & REQ_F_CQE_SKIP) &&
- unlikely(!__io_fill_cqe_req(ctx, req))) {
+ unlikely(!io_fill_cqe_req(ctx, req))) {
if (ctx->task_complete) {
spin_lock(&ctx->completion_lock);
io_req_cqe_overflow(req);
@@ -1616,7 +1596,6 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
unsigned int nr_events = 0;
- int ret = 0;
unsigned long check_cq;
if (!io_allowed_run_tw(ctx))
@@ -1642,6 +1621,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
return 0;
do {
+ int ret = 0;
+
/*
* If a submit got punted to a workqueue, we can have the
* application entering polling for a command before it gets
@@ -1670,13 +1651,18 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
break;
}
ret = io_do_iopoll(ctx, !min);
- if (ret < 0)
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (task_sigpending(current))
+ return -EINTR;
+ if (need_resched())
break;
+
nr_events += ret;
- ret = 0;
- } while (nr_events < min && !need_resched());
+ } while (nr_events < min);
- return ret;
+ return 0;
}
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
@@ -2361,8 +2347,21 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
*/
static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
{
- unsigned head, mask = ctx->sq_entries - 1;
- unsigned sq_idx = ctx->cached_sq_head++ & mask;
+ unsigned mask = ctx->sq_entries - 1;
+ unsigned head = ctx->cached_sq_head++ & mask;
+
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
+ head = READ_ONCE(ctx->sq_array[head]);
+ if (unlikely(head >= ctx->sq_entries)) {
+ /* drop invalid entries */
+ spin_lock(&ctx->completion_lock);
+ ctx->cq_extra--;
+ spin_unlock(&ctx->completion_lock);
+ WRITE_ONCE(ctx->rings->sq_dropped,
+ READ_ONCE(ctx->rings->sq_dropped) + 1);
+ return false;
+ }
+ }
/*
* The cached sq head (or cq tail) serves two purposes:
@@ -2372,20 +2371,12 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
* 2) allows the kernel side to track the head on its own, even
* though the application is the one updating it.
*/
- head = READ_ONCE(ctx->sq_array[sq_idx]);
- if (likely(head < ctx->sq_entries)) {
- /* double index for 128-byte SQEs, twice as long */
- if (ctx->flags & IORING_SETUP_SQE128)
- head <<= 1;
- *sqe = &ctx->sq_sqes[head];
- return true;
- }
- /* drop invalid entries */
- ctx->cq_extra--;
- WRITE_ONCE(ctx->rings->sq_dropped,
- READ_ONCE(ctx->rings->sq_dropped) + 1);
- return false;
+ /* double index for 128-byte SQEs, twice as long */
+ if (ctx->flags & IORING_SETUP_SQE128)
+ head <<= 1;
+ *sqe = &ctx->sq_sqes[head];
+ return true;
}
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
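The io_get_sqe() hunk above is the kernel half of the new IORING_SETUP_NO_SQARRAY mode: with the flag set, the masked cached SQ head is used directly as the SQE index and the sq_array indirection (and its bounds check) is skipped. On the submission side this means the application simply fills the SQE at tail & mask and publishes the tail. A minimal sketch with invented struct and field names (this is not liburing's layout) follows:

#include <linux/io_uring.h>

/* Invented view of the mapped SQ ring; names are placeholders. */
struct sq_view {
	unsigned *ktail;            /* mapped SQ tail word, read by the kernel */
	unsigned ring_mask;         /* sq_entries - 1 */
	struct io_uring_sqe *sqes;  /* mapped SQE array */
	unsigned local_tail;        /* application-side shadow of the tail */
};

/* With IORING_SETUP_NO_SQARRAY, SQ slot (tail & mask) is the SQE index. */
static struct io_uring_sqe *get_sqe_no_sqarray(struct sq_view *sq)
{
	return &sq->sqes[sq->local_tail++ & sq->ring_mask];
}

static void publish_tail(struct sq_view *sq)
{
	/* release ordering so the SQE contents are visible before the tail */
	__atomic_store_n(sq->ktail, sq->local_tail, __ATOMIC_RELEASE);
}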
@@ -2484,10 +2475,10 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
if (!llist_empty(&ctx->work_llist)) {
__set_current_state(TASK_RUNNING);
if (io_run_local_work(ctx) > 0)
- return 1;
+ return 0;
}
if (io_run_task_work() > 0)
- return 1;
+ return 0;
if (task_sigpending(current))
return -EINTR;
return 0;
@@ -2761,6 +2752,12 @@ static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries
return SIZE_MAX;
#endif
+ if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
+ if (sq_offset)
+ *sq_offset = SIZE_MAX;
+ return off;
+ }
+
if (sq_offset)
*sq_offset = off;
@@ -2903,7 +2900,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_wq_put_hash(ctx->hash_map);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->dummy_ubuf);
kfree(ctx->io_bl);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
@@ -3733,7 +3729,8 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return PTR_ERR(rings);
ctx->rings = rings;
- ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
rings->sq_ring_mask = p->sq_entries - 1;
rings->cq_ring_mask = p->cq_entries - 1;
rings->sq_ring_entries = p->sq_entries;
@@ -3862,6 +3859,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
!(ctx->flags & IORING_SETUP_SQPOLL))
ctx->task_complete = true;
+ if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
+ ctx->lockless_cq = true;
+
/*
* lazy poll_wq activation relies on ->task_complete for synchronisation
* purposes, see io_activate_pollwq()
@@ -3941,7 +3941,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
p->sq_off.flags = offsetof(struct io_rings, sq_flags);
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
- p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
p->sq_off.resv1 = 0;
if (!(ctx->flags & IORING_SETUP_NO_MMAP))
p->sq_off.user_addr = 0;
@@ -4030,7 +4031,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
- IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY))
+ IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
+ IORING_SETUP_NO_SQARRAY))
return -EINVAL;
return io_uring_create(entries, &p, params);
@@ -4193,16 +4195,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
return 0;
}
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+ cpumask_var_t new_mask)
+{
+ int ret;
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+ ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+ } else {
+ mutex_unlock(&ctx->uring_lock);
+ ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ return ret;
+}
+
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
void __user *arg, unsigned len)
{
- struct io_uring_task *tctx = current->io_uring;
cpumask_var_t new_mask;
int ret;
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
@@ -4223,19 +4237,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
return -EFAULT;
}
- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+ ret = __io_register_iowq_aff(ctx, new_mask);
free_cpumask_var(new_mask);
return ret;
}
static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
- struct io_uring_task *tctx = current->io_uring;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- return io_wq_cpu_affinity(tctx->io_wq, NULL);
+ return __io_register_iowq_aff(ctx, NULL);
}
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
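The two hunks above reroute IORING_REGISTER_IOWQ_AFF and its unregister counterpart through __io_register_iowq_aff(), so that on an IORING_SETUP_SQPOLL ring the mask is applied to the io-wq owned by the SQPOLL task instead of requiring the caller's own io_wq. The userspace call is unchanged; a hedged sketch using liburing's io_uring_register_iowq_aff() helper (assumed available in a current liburing) looks like:

#define _GNU_SOURCE
#include <sched.h>
#include <liburing.h>

/* Pin the ring's io-wq workers to CPUs 0 and 1. Returns 0 or -errno. */
static int pin_iowq_to_cpus01(struct io_uring *ring)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);
	return io_uring_register_iowq_aff(ring, sizeof(mask), &mask);
}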
@@ -4611,8 +4620,20 @@ static int __init io_uring_init(void)
io_uring_optable_init();
- req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
- SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+ /*
+ * Allow user copy in the per-command field, which starts after the
+ * file in io_kiocb and until the opcode field. The openat2 handling
+ * requires copying in user memory into the io_kiocb object in that
+ * range, and HARDENED_USERCOPY will complain if we haven't
+ * correctly annotated this range.
+ */
+ req_cachep = kmem_cache_create_usercopy("io_kiocb",
+ sizeof(struct io_kiocb), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
+ offsetof(struct io_kiocb, cmd.data),
+ sizeof_field(struct io_kiocb, cmd.data), NULL);
+
return 0;
};
__initcall(io_uring_init);