author	Jens Axboe <axboe@kernel.dk>	2020-10-07 21:48:53 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-10-07 21:55:42 +0300
commit	faf7b51c06973f947776af6c8f8a513475a2bfa1 (patch)
tree	3953847c6a16afc19872e9b3f94be0a78a3e164a /fs
parent	87c4311fd2c28e83545cdfa4702b57db15ed1d9b (diff)
download	linux-faf7b51c06973f947776af6c8f8a513475a2bfa1.tar.xz
io_uring: batch account ->req_issue and task struct references
Identical to how we handle the ctx reference counts: increase by the batch we're expecting to submit, and handle any slow-path residual afterwards. The request alloc-and-issue path is very hot, and this makes a noticeable difference by avoiding two atomic incs for each individual request.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
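The pattern generalizes: when a hot loop takes one reference per item, the atomic read-modify-writes paid per iteration can be folded into a single atomic add for the whole batch, with a compensating subtraction for whatever was not consumed. Below is a minimal userspace sketch of that idea, not the kernel code itself; the counters issue_count and task_refs are hypothetical stand-ins for tctx->req_issue and current->usage.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long issue_count;	/* stands in for tctx->req_issue */
static atomic_long task_refs;	/* stands in for current->usage */

/* Pretend to issue one request: succeed for the first 7, then fail. */
static int issue_one(int i)
{
	return i < 7 ? 0 : -1;
}

/* Batched accounting: charge for all 'nr' up front, refund the residual. */
static int submit_batch(int nr)
{
	int submitted = 0;

	atomic_fetch_add(&issue_count, nr);	/* one atomic op instead of nr */
	atomic_fetch_add(&task_refs, nr);

	for (int i = 0; i < nr; i++) {
		if (issue_one(i))
			break;
		submitted++;
	}

	/* Slow path residual: give back what the loop never consumed. */
	if (submitted != nr) {
		atomic_fetch_sub(&issue_count, nr - submitted);
		atomic_fetch_sub(&task_refs, nr - submitted);
	}
	return submitted;
}

int main(void)
{
	int done = submit_batch(16);

	printf("submitted %d, issue_count %ld, task_refs %ld\n",
	       done, atomic_load(&issue_count), atomic_load(&task_refs));
	return 0;
}

On the fast path (everything submits), the batch costs two atomic adds total rather than two per request; only the partial-submission slow path pays for the refund.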
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c409af7bd444..85e8ad9970be 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6429,8 +6429,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	/* one is dropped after submission, the other at completion */
 	refcount_set(&req->refs, 2);
 	req->task = current;
-	get_task_struct(req->task);
-	atomic_long_inc(&req->task->io_uring->req_issue);
 	req->result = 0;
 
 	if (unlikely(req->opcode >= IORING_OP_LAST))
@@ -6488,6 +6486,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
+	atomic_long_add(nr, &current->io_uring->req_issue);
+	refcount_add(nr, &current->usage);
+
 	io_submit_state_start(&state, ctx, nr);
 
 	for (i = 0; i < nr; i++) {
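Note the ordering in this hunk: the batched task accounting happens only after percpu_ref_tryget_many() succeeds, so the -EAGAIN path has nothing to unwind; and since get_task_struct() is a refcount_inc() on task->usage, refcount_add(nr, &current->usage) is the batched equivalent of nr individual gets. A rough userspace analogue of that gate-then-charge ordering, with a hypothetical ctx_tryget_many() standing in for percpu_ref_tryget_many():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long ctx_refs = 1;	/* live; crude percpu_ref analogue */
static atomic_long task_refs;

/* Fail once the ref has dropped to zero, like a dead percpu_ref. */
static bool ctx_tryget_many(long nr)
{
	long old = atomic_load(&ctx_refs);

	while (old > 0)
		if (atomic_compare_exchange_weak(&ctx_refs, &old, old + nr))
			return true;
	return false;
}

static int submit(long nr)
{
	if (!ctx_tryget_many(nr))
		return -1;	/* -EAGAIN: nothing charged yet, nothing to undo */
	atomic_fetch_add(&task_refs, nr);	/* safe to charge the batch now */
	/* ... submission loop would run here ... */
	return 0;
}

int main(void)
{
	printf("live ctx: %d\n", submit(8));	/* 0: batch charged */
	atomic_store(&ctx_refs, 0);		/* kill the ctx */
	printf("dead ctx: %d\n", submit(8));	/* -1: nothing to unwind */
	return 0;
}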
@@ -6530,6 +6531,8 @@ fail_req:
 		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
 
 		percpu_ref_put_many(&ctx->refs, nr - ref_used);
+		atomic_long_sub(nr - ref_used, &current->io_uring->req_issue);
+		put_task_struct_many(current, nr - ref_used);
 	}
 	if (link)
 		io_queue_link_head(link, &state.comp);
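The residual arithmetic in this error path treats a flat -EAGAIN as "no references consumed", while a short count refunds only the unissued tail. A small standalone check of that arithmetic, using a hypothetical residual() helper that mirrors the ref_used computation above:

#include <assert.h>
#include <errno.h>

/* References to refund, given io_submit_sqes()'s outcome:
 * -EAGAIN means the batch never started, so nothing was consumed. */
static int residual(int submitted, int nr)
{
	int ref_used = (submitted == -EAGAIN) ? 0 : submitted;

	return nr - ref_used;
}

int main(void)
{
	assert(residual(-EAGAIN, 8) == 8);	/* nothing issued: refund all */
	assert(residual(5, 8) == 3);		/* partial batch: refund 3 */
	assert(residual(8, 8) == 0);		/* full batch: nothing to do */
	return 0;
}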