author    Pavel Begunkov <asml.silence@gmail.com>  2020-06-28 12:52:32 +0300
committer Jens Axboe <axboe@kernel.dk>             2020-06-28 17:10:17 +0300
commit    c3524383333e4ff2f720ab0c02b3a329f72de78b
tree      b18e611e0d79ca224a2ad1494784e59e6c1c5d25 /fs/io_uring.c
parent    2757a23e7f6441eabf605ca59eeb88c34071757d
io_uring: batch-free linked requests as well
There is no reason not to batch deallocation of linked requests. Take
away a request's next link first and handle it like everything else in
io_req_multi_free().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
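For illustration, a minimal standalone userspace sketch of the batching
pattern this patch extends: requests are parked in a fixed-size array and
freed in one pass when it fills, and a linked request first hands off its
successor, then joins the batch like any other. This is not kernel code;
the names (struct req_batch, multi_free(), queue_next()) merely mirror the
kernel's, the link chain is simplified to a single next pointer, and the
hand-off just prints where the kernel queues task work.

/*
 * Userspace sketch of the batch-free pattern (not kernel code): park
 * requests in a fixed-size array and free them all at once when it
 * fills. A linked request hands off its successor first, then joins
 * the batch like everything else.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 8

struct request {
	int id;
	struct request *next;	/* successor in a link chain, if any */
};

struct req_batch {
	struct request *reqs[BATCH_SIZE];
	int to_free;
};

/* Flush the batch: free every parked request in one pass. */
static void free_req_many(struct req_batch *rb)
{
	for (int i = 0; i < rb->to_free; i++)
		free(rb->reqs[i]);
	rb->to_free = 0;
}

/* Stand-in for io_queue_next(): detach the successor and hand it off
 * (the kernel queues it as task work; here we only report it). */
static void queue_next(struct request *req)
{
	if (req->next) {
		printf("queueing next request %d\n", req->next->id);
		req->next = NULL;
	}
}

/* Counterpart of io_req_multi_free(): every request, linked or not,
 * ends up in the batch; only the flush actually frees memory. */
static void multi_free(struct req_batch *rb, struct request *req)
{
	queue_next(req);
	rb->reqs[rb->to_free++] = req;
	if (rb->to_free == BATCH_SIZE)
		free_req_many(rb);
}

int main(void)
{
	struct req_batch rb = { .to_free = 0 };
	struct request *head = malloc(sizeof(*head));
	struct request *tail = malloc(sizeof(*tail));

	head->id = 0; head->next = tail;	/* head -> tail link */
	tail->id = 1; tail->next = NULL;

	multi_free(&rb, head);	/* hands off tail, parks head */
	multi_free(&rb, tail);	/* tail is batched too, no special case */
	free_req_many(&rb);	/* final flush */
	return 0;
}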
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8cb5252269d7..af8d1d64f858 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1728,17 +1728,21 @@ static void io_req_task_queue(struct io_kiocb *req)
 	wake_up_process(tsk);
 }
 
-static void io_free_req(struct io_kiocb *req)
+static void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = NULL;
 
 	io_req_find_next(req, &nxt);
-	__io_free_req(req);
-
 	if (nxt)
 		io_req_task_queue(nxt);
 }
 
+static void io_free_req(struct io_kiocb *req)
+{
+	io_queue_next(req);
+	__io_free_req(req);
+}
+
 /*
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
@@ -1835,16 +1839,19 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+static inline void io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
-	if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
-		return false;
+	if (unlikely(io_is_fallback_req(req))) {
+		io_free_req(req);
+		return;
+	}
+	if (req->flags & REQ_F_LINK_HEAD)
+		io_queue_next(req);
 
 	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
 		io_free_req_many(req->ctx, rb);
-	return true;
 }
 
 static int io_put_kbuf(struct io_kiocb *req)
@@ -1910,9 +1917,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		__io_cqring_fill_event(req, req->result, cflags);
 		(*nr_events)++;
 
-		if (refcount_dec_and_test(&req->refs) &&
-		    !io_req_multi_free(&rb, req))
-			io_free_req(req);
+		if (refcount_dec_and_test(&req->refs))
+			io_req_multi_free(&rb, req);
 	}
 
 	io_commit_cqring(ctx);
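The net effect shows in the last hunk: io_req_multi_free() no longer
returns a bool for the caller to fall back on io_free_req(). It handles
every case itself, taking the slow io_free_req() path only for the
fallback request (which needs the dedicated freeing path rather than the
batch), and queueing a link head's successor via io_queue_next() before
batching the request like any other.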