path: root/fs/io_uring.c
author     Pavel Begunkov <asml.silence@gmail.com>    2020-07-18 11:32:52 +0300
committer  Jens Axboe <axboe@kernel.dk>               2020-07-24 22:00:46 +0300
commit     5af1d13e8f0d8839db04a71ec786f369b0e67234 (patch)
tree       23bced79fc69e24855e922ab3efacdf0b0ee8e19 /fs/io_uring.c
parent     dd6f843a9fca8f225c86fee5f50da429c369c045 (diff)
download   linux-5af1d13e8f0d8839db04a71ec786f369b0e67234.tar.xz
io_uring: batch put_task_struct()
Since every iopoll request holds a task ref, it becomes expensive to put
them one by one; instead, we can put several at once by integrating that
into io_req_free_batch().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
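For context: the saving comes from put_task_struct_many(), which drops several
task references with a single refcount operation instead of one atomic
decrement per request. At the time of this commit the helper in
<linux/sched/task.h> looks roughly like this (illustrative excerpt, not part of
the diff below):

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}

So freeing a batch of N iopoll requests that all belong to the same task costs
one refcount_sub_and_test() rather than N separate put_task_struct() calls.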
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  29
1 file changed, 27 insertions, 2 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 680b16f71a03..3a415d924b93 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1544,7 +1544,6 @@ static void io_dismantle_req(struct io_kiocb *req)
 		kfree(req->io);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	__io_put_req_task(req);
 	io_req_clean_work(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
@@ -1564,6 +1563,7 @@ static void __io_free_req(struct io_kiocb *req)
 	struct io_ring_ctx *ctx;
 
 	io_dismantle_req(req);
+	__io_put_req_task(req);
 	ctx = req->ctx;
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -1807,8 +1807,18 @@ static void io_free_req(struct io_kiocb *req)
 struct req_batch {
 	void *reqs[IO_IOPOLL_BATCH];
 	int to_free;
+
+	struct task_struct *task;
+	int task_refs;
 };
 
+static inline void io_init_req_batch(struct req_batch *rb)
+{
+	rb->to_free = 0;
+	rb->task_refs = 0;
+	rb->task = NULL;
+}
+
 static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
 				      struct req_batch *rb)
 {
@@ -1822,6 +1832,10 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 {
 	if (rb->to_free)
 		__io_req_free_batch_flush(ctx, rb);
+	if (rb->task) {
+		put_task_struct_many(rb->task, rb->task_refs);
+		rb->task = NULL;
+	}
 }
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
@@ -1833,6 +1847,17 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_HEAD)
 		io_queue_next(req);
 
+	if (req->flags & REQ_F_TASK_PINNED) {
+		if (req->task != rb->task) {
+			if (rb->task)
+				put_task_struct_many(rb->task, rb->task_refs);
+			rb->task = req->task;
+			rb->task_refs = 0;
+		}
+		rb->task_refs++;
+		req->flags &= ~REQ_F_TASK_PINNED;
+	}
+
 	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
@@ -1978,7 +2003,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	/* order with ->result store in io_complete_rw_iopoll() */
 	smp_rmb();
 
-	rb.to_free = 0;
+	io_init_req_batch(&rb);
 	while (!list_empty(done)) {
 		int cflags = 0;
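Taken together, io_init_req_batch(), io_req_free_batch() and
io_req_free_batch_finish() give iopoll completion the following shape after
this patch. A condensed sketch of the io_iopoll_complete() loop (CQE posting
and requeue handling elided; not a verbatim copy of the function):

	struct req_batch rb;

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		struct io_kiocb *req;

		/* ... take the next completed request off 'done', post its
		 * CQE and bump *nr_events (unchanged by this patch) ... */
		if (refcount_dec_and_test(&req->refs))
			io_req_free_batch(&rb, req);	/* counts req->task refs */
	}
	io_req_free_batch_finish(ctx, &rb);	/* frees reqs, then one
						 * put_task_struct_many() per task */

Since consecutive iopoll completions usually belong to the same task, the
task_struct refcount is now touched once per batch instead of once per request.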