author	Jens Axboe <axboe@kernel.dk>	2021-10-12 18:28:46 +0300
committer	Jens Axboe <axboe@kernel.dk>	2021-10-18 23:40:46 +0300
commit	b688f11e86c9a22169a0e522530982735d2db19b (patch)
tree	e8a8c0f3d88d4194576c61993e056075e4029fb6 /fs
parent	c234a65392062504acf04afe0ae404cca61a8e1a (diff)
download	linux-b688f11e86c9a22169a0e522530982735d2db19b.tar.xz
io_uring: utilize the io batching infrastructure for more efficient polled IO
Wire up using an io_comp_batch for f_op->iopoll(). If the lower stack
supports it, we can handle high rates of polled IO more efficiently.

This raises the single core efficiency on my system from ~6.1M IOPS to
~6.6M IOPS running a random read workload at depth 128 on two gen2
Optane drives.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
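For orientation only (not part of this commit): the batch container handed down to ->iopoll() is the struct io_comp_batch introduced earlier in this series, which looks roughly like this in include/linux/blkdev.h at this point:

struct io_comp_batch {
	struct request *req_list;	/* singly linked list of completed requests */
	bool need_ts;			/* a request on the list wants a completion timestamp */
	void (*complete)(struct io_comp_batch *);	/* driver's batch completion callback */
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

io_do_iopoll() only has to check whether req_list is non-empty and invoke complete() once; it never walks the list itself.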
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cd77a137f2d8..d4631a55a692 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2458,6 +2458,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
 	struct io_kiocb *req, *tmp;
 	unsigned int poll_flags = BLK_POLL_NOSLEEP;
+	DEFINE_IO_COMP_BATCH(iob);
 	LIST_HEAD(done);
 
 	/*
@@ -2483,17 +2484,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (!list_empty(&done))
 			break;
 
-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags);
+		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
 			poll_flags |= BLK_POLL_ONESHOT;
 
 		/* iopoll may have completed current req */
-		if (READ_ONCE(req->iopoll_completed))
+		if (!rq_list_empty(iob.req_list) ||
+		    READ_ONCE(req->iopoll_completed))
 			list_move_tail(&req->inflight_entry, &done);
 	}
 
+	if (!rq_list_empty(iob.req_list))
+		iob.complete(&iob);
 	if (!list_empty(&done))
 		io_iopoll_complete(ctx, nr_events, &done);
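For context, the other half of this interface is the driver's ->poll() handler, which fills the batch instead of completing each request on its own. Below is a minimal sketch of that pattern, loosely modeled on the NVMe usage in this series; my_poll(), my_complete_batch() and my_reap_completion() are hypothetical names, while blk_mq_add_to_batch() and blk_mq_end_request_batch() are the real helpers the series adds:

/* Hypothetical driver-side sketch; not part of this commit. */
#include <linux/blk-mq.h>

/* made-up helper: pop one completed request from the driver's CQ, or NULL */
static struct request *my_reap_completion(struct blk_mq_hw_ctx *hctx);

static void my_complete_batch(struct io_comp_batch *iob)
{
	/* driver-specific per-request teardown would go here, then ... */
	blk_mq_end_request_batch(iob);
}

static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct request *req;
	int found = 0;

	while ((req = my_reap_completion(hctx)) != NULL) {
		found++;
		/* defer into the batch when possible, else complete directly */
		if (!blk_mq_add_to_batch(req, iob, 0, my_complete_batch))
			blk_mq_end_request(req, BLK_STS_OK);
	}
	return found;
}

With something like this in the lower layer, the iob.complete(&iob) call in the hunk above resolves to whatever callback the driver registered via blk_mq_add_to_batch(), ending all batched requests in one go.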