diff options
author | Jens Axboe <axboe@kernel.dk> | 2019-08-20 20:03:11 +0300 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2019-08-29 09:30:27 +0300 |
commit | 7fe55f17deee0def02df3f604c13d78e94db6e04 (patch) | |
tree | 1a526acd11e3eb458f337fd4f1bd09e689171eab | |
parent | 2b2c2647a19ce1a6a67a9765dfef9f7f3d11d71e (diff) | |
download | linux-7fe55f17deee0def02df3f604c13d78e94db6e04.tar.xz |
io_uring: don't enter poll loop if we have CQEs pending
[ Upstream commit a3a0e43fd77013819e4b6f55e37e0efe8e35d805 ]
We need to check if we have CQEs pending before starting a poll loop,
as those could be the events we will be spinning for (and hence we'll
find none). This can happen if a CQE triggers an error, or if it is
found by eg an IRQ before we get a chance to find it through polling.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r-- | fs/io_uring.c | 22 +++++++++++++--------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5bb01d84f38d..83e3cede1122 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -618,6 +618,13 @@ static void io_put_req(struct io_kiocb *req)
 	io_free_req(req);
 }
 
+static unsigned io_cqring_events(struct io_cq_ring *ring)
+{
+	/* See comment at the top of this file */
+	smp_rmb();
+	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -757,6 +764,14 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		int tmin = 0;
 
 		/*
+		 * Don't enter poll loop if we already have events pending.
+		 * If we do, we can potentially be spinning for commands that
+		 * already triggered a CQE (eg in error).
+		 */
+		if (io_cqring_events(ctx->cq_ring))
+			break;
+
+		/*
 		 * If a submit got punted to a workqueue, we can have the
 		 * application entering polling for a command before it gets
 		 * issued. That app will hold the uring_lock for the duration
@@ -2232,13 +2247,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 	return submit;
 }
 
-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
-	/* See comment at the top of this file */
-	smp_rmb();
-	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.