commit     11ba820bf163e224bf5dd44e545a66a44a5b1d7a
author     Jens Axboe <axboe@kernel.dk>  2020-01-16 07:51:17 +0300
committer  Jens Axboe <axboe@kernel.dk>  2020-01-16 07:51:17 +0300
tree       6ffcfb94ea666db7b6a2cebefd2ad9a92958f38d /fs
parent     797f3f535d59f05ad12c629338beef6cb801d19e
io_uring: ensure workqueue offload grabs ring mutex for poll list
A previous commit moved the locking for the async sqthread, but didn't take into account that the io-wq workers still need it. We can't use req->in_async for this anymore, as both the sqthread and the io-wq workers set it; gate the need for locking on io_wq_current_is_worker() instead.

Fixes: 8a4955ff1cca ("io_uring: sqthread should grab ctx->uring_lock for submissions")
Reported-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3130ed16456e..52e5764540e4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3286,10 +3286,19 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		return ret;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
+		const bool in_async = io_wq_current_is_worker();
+
 		if (req->result == -EAGAIN)
 			return -EAGAIN;
 
+		/* workqueue context doesn't hold uring_lock, grab it now */
+		if (in_async)
+			mutex_lock(&ctx->uring_lock);
+
 		io_iopoll_req_issued(req);
+
+		if (in_async)
+			mutex_unlock(&ctx->uring_lock);
 	}
 
 	return 0;
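
The pattern the patch uses, taking the ring mutex only when running from an offloaded worker because the inline submission paths already hold it, can be illustrated outside the kernel. The sketch below is a minimal userspace analogue built on pthreads; current_is_worker(), issue_request(), add_to_poll_list(), and ring_lock are hypothetical stand-ins for io_wq_current_is_worker(), io_issue_sqe(), io_iopoll_req_issued(), and ctx->uring_lock, not the actual io_uring internals.

/*
 * Userspace illustration (not kernel code) of conditionally grabbing a
 * lock based on execution context: the "submission" path already holds
 * the lock, while the offloaded "worker" path must take it itself.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Thread-local flag standing in for io_wq_current_is_worker(). */
static __thread bool is_worker;

static bool current_is_worker(void)
{
	return is_worker;
}

/* Touches state that must be protected by ring_lock (e.g. a poll list). */
static void add_to_poll_list(int req)
{
	printf("queued request %d for polling\n", req);
}

static void issue_request(int req)
{
	const bool in_async = current_is_worker();

	/* Worker context doesn't hold ring_lock, so grab it here. */
	if (in_async)
		pthread_mutex_lock(&ring_lock);

	add_to_poll_list(req);

	if (in_async)
		pthread_mutex_unlock(&ring_lock);
}

static void *worker_fn(void *arg)
{
	(void)arg;
	is_worker = true;	/* offloaded worker context */
	issue_request(2);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* Submission path: the lock is already held, as with the sqthread. */
	pthread_mutex_lock(&ring_lock);
	issue_request(1);
	pthread_mutex_unlock(&ring_lock);

	pthread_create(&worker, NULL, worker_fn, NULL);
	pthread_join(worker, NULL);
	return 0;
}

In both paths add_to_poll_list() runs under ring_lock; the only difference is who acquires it, which mirrors why the patch gates the mutex_lock()/mutex_unlock() pair on io_wq_current_is_worker() rather than on req->in_async.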