author	Pavel Begunkov <asml.silence@gmail.com>	2022-11-23 14:33:36 +0300
committer	Jens Axboe <axboe@kernel.dk>	2022-11-23 20:44:00 +0300
commit	2ccc92f4effcfa1c51c4fcf1e34d769099d3cad4 (patch)
tree	46d7433d8f0ace2fd19ae191c211113f5c40ce7e /io_uring/rw.c
parent	6c16fe3c16bdc420719768f7ea97b82bd6303eec (diff)
download	linux-2ccc92f4effcfa1c51c4fcf1e34d769099d3cad4.tar.xz
io_uring: add completion locking for iopoll
There are pieces of code that may allow iopoll to race filling cqes;
temporarily add spinlocking around posting events.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/84d86b5c117feda075471c5c9e65208e0dccf5d0.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
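The ordering the patch enforces is: fill CQEs and commit (publish) the ring tail while holding ->completion_lock, drop the lock, and only then notify waiters. That way a concurrent poster can never interleave with iopoll mid-publish. As a rough illustration of that ordering only, here is a minimal userspace sketch; all names here are hypothetical, a pthread mutex stands in for the kernel spinlock, and this is not the io_uring implementation:

/*
 * Userspace analogue of the pattern this patch applies: fill
 * completion entries and publish the tail under a lock, unlock,
 * then wake anyone waiting on completions.
 */
#include <pthread.h>
#include <stdio.h>

#define CQ_ENTRIES 16

struct cqe { int res; unsigned flags; };

struct cq_ring {
	pthread_mutex_t completion_lock;
	pthread_cond_t wait;
	struct cqe cqes[CQ_ENTRIES];
	unsigned cached_tail;	/* private tail, advanced per entry */
	unsigned tail;		/* published tail, visible to readers */
};

/* Analogue of __io_fill_cqe_req(): write one entry, bump the private tail. */
static void fill_cqe(struct cq_ring *cq, int res, unsigned flags)
{
	struct cqe *e = &cq->cqes[cq->cached_tail % CQ_ENTRIES];

	e->res = res;
	e->flags = flags;
	cq->cached_tail++;
}

/* Analogue of io_commit_cqring(): publish everything filled so far. */
static void commit_cqring(struct cq_ring *cq)
{
	cq->tail = cq->cached_tail;
}

/* Analogue of the patched iopoll loop: lock, fill, commit, unlock, wake. */
static void post_completions(struct cq_ring *cq, int nr)
{
	pthread_mutex_lock(&cq->completion_lock);
	for (int i = 0; i < nr; i++)
		fill_cqe(cq, /*res=*/i, /*flags=*/0);
	commit_cqring(cq);
	pthread_mutex_unlock(&cq->completion_lock);
	/*
	 * Wake waiters only after the tail is published and the lock is
	 * dropped; real waiters would recheck the tail under the lock.
	 */
	pthread_cond_broadcast(&cq->wait);
}

int main(void)
{
	struct cq_ring cq = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	post_completions(&cq, 3);
	printf("published tail: %u\n", cq.tail);
	return 0;
}

The diff below applies exactly this ordering to io_do_iopoll(): io_commit_cqring() moves under ->completion_lock (and ahead of the !nr_events early return), while the io_cqring_ev_posted_iopoll() notification stays outside the critical section.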
Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1ce065709724..61c326831949 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1049,6 +1049,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	else if (!pos)
 		return 0;
 
+	spin_lock(&ctx->completion_lock);
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
@@ -1063,11 +1064,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		req->cqe.flags = io_put_kbuf(req, 0);
 		__io_fill_cqe_req(req->ctx, req);
 	}
-
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
 	if (unlikely(!nr_events))
 		return 0;
 
-	io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);