io_uring: add completion locking for iopoll

commit 2ccc92f4ef upstream.

There are pieces of code that may allow iopoll to race filling CQEs;
temporarily add spinlocking around posting events.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/84d86b5c117feda075471c5c9e65208e0dccf5d0.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author:    Pavel Begunkov <asml.silence@gmail.com>
Date:      2022-11-23 11:33:36 +00:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent:    3a6ee45e3c
Commit:    16225abb3c

@@ -1043,6 +1043,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	else if (!pos)
 		return 0;
 
+	spin_lock(&ctx->completion_lock);
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
@@ -1057,11 +1058,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		req->cqe.flags = io_put_kbuf(req, 0);
 		__io_fill_cqe_req(req->ctx, req);
 	}
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
 
 	if (unlikely(!nr_events))
 		return 0;
-
-	io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);
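
For reference, the pattern the hunks above introduce is: fill CQEs and advance the
ring tail while holding ctx->completion_lock, drop the lock, and only then wake any
waiters. Below is a minimal user-space sketch of that ordering; it is not kernel code,
and the names (struct ring, fill_cqe, commit_cqring, notify_waiters, poll_and_post) are
illustrative stand-ins for ctx, __io_fill_cqe_req(), io_commit_cqring(), and
io_cqring_ev_posted_iopoll(), with a pthread mutex standing in for the completion
spinlock.

/*
 * Simplified sketch of the locking pattern added above: fill and commit
 * completion entries under a lock, notify waiters after unlocking.
 * All names here are illustrative stand-ins, not io_uring kernel APIs.
 */
#include <pthread.h>

struct cqe { unsigned long long user_data; int res; };

struct ring {
	pthread_mutex_t completion_lock;  /* plays the role of ctx->completion_lock */
	struct cqe cqes[256];
	unsigned int cached_tail;         /* producer-private tail */
	unsigned int tail;                /* tail visible to consumers */
};

static void fill_cqe(struct ring *r, unsigned long long user_data, int res)
{
	/* stand-in for __io_fill_cqe_req(): write one entry at the private tail */
	struct cqe *cqe = &r->cqes[r->cached_tail++ & 255];

	cqe->user_data = user_data;
	cqe->res = res;
}

static void commit_cqring(struct ring *r)
{
	/* stand-in for io_commit_cqring(): publish filled entries with a release store */
	__atomic_store_n(&r->tail, r->cached_tail, __ATOMIC_RELEASE);
}

static void notify_waiters(struct ring *r)
{
	/* stand-in for io_cqring_ev_posted_iopoll(): wake poll/eventfd waiters */
	(void)r;
}

/* mirrors the structure of io_do_iopoll() after the patch */
static int poll_and_post(struct ring *r, const struct cqe *done, int ndone)
{
	int nr_events = 0;

	pthread_mutex_lock(&r->completion_lock);    /* spin_lock(&ctx->completion_lock) */
	for (int i = 0; i < ndone; i++) {
		fill_cqe(r, done[i].user_data, done[i].res);
		nr_events++;
	}
	commit_cqring(r);                           /* io_commit_cqring(ctx) */
	pthread_mutex_unlock(&r->completion_lock);  /* spin_unlock(&ctx->completion_lock) */

	if (!nr_events)
		return 0;

	notify_waiters(r);                          /* io_cqring_ev_posted_iopoll(ctx) */
	return nr_events;
}

As in the patch, the ring tail is committed before the !nr_events early return, and the
waiter notification happens only after the lock is released, so woken waiters do not
contend on completion_lock.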