io_uring: cancel more aggressively in exit_work

While io_ring_exit_work() is running, new requests of all sorts may be
issued, so it should do a bit more to cancel them; otherwise they may
just get stuck, e.g. in io-wq, in poll lists, etc.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2021-01-04 20:43:30 +00:00 committed by Jens Axboe
parent de7f1d9e99
commit 90df08538c

@@ -992,6 +992,9 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+			struct task_struct *task);
+
 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
 			struct io_ring_ctx *ctx);
@@ -8675,7 +8678,7 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
 	 */
 	do {
-		io_iopoll_try_reap_events(ctx);
+		__io_uring_cancel_task_requests(ctx, NULL);
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 	io_ring_ctx_free(ctx);
 }
@@ -8830,9 +8833,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		enum io_wq_cancel cret;
 		bool ret = false;
 
-		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		if (cret != IO_WQ_CANCEL_NOTFOUND)
-			ret = true;
+		if (ctx->io_wq) {
+			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+						&cancel, true);
+			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+		}
 
 		/* SQPOLL thread does its own polling */
 		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
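
For context, a rough sketch of what the exit path looks like with this patch applied. It is a simplified reading of the second hunk above, not the verbatim kernel function: the container_of() lookup and the comments are added for illustration, and the rest of io_ring_exit_work()'s teardown plus the body of __io_uring_cancel_task_requests() are omitted.

/*
 * Simplified sketch of io_ring_exit_work() after this change (drawn
 * from the hunk above; other teardown steps are left out).
 */
static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
					       exit_work);

	/*
	 * New requests may still be issued while the ring is being torn
	 * down, so keep cancelling everything that could get stuck
	 * (io-wq work, poll list entries, etc.) until the final ctx
	 * reference is dropped.  Each wait is bounded to HZ/20 jiffies
	 * (50ms), after which the cancellation pass is retried.
	 */
	do {
		__io_uring_cancel_task_requests(ctx, NULL);
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	io_ring_ctx_free(ctx);
}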