From 521223d7c229f83915619f888c99e952f24dc39f Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Sun, 28 Jan 2024 20:11:55 -0700
Subject: [PATCH] io_uring/cancel: don't default to setting
 req->work.cancel_seq

Just leave it unset by default, avoiding dipping into the last
cacheline (which is otherwise untouched) for the fast path of using
poll to drive networked traffic.

Add a flag that tells us if the sequence is valid or not, and then
we can defer actually assigning the flag and sequence until someone
runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h |  3 +++
 io_uring/cancel.c              |  3 +--
 io_uring/cancel.h              | 10 ++++++++++
 io_uring/io_uring.c            |  1 -
 io_uring/poll.c                |  6 +-----
 5 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 56bf733d3ee6..e19698daae1a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -463,6 +463,7 @@ enum {
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
 	REQ_F_POLL_NO_LAZY_BIT,
+	REQ_F_CANCEL_SEQ_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -535,6 +536,8 @@ enum {
 	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
 	/* don't use lazy poll wake for this request */
 	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+	/* cancel sequence is set and valid */
+	REQ_F_CANCEL_SEQ	= IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 8a8b07dfc444..acfcdd7f059a 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
 			return false;
 	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-		if (cd->seq == req->work.cancel_seq)
+		if (io_cancel_match_sequence(req, cd->seq))
 			return false;
-		req->work.cancel_seq = cd->seq;
 	}
 
 	return true;
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index c0a8e7c520b6..76b32e65c03c 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b8ca907b77eb..fd552b260eef 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 
 	req->work.list.next = NULL;
 	req->work.flags = 0;
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b702..c2b0a2d0762b 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;
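For context on the pattern the patch introduces: io_cancel_match_sequence()
turns the sequence check into a flag-gated lazy assignment, so the cold
->cancel_seq field is only written once a cancelation actually visits the
request. The following is a minimal userspace sketch of that pattern; the
names (struct request, match_sequence) are hypothetical stand-ins, not the
kernel types, and it only illustrates the helper's behavior in isolation.

/* Illustrative sketch only; hypothetical types mirroring the patch. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_F_CANCEL_SEQ	(1U << 0)	/* cancel_seq has been set */

struct request {
	unsigned int flags;
	int cancel_seq;		/* cold field; untouched unless canceled */
};

/*
 * True if this request was already matched at this cancelation sequence;
 * otherwise record the sequence (setting the validity flag on first use)
 * and report no match. A request that no cancelation ever visits never
 * touches ->cancel_seq, which is the point of the patch.
 */
static bool match_sequence(struct request *req, int sequence)
{
	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->cancel_seq)
		return true;

	req->flags |= REQ_F_CANCEL_SEQ;
	req->cancel_seq = sequence;
	return false;
}

int main(void)
{
	struct request req = { .flags = 0 };

	printf("%d\n", match_sequence(&req, 1));	/* 0: first visit at seq 1 */
	printf("%d\n", match_sequence(&req, 1));	/* 1: already seen at seq 1 */
	printf("%d\n", match_sequence(&req, 2));	/* 0: new cancel pass, seq 2 */
	return 0;
}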