io_uring: optimise req->ctx reloads

Don't load req->ctx into a local in advance: the cached pointer takes an
extra register, and the field stays valid even after the opcode handler
runs, so it can simply be reloaded where it is needed. This also
optimises out the req->ctx load in io_iopoll_req_issued() once that
function is inlined. (A standalone sketch of the pattern follows the
diff below.)

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1e45ff671c44be0eb904f2e448a211734893fa0b.1634314022.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Pavel Begunkov <asml.silence@gmail.com>
Date:      2021-10-15 17:09:11 +01:00
Committer: Jens Axboe <axboe@kernel.dk>
parent 607b6fb801
commit 9983028e76

@@ -6588,7 +6588,6 @@ static void io_clean_op(struct io_kiocb *req)
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	const struct cred *creds = NULL;
 	int ret;
@@ -6715,7 +6714,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret)
 		return ret;
 	/* If the op doesn't have a file, we're not polling for it */
-	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
+	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
 		io_iopoll_req_issued(req);
 	return 0;
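
For readers outside the kernel tree, the following is a minimal, standalone
C sketch of the pattern this patch applies. The types and functions below
(struct request, struct ring_ctx, handle_opcode(), SETUP_IOPOLL) are
simplified stand-ins invented for illustration, not the real
io_kiocb/io_ring_ctx definitions; the point is only to contrast caching the
context pointer up front with reloading it at the point of use.

/*
 * Standalone sketch (not kernel code). All names here are illustrative
 * stand-ins for io_kiocb/io_ring_ctx and the opcode handlers.
 */
#include <stdio.h>

struct ring_ctx {
	unsigned int flags;
};

struct request {
	struct ring_ctx *ctx;
	void *file;
};

#define SETUP_IOPOLL	(1U << 0)

/* Stands in for an opcode handler; a real call clobbers caller-saved regs. */
static int handle_opcode(struct request *req)
{
	return req->file ? 0 : -1;
}

/*
 * Before: 'ctx' is cached up front, so the compiler must keep it live
 * (in a register or spilled to the stack) across handle_opcode(), even
 * though req->ctx is still valid afterwards.
 */
static int issue_cached(struct request *req)
{
	struct ring_ctx *ctx = req->ctx;
	int ret = handle_opcode(req);

	if (ret)
		return ret;
	if ((ctx->flags & SETUP_IOPOLL) && req->file)
		printf("polling\n");
	return 0;
}

/*
 * After: req->ctx is loaded only where it is needed, so nothing extra
 * has to stay live across the call.
 */
static int issue_reload(struct request *req)
{
	int ret = handle_opcode(req);

	if (ret)
		return ret;
	if ((req->ctx->flags & SETUP_IOPOLL) && req->file)
		printf("polling\n");
	return 0;
}

int main(void)
{
	struct ring_ctx ctx = { .flags = SETUP_IOPOLL };
	struct request req = { .ctx = &ctx, .file = (void *)&ctx };

	return issue_cached(&req) | issue_reload(&req);
}

With the up-front local gone, the compiler is free to rematerialise
req->ctx from req only where the IOPOLL check needs it, and, as the commit
message notes, once io_iopoll_req_issued() is inlined that load no longer
has to be carried across the opcode handler at all.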