io_uring: refactor DEFER_TASKRUN multishot checks
Commit e0e4ab52d1
upstream.
We disallow DEFER_TASKRUN multishots from running from io-wq, which is
checked by individual opcodes in the issue path. We can consolidate all
of it in io_wq_submit_work(), at the same time moving the checks out of
the hot path.
Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e492f0f11588bb5aa11d7d24e6f53b7c7628afdb.1709905727.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
fba8ca3e6f
commit
f0194e4a9e
|
@ -949,6 +949,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
|
|||
u64 user_data = req->cqe.user_data;
|
||||
struct io_uring_cqe *cqe;
|
||||
|
||||
lockdep_assert(!io_wq_current_is_worker());
|
||||
|
||||
if (!defer)
|
||||
return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
|
||||
|
||||
|
@ -1950,6 +1952,24 @@ fail:
|
|||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
|
||||
* submitter task context. Final request completions are handed to the
|
||||
* right context, however this is not the case of auxiliary CQEs,
|
||||
* which is the main mean of operation for multishot requests.
|
||||
* Don't allow any multishot execution from io-wq. It's more restrictive
|
||||
* than necessary and also cleaner.
|
||||
*/
|
||||
if (req->flags & REQ_F_APOLL_MULTISHOT) {
|
||||
err = -EBADFD;
|
||||
if (!file_can_poll(req->file))
|
||||
goto fail;
|
||||
err = -ECANCELED;
|
||||
if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
|
||||
goto fail;
|
||||
return;
|
||||
}
|
||||
|
||||
if (req->flags & REQ_F_FORCE_ASYNC) {
|
||||
bool opcode_poll = def->pollin || def->pollout;
|
||||
|
||||
|
|
|
@ -78,19 +78,6 @@ struct io_sr_msg {
|
|||
*/
|
||||
#define MULTISHOT_MAX_RETRY 32
|
||||
|
||||
static inline bool io_check_multishot(struct io_kiocb *req,
|
||||
unsigned int issue_flags)
|
||||
{
|
||||
/*
|
||||
* When ->locked_cq is set we only allow to post CQEs from the original
|
||||
* task context. Usual request completions will be handled in other
|
||||
* generic paths but multipoll may decide to post extra cqes.
|
||||
*/
|
||||
return !(issue_flags & IO_URING_F_IOWQ) ||
|
||||
!(req->flags & REQ_F_APOLL_MULTISHOT) ||
|
||||
!req->ctx->task_complete;
|
||||
}
|
||||
|
||||
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
{
|
||||
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
|
||||
|
@ -837,9 +824,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
|
|||
(sr->flags & IORING_RECVSEND_POLL_FIRST))
|
||||
return io_setup_async_msg(req, kmsg, issue_flags);
|
||||
|
||||
if (!io_check_multishot(req, issue_flags))
|
||||
return io_setup_async_msg(req, kmsg, issue_flags);
|
||||
|
||||
retry_multishot:
|
||||
if (io_do_buffer_select(req)) {
|
||||
void __user *buf;
|
||||
|
@ -935,9 +919,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
|
|||
(sr->flags & IORING_RECVSEND_POLL_FIRST))
|
||||
return -EAGAIN;
|
||||
|
||||
if (!io_check_multishot(req, issue_flags))
|
||||
return -EAGAIN;
|
||||
|
||||
sock = sock_from_file(req->file);
|
||||
if (unlikely(!sock))
|
||||
return -ENOTSOCK;
|
||||
|
@ -1386,8 +1367,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
|
|||
struct file *file;
|
||||
int ret, fd;
|
||||
|
||||
if (!io_check_multishot(req, issue_flags))
|
||||
return -EAGAIN;
|
||||
retry:
|
||||
if (!fixed) {
|
||||
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
|
||||
|
|
|
@ -932,8 +932,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
|
|||
*/
|
||||
if (!file_can_poll(req->file))
|
||||
return -EBADFD;
|
||||
if (issue_flags & IO_URING_F_IOWQ)
|
||||
return -EAGAIN;
|
||||
|
||||
ret = __io_read(req, issue_flags);
|
||||
|
||||
|
|
Loading…
Reference in New Issue