io_uring: keep timeout in io_wait_queue

Move waiting timeout into io_wait_queue

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e4b48a9e26a3b1cf97c80121e62d4b5ab873d28d.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2023-01-05 11:22:29 +00:00 committed by Jens Axboe
parent 46ae7eef44
commit d33a39e577
1 changed file with 14 additions and 14 deletions

View File

@@ -2414,6 +2414,7 @@ struct io_wait_queue {
struct io_ring_ctx *ctx; struct io_ring_ctx *ctx;
unsigned cq_tail; unsigned cq_tail;
unsigned nr_timeouts; unsigned nr_timeouts;
ktime_t timeout;
}; };
static inline bool io_has_work(struct io_ring_ctx *ctx) static inline bool io_has_work(struct io_ring_ctx *ctx)
@@ -2466,8 +2467,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
/* when returns >0, the caller should retry */ /* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq, struct io_wait_queue *iowq)
ktime_t *timeout)
{ {
if (unlikely(READ_ONCE(ctx->check_cq))) if (unlikely(READ_ONCE(ctx->check_cq)))
return 1; return 1;
@@ -2479,9 +2479,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
return -EINTR; return -EINTR;
if (unlikely(io_should_wake(iowq))) if (unlikely(io_should_wake(iowq)))
return 0; return 0;
if (*timeout == KTIME_MAX) if (iowq->timeout == KTIME_MAX)
schedule(); schedule();
else if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
return -ETIME; return -ETIME;
return 0; return 0;
} }
@@ -2496,7 +2496,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
{ {
struct io_wait_queue iowq; struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings; struct io_rings *rings = ctx->rings;
ktime_t timeout = KTIME_MAX;
int ret; int ret;
if (!io_allowed_run_tw(ctx)) if (!io_allowed_run_tw(ctx))
@@ -2522,20 +2521,21 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret; return ret;
} }
if (uts) {
struct timespec64 ts;
if (get_timespec64(&ts, uts))
return -EFAULT;
timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
init_waitqueue_func_entry(&iowq.wq, io_wake_function); init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current; iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry); INIT_LIST_HEAD(&iowq.wq.entry);
iowq.ctx = ctx; iowq.ctx = ctx;
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
iowq.timeout = KTIME_MAX;
if (uts) {
struct timespec64 ts;
if (get_timespec64(&ts, uts))
return -EFAULT;
iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
trace_io_uring_cqring_wait(ctx, min_events); trace_io_uring_cqring_wait(ctx, min_events);
do { do {
@@ -2543,7 +2543,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE); TASK_INTERRUPTIBLE);
ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); ret = io_cqring_wait_schedule(ctx, &iowq);
if (ret < 0) if (ret < 0)
break; break;
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);