io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used

From: Jens Axboe <axboe@kernel.dk>

[ upstream commit ebdfefc09c ]

If we set up the ring with SQPOLL, then that polling thread has its
own io-wq setup. This means that if the application uses
IORING_REGISTER_IOWQ_AFF to set the io-wq affinity, we should not be
setting it for the invoking task, but rather the sqpoll task.

Add an sqpoll helper that parks the thread and updates the affinity,
and use that one if we're using SQPOLL.
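
For illustration, a minimal userspace sketch of the affected case, using
liburing's io_uring_register_iowq_aff() helper (assumed available, liburing
2.2 or newer); the point is that with SQPOLL the affinity mask must reach
the sqpoll task's io-wq rather than the invoking task's:

#define _GNU_SOURCE
#include <liburing.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = { .flags = IORING_SETUP_SQPOLL };
	struct io_uring ring;
	cpu_set_t mask;
	int ret;

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);

	/*
	 * With SQPOLL, io-wq workers hang off the sqpoll task; before this
	 * fix the mask was applied to the invoking task's (unused) io-wq,
	 * so the workers kept their old affinity.
	 */
	ret = io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
	if (ret < 0)
		fprintf(stderr, "register_iowq_aff: %d\n", ret);

	io_uring_queue_exit(&ring);
	return 0;
}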

Fixes: fe76421d1d ("io_uring: allow user configurable IO thread CPU affinity")
Cc: stable@vger.kernel.org # 5.10+
Link: https://github.com/axboe/liburing/discussions/884
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 9704cfcf1f (parent 605d055452)
Authored by Pavel Begunkov, 2023-09-12 14:57:07 +01:00; committed by Greg Kroah-Hartman
5 changed files with 40 additions and 14 deletions

diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c

@@ -1350,13 +1350,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
 	return __io_wq_cpu_online(wq, cpu, false);
 }
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
 {
 	int i;
 
+	if (!tctx || !tctx->io_wq)
+		return -EINVAL;
+
 	rcu_read_lock();
 	for_each_node(i) {
-		struct io_wqe *wqe = wq->wqes[i];
+		struct io_wqe *wqe = tctx->io_wq->wqes[i];
 
 		if (mask)
 			cpumask_copy(wqe->cpu_mask, mask);

diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h

@@ -50,7 +50,7 @@ void io_wq_put_and_exit(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
 bool io_wq_worker_stopped(void);

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -3835,16 +3835,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+					 cpumask_var_t new_mask)
+{
+	int ret;
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+	} else {
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+		mutex_lock(&ctx->uring_lock);
+	}
+
+	return ret;
+}
+
 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 				       void __user *arg, unsigned len)
 {
-	struct io_uring_task *tctx = current->io_uring;
 	cpumask_var_t new_mask;
 	int ret;
 
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 		return -ENOMEM;
 
@@ -3865,19 +3877,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 		return -EFAULT;
 	}
 
-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+	ret = __io_register_iowq_aff(ctx, new_mask);
 	free_cpumask_var(new_mask);
 	return ret;
 }
 
 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 {
-	struct io_uring_task *tctx = current->io_uring;
-
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
+	return __io_register_iowq_aff(ctx, NULL);
 }
 
 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,

diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c

@@ -423,3 +423,18 @@ err:
 	io_sq_thread_finish(ctx);
 	return ret;
 }
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+				     cpumask_var_t mask)
+{
+	struct io_sq_data *sqd = ctx->sq_data;
+	int ret = -EINVAL;
+
+	if (sqd) {
+		io_sq_thread_park(sqd);
+		ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+		io_sq_thread_unpark(sqd);
+	}
+
+	return ret;
+}

diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h

@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
 void io_sq_thread_unpark(struct io_sq_data *sqd);
 void io_put_sq_data(struct io_sq_data *sqd);
 int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);