sched/psi: Use task->psi_flags to clear in CPU migration

The commit d583d360a6 ("psi: Fix psi state corruption when schedule()
races with cgroup move") fixed a race problem by making cgroup_move_task()
use task->psi_flags instead of looking at the scheduler state.

We can extend the use of task->psi_flags to CPU migration as well, which
should be a minor optimization for both performance and code simplicity.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/r/20220926081931.45420-1-zhouchengming@bytedance.com
This commit is contained in:
Chengming Zhou 2022-09-26 16:19:31 +08:00 committed by Peter Zijlstra
parent 710ffe671e
commit 52b33d87b9
3 changed files with 5 additions and 22 deletions

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,9 +888,6 @@ struct task_struct {
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
-#ifdef CONFIG_PSI
-	unsigned			sched_psi_wake_requeue:1;
-#endif
 	/* Force alignment to the next boundary: */
 	unsigned			:0;

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2053,7 +2053,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	if (!(flags & ENQUEUE_RESTORE)) {
 		sched_info_enqueue(rq, p);
-		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
 	}
 
 	uclamp_rq_inc(rq, p);

--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -128,11 +128,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 	if (p->in_memstall)
 		set |= TSK_MEMSTALL_RUNNING;
 
-	if (!wakeup || p->sched_psi_wake_requeue) {
+	if (!wakeup) {
 		if (p->in_memstall)
 			set |= TSK_MEMSTALL;
-		if (p->sched_psi_wake_requeue)
-			p->sched_psi_wake_requeue = 0;
 	} else {
 		if (p->in_iowait)
 			clear |= TSK_IOWAIT;
@@ -143,8 +141,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 
 static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
-	int clear = TSK_RUNNING;
-
 	if (static_branch_likely(&psi_disabled))
 		return;
@@ -157,10 +153,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 	if (sleep)
 		return;
 
-	if (p->in_memstall)
-		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
-	psi_task_change(p, clear, 0);
+	psi_task_change(p, p->psi_flags, 0);
 }
static inline void psi_ttwu_dequeue(struct task_struct *p) static inline void psi_ttwu_dequeue(struct task_struct *p)
@ -172,19 +165,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
* deregister its sleep-persistent psi states from the old * deregister its sleep-persistent psi states from the old
* queue, and let psi_enqueue() know it has to requeue. * queue, and let psi_enqueue() know it has to requeue.
*/ */
if (unlikely(p->in_iowait || p->in_memstall)) { if (unlikely(p->psi_flags)) {
struct rq_flags rf; struct rq_flags rf;
struct rq *rq; struct rq *rq;
int clear = 0;
if (p->in_iowait)
clear |= TSK_IOWAIT;
if (p->in_memstall)
clear |= TSK_MEMSTALL;
rq = __task_rq_lock(p, &rf); rq = __task_rq_lock(p, &rf);
psi_task_change(p, clear, 0); psi_task_change(p, p->psi_flags, 0);
p->sched_psi_wake_requeue = 1;
__task_rq_unlock(rq, &rf); __task_rq_unlock(rq, &rf);
} }
} }