sched: Simplify tg_set_cfs_bandwidth()

[ Upstream commit 6fb4546061 ]

Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Stable-dep-of: 1aa09b9379 ("powercap: intel_rapl: Fix locking in TPMI RAPL")
Signed-off-by: Sasha Levin <sashal@kernel.org>
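
The guard infrastructure this patch relies on comes from <linux/cleanup.h>: a guard is a local variable whose __attribute__((cleanup)) handler releases a resource when the variable goes out of scope, so every early-return path unlocks automatically and goto-based unwinding becomes unnecessary. Below is a minimal userspace sketch of the underlying mechanism, assuming only GCC/Clang's cleanup attribute; the names mutex_guard_release and update_shared are illustrative, not kernel API.

/*
 * Userspace model of guard(): the cleanup handler fires whenever
 * 'guard' leaves scope, on every return path.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_value;

static void mutex_guard_release(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int update_shared(int v)
{
	/* Locked here; unlocked automatically on both returns below. */
	pthread_mutex_t *guard __attribute__((cleanup(mutex_guard_release))) = &shared_lock;
	pthread_mutex_lock(guard);

	if (v < 0)
		return -1;	/* no 'goto out_unlock' needed */

	shared_value = v;
	return 0;
}

int main(void)
{
	printf("ok=%d err=%d\n", update_shared(7), update_shared(-1));
	return 0;
}

The kernel macros build typed constructor/destructor pairs on top of this same attribute, which is what the two hunks below use.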

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -157,6 +157,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
 static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
 #endif /* !CONFIG_HOTPLUG_CPU */
 
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
 #ifdef CONFIG_PM_SLEEP_SMP
 extern int freeze_secondary_cpus(int primary);
 extern void thaw_secondary_cpus(void);
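
The DEFINE_LOCK_GUARD_0() line added above declares a no-argument guard class named cpus_read_lock: instantiating it with guard(cpus_read_lock)() calls cpus_read_lock(), and its cleanup handler calls cpus_read_unlock() when the scope ends. A hedged sketch of a caller, not taken from this patch; count_busy_cpus() and cpu_is_busy() are hypothetical:

static int count_busy_cpus(void)
{
	int cpu, busy = 0;

	guard(cpus_read_lock)();	/* cpus_read_lock() taken here */

	for_each_online_cpu(cpu)
		if (cpu_is_busy(cpu))	/* made-up helper */
			busy++;

	if (!busy)
		return -EAGAIN;		/* cpus_read_unlock() still runs */

	return busy;			/* ...and on this path too */
}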

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10868,11 +10868,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 	 * Prevent race between setting of cfs_rq->runtime_enabled and
 	 * unthrottle_offline_cfs_rqs().
 	 */
-	cpus_read_lock();
-	mutex_lock(&cfs_constraints_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&cfs_constraints_mutex);
+
 	ret = __cfs_schedulable(tg, period, quota);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	runtime_enabled = quota != RUNTIME_INF;
 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
@@ -10882,39 +10883,38 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 	 */
 	if (runtime_enabled && !runtime_was_enabled)
 		cfs_bandwidth_usage_inc();
-	raw_spin_lock_irq(&cfs_b->lock);
-	cfs_b->period = ns_to_ktime(period);
-	cfs_b->quota = quota;
-	cfs_b->burst = burst;
 
-	__refill_cfs_bandwidth_runtime(cfs_b);
+	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
+		cfs_b->period = ns_to_ktime(period);
+		cfs_b->quota = quota;
+		cfs_b->burst = burst;
 
-	/* Restart the period timer (if active) to handle new period expiry: */
-	if (runtime_enabled)
-		start_cfs_bandwidth(cfs_b);
+		__refill_cfs_bandwidth_runtime(cfs_b);
 
-	raw_spin_unlock_irq(&cfs_b->lock);
+		/*
+		 * Restart the period timer (if active) to handle new
+		 * period expiry:
+		 */
+		if (runtime_enabled)
+			start_cfs_bandwidth(cfs_b);
+	}
 
 	for_each_online_cpu(i) {
 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
 		struct rq *rq = cfs_rq->rq;
-		struct rq_flags rf;
 
-		rq_lock_irq(rq, &rf);
+		guard(rq_lock_irq)(rq);
 		cfs_rq->runtime_enabled = runtime_enabled;
 		cfs_rq->runtime_remaining = 0;
 
 		if (cfs_rq->throttled)
 			unthrottle_cfs_rq(cfs_rq);
-		rq_unlock_irq(rq, &rf);
 	}
+
 	if (runtime_was_enabled && !runtime_enabled)
 		cfs_bandwidth_usage_dec();
-out_unlock:
-	mutex_unlock(&cfs_constraints_mutex);
-	cpus_read_unlock();
 
-	return ret;
+	return 0;
 }
 
 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
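
The core.c hunk uses both guard flavors: guard() holds a lock for the remainder of the enclosing function (the cpus read lock, cfs_constraints_mutex, and the per-CPU rq lock), while scoped_guard() confines the lock to an explicit block, replacing the old raw_spin_lock_irq()/raw_spin_unlock_irq() pair around the bandwidth update. A sketch of that distinction with a hypothetical struct, assuming only the guard classes the kernel already defines for mutex and raw_spinlock_irq:

#include <linux/cleanup.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical structure, for illustration only. */
struct stats {
	struct mutex	cfg_lock;
	raw_spinlock_t	lock;
	ktime_t		window_start;
	u64		count;
};

static void stats_reset(struct stats *s)
{
	guard(mutex)(&s->cfg_lock);	/* held until the function returns */

	scoped_guard (raw_spinlock_irq, &s->lock) {
		/* only this block runs with s->lock held and IRQs off */
		s->window_start = ktime_get();
		s->count = 0;
	}

	/* s->lock already dropped here; s->cfg_lock dropped at return */
}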