sched/fair: Remove capacity inversion detection

commit a2e90611b9 upstream.

Remove the capacity inversion detection, which is now handled by
util_fits_cpu() returning -1 when we need to continue looking for a
potential CPU with better performance.

This ends up almost reverting the patches below, except for some comments:
commit da07d2f9c1 ("sched/fair: Fixes for capacity inversion detection")
commit aa69c36f31 ("sched/fair: Consider capacity inversion in util_fits_cpu()")
commit 44c7b80bff ("sched/fair: Detect capacity inversion")

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230201143628.270912-3-vincent.guittot@linaro.org
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 2 files changed, 5 insertions(+), 98 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4464,17 +4464,9 @@ static inline int util_fits_cpu(unsigned long util,
 	 *
 	 * For uclamp_max, we can tolerate a drop in performance level as the
	 * goal is to cap the task. So it's okay if it's getting less.
-	 *
-	 * In case of capacity inversion we should honour the inverted capacity
-	 * for both uclamp_min and uclamp_max all the time.
 	 */
-	capacity_orig = cpu_in_capacity_inversion(cpu);
-	if (capacity_orig) {
-		capacity_orig_thermal = capacity_orig;
-	} else {
-		capacity_orig = capacity_orig_of(cpu);
-		capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
-	}
+	capacity_orig = capacity_orig_of(cpu);
+	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
 
 	/*
 	 * We want to force a task to fit a cpu as implied by uclamp_max.
@@ -8929,82 +8921,16 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
 	unsigned long capacity = scale_rt_capacity(cpu);
 	struct sched_group *sdg = sd->groups;
-	struct rq *rq = cpu_rq(cpu);
 
-	rq->cpu_capacity_orig = capacity_orig;
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
 
 	if (!capacity)
 		capacity = 1;
 
-	rq->cpu_capacity = capacity;
-
-	/*
-	 * Detect if the performance domain is in capacity inversion state.
-	 *
-	 * Capacity inversion happens when another perf domain with equal or
-	 * lower capacity_orig_of() ends up having higher capacity than this
-	 * domain after subtracting thermal pressure.
-	 *
-	 * We only take into account thermal pressure in this detection as it's
-	 * the only metric that actually results in *real* reduction of
-	 * capacity due to performance points (OPPs) being dropped/become
-	 * unreachable due to thermal throttling.
-	 *
-	 * We assume:
-	 *   * That all cpus in a perf domain have the same capacity_orig
-	 *     (same uArch).
-	 *   * Thermal pressure will impact all cpus in this perf domain
-	 *     equally.
-	 */
-	if (sched_energy_enabled()) {
-		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd;
-
-		rcu_read_lock();
-
-		pd = rcu_dereference(rq->rd->pd);
-		rq->cpu_capacity_inverted = 0;
-
-		for (; pd; pd = pd->next) {
-			struct cpumask *pd_span = perf_domain_span(pd);
-			unsigned long pd_cap_orig, pd_cap;
-
-			/* We can't be inverted against our own pd */
-			if (cpumask_test_cpu(cpu_of(rq), pd_span))
-				continue;
-
-			cpu = cpumask_any(pd_span);
-			pd_cap_orig = arch_scale_cpu_capacity(cpu);
-
-			if (capacity_orig < pd_cap_orig)
-				continue;
-
-			/*
-			 * handle the case of multiple perf domains have the
-			 * same capacity_orig but one of them is under higher
-			 * thermal pressure. We record it as capacity
-			 * inversion.
-			 */
-			if (capacity_orig == pd_cap_orig) {
-				pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
-
-				if (pd_cap > inv_cap) {
-					rq->cpu_capacity_inverted = inv_cap;
-					break;
-				}
-			} else if (pd_cap_orig > inv_cap) {
-				rq->cpu_capacity_inverted = inv_cap;
-				break;
-			}
-		}
-
-		rcu_read_unlock();
-	}
-
-	trace_sched_cpu_capacity_tp(rq);
+	cpu_rq(cpu)->cpu_capacity = capacity;
+	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
 
 	sdg->sgc->capacity = capacity;
 	sdg->sgc->min_capacity = capacity;

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1041,7 +1041,6 @@ struct rq {
 
 	unsigned long		cpu_capacity;
 	unsigned long		cpu_capacity_orig;
-	unsigned long		cpu_capacity_inverted;
 
 	struct balance_callback *balance_callback;
 
@@ -2879,24 +2878,6 @@ static inline unsigned long capacity_orig_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
-/*
- * Returns inverted capacity if the CPU is in capacity inversion state.
- * 0 otherwise.
- *
- * Capacity inversion detection only considers thermal impact where actual
- * performance points (OPPs) gets dropped.
- *
- * Capacity inversion state happens when another performance domain that has
- * equal or lower capacity_orig_of() becomes effectively larger than the perf
- * domain this CPU belongs to due to thermal pressure throttling it hard.
- *
- * See comment in update_cpu_capacity().
- */
-static inline unsigned long cpu_in_capacity_inversion(int cpu)
-{
-	return cpu_rq(cpu)->cpu_capacity_inverted;
-}
-
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:	Utilization used to select frequency