sched/fair: Rework sched_use_asym_prio() and sched_asym_prefer()
sched_use_asym_prio() and sched_asym_prefer() are used together in various
places. Consolidate them into a single function sched_asym().

The existing sched_asym() function is only used when collecting statistics
of a scheduling group. Rename it as sched_group_asym(), and remove the
obsolete function description.

This makes the code easier to read. No functional changes.

Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-3-alexs@kernel.org
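For readers skimming the patch, here is a minimal stand-alone C sketch of the
consolidation pattern it applies: two predicates folded into one helper that
call sites invoke with a single test. The stub bodies and the cpu_priority
table below are hypothetical placeholders for illustration only; the real
checks (SD_SHARE_CPUCAPACITY, core idleness, per-CPU asym priorities) live in
the kernel code shown in the diff that follows.

	/* Stand-alone sketch; stub logic, not the kernel implementation. */
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical per-CPU priorities: higher value = preferred for packing. */
	static const int cpu_priority[4] = { 3, 2, 1, 0 };

	/* Stub: the kernel version also checks SMT-core idleness. */
	static bool sched_use_asym_prio(int dst_cpu)
	{
		return dst_cpu >= 0 && dst_cpu < 4;
	}

	static bool sched_asym_prefer(int a, int b)
	{
		return cpu_priority[a] > cpu_priority[b];
	}

	/*
	 * The consolidated helper, mirroring the shape of the patch:
	 * dst_cpu may do asym_packing balance only if it is allowed to
	 * use priorities and outranks src_cpu.
	 */
	static bool sched_asym(int dst_cpu, int src_cpu)
	{
		return sched_use_asym_prio(dst_cpu) &&
		       sched_asym_prefer(dst_cpu, src_cpu);
	}

	int main(void)
	{
		/* Call sites shrink from two chained predicates to one call. */
		printf("CPU0 preferred over CPU3? %d\n", sched_asym(0, 3)); /* 1 */
		printf("CPU3 preferred over CPU0? %d\n", sched_asym(3, 0)); /* 0 */
		return 0;
	}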
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9750,8 +9750,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
 }
 
+static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
+{
+	/*
+	 * First check if @dst_cpu can do asym_packing load balance. Only do it
+	 * if it has higher priority than @src_cpu.
+	 */
+	return sched_use_asym_prio(sd, dst_cpu) &&
+	       sched_asym_prefer(dst_cpu, src_cpu);
+}
+
 /**
- * sched_asym - Check if the destination CPU can do asym_packing load balance
+ * sched_group_asym - Check if the destination CPU can do asym_packing balance
  * @env: The load balancing environment
  * @sgs: Load-balancing statistics of the candidate busiest group
  * @group: The candidate busiest group
@@ -9759,34 +9769,21 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
  * @env::dst_cpu can do asym_packing if it has higher priority than the
  * preferred CPU of @group.
  *
- * SMT is a special case. If we are balancing load between cores, @env::dst_cpu
- * can do asym_packing balance only if all its SMT siblings are idle. Also, it
- * can only do it if @group is an SMT group and has exactly on busy CPU. Larger
- * imbalances in the number of CPUS are dealt with in find_busiest_group().
- *
- * If we are balancing load within an SMT core, or at PKG domain level, always
- * proceed.
- *
  * Return: true if @env::dst_cpu can do with asym_packing load balance. False
  * otherwise.
  */
 static inline bool
-sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
+sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
 {
-	/* Ensure that the whole local core is idle, if applicable. */
-	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
-		return false;
-
 	/*
-	 * CPU priorities does not make sense for SMT cores with more than one
+	 * CPU priorities do not make sense for SMT cores with more than one
 	 * busy sibling.
 	 */
-	if (group->flags & SD_SHARE_CPUCAPACITY) {
-		if (sgs->group_weight - sgs->idle_cpus != 1)
-			return false;
-	}
+	if ((group->flags & SD_SHARE_CPUCAPACITY) &&
+	    (sgs->group_weight - sgs->idle_cpus != 1))
+		return false;
 
-	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
+	return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
 }
 
 /* One group has more than one SMT CPU while the other group does not */
@@ -9942,7 +9939,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	/* Check if dst CPU is idle and preferred to this group */
 	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
 	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-	    sched_asym(env, sgs, group)) {
+	    sched_group_asym(env, sgs, group)) {
 		sgs->group_asym_packing = 1;
 	}
 
@@ -11028,8 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	 * SMT cores with more than one busy sibling.
 	 */
 	if ((env->sd->flags & SD_ASYM_PACKING) &&
-	    sched_use_asym_prio(env->sd, i) &&
-	    sched_asym_prefer(i, env->dst_cpu) &&
+	    sched_asym(env->sd, i, env->dst_cpu) &&
 	    nr_running == 1)
 		continue;
 
@@ -11899,8 +11895,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 * preferred CPU must be idle.
 	 */
 	for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
-		if (sched_use_asym_prio(sd, i) &&
-		    sched_asym_prefer(i, cpu)) {
+		if (sched_asym(sd, i, cpu)) {
 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}