sched/fair: Move select_task_rq_fair() slow-path into its own function
In preparation for changes that would otherwise require adding a new
level of indentation to the while(sd) loop, create a new function
find_idlest_cpu() which contains this loop, and rename the existing
find_idlest_cpu() to find_idlest_group_cpu().

Code inside the while(sd) loop is unchanged. @new_cpu is added as a
variable in the new function, with the same initial value as the
@new_cpu in select_task_rq_fair().

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171005114516.18617-2-brendan.jackman@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 583ffd99d7
commit 18bd1b4bd5

1 changed file with 48 additions and 35 deletions
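To make the shape of the change above concrete before the hunks below, here is a minimal standalone C sketch of the same extract-function pattern. It is illustration only, not kernel code: struct domain, find_idlest_cpu_sketch() and every value in it are invented for the sketch. The point is that the caller's while(sd) descent loop moves into a helper, and the call site collapses to a single call.

	/* Toy model of the refactor: the hierarchy-descent loop lives in a
	 * helper; the caller shrinks to one call (cf. the third hunk below). */
	#include <stdio.h>

	struct domain {
		struct domain *child;	/* next-lower level, NULL at the bottom */
		int flags;		/* stand-in for sd->flags */
		int best_cpu;		/* stand-in for the group/cpu search result */
	};

	static int find_idlest_cpu_sketch(struct domain *sd, int prev_cpu, int sd_flag)
	{
		int new_cpu = prev_cpu;	/* same initial value as in the caller */

		while (sd) {
			if (!(sd->flags & sd_flag)) {
				sd = sd->child;	/* level not eligible: descend */
				continue;
			}
			new_cpu = sd->best_cpu;	/* stand-in for the real search */
			sd = sd->child;		/* retry at a lower level */
		}
		return new_cpu;
	}

	int main(void)
	{
		struct domain bottom = { .child = NULL,    .flags = 1, .best_cpu = 3 };
		struct domain top    = { .child = &bottom, .flags = 0, .best_cpu = 0 };

		/* The call site is now a single line, as in select_task_rq_fair(). */
		printf("new_cpu = %d\n", find_idlest_cpu_sketch(&top, 1, 1));
		return 0;
	}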
@@ -5859,10 +5859,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 }
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
@@ -5911,6 +5911,50 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = prev_cpu;
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	return new_cpu;
+}
+
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -6277,39 +6321,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
-	} else while (sd) {
-		struct sched_group *group;
-		int weight;
-
-		if (!(sd->flags & sd_flag)) {
-			sd = sd->child;
-			continue;
-		}
-
-		group = find_idlest_group(sd, p, cpu, sd_flag);
-		if (!group) {
-			sd = sd->child;
-			continue;
-		}
-
-		new_cpu = find_idlest_cpu(group, p, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
-			/* Now try balancing at a lower domain level of cpu */
-			sd = sd->child;
-			continue;
-		}
-
-		/* Now try balancing at a lower domain level of new_cpu */
-		cpu = new_cpu;
-		weight = sd->span_weight;
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (weight <= tmp->span_weight)
-				break;
-			if (tmp->flags & sd_flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
+	} else {
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}
 	rcu_read_unlock();
 
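For context on the re-walk at the bottom of the new find_idlest_cpu(): for_each_domain() iterates from @cpu's lowest-level sched domain upward through ->parent, so the inner loop walks bottom-up and settles on the widest domain that is still strictly narrower than the level just searched (weight <= tmp->span_weight ends the walk) and that has @sd_flag set. For reference, the macro reads approximately as follows in kernel/sched/sched.h of this era (paraphrased from memory; check the tree for the exact definition):

	#define for_each_domain(cpu, __sd) \
		for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
		     __sd; __sd = __sd->parent)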