sched/fair: Rename sg_lb_stats::sum_nr_running to sum_h_nr_running

Rename sum_nr_running to sum_h_nr_running because it effectively tracks
cfs->h_nr_running; this frees the name sum_nr_running to track
rq->nr_running when needed.

There are no functional changes.
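
The distinction matters because rq->nr_running counts runnable tasks of
all scheduling classes (CFS, RT, DL), while rq->cfs.h_nr_running counts
only CFS tasks, including those queued in nested task groups. A minimal
userspace sketch of the relationship, with a toy struct and invented
numbers (the real fields live in kernel/sched/sched.h):

#include <stdio.h>

/* Toy model of the two per-CPU counters (illustrative only). */
struct toy_rq {
	unsigned int nr_running;	/* all runnable tasks: CFS + RT + DL */
	unsigned int cfs_h_nr_running;	/* CFS tasks only, incl. nested groups */
};

int main(void)
{
	/* CPU0 runs two CFS tasks plus one RT task; CPU1 runs one CFS task. */
	struct toy_rq rqs[2] = {
		{ .nr_running = 3, .cfs_h_nr_running = 2 },
		{ .nr_running = 1, .cfs_h_nr_running = 1 },
	};
	unsigned int sum_h_nr_running = 0, sum_nr_running = 0;

	for (int i = 0; i < 2; i++) {
		sum_h_nr_running += rqs[i].cfs_h_nr_running; /* what the renamed field tracks */
		sum_nr_running += rqs[i].nr_running;	/* what the freed name could track */
	}

	/* Prints 3 vs 4: the two sums diverge as soon as RT/DL tasks run. */
	printf("sum_h_nr_running=%u sum_nr_running=%u\n",
	       sum_h_nr_running, sum_nr_running);
	return 0;
}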

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: srikar@linux.vnet.ibm.com
Link: https://lkml.kernel.org/r/1571405198-27570-3-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/fair.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7660,7 +7660,7 @@ struct sg_lb_stats {
 	unsigned long load_per_task;
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
-	unsigned int sum_nr_running; /* Nr tasks running in the group */
+	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
@@ -7705,7 +7705,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 		.total_capacity = 0UL,
 		.busiest_stat = {
 			.avg_load = 0UL,
-			.sum_nr_running = 0,
+			.sum_h_nr_running = 0,
 			.group_type = group_other,
 		},
 	};
@@ -7896,7 +7896,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 static inline bool
 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running < sgs->group_weight)
+	if (sgs->sum_h_nr_running < sgs->group_weight)
 		return true;
 
 	if ((sgs->group_capacity * 100) >
@@ -7917,7 +7917,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 static inline bool
 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running <= sgs->group_weight)
+	if (sgs->sum_h_nr_running <= sgs->group_weight)
 		return false;
 
 	if ((sgs->group_capacity * 100) <
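
For context on the two predicates touched above: a group "has capacity"
while it runs fewer CFS tasks than it has CPUs (group_weight), and is
"overloaded" once it runs more tasks than CPUs and its utilization,
inflated by the scheduling domain's imbalance_pct, exceeds its capacity.
A standalone sketch with invented numbers (a simplified restatement, not
the kernel function; 117 is only an example imbalance_pct):

#include <stdbool.h>
#include <stdio.h>

/* Simplified restatement of group_is_overloaded() with the sg_lb_stats
 * fields reduced to plain parameters (illustrative only). */
static bool toy_group_is_overloaded(unsigned int sum_h_nr_running,
				    unsigned int group_weight,
				    unsigned long group_capacity,
				    unsigned long group_util,
				    unsigned int imbalance_pct)
{
	if (sum_h_nr_running <= group_weight)
		return false;

	/* Overloaded once utilization, scaled up by imbalance_pct,
	 * outgrows the group's raw capacity. */
	return (group_capacity * 100) < (group_util * imbalance_pct);
}

int main(void)
{
	/* 4 CPUs, 5 CFS tasks, capacity 4096, util 3700, imbalance_pct 117:
	 * 409600 < 432900, so the group is overloaded. */
	printf("%d\n", toy_group_is_overloaded(5, 4, 4096, 3700, 117));
	return 0;
}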
@@ -8009,7 +8009,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		sgs->group_load += cpu_runnable_load(rq);
 		sgs->group_util += cpu_util(i);
-		sgs->sum_nr_running += rq->cfs.h_nr_running;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 
 		nr_running = rq->nr_running;
 		if (nr_running > 1)
@@ -8039,8 +8039,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = group->sgc->capacity;
 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-	if (sgs->sum_nr_running)
-		sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
+	if (sgs->sum_h_nr_running)
+		sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
 
 	sgs->group_weight = group->group_weight;
@@ -8097,7 +8097,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * capable CPUs may harm throughput. Maximize throughput,
 	 * power/energy consequences are not considered.
 	 */
-	if (sgs->sum_nr_running <= sgs->group_weight &&
+	if (sgs->sum_h_nr_running <= sgs->group_weight &&
 	    group_smaller_min_cpu_capacity(sds->local, sg))
 		return false;
 
@@ -8128,7 +8128,7 @@ asym_packing:
 	 * perform better since they share less core resources. Hence when we
 	 * have idle threads, we want them to be the higher ones.
 	 */
-	if (sgs->sum_nr_running &&
+	if (sgs->sum_h_nr_running &&
 	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
 		sgs->group_asym_packing = 1;
 		if (!sds->busiest)
@@ -8146,9 +8146,9 @@ asym_packing:
 #ifdef CONFIG_NUMA_BALANCING
 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running > sgs->nr_numa_running)
+	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
 		return regular;
-	if (sgs->sum_nr_running > sgs->nr_preferred_running)
+	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
 		return remote;
 	return all;
 }
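
fbq_classify_group() is visible in full above: a group is "regular" if
it runs more tasks than have any NUMA placement, "remote" if all tasks
are placed but some run off their preferred node, and "all" otherwise.
A toy restatement (the enum mirrors the kernel's, numbers invented):

#include <stdio.h>

enum fbq_type { regular, remote, all };

static enum fbq_type toy_classify(unsigned int sum_h_nr_running,
				  unsigned int nr_numa_running,
				  unsigned int nr_preferred_running)
{
	if (sum_h_nr_running > nr_numa_running)
		return regular;		/* some tasks have no NUMA placement */
	if (sum_h_nr_running > nr_preferred_running)
		return remote;		/* all placed, some on the wrong node */
	return all;			/* everything on its preferred node */
}

int main(void)
{
	/* 4 CFS tasks, all NUMA-placed, 2 on their preferred node -> remote (1). */
	printf("%d\n", toy_classify(4, 4, 2));
	return 0;
}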
@@ -8223,7 +8223,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 		 */
 		if (prefer_sibling && sds->local &&
 		    group_has_capacity(env, local) &&
-		    (sgs->sum_nr_running > local->sum_nr_running + 1)) {
+		    (sgs->sum_h_nr_running > local->sum_h_nr_running + 1)) {
 			sgs->group_no_capacity = 1;
 			sgs->group_type = group_classify(sg, sgs);
 		}
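
The prefer_sibling branch above forcibly reclassifies a group as
over-capacity when it runs at least two more CFS tasks than the local
group does; a toy evaluation of the condition (numbers invented, the
group_has_capacity() result folded into a flag):

#include <stdbool.h>
#include <stdio.h>

/* Simplified restatement of the prefer_sibling test (illustrative only). */
static bool toy_prefer_sibling_overload(bool prefer_sibling,
					bool local_has_capacity,
					unsigned int sum_h_nr_running,
					unsigned int local_sum_h_nr_running)
{
	return prefer_sibling && local_has_capacity &&
	       sum_h_nr_running > local_sum_h_nr_running + 1;
}

int main(void)
{
	/* Candidate group runs 4 CFS tasks, local runs 2: 4 > 3, reclassify. */
	printf("%d\n", toy_prefer_sibling_overload(true, true, 4, 2));
	return 0;
}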
@@ -8235,7 +8235,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 
 next_group:
 		/* Now, start updating sd_lb_stats */
-		sds->total_running += sgs->sum_nr_running;
+		sds->total_running += sgs->sum_h_nr_running;
 		sds->total_load += sgs->group_load;
 		sds->total_capacity += sgs->group_capacity;
@@ -8289,7 +8289,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
 
-	if (!local->sum_nr_running)
+	if (!local->sum_h_nr_running)
 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
 	else if (busiest->load_per_task > local->load_per_task)
 		imbn = 1;
@@ -8387,7 +8387,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	 */
 	if (busiest->group_type == group_overloaded &&
 	    local->group_type   == group_overloaded) {
-		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
+		load_above_capacity = busiest->sum_h_nr_running * SCHED_CAPACITY_SCALE;
 		if (load_above_capacity > busiest->group_capacity) {
 			load_above_capacity -= busiest->group_capacity;
 			load_above_capacity *= scale_load_down(NICE_0_LOAD);
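
The arithmetic above is easier to follow with numbers plugged in; a
hedged walk-through (values invented; scale_load_down(NICE_0_LOAD) taken
as 1024, its typical value):

#include <stdio.h>

int main(void)
{
	/* Invented example: 5 CFS tasks on a fully-utilized 4-CPU group. */
	unsigned long sched_capacity_scale = 1024;	/* SCHED_CAPACITY_SCALE */
	unsigned long nice_0_load = 1024;	/* scale_load_down(NICE_0_LOAD) */
	unsigned long sum_h_nr_running = 5;
	unsigned long group_capacity = 4 * sched_capacity_scale; /* 4096 */

	unsigned long load_above_capacity = sum_h_nr_running * sched_capacity_scale;
	if (load_above_capacity > group_capacity) {
		load_above_capacity -= group_capacity;	/* 1024: one task's worth */
		load_above_capacity *= nice_0_load;	/* 1048576: to load units */
		load_above_capacity /= group_capacity;	/* 256 */
	}

	printf("load_above_capacity=%lu\n", load_above_capacity); /* prints 256 */
	return 0;
}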
@@ -8468,7 +8468,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 
 	/* There is no busy sibling group to pull tasks from */
-	if (!sds.busiest || busiest->sum_nr_running == 0)
+	if (!sds.busiest || busiest->sum_h_nr_running == 0)
 		goto out_balanced;
 
 	/* XXX broken for overlapping NUMA groups */