From af218122b103900fa33d408aea0c2468791e698c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 1 May 2017 08:51:05 +0200
Subject: [PATCH] sched/topology: Simplify sched_group_mask() usage

While writing the comments, it occurred to me that:

  sg_cpus & sg_mask == sg_mask

at least conceptually; the !overlap case sets the all 1s mask. If we
correct that we can simplify things and directly use sg_mask.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c     | 7 +++----
 kernel/sched/topology.c | 5 +++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f80c825e2b43..1eb32d4513ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7996,7 +7996,7 @@ static int active_load_balance_cpu_stop(void *data);
 static int should_we_balance(struct lb_env *env)
 {
 	struct sched_group *sg = env->sd->groups;
-	struct cpumask *sg_cpus, *sg_mask;
+	struct cpumask *sg_mask;
 	int cpu, balance_cpu = -1;
 
 	/*
@@ -8006,11 +8006,10 @@ static int should_we_balance(struct lb_env *env)
 	if (env->idle == CPU_NEWLY_IDLE)
 		return 1;
 
-	sg_cpus = sched_group_cpus(sg);
 	sg_mask = sched_group_mask(sg);
 	/* Try to find first idle cpu */
-	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
-		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+	for_each_cpu_and(cpu, sg_mask, env->cpus) {
+		if (!idle_cpu(cpu))
 			continue;
 
 		balance_cpu = cpu;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index dea1950b42a5..bf53a99eb511 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -85,7 +85,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			group->sgc->id,
 			cpumask_pr_args(sched_group_cpus(group)));
 
-		if ((sd->flags & SD_OVERLAP) && !cpumask_full(sched_group_mask(group))) {
+		if ((sd->flags & SD_OVERLAP) &&
+		    !cpumask_equal(sched_group_mask(group), sched_group_cpus(group))) {
 			printk(KERN_CONT " mask=%*pbl",
 				cpumask_pr_args(sched_group_mask(group)));
 		}
@@ -505,7 +506,7 @@ enum s_alloc {
  */
 int group_balance_cpu(struct sched_group *sg)
 {
-	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+	return cpumask_first(sched_group_mask(sg));
 }
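
To make the changelog's reasoning concrete: once the balance mask is a
subset of the group's span, the first CPU of (span & mask) is just the
first CPU of the mask, which is what lets group_balance_cpu() drop the
cpumask_first_and() and should_we_balance() drop the cpumask_test_cpu()
filter. The sketch below is a minimal userspace model of that invariant,
not kernel code: cpumasks are modeled as plain unsigned long bitmaps, and
first_cpu() is an illustrative stand-in for cpumask_first(); the specific
bit patterns are made-up example values.

	#include <assert.h>
	#include <stdio.h>

	/* Model a cpumask as an unsigned long bitmap: bit n == CPU n. */
	typedef unsigned long cpumask_t;

	/* Stand-in for cpumask_first(): index of the lowest set bit. */
	static int first_cpu(cpumask_t m)
	{
		return m ? __builtin_ctzl(m) : -1;
	}

	int main(void)
	{
		cpumask_t sg_cpus = 0xf0;	/* group spans CPUs 4-7 */
		cpumask_t sg_mask = 0x30;	/* balance mask: CPUs 4-5, a subset */

		/* The invariant the patch establishes: sg_cpus & sg_mask == sg_mask. */
		assert((sg_cpus & sg_mask) == sg_mask);

		/*
		 * Hence first_cpu(sg_cpus & sg_mask) == first_cpu(sg_mask),
		 * i.e. cpumask_first_and(span, mask) can become cpumask_first(mask).
		 */
		assert(first_cpu(sg_cpus & sg_mask) == first_cpu(sg_mask));
		printf("balance cpu = %d\n", first_cpu(sg_mask));
		return 0;
	}

This compiles with gcc or clang (__builtin_ctzl is a GCC-style builtin)
and exercises the same subset relationship the patch relies on; if
sg_mask were not a subset of sg_cpus, the first assert would fire.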