sched/core: Disable SD_PREFER_SIBLING on asymmetric CPU capacity domains
The 'prefer sibling' sched_domain flag is intended to encourage spreading tasks to sibling sched_domains to take advantage of more caches, and of more cores on SMT systems. It has recently been changed to be on at all non-NUMA topology levels. However, spreading across domains with CPU capacity asymmetry isn't desirable: spreading from high-capacity to low-capacity CPUs, even when the high-capacity CPUs aren't overutilized, might give access to more cache, but the CPU will be slower and this can lead to worse overall throughput.

To prevent this, remove SD_PREFER_SIBLING on the sched_domain level immediately below SD_ASYM_CPUCAPACITY.

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: gaku.inami.xh@renesas.com
Cc: valentin.schneider@arm.com
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1530699470-29808-13-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 4ad3831a9d
commit 9c63e84db2
1 changed file with 8 additions and 4 deletions
kernel/sched/topology.c

@@ -1126,7 +1126,7 @@ sd_init(struct sched_domain_topology_level *tl,
 					| 0*SD_SHARE_CPUCAPACITY
 					| 0*SD_SHARE_PKG_RESOURCES
 					| 0*SD_SERIALIZE
-					| 0*SD_PREFER_SIBLING
+					| 1*SD_PREFER_SIBLING
 					| 0*SD_NUMA
 					| sd_flags
 					,
@@ -1152,17 +1152,21 @@ sd_init(struct sched_domain_topology_level *tl,
 	if (sd->flags & SD_ASYM_CPUCAPACITY) {
 		struct sched_domain *t = sd;
 
+		/*
+		 * Don't attempt to spread across CPUs of different capacities.
+		 */
+		if (sd->child)
+			sd->child->flags &= ~SD_PREFER_SIBLING;
+
 		for_each_lower_domain(t)
 			t->flags |= SD_BALANCE_WAKE;
 	}
 
 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
-		sd->flags |= SD_PREFER_SIBLING;
 		sd->imbalance_pct = 110;
 		sd->smt_gain = 1178; /* ~15% */
 
 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-		sd->flags |= SD_PREFER_SIBLING;
 		sd->imbalance_pct = 117;
 		sd->cache_nice_tries = 1;
 		sd->busy_idx = 2;
 
@@ -1173,6 +1177,7 @@ sd_init(struct sched_domain_topology_level *tl,
 		sd->busy_idx = 3;
 		sd->idle_idx = 2;
 
+		sd->flags &= ~SD_PREFER_SIBLING;
 		sd->flags |= SD_SERIALIZE;
 		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
 			sd->flags &= ~(SD_BALANCE_EXEC |
@@ -1182,7 +1187,6 @@ sd_init(struct sched_domain_topology_level *tl,
 
 #endif
 	} else {
-		sd->flags |= SD_PREFER_SIBLING;
 		sd->cache_nice_tries = 1;
 		sd->busy_idx = 2;
 		sd->idle_idx = 1;
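For readers who want the net effect at a glance, below is a minimal user-space sketch of the flag logic this patch leaves in sd_init(), assuming a two-level big.LITTLE-style hierarchy: an MC level of identical CPUs beneath a DIE level that spans big and little clusters. struct toy_sd, the flag values and toy_sd_init() are illustrative stand-ins, not the kernel's definitions.

/*
 * Minimal user-space sketch (not kernel code) of the SD_PREFER_SIBLING
 * handling after this patch. struct toy_sd, the flag values and the
 * two-level hierarchy below are assumptions for illustration only.
 */
#include <stdio.h>

#define SD_ASYM_CPUCAPACITY	0x1	/* domain spans CPUs of different capacity */
#define SD_PREFER_SIBLING	0x2	/* prefer spreading tasks to sibling domains */

struct toy_sd {
	const char *name;
	unsigned int flags;
	struct toy_sd *child;	/* next lower topology level, if any */
};

/*
 * Mirrors the patched logic: every non-NUMA level now starts with
 * SD_PREFER_SIBLING set (first hunk), and a domain with asymmetric
 * CPU capacities clears it again on the level immediately below it
 * (second hunk).
 */
static void toy_sd_init(struct toy_sd *sd)
{
	sd->flags |= SD_PREFER_SIBLING;

	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
		sd->child->flags &= ~SD_PREFER_SIBLING;
}

int main(void)
{
	struct toy_sd mc  = { "MC",  0, NULL };			  /* identical CPUs */
	struct toy_sd die = { "DIE", SD_ASYM_CPUCAPACITY, &mc };  /* big + little */

	toy_sd_init(&mc);	/* domains are built bottom-up */
	toy_sd_init(&die);

	printf("MC  prefer_sibling=%d\n", !!(mc.flags & SD_PREFER_SIBLING));   /* 0 */
	printf("DIE prefer_sibling=%d\n", !!(die.flags & SD_PREFER_SIBLING));  /* 1 */
	return 0;
}

So spreading is still preferred among identical CPUs, but not across the capacity boundary. On the 1*SD_PREFER_SIBLING spelling in the first hunk: the topology flags initializer lists every flag with a 0 or 1 multiplier so the full mask stays documented in one place, which is why enabling the flag by default is a one-character change there.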