From a9280514bf1e54775b8d7cd93d87c05c2b5273e6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 11 Sep 2015 16:10:59 +0200
Subject: [PATCH] sched/fair: Make the entity load aging on attaching tunable

In case there are problems with the aging on attach, provide a debug
knob to turn it off.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Byungchul Park
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Cc: yuyang.du@intel.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c     | 4 ++++
 kernel/sched/features.h | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5143ea0cb55b..5cd7054aac85 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2712,6 +2712,9 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		goto skip_aging;
+
 	/*
 	 * If we got migrated (either between CPUs or between cgroups) we'll
 	 * have aged the average right before clearing @last_update_time.
@@ -2726,6 +2729,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	 */
 	}
 
+skip_aging:
	se->avg.last_update_time = cfs_rq->avg.last_update_time;
 	cfs_rq->avg.load_avg += se->avg.load_avg;
 	cfs_rq->avg.load_sum += se->avg.load_sum;
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 83a50e7ca533..e6fd23b7459b 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -73,6 +73,8 @@ SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 
+SCHED_FEAT(ATTACH_AGE_LOAD, true)
+
 /*
  * Apply the automatic NUMA scheduling policy. Enabled automatically
  * at runtime if running on a NUMA machine. Can be controlled via
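
A note on usage (not part of the patch): SCHED_FEAT() entries set
compile-time defaults, and with CONFIG_SCHED_DEBUG the resulting bits
can be flipped at runtime through debugfs, e.g. writing
NO_ATTACH_AGE_LOAD to /sys/kernel/debug/sched_features should clear
this one. The sketch below is a minimal, self-contained userspace
illustration of the X-macro pattern that features.h and sched_feat()
rely on; the names mirror the kernel's, but the mask construction is
simplified and is not the kernel's actual implementation.

/*
 * Illustration only: each SCHED_FEAT(name, enabled) line is expanded
 * twice, once to build an enum of feature indices and once to build
 * the default feature bitmask that sched_feat() tests at runtime.
 */
#include <stdio.h>

#define SCHED_FEATURES			\
	SCHED_FEAT(LB_MIN, 0)		\
	SCHED_FEAT(ATTACH_AGE_LOAD, 1)

/* First expansion: one index per feature. */
#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
enum { SCHED_FEATURES __SCHED_FEAT_NR };
#undef SCHED_FEAT

/* Second expansion: OR the enabled features into a default mask. */
#define SCHED_FEAT(name, enabled) \
	((unsigned long)(enabled) << __SCHED_FEAT_##name) |
static unsigned long sysctl_sched_features = SCHED_FEATURES 0UL;
#undef SCHED_FEAT

/* The runtime test, in the spirit of the non-jump-label variant. */
#define sched_feat(x) \
	(!!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)))

int main(void)
{
	printf("ATTACH_AGE_LOAD: %d\n", sched_feat(ATTACH_AGE_LOAD));

	/*
	 * Clearing the bit is what writing NO_ATTACH_AGE_LOAD to the
	 * sched_features debugfs file does in the real kernel.
	 */
	sysctl_sched_features &= ~(1UL << __SCHED_FEAT_ATTACH_AGE_LOAD);
	printf("ATTACH_AGE_LOAD: %d\n", sched_feat(ATTACH_AGE_LOAD));
	return 0;
}

Building both the enum and the default mask from a single list keeps
the feature table in one place, which is why the patch only needs the
two added lines in features.h to grow a new runtime-tunable knob.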