diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8abb8aa59664..840f1277492f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1900,6 +1900,7 @@ extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b320753aa6c9..6c84439ce987 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -89,6 +89,13 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+/*
+ * The exponential sliding window over which load is averaged for shares
+ * distribution.
+ * (default: 10msec)
+ */
+unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -688,7 +695,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
-	u64 period = sched_avg_period();
+	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3132b25193db..9b520d74f052 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -332,6 +332,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sched_shares_window",
+		.data		= &sysctl_sched_shares_window,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "timer_migration",
 		.data		= &sysctl_timer_migration,
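
Usage note (not part of the patch): because the new kern_table entry uses proc_dointvec with mode 0644, the window presumably becomes readable and writable as /proc/sys/kernel/sched_shares_window, with the value in nanoseconds (10000000 corresponds to the 10msec default in the comment). The snippet below is a minimal userspace sketch of inspecting and tuning it under that assumption; the procfs path and the requirement of root privileges for writes are assumptions about standard sysctl behavior, not something stated by the patch itself.

/*
 * Hypothetical userspace sketch: read, and optionally rewrite, the
 * sched_shares_window tunable via procfs. Assumes a kernel built with
 * this patch, which would expose the knob as
 * /proc/sys/kernel/sched_shares_window (value in nanoseconds).
 */
#include <stdio.h>
#include <stdlib.h>

#define SHARES_WINDOW "/proc/sys/kernel/sched_shares_window"

int main(int argc, char **argv)
{
	unsigned int window_ns;
	FILE *f = fopen(SHARES_WINDOW, "r");

	if (!f) {
		perror(SHARES_WINDOW);
		return 1;
	}
	if (fscanf(f, "%u", &window_ns) != 1) {
		fprintf(stderr, "could not parse %s\n", SHARES_WINDOW);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("sched_shares_window = %u ns (%.1f ms)\n",
	       window_ns, window_ns / 1e6);

	/* Optional: pass a new value in nanoseconds, e.g. 20000000 (needs root). */
	if (argc > 1) {
		f = fopen(SHARES_WINDOW, "w");
		if (!f) {
			perror(SHARES_WINDOW);
			return 1;
		}
		fprintf(f, "%u\n", (unsigned int)strtoul(argv[1], NULL, 10));
		fclose(f);
	}
	return 0;
}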