mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-13 22:25:03 +00:00
sched: clean up calc_weighted()

Clean up calc_weighted(): we always use the normalized shift, so there is no
need to pass it in. Also, push the non-nice0 branch into the function itself.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
1091985b48
commit
08e2388aa1
1 changed file with 8 additions and 23 deletions
|
@@ -397,27 +397,16 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
 static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
 {
-	u64 tmp = (u64)delta * weight >> shift;
+	unsigned long weight = se->load.weight;
 
-	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
-		return sysctl_sched_runtime_limit*2;
-
-	return tmp;
+	if (unlikely(weight != NICE_0_LOAD))
+		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+	else
+		return delta;
 }
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
-	return delta * weight >> shift;
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
@@ -469,9 +458,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-							NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	add_wait_runtime(cfs_rq, se, delta_fair);
 }
@@ -554,9 +541,7 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		delta_fair = div64_likely32((u64)delta_fair * load,
 					    load + se->load.weight);
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-							NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
Loading…
Reference in a new issue