intel_pstate: Add a few comments
Add a few comments in the code which calculates busyness to clarify
parts of the algorithm.

Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
This commit is contained in:
parent aa4ea34da9
commit e0d4c8f808

1 changed file with 31 additions and 1 deletion
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 	pid->integral += fp_error;
 
-	/* limit the integral term */
+	/*
+	 * We limit the integral here so that it will never
+	 * get higher than 30. This prevents it from becoming
+	 * too large an input over long periods of time and allows
+	 * it to get factored out sooner.
+	 *
+	 * The value of 30 was chosen through experimentation.
+	 */
 	integral_limit = int_tofp(30);
 	if (pid->integral > integral_limit)
 		pid->integral = integral_limit;
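For readers without the rest of pid_calc() in front of them, here is a minimal user-space sketch of the clamp the new comment describes. The FRAC_BITS width, the int_tofp() macro, and the symmetric negative clamp are assumptions modeled on the driver's fixed-point helpers, not part of this diff:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* assumed: the driver's fixed-point fraction width */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

struct pid_state {
	int32_t integral;	/* accumulated error, fixed point */
};

static void integrate_and_clamp(struct pid_state *pid, int32_t fp_error)
{
	const int32_t integral_limit = int_tofp(30);

	pid->integral += fp_error;

	/* Cap the integral so stale error gets factored out sooner. */
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;
}

int main(void)
{
	struct pid_state pid = { .integral = (int32_t)int_tofp(29) };

	integrate_and_clamp(&pid, (int32_t)int_tofp(5));	/* 34 before the clamp */
	printf("integral = %d, limit = %d\n",
	       (int)pid.integral, (int)int_tofp(30));
	return 0;
}

Without the cap, a long stretch of one-sided error would keep inflating the integral term and dominate the PID output long after conditions change; clamping at 30 (found experimentally, per the comment) bounds how much history the controller has to unwind.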
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	if (limits.no_turbo || limits.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
+	/*
+	 * performance can be limited by user through sysfs, by cpufreq
+	 * policy, or by cpu specific default values determined through
+	 * experimentation.
+	 */
 	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
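As a rough illustration of how a percentage limit trims the maximum pstate, here is a self-contained sketch. The FRAC_BITS width, the helper definitions, the example pstate numbers, and the 80% limit are assumptions for illustration; clamp_int() stands in for the kernel's clamp_t():

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* assumed fixed-point fraction width */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
}

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	int max_perf = 24;			/* max non-turbo pstate (example) */
	int32_t limit = (int32_t)(int_tofp(80) / 100);	/* hypothetical 80% sysfs cap */
	int min_pstate = 8, turbo_pstate = 32;	/* example bounds */

	/* Scale the ceiling by the limit, then keep it inside hardware bounds. */
	int max_perf_adj = (int)fp_toint(mul_fp((int32_t)int_tofp(max_perf), limit));
	int max = clamp_int(max_perf_adj, min_pstate, turbo_pstate);

	printf("adjusted max pstate = %d\n", max);	/* 24 * 0.8 = 19.2 -> 19 */
	return 0;
}

Whichever source imposed the limit (sysfs, cpufreq policy, or a per-CPU default), the arithmetic is the same: scale the non-turbo ceiling by the fractional limit, then clamp into [min_pstate, turbo_pstate].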
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	u32 duration_us;
 	u32 sample_time;
 
+	/*
+	 * core_busy is the ratio of actual performance to max
+	 * max_pstate is the max non turbo pstate available
+	 * current_pstate was the pstate that was requested during
+	 * the last sample period.
+	 *
+	 * We normalize core_busy, which was our actual percent
+	 * performance to what we requested during the last sample
+	 * period. The result will be a percentage of busy at a
+	 * specified pstate.
+	 */
 	core_busy = cpu->sample.core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
+	/*
+	 * Since we have a deferred timer, it will not fire unless
+	 * we are in C0. So, determine if the actual elapsed time
+	 * is significantly greater (3x) than our sample interval. If it
+	 * is, then we were idle for a long enough period of time
+	 * to adjust our busyness.
+	 */
 	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
 	duration_us = (u32) ktime_us_delta(cpu->sample.time,
 			cpu->last_sample_time);
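The two comments describe two steps: normalizing measured busyness to the pstate that was actually requested, then checking whether the deferred timer fired far later than the sample interval. The hunk ends before the actual correction, so the 3x check and the duration-based rescale below are a plausible sketch of what the comment describes, with the fixed-point helpers (FRAC_BITS, mul_fp, div_fp) and all input values reconstructed as assumptions:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* assumed fixed-point fraction width */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
	/* Normalize busyness to the pstate we actually requested. */
	int32_t core_busy = (int32_t)int_tofp(90);	/* 90% busy ... */
	int32_t max_pstate = (int32_t)int_tofp(24);	/* max non-turbo pstate */
	int32_t current_pstate = (int32_t)int_tofp(32);	/* ... while at turbo */

	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/* The deferred timer only fires in C0, so a late firing means
	 * we were idle; scale busyness down by the overshoot. */
	uint32_t sample_time = 10 * 1000;	/* 10 ms interval, in us */
	uint32_t duration_us = 40 * 1000;	/* timer fired 4x late */

	if (duration_us > sample_time * 3) {
		int32_t ratio = div_fp((int32_t)int_tofp(sample_time),
				       (int32_t)int_tofp(duration_us));
		core_busy = mul_fp(core_busy, ratio);
	}

	printf("scaled busy = %d%%\n", (int)(core_busy >> FRAC_BITS));
	return 0;
}

Because the timer cannot fire while the CPU is idle, elapsed time well beyond the sample interval is itself the idleness signal; scaling busyness by sample_time / duration_us folds that idle time back into the estimate.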