@@ -1259,43 +1259,38 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
-	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
 	/*
-	 * core_busy is the ratio of actual performance to max
-	 * max_pstate is the max non turbo pstate available
-	 * current_pstate was the pstate that was requested during
-	 * 	the last sample period.
-	 *
-	 * We normalize core_busy, which was our actual percent
-	 * performance to what we requested during the last sample
-	 * period. The result will be a percentage of busy at a
-	 * specified pstate.
+	 * perf_scaled is the average performance during the last sampling
+	 * period scaled by the ratio of the maximum P-state to the P-state
+	 * requested last time (in percent). That measures the system's
+	 * response to the previous P-state selection.
 	 */
 	max_pstate = cpu->pstate.max_pstate_physical;
 	current_pstate = cpu->pstate.current_pstate;
-	core_busy = mul_ext_fp(cpu->sample.core_avg_perf,
+	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));
 
 	/*
 	 * Since our utilization update callback will not run unless we are
 	 * in C0, check if the actual elapsed time is significantly greater (3x)
 	 * than our sample interval. If it is, then we were idle for a long
-	 * enough period of time to adjust our busyness.
+	 * enough period of time to adjust our performance metric.
 	 */
 	duration_ns = cpu->sample.time - cpu->last_sample_time;
 	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
-		core_busy = mul_fp(core_busy, sample_ratio);
+		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
 		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
-			core_busy = 0;
+			perf_scaled = 0;
 	}
 
-	cpu->sample.busy_scaled = core_busy;
-	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+	cpu->sample.busy_scaled = perf_scaled;
+	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
 
 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
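
For reference, here is a minimal standalone sketch (not part of the patch) of what perf_scaled represents, written in plain floating-point arithmetic with assumed illustrative values; the driver itself performs these steps with the fixed-point helpers mul_ext_fp(), mul_fp() and div_fp() on cpu->sample.core_avg_perf:

#include <stdio.h>

int main(void)
{
	/* Assumed illustrative values, not taken from the patch. */
	double core_avg_perf = 0.75;	/* average performance ratio over the last sample */
	int max_pstate = 24;		/* maximum non-turbo P-state */
	int current_pstate = 16;	/* P-state requested last time */

	/* perf_scaled = core_avg_perf * (100 * max_pstate / current_pstate) */
	double perf_scaled = core_avg_perf * (100.0 * max_pstate / current_pstate);
	printf("perf_scaled = %.1f%%\n", perf_scaled);	/* 112.5%% with these values */

	/*
	 * If much more time than one sample interval (3x) elapsed since the
	 * last sample, the driver scales the metric down by
	 * sample_rate / duration, as in the first branch above.
	 */
	double sample_rate_ns = 10e6;	/* assumed 10 ms sample interval */
	double duration_ns = 40e6;	/* assumed 40 ms since the last sample */
	if (duration_ns > 3 * sample_rate_ns)
		perf_scaled *= sample_rate_ns / duration_ns;
	printf("after idle scaling: %.3f%%\n", perf_scaled);	/* 28.125%% here */

	return 0;
}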