@@ -42,9 +42,6 @@
  */
 static struct cppc_cpudata **all_cpu_data;
 
-/* Capture the max KHz from DMI */
-static u64 cppc_dmi_max_khz;
-
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -75,6 +72,64 @@ static u64 cppc_get_dmi_max_khz(void)
 	return (1000 * mhz);
 }
 
+/*
+ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+ * use them to convert perf to freq and vice versa
+ *
+ * If the perf/freq point lies between Nominal and Lowest, we can treat
+ * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
+ * and extrapolate the rest
+ * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
+ */
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+					unsigned int perf)
+{
+	static u64 max_khz;
+	struct cppc_perf_caps *caps = &cpu->perf_caps;
+	u64 mul, div;
+
+	if (caps->lowest_freq && caps->nominal_freq) {
+		if (perf >= caps->nominal_perf) {
+			mul = caps->nominal_freq;
+			div = caps->nominal_perf;
+		} else {
+			mul = caps->nominal_freq - caps->lowest_freq;
+			div = caps->nominal_perf - caps->lowest_perf;
+		}
+	} else {
+		if (!max_khz)
+			max_khz = cppc_get_dmi_max_khz();
+		mul = max_khz;
+		div = cpu->perf_caps.highest_perf;
+	}
+	return (u64)perf * mul / div;
+}
+
+static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
+					unsigned int freq)
+{
+	static u64 max_khz;
+	struct cppc_perf_caps *caps = &cpu->perf_caps;
+	u64 mul, div;
+
+	if (caps->lowest_freq && caps->nominal_freq) {
+		if (freq >= caps->nominal_freq) {
+			mul = caps->nominal_perf;
+			div = caps->nominal_freq;
+		} else {
+			mul = caps->lowest_perf;
+			div = caps->lowest_freq;
+		}
+	} else {
+		if (!max_khz)
+			max_khz = cppc_get_dmi_max_khz();
+		mul = cpu->perf_caps.highest_perf;
+		div = max_khz;
+	}
+
+	return (u64)freq * mul / div;
+}
+
 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 		unsigned int target_freq,
 		unsigned int relation)
@@ -86,7 +141,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 
 	cpu = all_cpu_data[policy->cpu];
 
-	desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+	desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
 	/* Return if it is exactly the same perf */
 	if (desired_perf == cpu->perf_ctrls.desired_perf)
 		return ret;
@@ -143,24 +198,24 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return ret;
 	}
 
-	cppc_dmi_max_khz = cppc_get_dmi_max_khz();
+	/* Convert the lowest and nominal freq from MHz to KHz */
+	cpu->perf_caps.lowest_freq *= 1000;
+	cpu->perf_caps.nominal_freq *= 1000;
 
 	/*
 	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
 	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
 	 */
-	policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
-		cpu->perf_caps.highest_perf;
-	policy->max = cppc_dmi_max_khz;
+	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
+	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
 
 	/*
 	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
 	 * available if userspace wants to use any perf between lowest & lowest
 	 * nonlinear perf
 	 */
-	policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
-		cpu->perf_caps.highest_perf;
-	policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
+	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
 
 	policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
 		NSEC_PER_USEC;
@@ -187,7 +242,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	cpu->cur_policy = policy;
 
 	/* Set policy->cur to max now. The governors will adjust later. */
-	policy->cur = cppc_dmi_max_khz;
+	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
+					cpu->perf_caps.highest_perf);
 	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
 
 	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
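
The comment block above the new helpers describes the conversion as a ratio taken at Nominal (for perf at or above nominal) or as the slope between the Lowest and Nominal points (below nominal). The following standalone userspace sketch is illustrative only and not part of the patch: struct caps, perf_to_khz() and the capability numbers are hypothetical, chosen so the arithmetic mirrors what cppc_cpufreq_perf_to_khz() does when lowest_freq and nominal_freq are exposed.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical capability values; freq fields are in kHz (after the *= 1000 step). */
struct caps {
	uint64_t lowest_perf, nominal_perf;
	uint64_t lowest_freq, nominal_freq;
};

static uint64_t perf_to_khz(const struct caps *c, unsigned int perf)
{
	uint64_t mul, div;

	if (perf >= c->nominal_perf) {
		/* At or above nominal: use the perf:freq ratio at Nominal */
		mul = c->nominal_freq;
		div = c->nominal_perf;
	} else {
		/* Below nominal: use the slope between the Lowest and Nominal points */
		mul = c->nominal_freq - c->lowest_freq;
		div = c->nominal_perf - c->lowest_perf;
	}
	return (uint64_t)perf * mul / div;
}

int main(void)
{
	/* Hypothetical platform: perf 10..20 maps onto 1000..2000 MHz */
	struct caps c = {
		.lowest_perf = 10, .nominal_perf = 20,
		.lowest_freq = 1000000, .nominal_freq = 2000000,
	};

	printf("perf 15 -> %llu kHz\n", (unsigned long long)perf_to_khz(&c, 15)); /* 1500000 */
	printf("perf 20 -> %llu kHz\n", (unsigned long long)perf_to_khz(&c, 20)); /* 2000000 */
	printf("perf 25 -> %llu kHz\n", (unsigned long long)perf_to_khz(&c, 25)); /* 2500000 */
	return 0;
}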