@@ -373,10 +373,28 @@ static void intel_pstate_set_itmt_prio(int cpu)
 		}
 	}
 }
+
+static int intel_pstate_get_cppc_guranteed(int cpu)
+{
+	struct cppc_perf_caps cppc_perf;
+	int ret;
+
+	ret = cppc_get_perf_caps(cpu, &cppc_perf);
+	if (ret)
+		return ret;
+
+	return cppc_perf.guaranteed_perf;
+}
+
 #else
 static void intel_pstate_set_itmt_prio(int cpu)
 {
 }
+
+static int intel_pstate_get_cppc_guranteed(int cpu)
+{
+	return -ENOTSUPP;
+}
 #endif
 
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
@@ -699,9 +717,29 @@ static ssize_t show_energy_performance_preference(
 
 cpufreq_freq_attr_rw(energy_performance_preference);
 
+static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
+{
+	struct cpudata *cpu;
+	u64 cap;
+	int ratio;
+
+	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+	if (ratio <= 0) {
+		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+		ratio = HWP_GUARANTEED_PERF(cap);
+	}
+
+	cpu = all_cpu_data[policy->cpu];
+
+	return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
+}
+
+cpufreq_freq_attr_ro(base_frequency);
+
 static struct freq_attr *hwp_cpufreq_attrs[] = {
 	&energy_performance_preference,
 	&energy_performance_available_preferences,
+	&base_frequency,
 	NULL,
 };
 
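
For illustration only, not part of the patch: a minimal user-space sketch that reads the new attribute. It assumes the standard per-policy cpufreq sysfs layout (attributes registered through hwp_cpufreq_attrs show up under /sys/devices/system/cpu/cpufreq/policy<N>/) and that the reported value is in kHz, i.e. ratio * cpu->pstate.scaling as computed in show_base_frequency() above.

/*
 * Illustrative sketch, not part of the patch: read the base_frequency
 * attribute added above for policy0.  The sysfs path is an assumption
 * based on the standard cpufreq per-policy layout.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/policy0/base_frequency";
	FILE *f = fopen(path, "r");
	long khz;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &khz) == 1)
		printf("policy0 base frequency: %ld kHz\n", khz);

	fclose(f);
	return 0;
}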