@@ -61,6 +61,11 @@ struct sugov_cpu {
 	unsigned long util;
 	unsigned long max;
 	unsigned int flags;
+
+	/* The field below is for single-CPU policies only. */
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned long saved_idle_calls;
+#endif
 };
 
 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -192,6 +197,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 		sg_cpu->iowait_boost >>= 1;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+{
+	unsigned long idle_calls = tick_nohz_get_idle_calls();
+	bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+	sg_cpu->saved_idle_calls = idle_calls;
+	return ret;
+}
+#else
+static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+#endif /* CONFIG_NO_HZ_COMMON */
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
 				unsigned int flags)
 {
@@ -200,6 +218,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned long util, max;
 	unsigned int next_f;
+	bool busy;
 
 	sugov_set_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -207,12 +226,20 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
+	busy = sugov_cpu_is_busy(sg_cpu);
+
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
+		/*
+		 * Do not reduce the frequency if the CPU has not been idle
+		 * recently, as the reduction is likely to be premature then.
+		 */
+		if (busy && next_f < sg_policy->next_freq)
+			next_f = sg_policy->next_freq;
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }