@@ -95,15 +95,8 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	if (sg_policy->work_in_progress)
 		return false;
 
-	if (unlikely(sg_policy->need_freq_update)) {
-		sg_policy->need_freq_update = false;
-		/*
-		 * This happens when limits change, so forget the previous
-		 * next_freq value and force an update.
-		 */
-		sg_policy->next_freq = UINT_MAX;
+	if (unlikely(sg_policy->need_freq_update))
 		return true;
-	}
 
 	delta_ns = time - sg_policy->last_freq_update_time;
 	return delta_ns >= sg_policy->freq_update_delay_ns;
@@ -165,8 +158,10 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
 	freq = (freq + (freq >> 2)) * util / max;
 
-	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
 		return sg_policy->next_freq;
+
+	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
@@ -305,8 +300,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
 	 */
-	if (busy && next_f < sg_policy->next_freq &&
-	    sg_policy->next_freq != UINT_MAX) {
+	if (busy && next_f < sg_policy->next_freq) {
 		next_f = sg_policy->next_freq;
 
 		/* Reset cached freq as next_freq has changed */
@@ -654,7 +648,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 
 	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
 	sg_policy->last_freq_update_time = 0;
-	sg_policy->next_freq = UINT_MAX;
+	sg_policy->next_freq = 0;
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = 0;
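
For readers outside the scheduler tree, the net effect of the patch is a switch
from a magic sentinel (next_freq == UINT_MAX) to the existing need_freq_update
flag as the one signal that the cached frequency must not be reused. Below is a
minimal, standalone sketch of that pattern; struct policy, resolve_freq() and
the main() driver are simplified stand-ins invented for illustration, not the
kernel's struct sugov_policy or cpufreq_driver_resolve_freq().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct sugov_policy: only the fields this patch touches. */
struct policy {
	unsigned int next_freq;       /* last frequency handed to the driver */
	unsigned int cached_raw_freq; /* raw frequency that produced next_freq */
	bool need_freq_update;        /* set on limit changes; cache must be ignored */
};

/* Stub resolver: pretend the driver rounds raw requests down to 100 kHz steps. */
static unsigned int resolve_freq(unsigned int raw)
{
	return raw - (raw % 100);
}

/*
 * Mirrors the patched get_next_freq(): reuse the cached result only when
 * the raw frequency is unchanged AND no limit update is pending. The flag
 * is cleared here, once it has forced a recomputation, instead of in
 * sugov_should_update_freq(), which previously also had to poison
 * next_freq with UINT_MAX.
 */
static unsigned int get_next_freq(struct policy *p, unsigned int raw)
{
	if (raw == p->cached_raw_freq && !p->need_freq_update)
		return p->next_freq;

	p->need_freq_update = false;
	p->cached_raw_freq = raw;
	p->next_freq = resolve_freq(raw);
	return p->next_freq;
}

int main(void)
{
	struct policy p = { 0 };                 /* matches sugov_start(): next_freq = 0 */

	printf("%u\n", get_next_freq(&p, 1234)); /* miss: resolves to 1200 */
	printf("%u\n", get_next_freq(&p, 1234)); /* hit: cached 1200 returned */

	p.need_freq_update = true;               /* e.g. policy limits changed */
	printf("%u\n", get_next_freq(&p, 1234)); /* recomputed despite the cache hit */
	return 0;
}

The boolean also explains the sugov_update_single() hunk: since 0 (the new
initial next_freq) is smaller than any real frequency, the
"next_f < sg_policy->next_freq" comparison no longer needs the extra
"next_freq != UINT_MAX" guard that the sentinel forced on every caller.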