@@ -179,12 +179,17 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long cfs_max;
+	unsigned long util_cfs = cpu_util_cfs(rq);
+	unsigned long util_dl  = cpu_util_dl(rq);
 
-	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+	*max = arch_scale_cpu_capacity(NULL, cpu);
 
-	*util = min(rq->cfs.avg.util_avg, cfs_max);
-	*max = cfs_max;
+	/*
+	 * Ideally we would like to set util_dl as min/guaranteed freq and
+	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+	 * ready for such an interface. So, we only do the latter for now.
+	 */
+	*util = min(util_cfs + util_dl, *max);
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
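The aggregation above boils down to: add the DEADLINE utilization to the CFS
utilization and clamp the sum to the CPU's capacity. A minimal standalone
sketch of that arithmetic (the parameter passing and function name here are
illustrative, not the kernel's; in-tree, the inputs come from cpu_util_cfs(),
cpu_util_dl() and arch_scale_cpu_capacity()):

/*
 * Standalone sketch of the new sugov_get_util() arithmetic, assuming the
 * utilization signals and CPU capacity are passed in directly. Illustrative
 * only; the in-tree code reads them from the runqueue.
 */
static void sketch_get_util(unsigned long util_cfs, unsigned long util_dl,
			    unsigned long capacity,
			    unsigned long *util, unsigned long *max)
{
	unsigned long sum = util_cfs + util_dl;

	*max = capacity;
	/* Request CFS + DL bandwidth, but never more than the CPU can give. */
	*util = sum < capacity ? sum : capacity;
}
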
@@ -271,7 +276,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = sugov_cpu_is_busy(sg_cpu);
 
-	if (flags & SCHED_CPUFREQ_RT_DL) {
+	if (flags & SCHED_CPUFREQ_RT) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
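With DEADLINE now expressed as a utilization signal, only the RT class still
pins the policy to its maximum frequency; the same SCHED_CPUFREQ_RT_DL ->
SCHED_CPUFREQ_RT substitution repeats in the shared-policy hunks below. The
shape of the decision, as a hedged sketch (the flag value and names are
illustrative stand-ins, not the kernel's):

/*
 * Illustrative decision logic only: RT still forces fmax, while DL is now
 * folded into the utilization-derived frequency. SKETCH_CPUFREQ_RT is a
 * stand-in for the real SCHED_CPUFREQ_RT flag.
 */
#define SKETCH_CPUFREQ_RT	(1U << 1)

static unsigned int sketch_pick_freq(unsigned int flags,
				     unsigned int max_freq,
				     unsigned int util_freq)
{
	if (flags & SKETCH_CPUFREQ_RT)
		return max_freq;	/* RT: jump straight to fmax */
	return util_freq;		/* CFS + DL: follow utilization */
}
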
@@ -316,7 +321,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
 			return policy->cpuinfo.max_freq;
 
 		j_util = j_sg_cpu->util;
@@ -352,7 +357,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		if (flags & SCHED_CPUFREQ_RT_DL)
+		if (flags & SCHED_CPUFREQ_RT)
 			next_f = sg_policy->policy->cpuinfo.max_freq;
 		else
 			next_f = sugov_next_freq_shared(sg_cpu, time);
@@ -382,9 +387,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
 	/*
-	 * For RT and deadline tasks, the schedutil governor shoots the
-	 * frequency to maximum. Special care must be taken to ensure that this
-	 * kthread doesn't result in the same behavior.
+	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
+	 * Special care must be taken to ensure that this kthread doesn't result
+	 * in the same behavior.
 	 *
 	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
 	 * updated only at the end of the sugov_work() function and before that