@@ -223,6 +223,8 @@ struct global_params {
  *			operation
  * @hwp_req_cached:	Cached value of the last HWP Request MSR
  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
+ * @last_io_update:	Last time when IO wake flag was set
+ * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
  *
  * This structure stores per CPU instance data for all CPUs.
@@ -258,6 +260,8 @@ struct cpudata {
 	s16 epp_saved;
 	u64 hwp_req_cached;
 	u64 hwp_cap_cached;
+	u64 last_io_update;
+	unsigned int sched_flags;
 	u32 hwp_boost_min;
 };
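
The next hunk uses these two fields to debounce IO wake boosts: sched_flags
accumulates scheduler hints (possibly set from another CPU) and last_io_update
records the last IO wake, so that a boost fires only when two IO wakes land in
two consecutive ticks. For illustration, a minimal userspace sketch of that
debounce follows; it assumes a 1 ms tick (HZ=1000), and io_boost_candidate()
and the sample timestamps are hypothetical names for this sketch, not part of
the driver.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TICK_NSEC 1000000ULL	/* assumed HZ=1000, i.e. a 1 ms tick */

	/* Same semantics as the kernel helper: true if a is before b. */
	static bool time_before64(uint64_t a, uint64_t b)
	{
		return (int64_t)(a - b) < 0;
	}

	static uint64_t last_io_update;

	/* True only when two IO wakes land within two consecutive ticks. */
	static bool io_boost_candidate(uint64_t now)
	{
		bool do_io = time_before64(now, last_io_update + 2 * TICK_NSEC);

		last_io_update = now;
		return do_io;
	}

	int main(void)
	{
		uint64_t t = 1000000000ULL;	/* pretend 1 s of uptime */

		/* First wake only arms the window: prints 0. */
		printf("first wake: %d\n", io_boost_candidate(t));
		/* A wake one tick later is inside the window: prints 1. */
		printf("next tick:  %d\n", io_boost_candidate(t + TICK_NSEC));
		/* A wake after a long gap misses the window: prints 0. */
		printf("long gap:   %d\n", io_boost_candidate(t + 100 * TICK_NSEC));
		return 0;
	}
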
@@ -1460,9 +1464,44 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
 	cpu->last_update = cpu->sample.time;
 }
 
+static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
+						      u64 time)
+{
+	cpu->sample.time = time;
+
+	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
+		bool do_io = false;
+
+		cpu->sched_flags = 0;
+		/*
+		 * Set the iowait_boost flag and update the time. Since the
+		 * IO WAIT flag is set on every IO wake, a single occurrence
+		 * does not prove that IO bound activity is scheduled on
+		 * this CPU. Only if we see it in at least two consecutive
+		 * ticks do we treat the CPU as a boost candidate.
+		 */
+		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
+			do_io = true;
+
+		cpu->last_io_update = time;
+
+		if (do_io)
+			intel_pstate_hwp_boost_up(cpu);
+
+	} else {
+		intel_pstate_hwp_boost_down(cpu);
+	}
+}
+
 static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
 						u64 time, unsigned int flags)
 {
+	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+
+	cpu->sched_flags |= flags;
+
+	if (smp_processor_id() == cpu->cpu)
+		intel_pstate_update_util_hwp_local(cpu, time);
 }
 
 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
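
Because the cpufreq utilization callback may be invoked from a remote CPU,
intel_pstate_update_util_hwp() only ORs the incoming flags into sched_flags
and defers the boost decision until the callback runs on the target CPU
itself. It finds its cpudata via container_of(), which converts a pointer to
the embedded update_util_data member back into a pointer to the enclosing
structure. A standalone sketch of that pattern follows; the struct layouts
are trimmed stand-ins (the real callback also receives time and flags), not
the driver's definitions.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified form of the kernel macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct update_util_data {
		void (*func)(struct update_util_data *data);
	};

	struct cpudata {
		int cpu;
		unsigned int sched_flags;
		struct update_util_data update_util;	/* embedded, not a pointer */
	};

	static void update_util_cb(struct update_util_data *data)
	{
		/* Recover the enclosing cpudata from the member pointer. */
		struct cpudata *cpu = container_of(data, struct cpudata, update_util);

		printf("callback for CPU %d\n", cpu->cpu);
	}

	int main(void)
	{
		struct cpudata cd = {
			.cpu = 3,
			.update_util = { .func = update_util_cb },
		};

		/* The scheduler sees only &cd.update_util; the callback maps it back. */
		cd.update_util.func(&cd.update_util);
		return 0;
	}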