|
|
@@ -74,6 +74,7 @@
|
|
|
#include <linux/context_tracking.h>
|
|
|
#include <linux/compiler.h>
|
|
|
#include <linux/frame.h>
|
|
|
+#include <linux/prefetch.h>
|
|
|
|
|
|
#include <asm/switch_to.h>
|
|
|
#include <asm/tlb.h>
|
|
|
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
|
|
|
EXPORT_PER_CPU_SYMBOL(kstat);
|
|
|
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
|
|
|
|
|
|
+/*
|
|
|
+ * fair_sched_class.update_curr() dereferences cfs_rq->curr and reads
|
|
|
+ * curr->exec_start; when called from task_sched_runtime(), these
|
|
|
+ * accesses show a high rate of cache misses in practice.
|
|
|
+ * Prefetching the data ahead of time improves performance.
|
|
|
+ */
|
|
|
+static inline void prefetch_curr_exec_start(struct task_struct *p)
|
|
|
+{
|
|
|
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
+ struct sched_entity *curr = (&p->se)->cfs_rq->curr;
|
|
|
+#else
|
|
|
+ struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
|
|
|
+#endif
|
|
|
+ prefetch(curr);
|
|
|
+ prefetch(&curr->exec_start);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Return accounted runtime for the task.
|
|
|
* In case the task is currently running, return the runtime plus current's
|
|
|
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
|
|
|
* thread, breaking clock_gettime().
|
|
|
*/
|
|
|
if (task_current(rq, p) && task_on_rq_queued(p)) {
|
|
|
+ prefetch_curr_exec_start(p);
|
|
|
update_rq_clock(rq);
|
|
|
p->sched_class->update_curr(rq);
|
|
|
}
|