@@ -1670,9 +1670,6 @@ static inline int hrtick_enabled(struct rq *rq)
 
 #endif /* CONFIG_SCHED_HRTICK */
 
-#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
 #ifndef arch_scale_freq_capacity
 static __always_inline
 unsigned long arch_scale_freq_capacity(int cpu)
@@ -1681,6 +1678,9 @@ unsigned long arch_scale_freq_capacity(int cpu)
 }
 #endif
 
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+
 #ifndef arch_scale_cpu_capacity
 static __always_inline
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1698,6 +1698,13 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 	sched_avg_update(rq);
 }
 #else
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
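
For context on why the new !CONFIG_SMP fallback above sits under #ifndef: the generic header only supplies a default when the architecture has not already #define'd its own arch_scale_cpu_capacity(). Below is a minimal, stand-alone user-space sketch of that default/override pattern. It is not kernel code: SCHED_CAPACITY_SCALE is hard-coded to the kernel's value of 1024, the signature is simplified, and main() exists only for demonstration.

/* Stand-alone sketch of the #ifndef default/override pattern used above. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL	/* kernel value: 1 << SCHED_CAPACITY_SHIFT */

/*
 * An architecture that wants its own implementation typically #define's
 * arch_scale_cpu_capacity to its own helper in its topology header, which
 * makes the #ifndef below skip this generic fallback.
 */
#ifndef arch_scale_cpu_capacity
static inline unsigned long arch_scale_cpu_capacity(void *sd, int cpu)
{
	(void)sd;	/* unused, mirrors the __always_unused parameter above */
	(void)cpu;
	return SCHED_CAPACITY_SCALE;	/* every CPU reports full capacity */
}
#endif

int main(void)
{
	printf("capacity of CPU 0: %lu\n", arch_scale_cpu_capacity(NULL, 0));
	return 0;
}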