@@ -2484,8 +2484,6 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -6010,16 +6008,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
-}
-
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))