@@ -2021,6 +2021,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	return &cpu_rq(i)->rd->dl_bw;
 }

@@ -2029,6 +2031,8 @@ static inline int dl_bw_cpus(int i)
 	struct root_domain *rd = cpu_rq(i)->rd;
 	int cpus = 0;

+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	for_each_cpu_and(i, rd->span, cpu_active_mask)
 		cpus++;

@@ -7645,6 +7649,8 @@ static int sched_dl_global_constraints(void)
 	int cpu, ret = 0;
 	unsigned long flags;

+	rcu_read_lock();
+
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7666,6 +7672,8 @@ static int sched_dl_global_constraints(void)
 			break;
 	}

+	rcu_read_unlock();
+
 	return ret;
 }

@@ -7681,6 +7689,7 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

+	rcu_read_lock();
 	/*
	 * FIXME: As above...
	 */
@@ -7691,6 +7700,7 @@ static void sched_dl_do_global(void)
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
+	rcu_read_unlock();
 }

 static int sched_rt_global_validate(void)