@@ -5264,6 +5264,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 {
 	unsigned long flags;
 	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
@@ -5271,15 +5272,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 
 		/* explicitly allow suspend */
 		if (!(action & CPU_TASKS_FROZEN)) {
-			struct dl_bw *dl_b = dl_bw_of(cpu);
 			bool overflow;
 			int cpus;
 
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
 			raw_spin_lock_irqsave(&dl_b->lock, flags);
 			cpus = dl_bw_cpus(cpu);
 			overflow = __dl_overflow(dl_b, cpus, 0, 0);
 			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+			rcu_read_unlock_sched();
+
 			if (overflow)
 				return notifier_from_errno(-EBUSY);
 		}
@@ -7647,11 +7652,10 @@ static int sched_dl_global_constraints(void)
 	u64 runtime = global_rt_runtime();
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
+	struct dl_bw *dl_b;
 	int cpu, ret = 0;
 	unsigned long flags;
 
-	rcu_read_lock();
-
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7662,25 +7666,27 @@ static int sched_dl_global_constraints(void)
 	 * solutions is welcome!
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+		rcu_read_unlock_sched();
+
 		if (ret)
 			break;
 	}
 
-	rcu_read_unlock();
-
 	return ret;
 }
 
 static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
+	struct dl_bw *dl_b;
 	int cpu;
 	unsigned long flags;
 
@@ -7690,18 +7696,19 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
-	rcu_read_lock();
 	/*
 	 * FIXME: As above...
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+		rcu_read_unlock_sched();
 	}
-	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)