@@ -4661,6 +4661,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 	struct dl_bw *cur_dl_b;
 	unsigned long flags;
 
+	rcu_read_lock_sched();
 	cur_dl_b = dl_bw_of(cpumask_any(cur));
 	trial_cpus = cpumask_weight(trial);
 
@@ -4669,6 +4670,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
 		ret = 0;
 	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+	rcu_read_unlock_sched();
 
 	return ret;
 }
@@ -4697,11 +4699,13 @@ int task_can_attach(struct task_struct *p,
 					      cs_cpus_allowed)) {
 		unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
 							cs_cpus_allowed);
-		struct dl_bw *dl_b = dl_bw_of(dest_cpu);
+		struct dl_bw *dl_b;
 		bool overflow;
 		int cpus;
 		unsigned long flags;
 
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(dest_cpu);
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		cpus = dl_bw_cpus(dest_cpu);
 		overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
@@ -4717,6 +4721,7 @@ int task_can_attach(struct task_struct *p,
 			__dl_add(dl_b, p->dl.dl_bw);
 		}
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+		rcu_read_unlock_sched();
 
 	}
 #endif
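
The change above follows from how dl_bw_of() is implemented: it returns a pointer into the CPU's root domain (rq->rd), and root domains are freed via an RCU-sched grace period, so the returned pointer is only safe to dereference inside an rcu_read_lock_sched() section. Each hunk therefore wraps the dl_bw_of() call and every subsequent use of the resulting pointer in such a section. As a rough, hypothetical user-space analogue of this pattern, built on liburcu (struct bw, shared_bw, and reader() below are invented for illustration and are not kernel code):

/*
 * Minimal sketch of RCU-protected pointer lifetime, assuming liburcu
 * is installed. Build with:
 *   gcc sketch.c -lurcu -lpthread    (or -lurcu-memb on newer liburcu)
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu(), ... */

struct bw {			/* stand-in for the kernel's struct dl_bw */
	long total_bw;
};

static struct bw *shared_bw;	/* stand-in for the RCU-protected rd->dl_bw */

static void *reader(void *arg)
{
	int i;

	(void)arg;
	rcu_register_thread();	/* liburcu reader threads must register */
	for (i = 0; i < 100000; i++) {
		struct bw *b;

		/*
		 * Like rcu_read_lock_sched() around dl_bw_of() above:
		 * the object b points to cannot be freed until unlock.
		 */
		rcu_read_lock();
		b = rcu_dereference(shared_bw);
		if (b)
			(void)b->total_bw;
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct bw *old_bw, *new_bw;

	rcu_register_thread();
	shared_bw = calloc(1, sizeof(*shared_bw));
	pthread_create(&t, NULL, reader, NULL);

	/*
	 * Updater side: publish a new object, wait out all pre-existing
	 * read-side critical sections, then free the old object.
	 */
	new_bw = calloc(1, sizeof(*new_bw));
	new_bw->total_bw = 42;
	old_bw = shared_bw;
	rcu_assign_pointer(shared_bw, new_bw);
	synchronize_rcu();
	free(old_bw);

	pthread_join(t, NULL);
	free(new_bw);
	rcu_unregister_thread();
	return 0;
}

Note that rcu_read_lock_sched() in the kernel is the preemption-disabling flavor; plain rcu_read_lock() is only the closest liburcu stand-in for sketching the lifetime rule, not an exact equivalent.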