@@ -5337,36 +5337,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	unsigned long flags;
-	long cpu = (long)hcpu;
-	struct dl_bw *dl_b;
-
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		set_cpu_active(cpu, false);
-
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
-
-			raw_spin_lock_irqsave(&dl_b->lock, flags);
-			cpus = dl_bw_cpus(cpu);
-			overflow = __dl_overflow(dl_b, cpus, 0, 0);
-			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-			rcu_read_unlock_sched();
-
-			if (overflow)
-				return notifier_from_errno(-EBUSY);
-		}
+		set_cpu_active((long)hcpu, false);
 		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
 	}
-
-	return NOTIFY_DONE;
 }
 
 static int __init migration_init(void)
@@ -7006,7 +6983,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 		 */
 
 	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus(true);
 		break;
 	default:
@@ -7018,8 +6994,32 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action) {
+	unsigned long flags;
+	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
+		/* explicitly allow suspend */
+		if (!(action & CPU_TASKS_FROZEN)) {
+			bool overflow;
+			int cpus;
+
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
+			raw_spin_lock_irqsave(&dl_b->lock, flags);
+			cpus = dl_bw_cpus(cpu);
+			overflow = __dl_overflow(dl_b, cpus, 0, 0);
+			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+			rcu_read_unlock_sched();
+
+			if (overflow) {
+				trace_printk("hotplug failed for cpu %lu", cpu);
+				return notifier_from_errno(-EBUSY);
+			}
+		}
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN: