@@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu)
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
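
For context, synchronize_rcu_mult(call_rcu, call_rcu_sched) waits for one grace period of each listed RCU flavor and overlaps the waits, rather than running synchronize_rcu() and synchronize_sched() back to back. The snippet below is only a conceptual sketch of that overlap using completion-based callbacks: it is not the kernel's _wait_rcu_gp() implementation, rcu_sync_both() is a hypothetical helper name, and it assumes a kernel old enough that call_rcu_sched() still exists as a separate flavor.

/*
 * Conceptual sketch: wait for an RCU and an RCU-sched grace period
 * concurrently, roughly what synchronize_rcu_mult(call_rcu,
 * call_rcu_sched) achieves. rcu_sync_both() is not a kernel API.
 */
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

struct rcu_sync_wait {
	struct rcu_head head;
	struct completion done;
};

/* Grace-period callback: signal the waiter for this flavor. */
static void rcu_sync_wakeup(struct rcu_head *head)
{
	struct rcu_sync_wait *w = container_of(head, struct rcu_sync_wait, head);

	complete(&w->done);
}

static void rcu_sync_both(void)
{
	struct rcu_sync_wait rcu_wait, sched_wait;

	init_completion(&rcu_wait.done);
	init_completion(&sched_wait.done);

	/* Queue a callback on each flavor so the grace periods overlap. */
	call_rcu(&rcu_wait.head, rcu_sync_wakeup);
	call_rcu_sched(&sched_wait.head, rcu_sync_wakeup);

	/* Both callbacks run before we return, so the on-stack heads stay valid. */
	wait_for_completion(&rcu_wait.done);
	wait_for_completion(&sched_wait.done);
}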