@@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching);
  * offline to continue to use RCU for one jiffy after marking itself
  * offline in the cpu_online_mask. This leniency is necessary given the
  * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the CPU_DYING
- * notifiers.
+ * the fact that a CPU enters the scheduler after completing the teardown
+ * of the CPU.
  *
- * This is also why RCU internally marks CPUs online during the
- * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
+ * This is also why RCU internally marks CPUs online during the
+ * preparation phase and offline after the CPU has been taken down.
  *
  * Disable checking if in an NMI handler because we cannot safely report
  * errors from NMI handlers anyway.
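
The comment above now describes hotplug in terms of the state machine's phases rather than the old notifier actions. As background, here is a minimal sketch of how a subsystem hooks those phases through the cpuhp API; cpuhp_setup_state() is the real interface, but the callback names and the use of a dynamic state are illustrative assumptions:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical callbacks; only the signatures are prescribed. */
static int example_cpu_online(unsigned int cpu)
{
	pr_info("example: CPU %u came online\n", cpu);
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	pr_info("example: CPU %u going offline\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/*
	 * Request a dynamic state in the online section.  The core
	 * invokes the startup callback on every CPU that is already
	 * online, then on each CPU onlined later, and the teardown
	 * callback as each CPU goes down.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	return ret < 0 ? ret : 0;
}
core_initcall(example_init);
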
@@ -3806,12 +3806,57 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
-static void rcu_prepare_cpu(int cpu)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
 		rcu_init_percpu_data(cpu, rsp);
+
+	rcu_prepare_kthreads(cpu);
+	rcu_spawn_all_nocb_kthreads(cpu);
+
+	return 0;
+}
+
+static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+
+	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+}
+
+int rcutree_online_cpu(unsigned int cpu)
+{
+	sync_sched_exp_online_cleanup(cpu);
+	rcutree_affinity_setting(cpu, -1);
+	return 0;
+}
+
+int rcutree_offline_cpu(unsigned int cpu)
+{
+	rcutree_affinity_setting(cpu, cpu);
+	return 0;
+}
+
+int rcutree_dying_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_cpu(rsp);
+	return 0;
+}
+
+int rcutree_dead_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rcu_cleanup_dead_cpu(cpu, rsp);
+		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+	}
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
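
With the notifier gone, the hotplug core has to reach these entry points through its state tables. The following sketch shows the kind of wiring this implies, assuming the companion change adds RCU states to kernel/cpu.c; the enum values, state names, and struct fields are assumptions based on the cpuhp core, not part of this hunk:

/* Declarations the hotplug core needs, e.g. in a shared header: */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);

/* Sketch of matching entries in the static cpuhp state tables: */
	[CPUHP_RCUTREE_PREP] = {
		.name		= "RCU/tree:prepare",
		.startup	= rcutree_prepare_cpu,
		.teardown	= rcutree_dead_cpu,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name		= "RCU/tree:dying",
		.startup	= NULL,
		.teardown	= rcutree_dying_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name		= "RCU/tree:online",
		.startup	= rcutree_online_cpu,
		.teardown	= rcutree_offline_cpu,
	},

Note the pairing: the prepare-stage state couples rcutree_prepare_cpu() with rcutree_dead_cpu() as its teardown, while online/offline form a single application-processor state, mirroring the symmetry the state machine enforces.
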
@@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu)
 }
 #endif
 
-/*
- * Handle CPU online/offline notification events.
- */
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_state *rsp;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_prepare_cpu(cpu);
-		rcu_prepare_kthreads(cpu);
-		rcu_spawn_all_nocb_kthreads(cpu);
-		break;
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		sync_sched_exp_online_cleanup(cpu);
-		rcu_boost_kthread_setaffinity(rnp, -1);
-		break;
-	case CPU_DOWN_PREPARE:
-		rcu_boost_kthread_setaffinity(rnp, cpu);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dying_cpu(rsp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dead_cpu(cpu, rsp);
-			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
-		}
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int rcu_pm_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
 {
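
For reference, the removed notifier actions map onto the new callbacks as follows (read directly off the two hunks above):

	CPU_UP_PREPARE / CPU_UP_PREPARE_FROZEN                -> rcutree_prepare_cpu()
	CPU_ONLINE / CPU_DOWN_FAILED                          -> rcutree_online_cpu()
	CPU_DOWN_PREPARE                                      -> rcutree_offline_cpu()
	CPU_DYING / CPU_DYING_FROZEN                          -> rcutree_dying_cpu()
	CPU_DEAD / CPU_DEAD_FROZEN / CPU_UP_CANCELED[_FROZEN] -> rcutree_dead_cpu()

The _FROZEN variants have no counterpart because the state machine runs the same callbacks on the suspend/resume path, making the distinction unnecessary.
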
@@ -4208,10 +4208,9 @@ void __init rcu_init(void)
 	 * this is called early in boot, before either interrupts
 	 * or the scheduler are operational.
 	 */
-	cpu_notifier(rcu_cpu_notify, 0);
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
-		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+		rcutree_prepare_cpu(cpu);
 }
 
 #include "tree_exp.h"
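
One subtlety in rcu_init(): a CPU that is already online this early in boot (in practice the boot CPU) never travels through the state machine's bringup path, so its prepare-stage work must be invoked by hand. A toy userspace model of that pattern, with made-up names and no kernel dependencies:

#include <stdio.h>

#define NR_CPUS 4

/* CPUs 0 and 1 are pretend-online before the callback is registered. */
static int online[NR_CPUS] = { 1, 1, 0, 0 };

static int toy_prepare_cpu(unsigned int cpu)
{
	printf("prepare: per-CPU data for CPU %u\n", cpu);
	return 0;
}

int main(void)
{
	unsigned int cpu;

	/* Equivalent of rcu_init()'s for_each_online_cpu() loop:
	 * catch up the CPUs that predate the registration. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (online[cpu])
			toy_prepare_cpu(cpu);

	/* CPUs onlined afterwards would be handled by the hotplug
	 * state machine calling the same callback. */
	return 0;
}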