@@ -2606,28 +2606,6 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 	}
 }
 
-/*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinit
- * bit masks.
- */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
-{
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
-
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
-
-	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-	mask = rdp->grpmask;
-	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context. Do the remainder of the cleanup,
@@ -4247,6 +4225,43 @@ static void rcu_prepare_cpu(int cpu)
 		rcu_init_percpu_data(cpu, rsp);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function. We now remove it from the rcu_node tree's ->qsmaskinit
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return;
+
+	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+	mask = rdp->grpmask;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rnp->qsmaskinitnext &= ~mask;
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+void rcu_report_dead(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	/* QS for any half-done expedited RCU-sched GP. */
+	preempt_disable();
+	rcu_report_exp_rdp(&rcu_sched_state,
+			   this_cpu_ptr(rcu_sched_state.rda), true);
+	preempt_enable();
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+}
+#endif
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -4278,17 +4293,6 @@ int rcu_cpu_notify(struct notifier_block *self,
 		for_each_rcu_flavor(rsp)
 			rcu_cleanup_dying_cpu(rsp);
 		break;
-	case CPU_DYING_IDLE:
-		/* QS for any half-done expedited RCU-sched GP. */
-		preempt_disable();
-		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(rcu_sched_state.rda), true);
-		preempt_enable();
-
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dying_idle_cpu(cpu, rsp);
-		}
-		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
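
The new rcu_report_dead() above is meant to be invoked directly from the hotplug idle-exit path in place of the CPU_DYING_IDLE notifier case that this patch removes. A minimal sketch of a hypothetical caller follows; the function name cpuhp_report_idle_dead() and its exact placement are assumptions for illustration, not something this patch adds.

/*
 * Hypothetical caller sketch: the outgoing CPU, leaving the idle loop
 * with interrupts disabled, reports itself dead to RCU before the
 * architecture-specific dead path runs.
 */
static void cpuhp_report_idle_dead(void)
{
	/* Tell RCU this CPU is gone for grace-period purposes. */
	rcu_report_dead(smp_processor_id());
	/* ... then continue into arch_cpu_idle_dead() ... */
}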