@@ -708,65 +708,6 @@ static void sync_rcu_exp_handler(void *info)
 	rcu_report_exp_rdp(rsp, rdp, true);
 }
 
-/*
- * Select the nodes that the upcoming expedited grace period needs
- * to wait for.
- */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	unsigned long mask_ofl_test;
-	unsigned long mask_ofl_ipi;
-	int ret;
-	struct rcu_node *rnp;
-
-	sync_exp_reset_tree(rsp);
-	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
-
-		/* Each pass checks a CPU for identity, offline, and idle. */
-		mask_ofl_test = 0;
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-			if (raw_smp_processor_id() == cpu ||
-			    cpu_is_offline(cpu) ||
-			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-				mask_ofl_test |= rdp->grpmask;
-		}
-		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
-
-		/*
-		 * Need to wait for any blocked tasks as well. Note that
-		 * additional blocking tasks will also block the expedited
-		 * GP until such time as the ->expmask bits are cleared.
-		 */
-		if (rcu_preempt_has_tasks(rnp))
-			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
-		/* IPI the remaining CPUs for expedited quiescent state. */
-		mask = 1;
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-			if (!(mask_ofl_ipi & mask))
-				continue;
-			ret = smp_call_function_single(cpu,
-						       sync_rcu_exp_handler,
-						       rsp, 0);
-			if (!ret)
-				mask_ofl_ipi &= ~mask;
-		}
-		/* Report quiescent states for those that went offline. */
-		mask_ofl_test |= mask_ofl_ipi;
-		if (mask_ofl_test)
-			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
-	}
-}
-
 /**
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
@@ -795,7 +736,7 @@ void synchronize_rcu_expedited(void)
 	rcu_exp_gp_seq_start(rsp);
 
 	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp);
+	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
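
The second hunk only shows the call site gaining an extra argument; the consolidated sync_rcu_exp_select_cpus() that accepts it is added elsewhere in the patch. A minimal standalone sketch of the idea behind this change, assuming the consolidated selector now takes the per-flavor IPI handler as a function pointer (in the kernel presumably an smp_call_func_t forwarded to smp_call_function_single()) instead of hard-coding sync_rcu_exp_handler. All names below (fake_state, exp_handler_t, demo_select_cpus, demo_exp_handler) are invented for illustration and are not kernel symbols.

#include <stdio.h>

/* Stand-in for struct rcu_state; illustrative only. */
struct fake_state {
	const char *name;
};

/* Mirrors the shape of smp_call_func_t: void (*)(void *info). */
typedef void (*exp_handler_t)(void *info);

/* Per-flavor handler, analogous to sync_rcu_exp_handler(). */
static void demo_exp_handler(void *info)
{
	struct fake_state *rsp = info;

	printf("expedited QS handler ran for %s\n", rsp->name);
}

/*
 * Consolidated selector: instead of invoking one specific handler, it
 * receives the handler from the caller, so every RCU flavor can share
 * this single routine.
 */
static void demo_select_cpus(struct fake_state *rsp, exp_handler_t func)
{
	/* The kernel code would IPI each online, non-idle CPU here;
	 * invoking the callback directly is enough to show the indirection. */
	func(rsp);
}

int main(void)
{
	struct fake_state rsp = { .name = "rcu_preempt" };

	/* Matches the updated call site: pass the flavor's handler explicitly. */
	demo_select_cpus(&rsp, demo_exp_handler);
	return 0;
}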