@@ -358,8 +358,10 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
+		rdp->exp_dynticks_snap =
+			atomic_add_return(0, &rdtp->dynticks);
 		if (raw_smp_processor_id() == cpu ||
-		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+		    !(rdp->exp_dynticks_snap & 0x1) ||
 		    !(rnp->qsmaskinitnext & rdp->grpmask))
 			mask_ofl_test |= rdp->grpmask;
 	}
@@ -377,9 +379,17 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	/* IPI the remaining CPUs for expedited quiescent state. */
 	for_each_leaf_node_possible_cpu(rnp, cpu) {
 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
 		if (!(mask_ofl_ipi & mask))
 			continue;
 retry_ipi:
+		if (atomic_add_return(0, &rdtp->dynticks) !=
+		    rdp->exp_dynticks_snap) {
+			mask_ofl_test |= mask;
+			continue;
+		}
 		ret = smp_call_function_single(cpu, func, rsp, 0);
 		if (!ret) {
 			mask_ofl_ipi &= ~mask;
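
The pattern the patch adds is snapshot-and-recheck: the first pass records each CPU's dynticks counter in exp_dynticks_snap, and the IPI pass rereads the counter just before interrupting the CPU. Any change since the snapshot means the CPU passed through a quiescent (idle) state on its own, so the IPI can be skipped. Below is a minimal, standalone sketch of that pattern using C11 atomics instead of the kernel's atomic_t and per-CPU machinery; the names (cpu_state, snapshot, needs_ipi) are hypothetical illustrations, not kernel identifiers. As in the kernel's convention, an even counter value stands in for "idle" and an odd value for "non-idle".

/*
 * Sketch of snapshot-and-recheck, under the assumptions above.
 * Even counter = idle (already quiescent); odd = running.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	atomic_ulong dynticks;	/* even = idle, odd = non-idle */
	unsigned long snap;	/* snapshot taken before the IPI pass */
};

/* First pass: record the counter so a later pass can detect movement. */
static void snapshot(struct cpu_state *cs)
{
	cs->snap = atomic_load(&cs->dynticks);
}

/* IPI pass: interrupt only if the CPU made no progress on its own. */
static bool needs_ipi(struct cpu_state *cs)
{
	if (!(cs->snap & 0x1))
		return false;	/* was idle at snapshot time */
	if (atomic_load(&cs->dynticks) != cs->snap)
		return false;	/* passed through idle since the snapshot */
	return true;		/* still busy: must interrupt it */
}

int main(void)
{
	struct cpu_state cs = { .dynticks = 3 };	/* odd: non-idle */

	snapshot(&cs);
	printf("before progress: needs IPI? %s\n",
	       needs_ipi(&cs) ? "yes" : "no");

	/* Simulate the CPU entering and leaving idle (two increments). */
	atomic_fetch_add(&cs.dynticks, 2);
	printf("after progress:  needs IPI? %s\n",
	       needs_ipi(&cs) ? "yes" : "no");

	return 0;
}

The recheck is cheap relative to an IPI, and skipping the IPI in the "counter moved" case is safe because the counter can only move by passing through an idle sojourn, which is itself a quiescent state for the expedited grace period.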