@@ -101,6 +101,7 @@ struct rcu_state sname##_state = { \
 	.abbr = sabbr, \
 	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
 	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+	.ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -1900,11 +1901,13 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_gp_slow(rsp, gp_preinit_delay);
+		spin_lock(&rsp->ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
 			raw_spin_unlock_irq_rcu_node(rnp);
+			spin_unlock(&rsp->ofl_lock);
 			continue;
 		}
 
@@ -1940,6 +1943,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		}
 
 		raw_spin_unlock_irq_rcu_node(rnp);
+		spin_unlock(&rsp->ofl_lock);
 	}
 
 	/*
@@ -3749,6 +3753,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
+	spin_lock(&rsp->ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
@@ -3757,6 +3762,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	}
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	spin_unlock(&rsp->ofl_lock);
 }
 
 /*
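
The pattern the patch establishes is a consistent two-level lock nesting: both the grace-period-initialization scan and the CPU-offline path acquire the global ->ofl_lock before the leaf rcu_node lock and release in reverse order, so neither can observe the other's ->qsmaskinitnext update mid-flight. Below is a minimal user-space sketch of that nesting, with pthread mutexes standing in for the kernel's spinlocks; node_t, gp_init_scan(), and cpu_offline() are illustrative names, not identifiers from the kernel source.

#include <pthread.h>
#include <stdio.h>

/* Global lock taken before any per-node lock, mirroring ->ofl_lock. */
static pthread_mutex_t ofl_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
	pthread_mutex_t lock;		/* stands in for the rcu_node lock */
	unsigned long qsmaskinitnext;	/* CPUs online as of the next GP */
	unsigned long qsmaskinit;	/* snapshot taken at GP start */
} node_t;

static node_t node = { PTHREAD_MUTEX_INITIALIZER, 0x3, 0x3 };

/* GP initialization: snapshot ->qsmaskinitnext under both locks. */
static void gp_init_scan(void)
{
	pthread_mutex_lock(&ofl_lock);	/* outer: exclude the offline path */
	pthread_mutex_lock(&node.lock);	/* inner: protect the node fields */
	node.qsmaskinit = node.qsmaskinitnext;
	pthread_mutex_unlock(&node.lock);
	pthread_mutex_unlock(&ofl_lock);	/* release in reverse order */
}

/* CPU-offline path: clear the outgoing CPU's bit under the same nesting. */
static void cpu_offline(int cpu)
{
	unsigned long mask = 1UL << cpu;

	pthread_mutex_lock(&ofl_lock);
	pthread_mutex_lock(&node.lock);
	node.qsmaskinitnext &= ~mask;
	pthread_mutex_unlock(&node.lock);
	pthread_mutex_unlock(&ofl_lock);
}

int main(void)
{
	cpu_offline(1);
	gp_init_scan();
	printf("qsmaskinit after offlining CPU 1: %#lx\n", node.qsmaskinit);
	return 0;
}

Because both paths honor the same acquisition order (ofl_lock, then the node lock), the snapshot in gp_init_scan() either sees the offline fully applied or not at all, which is the race the patch closes.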