@@ -97,7 +97,7 @@ struct rcu_state rcu_state = {
 	.abbr = RCU_ABBR,
 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
-	.ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
+	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 };
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1776,13 +1776,13 @@ static bool rcu_gp_init(void)
 	 */
 	rcu_state.gp_state = RCU_GP_ONOFF;
 	rcu_for_each_leaf_node(rnp) {
-		spin_lock(&rcu_state.ofl_lock);
+		raw_spin_lock(&rcu_state.ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
 			raw_spin_unlock_irq_rcu_node(rnp);
-			spin_unlock(&rcu_state.ofl_lock);
+			raw_spin_unlock(&rcu_state.ofl_lock);
 			continue;
 		}
 
@@ -1818,7 +1818,7 @@ static bool rcu_gp_init(void)
 		}
 
 		raw_spin_unlock_irq_rcu_node(rnp);
-		spin_unlock(&rcu_state.ofl_lock);
+		raw_spin_unlock(&rcu_state.ofl_lock);
 	}
 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
 
@@ -3377,7 +3377,7 @@ void rcu_report_dead(unsigned int cpu)
 
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	spin_lock(&rcu_state.ofl_lock);
+	raw_spin_lock(&rcu_state.ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
@@ -3388,7 +3388,7 @@ void rcu_report_dead(unsigned int cpu)
 	}
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	spin_unlock(&rcu_state.ofl_lock);
+	raw_spin_unlock(&rcu_state.ofl_lock);
 
 	per_cpu(rcu_cpu_started, cpu) = 0;
 }
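
Background on the conversion: spinlock_t becomes a sleeping lock on
PREEMPT_RT, and ->ofl_lock is acquired with preemption disabled in the
CPU-offline path (rcu_report_dead() above), where sleeping is not
allowed; raw_spinlock_t stays a true spinning lock in every
configuration. A minimal sketch of the pattern this patch applies, i.e.
a statically initialized raw spinlock inside a structure. The
demo_state/demo_update names are illustrative only, not from the patch:

#include <linux/spinlock.h>

struct demo_state {
	raw_spinlock_t lock;
	unsigned long flags_word;
};

static struct demo_state demo_state = {
	/* Same static-initializer form the patch uses for ->ofl_lock. */
	.lock = __RAW_SPIN_LOCK_UNLOCKED(demo_state.lock),
};

static void demo_update(unsigned long bits)
{
	/* raw_spin_lock() never sleeps, even on PREEMPT_RT. */
	raw_spin_lock(&demo_state.lock);
	demo_state.flags_word |= bits;
	raw_spin_unlock(&demo_state.lock);
}

Raw-spinlock critical sections should stay short and non-blocking,
which fits ->ofl_lock's narrow role here: serializing CPU-offline
bookkeeping in rcu_report_dead() against grace-period initialization
in rcu_gp_init().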