@@ -1534,10 +1534,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
	 * hold it, acquire the root rcu_node structure's lock in order to
	 * start one (if needed).
	 */
-	if (rnp != rnp_root) {
-		raw_spin_lock(&rnp_root->lock);
-		smp_mb__after_unlock_lock();
-	}
+	if (rnp != rnp_root)
+		raw_spin_lock_rcu_node(rnp_root);
 
	/*
	 * Get a new grace-period number.  If there really is no grace
@@ -1786,11 +1784,10 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
	     rdp->completed == READ_ONCE(rnp->completed) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
-	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
+	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
-	smp_mb__after_unlock_lock();
	needwake = __note_gp_changes(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	if (needwake)
@@ -1814,8 +1811,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);
 
	WRITE_ONCE(rsp->gp_activity, jiffies);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irq_rcu_node(rnp);
	if (!READ_ONCE(rsp->gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep. */
		raw_spin_unlock_irq(&rnp->lock);
@@ -1847,8 +1843,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		rcu_gp_slow(rsp, gp_preinit_delay);
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
@@ -1904,8 +1899,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		rcu_gp_slow(rsp, gp_init_delay);
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
		rdp = this_cpu_ptr(rsp->rda);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
@@ -1973,8 +1967,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
	}
	/* Clear flag to prevent immediate re-entry. */
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
		WRITE_ONCE(rsp->gp_flags,
			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
		raw_spin_unlock_irq(&rnp->lock);
@@ -1993,8 +1986,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);
 
	WRITE_ONCE(rsp->gp_activity, jiffies);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irq_rcu_node(rnp);
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;
@@ -2019,8 +2011,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
	 * grace period is recorded in any of the rcu_node structures.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
-		raw_spin_lock_irq(&rnp->lock);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irq_rcu_node(rnp);
		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
		WARN_ON_ONCE(rnp->qsmask);
		WRITE_ONCE(rnp->completed, rsp->gpnum);
@@ -2035,8 +2026,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
		rcu_gp_slow(rsp, gp_cleanup_delay);
	}
	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
+	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
	rcu_nocb_gp_set(rnp, nocb);
 
	/* Declare grace period done. */
@@ -2284,8 +2274,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		oldmask = rnp_c->qsmask;
	}
 
@@ -2332,8 +2321,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
	gps = rnp->gpnum;
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
-	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
@@ -2355,8 +2343,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
	struct rcu_node *rnp;
 
	rnp = rdp->mynode;
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if ((rdp->cpu_no_qs.b.norm &&
	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
@@ -2582,8 +2569,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
		rnp = rnp->parent;
		if (!rnp)
			break;
-		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-		smp_mb__after_unlock_lock(); /* GP memory ordering. */
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		rnp->qsmask &= ~mask;
		if (rnp->qsmaskinit) {
@@ -2611,8 +2597,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
+	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rnp->qsmaskinitnext &= ~mask;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -2809,8 +2794,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
	rcu_for_each_leaf_node(rsp, rnp) {
		cond_resched_rcu_qs();
		mask = 0;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask == 0) {
			if (rcu_state_p == &rcu_sched_state ||
			    rsp != rcu_state_p ||
@@ -2881,8 +2865,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
 
	/* Reached the root of the rcu_node tree, acquire lock. */
-	raw_spin_lock_irqsave(&rnp_old->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
	raw_spin_unlock(&rnp_old->fqslock);
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		rsp->n_force_qs_lh++;
@@ -3005,8 +2988,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
		if (!rcu_gp_in_progress(rsp)) {
			struct rcu_node *rnp_root = rcu_get_root(rsp);
 
-			raw_spin_lock(&rnp_root->lock);
-			smp_mb__after_unlock_lock();
+			raw_spin_lock_rcu_node(rnp_root);
			needwake = rcu_start_gp(rsp);
			raw_spin_unlock(&rnp_root->lock);
			if (needwake)
@@ -3426,8 +3408,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			continue;  /* No new CPUs, nothing to do. */
@@ -3447,8 +3428,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
-			raw_spin_lock_irqsave(&rnp_up->lock, flags);
-			smp_mb__after_unlock_lock();
+			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
@@ -3472,8 +3452,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 
	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -3531,8 +3510,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
-		raw_spin_lock(&rnp->lock); /* irqs already disabled */
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
@@ -3549,8 +3527,7 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
 {
	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
 }
 
@@ -3564,8 +3541,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 {
	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
@@ -3708,8 +3684,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 
	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
@@ -4198,8 +4173,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
	 */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
-	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
+	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	rnp->qsmaskinitnext |= mask;
	rnp->expmaskinitnext |= mask;
	if (!rdp->beenonline)
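
Note: this excerpt contains only the converted call sites; the _rcu_node()
wrapper definitions are not part of the diff (in this series they would sit
alongside the rcu_node declarations, e.g. in kernel/rcu/tree.h). As a
sketch, assuming each wrapper simply folds the deleted two-line sequence
into the lock acquisition, they presumably look something like this:

	/*
	 * Sketch only: assumed shape of the wrappers, mirroring the
	 * open-coded lock-then-barrier sequences removed above.
	 * smp_mb__after_unlock_lock() makes an UNLOCK of one rcu_node's
	 * ->lock followed by a LOCK of another's act as a full memory
	 * barrier, which grace-period ordering relies on.
	 */
	static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
	{
		raw_spin_lock(&rnp->lock);
		smp_mb__after_unlock_lock();
	}

	static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
	{
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
	}

	#define raw_spin_lock_irqsave_rcu_node(rnp, flags)		\
	do {								\
		raw_spin_lock_irqsave(&(rnp)->lock, flags);		\
		smp_mb__after_unlock_lock();				\
	} while (0)

	/* Barrier only on success: a failed trylock acquired nothing. */
	static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
	{
		bool locked = raw_spin_trylock(&rnp->lock);

		if (locked)
			smp_mb__after_unlock_lock();
		return locked;
	}

Centralizing the barrier this way removes the risk of forgetting
smp_mb__after_unlock_lock() at any one of these call sites. It also
explains the note_gp_changes() hunk: the standalone barrier after the
trylock disappears because the wrapper is assumed to issue it as part of
a successful acquisition, preserving the old behavior on both paths.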