@@ -1245,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 			if (rnp->qsmask & (1UL << cpu))
 				dump_cpu_task(rnp->grplo + cpu);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1265,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
 		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * OK, time to rat on our buddy...
@@ -1291,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 
 	print_cpu_stall_info_end();
@@ -1356,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1594,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 unlock_out:
 	if (rnp != rnp_root)
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 out:
 	if (c_out != NULL)
 		*c_out = c;
@@ -1814,7 +1814,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		return;
 	}
 	needwake = __note_gp_changes(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1839,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1849,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
 		 */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 
@@ -1858,7 +1858,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	/* Record GP times before starting GP, hence smp_store_release(). */
 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1872,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
-			raw_spin_unlock_irq(&rnp->lock);
+			raw_spin_unlock_irq_rcu_node(rnp);
 			continue;
 		}
 
@@ -1906,7 +1906,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 			rcu_cleanup_dead_rnp(rnp);
 		}
 
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 
 	/*
@@ -1937,7 +1937,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -1995,7 +1995,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
 
@@ -2024,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 * safe for us to drop the lock in order to mark the grace
 	 * period as completed in all of the rcu_node structures.
 	 */
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Propagate new ->completed value to rcu_node structures so
@@ -2045,7 +2045,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		rcu_gp_slow(rsp, gp_cleanup_delay);
@@ -2067,7 +2067,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("newreq"));
 	}
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2246,7 +2246,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	rcu_gp_kthread_wake(rsp);
 }
 
@@ -2276,7 +2276,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			 * Our bit has already been cleared, or the
 			 * relevant grace period is already over, so done.
 			 */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		WARN_ON_ONCE(oldmask);	/* Any child must be all zeroed! */
@@ -2288,7 +2288,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		mask = rnp->grpmask;
@@ -2298,7 +2298,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
 			break;
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2330,7 +2330,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;  /* Still need more quiescent states! */
 	}
 
@@ -2347,7 +2347,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
@@ -2384,12 +2384,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	mask = rdp->grpmask;
 	if ((rnp->qsmask & mask) == 0) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
 		rdp->core_needs_qs = false;
 
@@ -2600,10 +2600,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp);
+			/* irqs remain disabled. */
 			return;
 		}
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 }
 
@@ -2626,7 +2627,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -2860,7 +2861,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 	}
 }
@@ -2896,11 +2897,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
-		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;  /* Someone beat us to it. */
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	rcu_gp_kthread_wake(rsp);
 }
 
@@ -2926,7 +2927,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (cpu_needs_another_gp(rsp, rdp)) {
 		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3017,7 +3018,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 		raw_spin_lock_rcu_node(rnp_root);
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3437,14 +3438,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			continue;  /* No new CPUs, nothing to do. */
 		}
 
 		/* Update this node's mask, track old value for propagation. */
 		oldmask = rnp->expmaskinit;
 		rnp->expmaskinit = rnp->expmaskinitnext;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* If was already nonzero, nothing to propagate. */
 		if (oldmask)
@@ -3459,7 +3460,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
-			raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 			if (done)
 				break;
 			mask = rnp_up->grpmask;
@@ -3482,7 +3483,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -3523,11 +3524,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			break;
 		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
 				wake_up(&rsp->expedited_wq);
@@ -3535,7 +3536,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3570,7 +3571,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	rnp->expmask &= ~mask;
@@ -3731,7 +3732,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		 */
 		if (rcu_preempt_has_tasks(rnp))
 			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* IPI the remaining CPUs for expedited quiescent state. */
 		mask = 1;
@@ -3748,7 +3749,7 @@ retry_ipi:
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
@@ -3757,7 +3758,7 @@ retry_ipi:
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4164,7 +4165,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
-		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
 	}
 }
 
@@ -4188,7 +4189,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rsp = rsp;
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4216,7 +4217,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-	raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 
 	/*
 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4237,7 +4238,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4359,7 +4360,7 @@ static int __init rcu_spawn_gp_kthread(void)
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
@@ -4450,8 +4451,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < levelcnt[i]; j++, rnp++) {
-			raw_spin_lock_init(&rnp->lock);
-			lockdep_set_class_and_name(&rnp->lock,
+			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
 						   &rcu_node_class[i], buf[i]);
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
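For reference, the _rcu_node() wrappers that this patch converts callers to live in
kernel/rcu/tree.h. The following is a sketch of their likely shape in kernels of this
vintage, reconstructed from memory rather than quoted from the tree, so treat the exact
spelling as an approximation. The acquisition side folds in smp_mb__after_unlock_lock()
so that an unlock of one rcu_node's ->lock followed by a lock of another's provides a
full memory barrier, and every access goes through ACCESS_PRIVATE(), matching the
ACCESS_PRIVATE(rnp, lock) initialization added to rcu_init_one() above:

/* Sketch only: acquisition wrappers add the unlock+lock full-barrier guarantee. */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

The unlock wrappers add no ordering of their own; the point of routing them through
ACCESS_PRIVATE() is that ->lock can then be declared __private in struct rcu_node, so
sparse flags any residual direct use of &rnp->lock that the conversion above missed.
Note that ->fqslock is deliberately left out of this scheme, which is why
rcu_init_one() and force_quiescent_state() still take it directly.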