@@ -20,6 +20,8 @@
  * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
+#include <linux/lockdep.h>
+
 /*
  * Record the start of an expedited grace period.
  */
@@ -158,10 +160,30 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
  */
 static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
+	raw_lockdep_assert_held_rcu_node(rnp);
+
 	return rnp->exp_tasks == NULL &&
 	       READ_ONCE(rnp->expmask) == 0;
 }
 
+/*
+ * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
+ * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
+ * itself
+ */
+static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
+{
+	unsigned long flags;
+	bool ret;
+
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	ret = sync_rcu_preempt_exp_done(rnp);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+	return ret;
+}
+
+
 /*
  * Report the exit from RCU read-side critical section for the last task
  * that queued itself during or before the current expedited preemptible-RCU
@@ -501,9 +523,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	for (;;) {
 		ret = swait_event_timeout(
 				rsp->expedited_wq,
-				sync_rcu_preempt_exp_done(rnp_root),
+				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);
-		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
+		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
 			return;
 		WARN_ON(ret < 0); /* workqueues should not be signaled. */
 		if (rcu_cpu_stall_suppress)
@@ -536,7 +558,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		if (rnp == rnp_root)
 			continue; /* printed unconditionally */
-		if (sync_rcu_preempt_exp_done(rnp))
+		if (sync_rcu_preempt_exp_done_unlocked(rnp))
 			continue;
 		pr_cont(" l=%u:%d-%d:%#lx/%c",
 			rnp->level, rnp->grplo, rnp->grphi,