@@ -322,9 +322,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-	int empty_exp;
-	int empty_norm;
-	int empty_exp_now;
+	bool empty;
+	bool empty_exp;
+	bool empty_norm;
+	bool empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -376,6 +377,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 			break;
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
+	empty = !rcu_preempt_has_tasks(rnp);
 	empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 	empty_exp = !rcu_preempted_readers_exp(rnp);
 	smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -395,6 +397,14 @@ void rcu_read_unlock_special(struct task_struct *t)
 		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
+	/*
+	 * If this was the last task on the list, go see if we
+	 * need to propagate ->qsmaskinit bit clearing up the
+	 * rcu_node tree.
+	 */
+	if (!empty && !rcu_preempt_has_tasks(rnp))
+		rcu_cleanup_dead_rnp(rnp);
+
 	/*
 	 * If this was the last task on the current list, and if
 	 * we aren't waiting on any CPUs, report the quiescent state.
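
The added call is the heart of the change: `empty` snapshots whether the leaf rcu_node had blocked tasks before this reader was removed, so the 1->0 transition can be detected afterward. When the last blocked task leaves, rcu_cleanup_dead_rnp() walks toward the root, clearing this node's bit in each ancestor's ->qsmaskinit and stopping as soon as an ancestor still has other children to wait on. What follows is a minimal standalone sketch of that walk, not the kernel code: the toy_* names and struct layout are invented for illustration, and the real function's locking, memory ordering, and remaining rcu_node state are omitted.

/* Toy userspace model of ->qsmaskinit propagation; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rnp {
	struct toy_rnp *parent;   /* NULL at the root */
	unsigned long qsmaskinit; /* children this node still waits on */
	unsigned long grpmask;    /* this node's bit in parent->qsmaskinit */
	bool has_tasks;           /* stand-in for rcu_preempt_has_tasks() */
};

/* Walk up from a leaf whose last blocked task just left, clearing the
 * departing node's bit in each ancestor until an ancestor still has
 * other live children. */
static void toy_cleanup_dead_rnp(struct toy_rnp *rnp_leaf)
{
	unsigned long mask;
	struct toy_rnp *rnp = rnp_leaf;

	if (rnp->qsmaskinit || rnp->has_tasks)
		return; /* leaf is still live; nothing to propagate */
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break; /* cleared all the way to the root */
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit)
			return; /* ancestor still has live children */
	}
}

int main(void)
{
	struct toy_rnp root = { .qsmaskinit = 0x1 };
	struct toy_rnp leaf = { .parent = &root, .grpmask = 0x1 };

	toy_cleanup_dead_rnp(&leaf);
	printf("root.qsmaskinit = %#lx\n", root.qsmaskinit); /* prints 0 */
	return 0;
}

Built with a stock C compiler, the example prints root.qsmaskinit = 0, showing the now-empty leaf's bit dropping out of its ancestor's wait mask, which is the effect the patch's new rcu_cleanup_dead_rnp() call achieves in the rcu_node tree.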