@@ -540,7 +540,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-	if (!list_empty(&rnp->blkd_tasks))
+	if (rcu_preempt_has_tasks(rnp))
 		rnp->gp_tasks = rnp->blkd_tasks.next;
 	WARN_ON_ONCE(rnp->qsmask);
 }
@@ -706,7 +706,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	smp_mb__after_unlock_lock();
-	if (list_empty(&rnp->blkd_tasks)) {
+	if (!rcu_preempt_has_tasks(rnp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -985,7 +985,7 @@ void exit_rcu(void)
 
 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 {
-	if (list_empty(&rnp->blkd_tasks))
+	if (!rcu_preempt_has_tasks(rnp))
 		rnp->n_balk_blkd_tasks++;
 	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
 		rnp->n_balk_exp_gp_tasks++;
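
The rcu_preempt_has_tasks() helper that these hunks call is not defined in the quoted context. Judging from how each call site swaps it in for a test on list_empty(&rnp->blkd_tasks), it is presumably a one-line predicate along the following lines; this is a sketch inferred from the call sites, not the exact definition from the tree:

static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	/*
	 * Sketch only: report whether any tasks are queued on this
	 * rcu_node's ->blkd_tasks list.  This simply gives a name to
	 * the open-coded list_empty() tests replaced above.
	 */
	return !list_empty(&rnp->blkd_tasks);
}

Naming the check lets callers state their intent (are there blocked readers to account for?) instead of repeating a list-implementation detail, and it gives a single point of change should the ->blkd_tasks bookkeeping ever be restructured.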