@@ -2633,9 +2633,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 		return;
 
 	/* Do the accounting first. */
-	rdp->n_cbs_adopted += rcu_cblist_n_cbs(&rsp->orphan_done);
-	if (rcu_cblist_n_lazy_cbs(&rsp->orphan_done) !=
-	    rcu_cblist_n_cbs(&rsp->orphan_done))
+	rdp->n_cbs_adopted += rsp->orphan_done.len;
+	if (rcu_cblist_n_lazy_cbs(&rsp->orphan_done) != rsp->orphan_done.len)
 		rcu_idle_count_callbacks_posted();
 	rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
 
@@ -2792,14 +2791,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		 * Stop only if limit reached and CPU has something to do.
 		 * Note: The rcl structure counts down from zero.
 		 */
-		if (-rcu_cblist_n_cbs(&rcl) >= bl &&
+		if (-rcl.len >= bl &&
 		    (need_resched() ||
 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 			break;
 	}
 
 	local_irq_save(flags);
-	count = -rcu_cblist_n_cbs(&rcl);
+	count = -rcl.len;
 	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
 			    is_idle_task(current), rcu_is_callbacks_kthread());
 
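The second hunk leans on the convention called out in the comment: the on-stack rcl list starts with ->len == 0 and each invoked callback decrements it, so -rcl.len is the running count of invoked callbacks and the value eventually reported to trace_rcu_batch_end(). The following is a minimal standalone sketch of that counting convention only, not kernel code; the names (cb, cblist_sketch, sketch_dequeue) and the batch-limit value are illustrative assumptions.

/*
 * Sketch of the "counts down from zero" convention: the list starts at
 * len == 0, each dequeue decrements len (analogous to what the dequeue
 * step does to rcl.len in rcu_do_batch()), so -len is the number of
 * callbacks handed out so far and "-len >= bl" is the batch-limit test.
 * All identifiers here are hypothetical, not taken from the kernel tree.
 */
#include <stdio.h>

struct cb {
	struct cb *next;
};

struct cblist_sketch {
	struct cb *head;
	long len;	/* 0, -1, -2, ... while callbacks are being invoked */
};

/* Pop the first callback and count down from zero. */
static struct cb *sketch_dequeue(struct cblist_sketch *rcl)
{
	struct cb *c = rcl->head;

	if (!c)
		return NULL;
	rcl->head = c->next;
	rcl->len--;
	return c;
}

int main(void)
{
	struct cb cbs[3] = { { &cbs[1] }, { &cbs[2] }, { NULL } };
	struct cblist_sketch rcl = { .head = &cbs[0], .len = 0 };
	long bl = 2;	/* hypothetical batch limit */

	while (sketch_dequeue(&rcl))
		if (-rcl.len >= bl)	/* same shape as the test above */
			break;

	printf("invoked %ld callbacks\n", -rcl.len);	/* prints 2 */
	return 0;
}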