@@ -365,6 +365,7 @@ early_initcall(check_cpu_stall_init);
 /* Global list of callbacks and associated lock. */
 static struct rcu_head *rcu_tasks_cbs_head;
 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 
 /* Track exiting tasks in order to allow them to be waited for. */
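DECLARE_WAIT_QUEUE_HEAD() defines rcu_tasks_cbs_wq statically initialized, so no runtime init_waitqueue_head() call is needed. This wait queue is what turns the RCU-tasks kthread from a once-per-second poller into an event-driven consumer: the enqueue path in the next hunk wakes it only when new work actually arrives.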
@@ -378,13 +379,17 @@ module_param(rcu_task_stall_timeout, int, 0644);
 void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
 {
 	unsigned long flags;
+	bool needwake;
 
 	rhp->next = NULL;
 	rhp->func = func;
 	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+	needwake = !rcu_tasks_cbs_head;
 	*rcu_tasks_cbs_tail = rhp;
 	rcu_tasks_cbs_tail = &rhp->next;
 	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+	if (needwake)
+		wake_up(&rcu_tasks_cbs_wq);
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
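Note that needwake is computed while rcu_tasks_cbs_lock is still held: a wakeup is needed only when the list goes from empty to non-empty, because a non-empty list means the kthread has already been woken and has not yet drained its callbacks, so a burst of enqueues costs a single wakeup. Issuing wake_up() after dropping the lock also keeps the wait queue's internal lock out of the callback-lock critical section. A minimal sketch of the same idiom, with hypothetical names (work_list, work_lock, work_wq, and enqueue_work() are illustrative, not part of the patch):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    static LIST_HEAD(work_list);
    static DEFINE_SPINLOCK(work_lock);
    static DECLARE_WAIT_QUEUE_HEAD(work_wq);

    /* Queue an item; wake the consumer only on the empty->non-empty
     * transition, so back-to-back enqueues generate one wakeup total. */
    static void enqueue_work(struct list_head *item)
    {
            unsigned long flags;
            bool was_empty;

            spin_lock_irqsave(&work_lock, flags);
            was_empty = list_empty(&work_list);
            list_add_tail(item, &work_list);
            spin_unlock_irqrestore(&work_lock, flags);
            if (was_empty)
                    wake_up(&work_wq);
    }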
@@ -495,8 +500,12 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 
 		/* If there were none, wait a bit and start over. */
 		if (!list) {
-			schedule_timeout_interruptible(HZ);
-			WARN_ON(signal_pending(current));
+			wait_event_interruptible(rcu_tasks_cbs_wq,
+						 rcu_tasks_cbs_head);
+			if (!rcu_tasks_cbs_head) {
+				WARN_ON(signal_pending(current));
+				schedule_timeout_interruptible(HZ/10);
+			}
 			continue;
 		}
 
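With the wait queue in place, the kthread blocks until rcu_tasks_cbs_head becomes non-NULL rather than unconditionally sleeping for a second on every empty pass. wait_event_interruptible() can return before its condition is true, for instance if a signal is delivered, so the condition is rechecked: a spurious return trips the WARN_ON(), since nothing should be signalling this kthread, and then backs off for HZ/10 so the loop cannot spin. A consumer loop using the same pattern against the hypothetical queue sketched above (additionally assumes linux/kthread.h and linux/sched.h):

    static int consumer_fn(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* Sleep until the producer queues work; the condition
                     * is checked locklessly, as in the patch itself. */
                    wait_event_interruptible(work_wq, !list_empty(&work_list));
                    if (list_empty(&work_list)) {
                            WARN_ON(signal_pending(current));
                            schedule_timeout_interruptible(HZ / 10);
                            continue;
                    }
                    /* ... splice work_list out under work_lock, process it ... */
            }
            return 0;
    }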
@@ -602,6 +611,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			list = next;
 			cond_resched();
 		}
+		schedule_timeout_uninterruptible(HZ/10);
 	}
 }
 
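The final hunk adds an unconditional tenth-of-a-second sleep after each batch of callbacks has been invoked. This reads as a backstop: even under a continuous stream of callbacks, the kthread makes at most roughly ten passes per second rather than running back-to-back grace periods in a tight loop.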