@@ -359,7 +359,8 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
 		if (raw_smp_processor_id() == cpu ||
-		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+		    !(rnp->qsmaskinitnext & rdp->grpmask))
 			mask_ofl_test |= rdp->grpmask;
 	}
 	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
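The new test above skips the IPI for a CPU that is the current CPU, that is idle (its ->dynticks counter is even), or that is no longer set in the leaf rcu_node's ->qsmaskinitnext online mask; such CPUs are instead accumulated in mask_ofl_test. A compilable userspace sketch of that bitmask bookkeeping, with hypothetical plain-C variables standing in for the rcu_node and rcu_data fields:

/*
 * Standalone sketch (hypothetical names, userspace C) of the CPU-selection
 * bookkeeping above.  A bit set in "no_ipi_needed" marks a CPU known to be
 * idle or offline, so the expedited grace period will not IPI it.
 */
#include <stdio.h>

int main(void)
{
	unsigned long expmask = 0x0f;		/* CPUs still blocking the GP */
	unsigned long qsmaskinitnext = 0x0b;	/* CPUs currently online */
	unsigned long dynticks[4] = { 1, 2, 1, 1 };  /* odd => CPU not idle */
	unsigned long no_ipi_needed = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned long mask = 1UL << cpu;

		if (!(dynticks[cpu] & 0x1) ||	/* idle: already quiescent */
		    !(qsmaskinitnext & mask))	/* offline: nothing to IPI */
			no_ipi_needed |= mask;
	}
	printf("IPI mask: %#lx\n", expmask & ~no_ipi_needed);	/* prints 0x9 */
	return 0;
}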
@@ -384,17 +385,16 @@ retry_ipi:
 			mask_ofl_ipi &= ~mask;
 			continue;
 		}
-		/* Failed, raced with offline. */
+		/* Failed, raced with CPU hotplug operation. */
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		if (cpu_online(cpu) &&
+		if ((rnp->qsmaskinitnext & mask) &&
 		    (rnp->expmask & mask)) {
+			/* Online, so delay for a bit and try again. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			schedule_timeout_uninterruptible(1);
-			if (cpu_online(cpu) &&
-			    (rnp->expmask & mask))
-				goto retry_ipi;
-			raw_spin_lock_irqsave_rcu_node(rnp, flags);
+			goto retry_ipi;
 		}
+		/* CPU really is offline, so we can ignore it. */
 		if (!(rnp->expmask & mask))
 			mask_ofl_ipi &= ~mask;
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
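The retry path above now rechecks ->qsmaskinitnext under the rcu_node lock instead of calling cpu_online(), and after the one-jiffy sleep it simply jumps back to retry_ipi rather than repeating the check inline. A compilable userspace analogue of that recheck-under-lock-and-retry pattern, with purely illustrative names:

/*
 * Userspace analogue (illustrative names only) of the retry path above:
 * if poking the target fails, recheck its state under a lock; if it is
 * still online and still owes us a response, back off briefly and retry.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long online_mask = 0x1;	 /* bit set => target "online" */
static unsigned long pending_mask = 0x1; /* bit set => response still owed */

static bool try_poke(unsigned long mask)
{
	static int attempts;

	(void)mask;			/* unused in this sketch */
	return ++attempts >= 3;		/* fail twice, then succeed */
}

static void poke_target(unsigned long mask)
{
retry:
	if (try_poke(mask))
		return;			/* "IPI" delivered */

	/* Failed, raced with a hotplug-like transition: recheck under lock. */
	pthread_mutex_lock(&state_lock);
	if ((online_mask & mask) && (pending_mask & mask)) {
		/* Still online, so delay for a bit and try again. */
		pthread_mutex_unlock(&state_lock);
		usleep(1000);
		goto retry;
	}
	/* Target really is gone or already answered; forget about it. */
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	poke_target(0x1);
	printf("poke complete\n");
	return 0;
}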
@@ -427,12 +427,10 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 				jiffies_stall);
 		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
 			return;
-		if (ret < 0) {
-			/* Hit a signal, disable CPU stall warnings. */
-			swait_event(rsp->expedited_wq,
-				   sync_rcu_preempt_exp_done(rnp_root));
-			return;
-		}
+		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
+		if (rcu_cpu_stall_suppress)
+			continue;
+		panic_on_rcu_stall();
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 		       rsp->name);
 		ndetected = 0;
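Because this wait loop now runs from a workqueue handler, a negative return from swait_event_timeout() (a signal) should be impossible, so the old signal-recovery path becomes a WARN_ON(); stall reporting is skipped when suppressed, and panic_on_rcu_stall() honors the usual sysctl. The timed-wait-then-report structure itself is a common pattern; here is a compilable userspace analogue using a condition variable, with names and timing that are purely illustrative:

/*
 * Userspace analogue of the stall-warning loop above: wait for a condition
 * with a timeout, and if the timeout expires before the condition becomes
 * true, print a stall warning and keep waiting.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool gp_done;

static void *completer(void *arg)
{
	sleep(3);			/* pretend the grace period is slow */
	pthread_mutex_lock(&lock);
	gp_done = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, completer, NULL);
	pthread_mutex_lock(&lock);
	while (!gp_done) {
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 1;	/* one-second "stall timeout" */
		if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0 &&
		    !gp_done)
			fprintf(stderr, "INFO: detected expedited stall\n");
	}
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}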
@@ -500,7 +498,6 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	 * next GP, to proceed.
 	 */
 	mutex_lock(&rsp->exp_wake_mutex);
-	mutex_unlock(&rsp->exp_mutex);
 
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
@@ -516,6 +513,70 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	mutex_unlock(&rsp->exp_wake_mutex);
 }
 
+/* Let the workqueue handler know what it is supposed to do. */
+struct rcu_exp_work {
+	smp_call_func_t rew_func;
+	struct rcu_state *rew_rsp;
+	unsigned long rew_s;
+	struct work_struct rew_work;
+};
+
+/*
+ * Work-queue handler to drive an expedited grace period forward.
+ */
+static void wait_rcu_exp_gp(struct work_struct *wp)
+{
+	struct rcu_exp_work *rewp;
+
+	/* Initialize the rcu_node tree in preparation for the wait. */
+	rewp = container_of(wp, struct rcu_exp_work, rew_work);
+	sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
+
+	/* Wait and clean up, including waking everyone. */
+	rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+}
+
+/*
+ * Given an rcu_state pointer and a smp_call_function() handler, kick
+ * off the specified flavor of expedited grace period.
+ */
+static void _synchronize_rcu_expedited(struct rcu_state *rsp,
+				       smp_call_func_t func)
+{
+	struct rcu_data *rdp;
+	struct rcu_exp_work rew;
+	struct rcu_node *rnp;
+	unsigned long s;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(rsp->call);
+		return;
+	}
+
+	/* Take a snapshot of the sequence number. */
+	s = rcu_exp_gp_seq_snap(rsp);
+	if (exp_funnel_lock(rsp, s))
+		return;  /* Someone else did our work for us. */
+
+	/* Marshall arguments and schedule the expedited grace period. */
+	rew.rew_func = func;
+	rew.rew_rsp = rsp;
+	rew.rew_s = s;
+	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+	schedule_work(&rew.rew_work);
+
+	/* Wait for expedited grace period to complete. */
+	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+	rnp = rcu_get_root(rsp);
+	wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+		   sync_exp_work_done(rsp,
+				      &rdp->exp_workdone0, s));
+
+	/* Let the next expedited grace period start. */
+	mutex_unlock(&rsp->exp_mutex);
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
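The new _synchronize_rcu_expedited() above is the heart of this change: it packages the flavor-specific handler, the rcu_state pointer, and the sequence snapshot into an on-stack work item, schedules that item, and then sleeps on the root rcu_node's ->exp_wq bucket until the workqueue handler has driven the grace period to completion. The release of ->exp_mutex also moves here, which is why rcu_exp_wait_wake() no longer drops it. Below is a minimal, self-contained kernel-module sketch of the on-stack work-item pattern; the demo_* names and the use of a completion in place of ->exp_wq are hypothetical, while INIT_WORK_ONSTACK(), schedule_work(), and destroy_work_on_stack() are the real workqueue APIs:

/*
 * Minimal sketch of the on-stack work-item pattern used above.  The
 * structure, handler, and field names are hypothetical; the workqueue
 * and completion calls are the real kernel interfaces.
 */
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_exp_work {
	unsigned long dw_s;		/* argument for the handler */
	struct completion dw_done;	/* stands in for rnp->exp_wq */
	struct work_struct dw_work;
};

static void demo_exp_handler(struct work_struct *wp)
{
	struct demo_exp_work *dwp = container_of(wp, struct demo_exp_work,
						 dw_work);

	pr_info("expedited work running, s=%lu\n", dwp->dw_s);
	complete(&dwp->dw_done);	/* wake the requester */
}

static int __init demo_init(void)
{
	struct demo_exp_work dw;

	/* Marshall arguments and schedule the work, as the patch does. */
	dw.dw_s = 42;
	init_completion(&dw.dw_done);
	INIT_WORK_ONSTACK(&dw.dw_work, demo_exp_handler);
	schedule_work(&dw.dw_work);

	/* Wait for the handler, then make the on-stack item safe to reuse. */
	wait_for_completion(&dw.dw_done);
	destroy_work_on_stack(&dw.dw_work);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");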
@@ -534,29 +595,13 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
  */
 void synchronize_sched_expedited(void)
 {
-	unsigned long s;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* If only one CPU, this is automatically a grace period. */
 	if (rcu_blocking_is_gp())
 		return;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu_sched);
-		return;
-	}
-
-	/* Take a snapshot of the sequence number. */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return;  /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-
-	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -620,23 +665,8 @@ static void sync_rcu_exp_handler(void *info)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long s;
-
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return;  /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
-	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
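With both wrappers reduced to a single call, every expedited flavor now funnels through the same workqueue-driven path, and callers see no change in the API. As a reminder of what that API looks like from the update side, here is a sketch of typical usage: unpublish an element, wait for an expedited grace period, then free it. The demo_* names are hypothetical; the list, spinlock, and RCU primitives are the real kernel APIs.

/*
 * Caller-side sketch of the consolidated expedited API: unlink a node
 * under the update-side lock, wait for all pre-existing readers via an
 * expedited grace period, then free the node.
 */
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head dn_list;
	int dn_key;
};

static LIST_HEAD(demo_head);
static DEFINE_SPINLOCK(demo_lock);

/* Unlink a node and free it once all pre-existing readers are done. */
static void demo_remove(struct demo_node *dnp)
{
	spin_lock(&demo_lock);
	list_del_rcu(&dnp->dn_list);
	spin_unlock(&demo_lock);

	synchronize_rcu_expedited();	/* low-latency grace-period wait */
	kfree(dnp);
}

static int __init demo_init(void)
{
	struct demo_node *dnp = kzalloc(sizeof(*dnp), GFP_KERNEL);

	if (!dnp)
		return -ENOMEM;
	dnp->dn_key = 1;
	spin_lock(&demo_lock);
	list_add_rcu(&dnp->dn_list, &demo_head);
	spin_unlock(&demo_lock);

	demo_remove(dnp);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");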