@@ -1018,25 +1018,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
-	/*
-	 * Has this CPU encountered a cond_resched() since the beginning
-	 * of the grace period?  For this to be the case, the CPU has to
-	 * have noticed the current grace period.  This might not be the
-	 * case for nohz_full CPUs looping in the kernel.
-	 */
-	jtsq = jiffies_till_sched_qs;
-	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-	if (time_after(jiffies, rcu_state.gp_start + jtsq) &&
-	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-	    rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
-		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
-		rcu_gpnum_ovf(rnp, rdp);
-		return 1;
-	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
-		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
-		smp_store_release(ruqp, true);
-	}
-
 	/* If waiting too long on an offline CPU, complain. */
 	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
 	    time_after(jiffies, rcu_state.gp_start + HZ)) {
@@ -1060,29 +1041,27 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 	/*
 	 * A CPU running for an extended time within the kernel can
-	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
-	 * even context-switching back and forth between a pair of
-	 * in-kernel CPU-bound tasks cannot advance grace periods.
-	 * So if the grace period is old enough, make the CPU pay attention.
-	 * Note that the unsynchronized assignments to the per-CPU
-	 * rcu_need_heavy_qs variable are safe.  Yes, setting of
-	 * bits can be lost, but they will be set again on the next
-	 * force-quiescent-state pass.  So lost bit sets do not result
-	 * in incorrect behavior, merely in a grace period lasting
-	 * a few jiffies longer than it might otherwise.  Because
-	 * there are at most four threads involved, and because the
-	 * updates are only once every few jiffies, the probability of
-	 * lossage (and thus of slight grace-period extension) is
-	 * quite low.
+	 * delay RCU grace periods: (1) At age jiffies_till_sched_qs,
+	 * set .rcu_urgent_qs, (2) At age 2*jiffies_till_sched_qs, set
+	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
+	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
+	 * variable are safe because the assignments are repeated if this
+	 * CPU failed to pass through a quiescent state.  This code
+	 * also checks .jiffies_resched in case jiffies_till_sched_qs
+	 * is set way high.
 	 */
+	jtsq = jiffies_till_sched_qs;
+	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
 	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
-	    (time_after(jiffies, rcu_state.gp_start + jtsq) ||
+	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
 	     time_after(jiffies, rcu_state.jiffies_resched))) {
 		WRITE_ONCE(*rnhqp, true);
 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
 		smp_store_release(ruqp, true);
 		rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */
+	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
+		WRITE_ONCE(*ruqp, true);
 	}
 
 	/*
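
The replacement comment above describes a two-stage escalation keyed off grace-period age. As a reading aid, here is a minimal standalone sketch of that logic; the function urge_quiescent_state() and its parameters are hypothetical, and it simplifies the real code, which also consults .jiffies_resched and skips the heavy-QS store when .rcu_need_heavy_qs is already set:

	/*
	 * Sketch only (kernel context: <linux/jiffies.h>, <linux/compiler.h>,
	 * <asm/barrier.h>): two-stage urgency escalation, assuming the caller
	 * passes the grace-period start time and jiffies_till_sched_qs.
	 */
	static void urge_quiescent_state(unsigned long gp_start, unsigned long jtsq,
					 bool *urgent_qs, bool *need_heavy_qs)
	{
		if (time_after(jiffies, gp_start + jtsq * 2)) {
			/* Stage 2: also demand a heavyweight quiescent state. */
			WRITE_ONCE(*need_heavy_qs, true);
			/* Order the heavy-QS store before the urgent-QS store. */
			smp_store_release(urgent_qs, true);
		} else if (time_after(jiffies, gp_start + jtsq)) {
			/* Stage 1: request an ordinary quiescent state. */
			WRITE_ONCE(*urgent_qs, true);
		}
	}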
@@ -1091,7 +1070,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * see if the CPU is getting hammered with interrupts, but only
 	 * once per grace period, just to keep the IPIs down to a dull roar.
 	 */
-	if (jiffies - rcu_state.gp_start > rcu_jiffies_till_stall_check() / 2) {
+	if (time_after(jiffies, rcu_state.jiffies_resched)) {
 		resched_cpu(rdp->cpu);
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
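
Note that this hunk also replaces an open-coded jiffies subtraction with time_after(), the wraparound-safe comparison from include/linux/jiffies.h, which in simplified form (dropping the typecheck() guards) reads:

	/* True iff a is after b, robust against jiffies wraparound. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)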
@@ -1669,7 +1648,6 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
 		need_gp = !!(rnp->qsmask & rdp->grpmask);
 		rdp->cpu_no_qs.b.norm = need_gp;
-		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
 		rdp->core_needs_qs = need_gp;
 		zero_cpu_stall_ticks(rdp);
 	}
@@ -2230,7 +2208,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 		 * within the current grace period.
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
-		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
@@ -3213,7 +3190,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->gp_seq = rnp->gp_seq;
 	rdp->gp_seq_needed = rnp->gp_seq;
 	rdp->cpu_no_qs.b.norm = true;
-	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
 	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
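
The removals in the last three hunks retire the snapshot half of the same snapshot-and-compare idiom whose compare half the first hunk deletes: a per-CPU counter incremented from cond_resched() was snapshotted at grace-period start, and any later difference was taken as evidence of a quiescent state. In outline, reassembled from the deleted lines above:

	/* At grace-period start: snapshot the per-CPU counter. */
	rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);

	/* Later, in the FQS scan: any change implies a quiescent state. */
	if (READ_ONCE(rdp->rcu_qs_ctr_snap) !=
	    per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu))
		return 1;	/* Counter moved: the CPU did a cond_resched(). */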