@@ -95,8 +95,6 @@ struct rcu_state sname##_state = { \
 	.rda = &sname##_data, \
 	.call = cr, \
 	.gp_state = RCU_GP_IDLE, \
-	.gpnum = 0UL - 300UL, \
-	.completed = 0UL - 300UL, \
 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.name = RCU_STATE_NAME(sname), \
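
A note for readers following the conversion: ->gp_seq packs the old pair of counters into a single word, with the grace-period counter in the upper bits and a small state field in the low RCU_SEQ_CTR_SHIFT bits (2 in this series), which is why the initializer shifts the traditional 0UL - 300UL boot-time value left. The -300 offset, as before, makes the counter wrap shortly after boot so that wrap-handling bugs surface early. Below is a minimal standalone sketch of the encoding, paraphrased from kernel/rcu/rcu.h of the same series; the main() harness is mine and purely illustrative:

#include <assert.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Upper bits of ->gp_seq: the grace-period counter proper. */
static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/* Low bits of ->gp_seq: zero when idle, nonzero while a GP is in progress. */
static unsigned long rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

int main(void)
{
	/* Same expression as the initializer in the hunk above. */
	unsigned long gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT;

	assert(rcu_seq_state(gp_seq) == 0);	/* Boots in the idle state. */
	printf("initial counter: %#lx\n", rcu_seq_ctr(gp_seq));
	return 0;
}
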
@@ -1349,8 +1347,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	long totqlen = 0;
 
-	WARN_ON_ONCE(gp_seq & 0x2); /* Remove when ->gpnum removed. */
-
 	/* Kick and suppress, if so configured. */
 	rcu_stall_kick_kthreads(rsp);
 	if (rcu_cpu_stall_suppress)
@@ -1582,8 +1578,6 @@ static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * not be released.
 	 */
 	raw_lockdep_assert_held_rcu_node(rnp);
-	WARN_ON_ONCE(c & 0x2); /* Catch any lingering use of ->gpnum. */
-	WARN_ON_ONCE(((rnp->completed << RCU_SEQ_CTR_SHIFT) >> RCU_SEQ_CTR_SHIFT) != rcu_seq_ctr(rnp->gp_seq)); /* Catch any ->completed/->gp_seq mismatches. */
 	trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
 	for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
 		if (rnp_root != rnp)
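
The double shift in the second removed assertion is the interesting part: the old ->completed counted grace periods in all 64 bits, while rcu_seq_ctr() can recover only the top-truncated counter from ->gp_seq, so ->completed had to be truncated the same way before the two could be compared. A standalone sketch of why, with illustrative values:

#include <assert.h>

#define RCU_SEQ_CTR_SHIFT 2

int main(void)
{
	unsigned long completed = 0UL - 300UL;	/* Old-style 64-bit GP counter. */
	unsigned long gp_seq = completed << RCU_SEQ_CTR_SHIFT; /* New encoding. */

	/* Direct comparison fails: the shift discarded the top two bits. */
	assert((gp_seq >> RCU_SEQ_CTR_SHIFT) != completed);
	/* Truncating ->completed the same way makes the counters comparable. */
	assert((gp_seq >> RCU_SEQ_CTR_SHIFT) ==
	       ((completed << RCU_SEQ_CTR_SHIFT) >> RCU_SEQ_CTR_SHIFT));
	return 0;
}
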
@@ -1757,8 +1751,6 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
 	    unlikely(READ_ONCE(rdp->gpwrap))) {
 		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
-		/* Remember that we saw this grace-period completion. */
-		rdp->completed = rnp->completed;
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
 		ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
@@ -1772,7 +1764,6 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		 * set up to detect a quiescent state, otherwise don't
 		 * go looking for one.
 		 */
-		rdp->gpnum = rnp->gpnum;
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpustart"));
 		need_gp = !!(rnp->qsmask & rdp->grpmask);
 		rdp->cpu_no_qs.b.norm = need_gp;
@@ -1843,13 +1834,8 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
 	/* Advance to a new grace period and initialize state. */
 	record_gp_stall_check_time(rsp);
-	/* Record GP times before starting GP, hence smp_store_release(). */
-	WARN_ON_ONCE(rsp->gpnum << RCU_SEQ_CTR_SHIFT != rsp->gp_seq);
-	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
-	smp_mb(); /* Pairs with barriers in stall-warning code. */
+	/* Record GP times before starting GP, hence rcu_seq_start(). */
 	rcu_seq_start(&rsp->gp_seq);
-	if (WARN_ON_ONCE(((rnp->completed << RCU_SEQ_CTR_SHIFT) >> RCU_SEQ_CTR_SHIFT) != rcu_seq_ctr(rnp->gp_seq))) /* Catch any ->completed/->gp_seq mismatches. */
-		pr_info("%s ->completed: %#lx (%#lx) ->gp_seq %#lx (%#lx)\n", __func__, rnp->completed, (rnp->completed << RCU_SEQ_CTR_SHIFT) >> RCU_SEQ_CTR_SHIFT, rnp->gp_seq, rcu_seq_ctr(rnp->gp_seq));
 	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
 	raw_spin_unlock_irq_rcu_node(rnp);
 
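
The open-coded release store and full barrier existed only to order the ->gpnum update against readers such as the stall-warning code; with ->gpnum gone, rcu_seq_start() supplies the required full barrier itself, which is what the rewritten comment alludes to. A sketch of the start/end helpers, paraphrased from kernel/rcu/rcu.h with asserts standing in for WARN_ON_ONCE() and the memory barriers reduced to comments (the main() harness is illustrative):

#include <assert.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Start a GP: counter unchanged, low-order state bits go to 1. */
static void rcu_seq_start(unsigned long *sp)
{
	*sp += 1;	/* The kernel uses WRITE_ONCE() followed by smp_mb(). */
	assert((*sp & RCU_SEQ_STATE_MASK) == 1);
}

/* End a GP: round up to the next counter value, state back to 0. */
static void rcu_seq_end(unsigned long *sp)
{
	/* The kernel issues smp_mb() before this store. */
	assert(*sp & RCU_SEQ_STATE_MASK);
	*sp = (*sp | RCU_SEQ_STATE_MASK) + 1;
}

int main(void)
{
	unsigned long gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT;
	unsigned long ctr = gp_seq >> RCU_SEQ_CTR_SHIFT;

	rcu_seq_start(&gp_seq);	/* State 0 -> 1: grace period in progress. */
	rcu_seq_end(&gp_seq);	/* State 1 -> 0: counter advanced by one. */
	assert(gp_seq >> RCU_SEQ_CTR_SHIFT == ctr + 1);
	return 0;
}
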
@@ -1920,9 +1906,6 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
-		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-			WRITE_ONCE(rnp->completed, rsp->completed);
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
 			(void)__note_gp_changes(rsp, rnp, rdp);
@@ -2012,13 +1995,13 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
-	 * Propagate new ->completed value to rcu_node structures so
-	 * that other CPUs don't have to wait until the start of the next
-	 * grace period to process their callbacks. This also avoids
-	 * some nasty RCU grace-period initialization races by forcing
-	 * the end of the current grace period to be completely recorded in
-	 * all of the rcu_node structures before the beginning of the next
-	 * grace period is recorded in any of the rcu_node structures.
+	 * Propagate new ->gp_seq value to rcu_node structures so that
+	 * other CPUs don't have to wait until the start of the next grace
+	 * period to process their callbacks. This also avoids some nasty
+	 * RCU grace-period initialization races by forcing the end of
+	 * the current grace period to be completely recorded in all of
+	 * the rcu_node structures before the beginning of the next grace
+	 * period is recorded in any of the rcu_node structures.
 	 */
 	new_gp_seq = rsp->gp_seq;
 	rcu_seq_end(&new_gp_seq);
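
Note that rcu_seq_end() is applied to a local snapshot here: cleanup first computes the value ->gp_seq will take once this grace period officially ends, pushes that value down the rcu_node tree, and only afterwards ends rsp->gp_seq itself (two hunks below). A worked example of the arithmetic, with illustrative values:

#include <assert.h>

int main(void)
{
	/* Illustrative value: counter 7, state 1 (grace period running). */
	unsigned long gp_seq = 0x1d;
	unsigned long new_gp_seq = gp_seq;

	new_gp_seq = (new_gp_seq | 0x3) + 1;	/* rcu_seq_end() arithmetic. */
	assert(new_gp_seq == 0x20);		/* Counter 8, state 0 (idle). */
	return 0;
}
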
@@ -2027,7 +2010,6 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
 			dump_blkd_tasks(rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
-		WRITE_ONCE(rnp->completed, rsp->gpnum);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
@@ -2045,7 +2027,6 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
 	/* Declare grace period done. */
-	WRITE_ONCE(rsp->completed, rsp->gpnum);
 	rcu_seq_end(&rsp->gp_seq);
 	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
 	rsp->gp_state = RCU_GP_IDLE;
@@ -3496,9 +3477,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 /*
  * Initialize a CPU's per-CPU RCU data. Note that only one online or
- * offline event can be happening at a given time. Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time. Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
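
The "slop" the rewritten comment allows is tolerable because gp_seq comparisons are wrap-safe and a CPU coming online has no callbacks whose grace periods could be mis-ranked. The two helpers that embody this, rcu_seq_snap() and rcu_seq_done(), are sketched below, paraphrased from kernel/rcu/rcu.h of the same series but simplified to take plain values rather than pointers with READ_ONCE()/smp_mb(); the values in main() are illustrative:

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Wrap-safe "a >= b" for free-running unsigned counters. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

/* Lowest ->gp_seq value guaranteeing a full grace period after "sp". */
static unsigned long rcu_seq_snap(unsigned long sp)
{
	return (sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

/* Has the grace period requested via rcu_seq_snap() completed? */
static bool rcu_seq_done(unsigned long sp, unsigned long s)
{
	return ULONG_CMP_GE(sp, s);
}

int main(void)
{
	unsigned long gp_seq = 0x11;	/* Counter 4, state 1: GP in flight. */
	unsigned long s = rcu_seq_snap(gp_seq);

	assert(s == 0x18);		  /* End of the GP *after* the current one. */
	assert(!rcu_seq_done(gp_seq, s)); /* The in-flight GP is not enough. */
	assert(rcu_seq_done(0x18, s));	  /* Done once the counter reaches 6. */
	return 0;
}
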
@@ -3527,8 +3508,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rnp = rdp->mynode;
 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 	rdp->beenonline = true; /* We have now been online. */
-	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
-	rdp->completed = rnp->completed;
 	rdp->gp_seq = rnp->gp_seq;
 	rdp->gp_seq_needed = rnp->gp_seq;
 	rdp->cpu_no_qs.b.norm = true;
@@ -3908,8 +3887,6 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
 						   &rcu_fqs_class[i], fqs[i]);
-			rnp->gpnum = rsp->gpnum;
-			rnp->completed = rsp->completed;
 			rnp->gp_seq = rsp->gp_seq;
 			rnp->gp_seq_needed = rsp->gp_seq;
 			rnp->completedqs = rsp->gp_seq;