@@ -336,17 +336,48 @@ static void rcu_dynticks_eqs_online(void)
 	atomic_add(0x1, &rdtp->dynticks);
 }
 
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+bool rcu_dynticks_curr_cpu_in_eqs(void)
+{
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+	return !(atomic_read(&rdtp->dynticks) & 0x1);
+}
+
 /*
  * Snapshot the ->dynticks counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 {
 	int snap = atomic_add_return(0, &rdtp->dynticks);
 
 	return snap;
 }
 
+/*
+ * Return true if the snapshot returned from rcu_dynticks_snap()
+ * indicates that RCU is in an extended quiescent state.
+ */
+static bool rcu_dynticks_in_eqs(int snap)
+{
+	return !(snap & 0x1);
+}
+
+/*
+ * Return true if the CPU corresponding to the specified rcu_dynticks
+ * structure has spent some time in an extended quiescent state since
+ * rcu_dynticks_snap() returned the specified snapshot.
+ */
+static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
+{
+	return snap != rcu_dynticks_snap(rdtp);
+}
+
 /*
  * Do a double-increment of the ->dynticks counter to emulate a
  * momentary idle-CPU quiescent state.
@@ -1045,7 +1076,7 @@ void rcu_nmi_enter(void)
 	 * to be in the outermost NMI handler that interrupted an RCU-idle
 	 * period (observation due to Andy Lutomirski).
 	 */
-	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+	if (rcu_dynticks_curr_cpu_in_eqs()) {
 		rcu_dynticks_eqs_exit();
 		incby = 1;
 	}
@@ -1071,7 +1102,7 @@ void rcu_nmi_exit(void)
 	 * to us!)
 	 */
 	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 
 	/*
 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
@@ -1097,9 +1128,7 @@
  */
 bool notrace __rcu_is_watching(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	return atomic_read(&rdtp->dynticks) & 0x1;
+	return !rcu_dynticks_curr_cpu_in_eqs();
 }
 
 /**
@@ -1184,7 +1213,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 {
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
-	if ((rdp->dynticks_snap & 0x1) == 0) {
+	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
@@ -1203,12 +1232,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 				    bool *isidle, unsigned long *maxj)
 {
-	unsigned int curr;
 	int *rcrmp;
-	unsigned int snap;
-
-	curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
-	snap = (unsigned int)rdp->dynticks_snap;
 
 	/*
 	 * If the CPU passed through or entered a dynticks idle phase with
@@ -1218,7 +1242,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		return 1;
@@ -3807,7 +3831,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
-	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
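
The new helpers encode a snapshot/compare pattern: ->dynticks is even while a CPU is in an extended quiescent state (EQS) and odd otherwise, so a grace-period kthread can record a snapshot via rcu_dynticks_snap() and later conclude from any change in the counter, via rcu_dynticks_in_eqs_since(), that the CPU has passed through an EQS. A minimal standalone C11 sketch of that pattern follows; the mock_* names and the bare atomic_int standing in for struct rcu_dynticks are illustrative assumptions, not kernel code.

/* Userspace sketch of the even/odd dynticks snapshot pattern (C11). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mock_dynticks = 1;	/* odd: the "CPU" is not in an EQS */

static int mock_dynticks_snap(void)
{
	/* The kernel uses a full-ordering atomic; seq_cst plays that role here. */
	return atomic_load(&mock_dynticks);
}

static bool mock_dynticks_in_eqs(int snap)
{
	return !(snap & 0x1);		/* even counter value => EQS */
}

static bool mock_dynticks_in_eqs_since(int snap)
{
	/* Any change since the snapshot means an EQS was entered in between. */
	return snap != mock_dynticks_snap();
}

int main(void)
{
	int snap = mock_dynticks_snap();

	printf("in EQS at snapshot time? %d\n", mock_dynticks_in_eqs(snap));	/* 0 */

	/* The "CPU" enters and then leaves an EQS: counter goes 1 -> 2 -> 3. */
	atomic_fetch_add(&mock_dynticks, 1);
	atomic_fetch_add(&mock_dynticks, 1);

	printf("passed through an EQS?   %d\n", mock_dynticks_in_eqs_since(snap));	/* 1 */
	return 0;
}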