@@ -274,9 +274,19 @@ void rcu_bh_qs(void)
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
 
+/*
+ * Steal a bit from the bottom of ->dynticks for idle entry/exit
+ * control. Initially this is for TLB flushing.
+ */
+#define RCU_DYNTICK_CTRL_MASK 0x1
+#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
+#ifndef rcu_eqs_special_exit
+#define rcu_eqs_special_exit() do { } while (0)
+#endif
+
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-        .dynticks = ATOMIC_INIT(1),
+        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
         .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
         .dynticks_idle = ATOMIC_INIT(1),
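To make the new encoding concrete: bit 0 of ->dynticks becomes the "special action pending" flag (RCU_DYNTICK_CTRL_MASK), while the quiescent-state counter now advances in units of RCU_DYNTICK_CTRL_CTR, so idle entry/exit arithmetic never touches the flag bit, and a CPU is in an extended quiescent state exactly when the RCU_DYNTICK_CTRL_CTR bit is clear. The following userspace sketch of that encoding uses C11 atomics; it is illustrative only and not part of the patch.

#include <assert.h>
#include <stdatomic.h>

#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

int main(void)
{
        atomic_int dynticks = RCU_DYNTICK_CTRL_CTR;     /* boot value: not in EQS */
        int seq;

        /* Enter an extended quiescent state: the CTR bit goes clear. */
        seq = atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR) + RCU_DYNTICK_CTRL_CTR;
        assert(!(seq & RCU_DYNTICK_CTRL_CTR));

        /* Another CPU requests a deferred action while this one is "idle". */
        atomic_fetch_or(&dynticks, RCU_DYNTICK_CTRL_MASK);

        /* Exit the EQS: the CTR bit is set again, the flag bit is intact. */
        seq = atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR) + RCU_DYNTICK_CTRL_CTR;
        assert(seq & RCU_DYNTICK_CTRL_CTR);
        assert(seq & RCU_DYNTICK_CTRL_MASK);
        return 0;
}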
@@ -290,15 +300,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 static void rcu_dynticks_eqs_enter(void)
 {
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-        int special;
+        int seq;
 
         /*
-         * CPUs seeing atomic_inc_return() must see prior RCU read-side
+         * CPUs seeing atomic_add_return() must see prior RCU read-side
          * critical sections, and we also must force ordering with the
          * next idle sojourn.
          */
-        special = atomic_inc_return(&rdtp->dynticks);
-        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1);
+        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+        /* Better be in an extended quiescent state! */
+        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                     (seq & RCU_DYNTICK_CTRL_CTR));
+        /* Better not have special action (TLB flush) pending! */
+        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                     (seq & RCU_DYNTICK_CTRL_MASK));
 }
 
 /*
@@ -308,15 +323,22 @@ static void rcu_dynticks_eqs_enter(void)
 static void rcu_dynticks_eqs_exit(void)
 {
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-        int special;
+        int seq;
 
         /*
-         * CPUs seeing atomic_inc_return() must see prior idle sojourns,
+         * CPUs seeing atomic_add_return() must see prior idle sojourns,
          * and we also must force ordering with the next RCU read-side
          * critical section.
          */
-        special = atomic_inc_return(&rdtp->dynticks);
-        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1));
+        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                     !(seq & RCU_DYNTICK_CTRL_CTR));
+        if (seq & RCU_DYNTICK_CTRL_MASK) {
+                atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
+                smp_mb__after_atomic(); /* _exit after clearing mask. */
+                /* Prefer duplicate flushes to losing a flush. */
+                rcu_eqs_special_exit();
+        }
 }
 
 /*
@@ -333,9 +355,9 @@ static void rcu_dynticks_eqs_online(void)
 {
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-        if (atomic_read(&rdtp->dynticks) & 0x1)
+        if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
                 return;
-        atomic_add(0x1, &rdtp->dynticks);
+        atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 }
 
 /*
@@ -347,7 +369,7 @@ bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-        return !(atomic_read(&rdtp->dynticks) & 0x1);
+        return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -358,7 +380,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 {
         int snap = atomic_add_return(0, &rdtp->dynticks);
 
-        return snap;
+        return snap & ~RCU_DYNTICK_CTRL_MASK;
 }
 
 /*
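The new masking in rcu_dynticks_snap() matters because the special bit can be set asynchronously by rcu_eqs_special_set() and cleared on EQS exit; without masking off RCU_DYNTICK_CTRL_MASK, a flush request arriving between two snapshots would change the snapshot value and make an idle CPU's counter appear to have advanced. A tiny self-contained check of that property (illustrative only, not kernel code):

#include <assert.h>

#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

int main(void)
{
        int idle = 2 * RCU_DYNTICK_CTRL_CTR;             /* CPU idle, nothing pending */
        int idle_flagged = idle | RCU_DYNTICK_CTRL_MASK; /* same idle sojourn, flush requested */

        /* Masked snapshots compare equal, so the grace-period code still
         * sees one uninterrupted extended quiescent state. */
        assert((idle & ~RCU_DYNTICK_CTRL_MASK) ==
               (idle_flagged & ~RCU_DYNTICK_CTRL_MASK));
        return 0;
}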
@@ -367,7 +389,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-        return !(snap & 0x1);
+        return !(snap & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -387,10 +409,33 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
 static void rcu_dynticks_momentary_idle(void)
 {
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-        int special = atomic_add_return(2, &rdtp->dynticks);
+        int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
+                                        &rdtp->dynticks);
 
         /* It is illegal to call this from idle state. */
-        WARN_ON_ONCE(!(special & 0x1));
+        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+}
+
+/*
+ * Set the special (bottom) bit of the specified CPU so that it
+ * will take special action (such as flushing its TLB) on the
+ * next exit from an extended quiescent state. Returns true if
+ * the bit was successfully set, or false if the CPU was not in
+ * an extended quiescent state.
+ */
+bool rcu_eqs_special_set(int cpu)
+{
+        int old;
+        int new;
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+        do {
+                old = atomic_read(&rdtp->dynticks);
+                if (old & RCU_DYNTICK_CTRL_CTR)
+                        return false;
+                new = old | RCU_DYNTICK_CTRL_MASK;
+        } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
+        return true;
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
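For reviewers wondering how the hooks are meant to be consumed (none of this is in the patch): an architecture that wants to elide TLB-flush IPIs to idle or nohz_full CPUs could override rcu_eqs_special_exit() to perform the flush on the next EQS exit, with the sender trying rcu_eqs_special_set() first and falling back to an IPI when the target is not in an extended quiescent state. In the rough sketch below, flush_tlb_local() and send_flush_ipi() are hypothetical placeholders, not existing kernel APIs.

/* Hypothetical architecture-side sketch; the helpers named here are
 * placeholders, and the override would have to be visible wherever
 * kernel/rcu/tree.c picks up rcu_eqs_special_exit(). */
#define rcu_eqs_special_exit() flush_tlb_local()

static void flush_tlb_one_cpu(int cpu)
{
        /* Idle/nohz CPU: flag it and let the flush run at EQS exit. */
        if (rcu_eqs_special_set(cpu))
                return;

        /* Busy CPU: flush it the conventional way. */
        send_flush_ipi(cpu);
}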