@@ -61,6 +61,7 @@
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
+#include <linux/tick.h>

 #include "tree.h"
 #include "rcu.h"
@@ -73,45 +74,31 @@
 /* Data structures. */

 /*
- * In order to export the rcu_state name to the tracing tools, it
- * needs to be added in the __tracepoint_string section.
- * This requires defining a separate variable tp_<sname>_varname
- * that points to the string being used, and this will allow
- * the tracing userspace tools to be able to decipher the string
- * address to the matching string.
+ * Steal a bit from the bottom of ->dynticks for idle entry/exit
+ * control.  Initially this is for TLB flushing.
  */
-#ifdef CONFIG_TRACING
-# define DEFINE_RCU_TPS(sname) \
-static char sname##_varname[] = #sname; \
-static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
-# define RCU_STATE_NAME(sname) sname##_varname
-#else
-# define DEFINE_RCU_TPS(sname)
-# define RCU_STATE_NAME(sname) __stringify(sname)
+#define RCU_DYNTICK_CTRL_MASK 0x1
+#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
+#ifndef rcu_eqs_special_exit
+#define rcu_eqs_special_exit() do { } while (0)
 #endif

-#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
-DEFINE_RCU_TPS(sname) \
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
-struct rcu_state sname##_state = { \
-	.level = { &sname##_state.node[0] }, \
-	.rda = &sname##_data, \
-	.call = cr, \
-	.gp_state = RCU_GP_IDLE, \
-	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
-	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-	.name = RCU_STATE_NAME(sname), \
-	.abbr = sabbr, \
-	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
-	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
-	.ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
-}
-
-RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
-RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
-
-static struct rcu_state *const rcu_state_p;
-LIST_HEAD(rcu_struct_flavors);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
+	.dynticks_nesting = 1,
+	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
+	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+};
+struct rcu_state rcu_state = {
+	.level = { &rcu_state.node[0] },
+	.gp_state = RCU_GP_IDLE,
+	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
+	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
+	.name = RCU_NAME,
+	.abbr = RCU_ABBR,
+	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
+	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
+	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
+};

 /* Dump rcu_node combining tree at boot to verify correct setup. */
 static bool dump_tree;
@@ -158,16 +145,14 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;

-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-		  struct rcu_node *rnp, unsigned long gps, unsigned long flags);
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+			      unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp,
-			       struct rcu_data *rdp, bool wake);
+static void invoke_rcu_callbacks(struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);

 /* rcuc/rcub kthread realtime priority */
@@ -183,7 +168,7 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);

-/* Retreive RCU kthreads priority for rcutorture */
+/* Retrieve RCU kthreads priority for rcutorture */
 int rcu_get_gp_kthreads_prio(void)
 {
 	return kthread_prio;
@@ -217,67 +202,24 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
-static int rcu_gp_in_progress(struct rcu_state *rsp)
+static int rcu_gp_in_progress(void)
 {
-	return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
-}
-
-/*
- * Note a quiescent state.  Because we do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period, this just sets a flag.
- * The caller must have disabled preemption.
- */
-void rcu_sched_qs(void)
-{
-	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
-	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
-		return;
-	trace_rcu_grace_period(TPS("rcu_sched"),
-			       __this_cpu_read(rcu_sched_data.gp_seq),
-			       TPS("cpuqs"));
-	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
-	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
-		return;
-	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-	rcu_report_exp_rdp(&rcu_sched_state,
-			   this_cpu_ptr(&rcu_sched_data), true);
+	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 }

-void rcu_bh_qs(void)
+void rcu_softirq_qs(void)
 {
-	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
-	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-		trace_rcu_grace_period(TPS("rcu_bh"),
-				       __this_cpu_read(rcu_bh_data.gp_seq),
-				       TPS("cpuqs"));
-		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
-	}
+	rcu_qs();
+	rcu_preempt_deferred_qs(current);
 }

-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
-
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = 1,
-	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
-};
-
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state.
  */
 static void rcu_dynticks_eqs_enter(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;

 	/*
@@ -285,7 +227,7 @@ static void rcu_dynticks_eqs_enter(void)
 	 * critical sections, and we also must force ordering with the
 	 * next idle sojourn.
 	 */
-	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 	/* Better be in an extended quiescent state! */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 		     (seq & RCU_DYNTICK_CTRL_CTR));
@@ -300,7 +242,7 @@ static void rcu_dynticks_eqs_enter(void)
  */
 static void rcu_dynticks_eqs_exit(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;

 	/*
@@ -308,11 +250,11 @@ static void rcu_dynticks_eqs_exit(void)
 	 * and we also must force ordering with the next RCU read-side
 	 * critical section.
 	 */
-	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 		     !(seq & RCU_DYNTICK_CTRL_CTR));
 	if (seq & RCU_DYNTICK_CTRL_MASK) {
-		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
+		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
 		smp_mb__after_atomic(); /* _exit after clearing mask. */
 		/* Prefer duplicate flushes to losing a flush. */
 		rcu_eqs_special_exit();
@@ -331,11 +273,11 @@ static void rcu_dynticks_eqs_exit(void)
  */
 static void rcu_dynticks_eqs_online(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

-	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
 		return;
-	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 }

 /*
@@ -345,18 +287,18 @@ static void rcu_dynticks_eqs_online(void)
  */
 bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

-	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+	return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }

 /*
  * Snapshot the ->dynticks counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-	int snap = atomic_add_return(0, &rdtp->dynticks);
+	int snap = atomic_add_return(0, &rdp->dynticks);

 	return snap & ~RCU_DYNTICK_CTRL_MASK;
 }
@@ -371,13 +313,13 @@ static bool rcu_dynticks_in_eqs(int snap)
 }

 /*
- * Return true if the CPU corresponding to the specified rcu_dynticks
+ * Return true if the CPU corresponding to the specified rcu_data
  * structure has spent some time in an extended quiescent state since
  * rcu_dynticks_snap() returned the specified snapshot.
  */
-static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
+static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 {
-	return snap != rcu_dynticks_snap(rdtp);
+	return snap != rcu_dynticks_snap(rdp);
 }

 /*
@@ -391,14 +333,14 @@ bool rcu_eqs_special_set(int cpu)
 {
 	int old;
 	int new;
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

 	do {
-		old = atomic_read(&rdtp->dynticks);
+		old = atomic_read(&rdp->dynticks);
 		if (old & RCU_DYNTICK_CTRL_CTR)
 			return false;
 		new = old | RCU_DYNTICK_CTRL_MASK;
-	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
+	} while (atomic_cmpxchg(&rdp->dynticks, old, new) != old);
 	return true;
 }
@@ -413,82 +355,30 @@ bool rcu_eqs_special_set(int cpu)
  *
  * The caller must have disabled interrupts and must not be idle.
  */
-static void rcu_momentary_dyntick_idle(void)
+static void __maybe_unused rcu_momentary_dyntick_idle(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	int special;

-	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
-	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
+	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
+				    &this_cpu_ptr(&rcu_data)->dynticks);
 	/* It is illegal to call this from idle state. */
 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+	rcu_preempt_deferred_qs(current);
 }

-/*
- * Note a context switch.  This is a quiescent state for RCU-sched,
- * and requires special handling for preemptible RCU.
- * The caller must have disabled interrupts.
- */
-void rcu_note_context_switch(bool preempt)
-{
-	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs();
-	rcu_preempt_note_context_switch(preempt);
-	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
-		goto out;
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
-		rcu_momentary_dyntick_idle();
-	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
-	if (!preempt)
-		rcu_tasks_qs(current);
-out:
-	trace_rcu_utilization(TPS("End context switch"));
-	barrier(); /* Avoid RCU read-side critical sections leaking up. */
-}
-EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-
-/*
- * Register a quiescent state for all RCU flavors.  If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desperate need of a quiescent state, which will normally
- * be none of them).  Either way, do a lightweight quiescent state for
- * all RCU flavors.
- *
- * The barrier() calls are redundant in the common case when this is
- * called externally, but just in case this is called from within this
- * file.
+/**
+ * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
  *
+ * If the current CPU is idle or running at a first-level (not nested)
+ * interrupt from idle, return true.  The caller must have at least
+ * disabled preemption.
  */
-void rcu_all_qs(void)
+static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	unsigned long flags;
-
-	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
-		return;
-	preempt_disable();
-	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
-		preempt_enable();
-		return;
-	}
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
-		local_irq_save(flags);
-		rcu_momentary_dyntick_idle();
-		local_irq_restore(flags);
-	}
-	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
-		rcu_sched_qs();
-	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
-	barrier(); /* Avoid RCU read-side critical sections leaking up. */
-	preempt_enable();
+	return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 &&
+	       __this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1;
 }
-EXPORT_SYMBOL_GPL(rcu_all_qs);

 #define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
 static long blimit = DEFAULT_RCU_BLIMIT;
@@ -505,13 +395,47 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;

+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = ULONG_MAX;
+module_param(jiffies_till_sched_qs, ulong, 0444);
+static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
+module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
+
+/*
+ * Make sure that we give the grace-period kthread time to detect any
+ * idle CPUs before taking active measures to force quiescent states.
+ * However, don't go below 100 milliseconds, adjusted upwards for really
+ * large systems.
+ */
+static void adjust_jiffies_till_sched_qs(void)
+{
+	unsigned long j;
+
+	/* If jiffies_till_sched_qs was specified, respect the request. */
+	if (jiffies_till_sched_qs != ULONG_MAX) {
+		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
+		return;
+	}
+	j = READ_ONCE(jiffies_till_first_fqs) +
+		      2 * READ_ONCE(jiffies_till_next_fqs);
+	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
+		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
+	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
+	WRITE_ONCE(jiffies_to_sched_qs, j);
+}
+
 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
 {
 	ulong j;
 	int ret = kstrtoul(val, 0, &j);

-	if (!ret)
+	if (!ret) {
 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+		adjust_jiffies_till_sched_qs();
+	}
 	return ret;
 }

@@ -520,8 +444,10 @@ static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param
 	ulong j;
 	int ret = kstrtoul(val, 0, &j);

-	if (!ret)
+	if (!ret) {
 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+		adjust_jiffies_till_sched_qs();
+	}
 	return ret;
 }
@@ -539,15 +465,8 @@ module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_fi
 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 module_param(rcu_kick_kthreads, bool, 0644);

-/*
- * How long the grace period must be before we start recruiting
- * quiescent-state help from rcu_note_context_switch().
- */
-static ulong jiffies_till_sched_qs = HZ / 10;
-module_param(jiffies_till_sched_qs, ulong, 0444);
-
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
-static void force_quiescent_state(struct rcu_state *rsp);
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
+static void force_quiescent_state(void);
 static int rcu_pending(void);

 /*
@@ -555,28 +474,10 @@ static int rcu_pending(void);
  */
 unsigned long rcu_get_gp_seq(void)
 {
-	return READ_ONCE(rcu_state_p->gp_seq);
+	return READ_ONCE(rcu_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

-/*
- * Return the number of RCU-sched GPs completed thus far for debug & stats.
- */
-unsigned long rcu_sched_get_gp_seq(void)
-{
-	return READ_ONCE(rcu_sched_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
-
-/*
- * Return the number of RCU-bh GPs completed thus far for debug & stats.
- */
-unsigned long rcu_bh_get_gp_seq(void)
-{
-	return READ_ONCE(rcu_bh_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
-
 /*
  * Return the number of RCU expedited batches completed thus far for
  * debug & stats.  Odd numbers mean that a batch is in progress, even
@@ -585,47 +486,19 @@ EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
  */
 unsigned long rcu_exp_batches_completed(void)
 {
-	return rcu_state_p->expedited_sequence;
+	return rcu_state.expedited_sequence;
 }
 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

-/*
- * Return the number of RCU-sched expedited batches completed thus far
- * for debug & stats.  Similar to rcu_exp_batches_completed().
- */
-unsigned long rcu_exp_batches_completed_sched(void)
-{
-	return rcu_sched_state.expedited_sequence;
-}
-EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
-
 /*
  * Force a quiescent state.
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(rcu_state_p);
+	force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

-/*
- * Force a quiescent state for RCU BH.
- */
-void rcu_bh_force_quiescent_state(void)
-{
-	force_quiescent_state(&rcu_bh_state);
-}
-EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-
-/*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
-	force_quiescent_state(&rcu_sched_state);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
 /*
  * Show the state of the grace-period kthreads.
  */
@@ -634,31 +507,28 @@ void show_rcu_gp_kthreads(void)
 	int cpu;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;

-	for_each_rcu_flavor(rsp) {
-		pr_info("%s: wait state: %d ->state: %#lx\n",
-			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
-		rcu_for_each_node_breadth_first(rsp, rnp) {
-			if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
-				continue;
-			pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
-				rnp->grplo, rnp->grphi, rnp->gp_seq,
-				rnp->gp_seq_needed);
-			if (!rcu_is_leaf_node(rnp))
+	pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
+		rcu_state.gp_state, rcu_state.gp_kthread->state);
+	rcu_for_each_node_breadth_first(rnp) {
+		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+			continue;
+		pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+			rnp->grplo, rnp->grphi, rnp->gp_seq,
+			rnp->gp_seq_needed);
+		if (!rcu_is_leaf_node(rnp))
+			continue;
+		for_each_leaf_node_possible_cpu(rnp, cpu) {
+			rdp = per_cpu_ptr(&rcu_data, cpu);
+			if (rdp->gpwrap ||
+			    ULONG_CMP_GE(rcu_state.gp_seq,
+					 rdp->gp_seq_needed))
 				continue;
-			for_each_leaf_node_possible_cpu(rnp, cpu) {
-				rdp = per_cpu_ptr(rsp->rda, cpu);
-				if (rdp->gpwrap ||
-				    ULONG_CMP_GE(rsp->gp_seq,
-						 rdp->gp_seq_needed))
-					continue;
-				pr_info("\tcpu %d ->gp_seq_needed %lu\n",
-					cpu, rdp->gp_seq_needed);
-			}
+			pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+				cpu, rdp->gp_seq_needed);
 		}
-		/* sched_show_task(rsp->gp_kthread); */
 	}
+	/* sched_show_task(rcu_state.gp_kthread); */
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
@@ -668,34 +538,25 @@ EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 			    unsigned long *gp_seq)
 {
-	struct rcu_state *rsp = NULL;
-
 	switch (test_type) {
 	case RCU_FLAVOR:
-		rsp = rcu_state_p;
-		break;
 	case RCU_BH_FLAVOR:
-		rsp = &rcu_bh_state;
-		break;
 	case RCU_SCHED_FLAVOR:
-		rsp = &rcu_sched_state;
+		*flags = READ_ONCE(rcu_state.gp_flags);
+		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 		break;
 	default:
 		break;
 	}
-	if (rsp == NULL)
-		return;
-	*flags = READ_ONCE(rsp->gp_flags);
-	*gp_seq = rcu_seq_current(&rsp->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

 /*
- * Return the root node of the specified rcu_state structure.
+ * Return the root node of the rcu_state structure.
  */
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+static struct rcu_node *rcu_get_root(void)
 {
-	return &rsp->node[0];
+	return &rcu_state.node[0];
 }

 /*
@@ -708,28 +569,25 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  */
 static void rcu_eqs_enter(bool user)
 {
-	struct rcu_state *rsp;
-	struct rcu_data *rdp;
-	struct rcu_dynticks *rdtp;
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

-	rdtp = this_cpu_ptr(&rcu_dynticks);
-	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
+	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
+	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting == 0);
-	if (rdtp->dynticks_nesting != 1) {
-		rdtp->dynticks_nesting--;
+		     rdp->dynticks_nesting == 0);
+	if (rdp->dynticks_nesting != 1) {
+		rdp->dynticks_nesting--;
 		return;
 	}

 	lockdep_assert_irqs_disabled();
-	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
+	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(rsp->rda);
-		do_nocb_deferred_wakeup(rdp);
-	}
+	rdp = this_cpu_ptr(&rcu_data);
+	do_nocb_deferred_wakeup(rdp);
 	rcu_prepare_for_idle();
-	WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+	rcu_preempt_deferred_qs(current);
+	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 	rcu_dynticks_eqs_enter();
 	rcu_dynticks_task_enter();
 }
@@ -770,44 +628,61 @@ void rcu_user_enter(void)
 }
 #endif /* CONFIG_NO_HZ_FULL */

-/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
- *
+/*
  * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
  *
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_nmi_exit(void)
+static __always_inline void rcu_nmi_exit_common(bool irq)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

 	/*
 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 	 * (We are exiting an NMI handler, so RCU better be paying attention
 	 * to us!)
 	 */
-	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
 	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

 	/*
 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
 	 * leave it in non-RCU-idle state.
 	 */
-	if (rdtp->dynticks_nmi_nesting != 1) {
-		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
-		WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
-			   rdtp->dynticks_nmi_nesting - 2);
+	if (rdp->dynticks_nmi_nesting != 1) {
+		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
+			   rdp->dynticks_nmi_nesting - 2);
 		return;
 	}

 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-	trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
-	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
+	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+
+	if (irq)
+		rcu_prepare_for_idle();
+
 	rcu_dynticks_eqs_enter();
+
+	if (irq)
+		rcu_dynticks_task_enter();
+}
+
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ * @irq: Is this call from rcu_irq_exit?
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_exit(void)
+{
+	rcu_nmi_exit_common(false);
 }

 /**
@@ -831,14 +706,8 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
 	lockdep_assert_irqs_disabled();
-	if (rdtp->dynticks_nmi_nesting == 1)
-		rcu_prepare_for_idle();
-	rcu_nmi_exit();
-	if (rdtp->dynticks_nmi_nesting == 0)
-		rcu_dynticks_task_enter();
+	rcu_nmi_exit_common(true);
 }

 /*
@@ -866,24 +735,25 @@ void rcu_irq_exit_irqson(void)
  */
 static void rcu_eqs_exit(bool user)
 {
-	struct rcu_dynticks *rdtp;
+	struct rcu_data *rdp;
 	long oldval;

 	lockdep_assert_irqs_disabled();
-	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
+	rdp = this_cpu_ptr(&rcu_data);
+	oldval = rdp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 	if (oldval) {
-		rdtp->dynticks_nesting++;
+		rdp->dynticks_nesting++;
 		return;
 	}
 	rcu_dynticks_task_exit();
 	rcu_dynticks_eqs_exit();
 	rcu_cleanup_after_idle();
-	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
+	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
-	WRITE_ONCE(rdtp->dynticks_nesting, 1);
-	WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+	WRITE_ONCE(rdp->dynticks_nesting, 1);
+	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
+	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 }

 /**
@@ -921,24 +791,25 @@ void rcu_user_exit(void)
 #endif /* CONFIG_NO_HZ_FULL */

 /**
- * rcu_nmi_enter - inform RCU of entry to NMI context
+ * rcu_nmi_enter_common - inform RCU of entry to NMI context
+ * @irq: Is this call from rcu_irq_enter?
  *
- * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
- * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
+ * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
  * that the CPU is active.  This implementation permits nested NMIs, as
  * long as the nesting level does not overflow an int.  (You will probably
  * run out of stack space first.)
  *
- * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_nmi_enter(void)
+static __always_inline void rcu_nmi_enter_common(bool irq)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	long incby = 2;

 	/* Complain about underflow. */
-	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

 	/*
 	 * If idle from RCU viewpoint, atomically increment ->dynticks
@@ -949,17 +820,33 @@ void rcu_nmi_enter(void)
 	 * period (observation due to Andy Lutomirski).
 	 */
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
+
+		if (irq)
+			rcu_dynticks_task_exit();
+
 		rcu_dynticks_eqs_exit();
+
+		if (irq)
+			rcu_cleanup_after_idle();
+
 		incby = 1;
 	}
 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
-			  rdtp->dynticks_nmi_nesting,
-			  rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
-	WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
-		   rdtp->dynticks_nmi_nesting + incby);
+			  rdp->dynticks_nmi_nesting,
+			  rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
+	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
+		   rdp->dynticks_nmi_nesting + incby);
 	barrier();
 }

+/**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ */
+void rcu_nmi_enter(void)
+{
+	rcu_nmi_enter_common(false);
+}
+
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
  *
@@ -984,14 +871,8 @@ void rcu_nmi_enter(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
 	lockdep_assert_irqs_disabled();
-	if (rdtp->dynticks_nmi_nesting == 0)
-		rcu_dynticks_task_exit();
-	rcu_nmi_enter();
-	if (rdtp->dynticks_nmi_nesting == 1)
-		rcu_cleanup_after_idle();
+	rcu_nmi_enter_common(true);
 }

 /*
@@ -1043,7 +924,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 	cpu = task_cpu(t);
 	if (!task_curr(t))
 		return; /* This task is not running on that CPU. */
-	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }

 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1054,11 +935,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
  * Disable preemption to avoid false positives that could otherwise
  * happen due to the current CPU number being sampled, this task being
  * preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline.  Because there are
- * multiple flavors of RCU, and because this function can be called in the
- * midst of updating the flavors while a given CPU coming online or going
- * offline, it is necessary to check all flavors.  If any of the flavors
- * believe that given CPU is online, it is considered to be online.
+ * then determining that its old CPU is now offline.
  *
  * Disable checking if in an NMI handler because we cannot safely
  * report errors from NMI handlers anyway.  In addition, it is OK to use
@@ -1069,39 +946,22 @@ bool rcu_lockdep_current_cpu_online(void)
 {
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
+	bool ret = false;

 	if (in_nmi() || !rcu_scheduler_fully_active)
 		return true;
 	preempt_disable();
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(rsp->rda);
-		rnp = rdp->mynode;
-		if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
-			preempt_enable();
-			return true;
-		}
-	}
+	rdp = this_cpu_ptr(&rcu_data);
+	rnp = rdp->mynode;
+	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
+		ret = true;
 	preempt_enable();
-	return false;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

-/**
- * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
- *
- * If the current CPU is idle or running at a first-level (not nested)
- * interrupt from idle, return true.  The caller must have at least
- * disabled preemption.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
-	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
-	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
-}
-
 /*
  * We are reporting a quiescent state on behalf of some other CPU, so
  * it is our responsibility to check for and handle potential overflow
@@ -1126,9 +986,9 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
+	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rcu_gpnum_ovf(rdp->mynode, rdp);
 		return 1;
 	}
@@ -1177,35 +1037,15 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
-		rdp->dynticks_fqs++;
-		rcu_gpnum_ovf(rnp, rdp);
-		return 1;
-	}
-
-	/*
-	 * Has this CPU encountered a cond_resched() since the beginning
-	 * of the grace period?  For this to be the case, the CPU has to
-	 * have noticed the current grace period.  This might not be the
-	 * case for nohz_full CPUs looping in the kernel.
-	 */
-	jtsq = jiffies_till_sched_qs;
-	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
-	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-	    rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
+	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
+		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rcu_gpnum_ovf(rnp, rdp);
 		return 1;
-	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
-		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
-		smp_store_release(ruqp, true);
 	}

 	/* If waiting too long on an offline CPU, complain. */
 	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
-	    time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+	    time_after(jiffies, rcu_state.gp_start + HZ)) {
 		bool onl;
 		struct rcu_node *rnp1;
@@ -1226,39 +1066,56 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)

 	/*
 	 * A CPU running for an extended time within the kernel can
-	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
-	 * even context-switching back and forth between a pair of
-	 * in-kernel CPU-bound tasks cannot advance grace periods.
-	 * So if the grace period is old enough, make the CPU pay attention.
-	 * Note that the unsynchronized assignments to the per-CPU
-	 * rcu_need_heavy_qs variable are safe.  Yes, setting of
-	 * bits can be lost, but they will be set again on the next
-	 * force-quiescent-state pass.  So lost bit sets do not result
-	 * in incorrect behavior, merely in a grace period lasting
-	 * a few jiffies longer than it might otherwise.  Because
-	 * there are at most four threads involved, and because the
-	 * updates are only once every few jiffies, the probability of
-	 * lossage (and thus of slight grace-period extension) is
-	 * quite low.
+	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
+	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
+	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
+	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
+	 * variable are safe because the assignments are repeated if this
+	 * CPU failed to pass through a quiescent state.  This code
+	 * also checks .jiffies_resched in case jiffies_to_sched_qs
+	 * is set way high.
 	 */
-	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+	jtsq = READ_ONCE(jiffies_to_sched_qs);
+	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
-	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
-	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
+	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
+	     time_after(jiffies, rcu_state.jiffies_resched))) {
 		WRITE_ONCE(*rnhqp, true);
 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
 		smp_store_release(ruqp, true);
-		rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
+	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
+		WRITE_ONCE(*ruqp, true);
 	}

 	/*
-	 * If more than halfway to RCU CPU stall-warning time, do a
-	 * resched_cpu() to try to loosen things up a bit.  Also check to
-	 * see if the CPU is getting hammered with interrupts, but only
-	 * once per grace period, just to keep the IPIs down to a dull roar.
+	 * NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks!
+	 * The above code handles this, but only for straight cond_resched().
+	 * And some in-kernel loops check need_resched() before calling
+	 * cond_resched(), which defeats the above code for CPUs that are
+	 * running in-kernel with scheduling-clock interrupts disabled.
+	 * So hit them over the head with the resched_cpu() hammer!
 	 */
-	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
+	if (tick_nohz_full_cpu(rdp->cpu) &&
+		   time_after(jiffies,
+			      READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
 		resched_cpu(rdp->cpu);
+		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+	}
+
+	/*
+	 * If more than halfway to RCU CPU stall-warning time, invoke
+	 * resched_cpu() more frequently to try to loosen things up a bit.
+	 * Also check to see if the CPU is getting hammered with interrupts,
+	 * but only once per grace period, just to keep the IPIs down to
+	 * a dull roar.
+	 */
+	if (time_after(jiffies, rcu_state.jiffies_resched)) {
+		if (time_after(jiffies,
+			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
+			resched_cpu(rdp->cpu);
+			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+		}
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 		    (rnp->ffmask & rdp->grpmask)) {
@@ -1272,17 +1129,17 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }

-static void record_gp_stall_check_time(struct rcu_state *rsp)
+static void record_gp_stall_check_time(void)
 {
 	unsigned long j = jiffies;
 	unsigned long j1;

-	rsp->gp_start = j;
+	rcu_state.gp_start = j;
 	j1 = rcu_jiffies_till_stall_check();
 	/* Record ->gp_start before ->jiffies_stall. */
-	smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
-	rsp->jiffies_resched = j + j1 / 2;
-	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
+	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+	rcu_state.jiffies_resched = j + j1 / 2;
+	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
 }

 /*
@@ -1298,25 +1155,23 @@ static const char *gp_state_getname(short gs)
 /*
  * Complain about starvation of grace-period kthread.
  */
-static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+static void rcu_check_gp_kthread_starvation(void)
 {
-	unsigned long gpa;
+	struct task_struct *gpk = rcu_state.gp_kthread;
 	unsigned long j;

-	j = jiffies;
-	gpa = READ_ONCE(rsp->gp_activity);
-	if (j - gpa > 2 * HZ) {
+	j = jiffies - READ_ONCE(rcu_state.gp_activity);
+	if (j > 2 * HZ) {
 		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
-		       rsp->name, j - gpa,
-		       (long)rcu_seq_current(&rsp->gp_seq),
-		       rsp->gp_flags,
-		       gp_state_getname(rsp->gp_state), rsp->gp_state,
-		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
-		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
-		if (rsp->gp_kthread) {
+		       rcu_state.name, j,
+		       (long)rcu_seq_current(&rcu_state.gp_seq),
+		       rcu_state.gp_flags,
+		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
+		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+		if (gpk) {
 			pr_err("RCU grace-period kthread stack dump:\n");
-			sched_show_task(rsp->gp_kthread);
-			wake_up_process(rsp->gp_kthread);
+			sched_show_task(gpk);
+			wake_up_process(gpk);
 		}
 	}
 }
@@ -1327,13 +1182,13 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
  * that don't support NMI-based stack dumps.  The NMI-triggered stack
  * traces are more accurate because they are printed by the target CPU.
  */
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+static void rcu_dump_cpu_stacks(void)
 {
 	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp;

-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		for_each_leaf_node_possible_cpu(rnp, cpu)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1347,19 +1202,20 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
  * If too much time has passed in the current grace period, and if
  * so configured, go kick the relevant kthreads.
  */
-static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+static void rcu_stall_kick_kthreads(void)
 {
 	unsigned long j;

 	if (!rcu_kick_kthreads)
 		return;
-	j = READ_ONCE(rsp->jiffies_kick_kthreads);
-	if (time_after(jiffies, j) && rsp->gp_kthread &&
-	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
-		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
+	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
+	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
+		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
+			  rcu_state.name);
 		rcu_ftrace_dump(DUMP_ALL);
-		wake_up_process(rsp->gp_kthread);
-		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
+		wake_up_process(rcu_state.gp_kthread);
+		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
 	}
 }
@@ -1369,18 +1225,18 @@ static void panic_on_rcu_stall(void)
 		panic("RCU Stall\n");
 }

-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
+static void print_other_cpu_stall(unsigned long gp_seq)
 {
 	int cpu;
 	unsigned long flags;
 	unsigned long gpa;
 	unsigned long j;
 	int ndetected = 0;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 	long totqlen = 0;

 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;

@@ -1389,15 +1245,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
+	pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
 	print_cpu_stall_info_begin();
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-					print_cpu_stall_info(rsp, cpu);
+					print_cpu_stall_info(cpu);
 					ndetected++;
 				}
 		}
@@ -1406,52 +1262,52 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)

 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
-		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 							    cpu)->cblist);
 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
-	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
-	       (long)rcu_seq_current(&rsp->gp_seq), totqlen);
+	       smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
+	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
 	if (ndetected) {
-		rcu_dump_cpu_stacks(rsp);
+		rcu_dump_cpu_stacks();

 		/* Complain about tasks blocking the grace period. */
-		rcu_print_detail_task_stall(rsp);
+		rcu_print_detail_task_stall();
 	} else {
-		if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
+		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
-			gpa = READ_ONCE(rsp->gp_activity);
+			gpa = READ_ONCE(rcu_state.gp_activity);
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
-			       rsp->name, j - gpa, j, gpa,
-			       jiffies_till_next_fqs,
-			       rcu_get_root(rsp)->qsmask);
+			       rcu_state.name, j - gpa, j, gpa,
+			       READ_ONCE(jiffies_till_next_fqs),
+			       rcu_get_root()->qsmask);
 			/* In this case, the current CPU might be at fault. */
 			sched_show_task(current);
 		}
 	}
 	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
-		WRITE_ONCE(rsp->jiffies_stall,
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+		WRITE_ONCE(rcu_state.jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();

 	panic_on_rcu_stall();

-	force_quiescent_state(rsp);  /* Kick them all. */
+	force_quiescent_state();  /* Kick them all. */
 }

-static void print_cpu_stall(struct rcu_state *rsp)
+static void print_cpu_stall(void)
 {
 	int cpu;
 	unsigned long flags;
-	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+	struct rcu_node *rnp = rcu_get_root();
 	long totqlen = 0;

 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;
@@ -1460,27 +1316,27 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
+	pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
 	print_cpu_stall_info_begin();
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-	print_cpu_stall_info(rsp, smp_processor_id());
+	print_cpu_stall_info(smp_processor_id());
 	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
-		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 							    cpu)->cblist);
 	pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
-		jiffies - rsp->gp_start,
-		(long)rcu_seq_current(&rsp->gp_seq), totqlen);
+		jiffies - rcu_state.gp_start,
+		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();

-	rcu_dump_cpu_stacks(rsp);
+	rcu_dump_cpu_stacks();

 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
-		WRITE_ONCE(rsp->jiffies_stall,
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+		WRITE_ONCE(rcu_state.jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

@@ -1493,10 +1349,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	 * progress and it could be we're stuck in kernel space without context
 	 * switches for an entirely unreasonable amount of time.
 	 */
-	resched_cpu(smp_processor_id());
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
 }

-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+static void check_cpu_stall(struct rcu_data *rdp)
 {
 	unsigned long gs1;
 	unsigned long gs2;
@@ -1507,54 +1364,55 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
struct rcu_node *rnp;
|
|
|
|
|
|
if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
|
|
|
- !rcu_gp_in_progress(rsp))
|
|
|
+ !rcu_gp_in_progress())
|
|
|
return;
|
|
|
- rcu_stall_kick_kthreads(rsp);
|
|
|
+ rcu_stall_kick_kthreads();
|
|
|
j = jiffies;
|
|
|
|
|
|
/*
|
|
|
* Lots of memory barriers to reject false positives.
|
|
|
*
|
|
|
- * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
|
|
|
- * then rsp->gp_start, and finally another copy of rsp->gp_seq.
|
|
|
- * These values are updated in the opposite order with memory
|
|
|
- * barriers (or equivalent) during grace-period initialization
|
|
|
- * and cleanup. Now, a false positive can occur if we get an new
|
|
|
- * value of rsp->gp_start and a old value of rsp->jiffies_stall.
|
|
|
- * But given the memory barriers, the only way that this can happen
|
|
|
- * is if one grace period ends and another starts between these
|
|
|
- * two fetches. This is detected by comparing the second fetch
|
|
|
- * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
|
|
|
+ * The idea is to pick up rcu_state.gp_seq, then
|
|
|
+ * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
|
|
|
+ * another copy of rcu_state.gp_seq. These values are updated in
|
|
|
+ * the opposite order with memory barriers (or equivalent) during
|
|
|
+ * grace-period initialization and cleanup. Now, a false positive
|
|
|
+ * can occur if we get an new value of rcu_state.gp_start and a old
|
|
|
+ * value of rcu_state.jiffies_stall. But given the memory barriers,
|
|
|
+ * the only way that this can happen is if one grace period ends
|
|
|
+ * and another starts between these two fetches. This is detected
|
|
|
+ * by comparing the second fetch of rcu_state.gp_seq with the
|
|
|
+ * previous fetch from rcu_state.gp_seq.
|
|
|
*
|
|
|
- * Given this check, comparisons of jiffies, rsp->jiffies_stall,
|
|
|
- * and rsp->gp_start suffice to forestall false positives.
|
|
|
+ * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
|
|
|
+ * and rcu_state.gp_start suffice to forestall false positives.
|
|
|
*/
|
|
|
- gs1 = READ_ONCE(rsp->gp_seq);
|
|
|
+ gs1 = READ_ONCE(rcu_state.gp_seq);
|
|
|
smp_rmb(); /* Pick up ->gp_seq first... */
|
|
|
- js = READ_ONCE(rsp->jiffies_stall);
|
|
|
+ js = READ_ONCE(rcu_state.jiffies_stall);
|
|
|
smp_rmb(); /* ...then ->jiffies_stall before the rest... */
|
|
|
- gps = READ_ONCE(rsp->gp_start);
|
|
|
+ gps = READ_ONCE(rcu_state.gp_start);
|
|
|
smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
|
|
|
- gs2 = READ_ONCE(rsp->gp_seq);
|
|
|
+ gs2 = READ_ONCE(rcu_state.gp_seq);
|
|
|
if (gs1 != gs2 ||
|
|
|
ULONG_CMP_LT(j, js) ||
|
|
|
ULONG_CMP_GE(gps, js))
|
|
|
return; /* No stall or GP completed since entering function. */
|
|
|
rnp = rdp->mynode;
|
|
|
jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
|
|
|
- if (rcu_gp_in_progress(rsp) &&
|
|
|
+ if (rcu_gp_in_progress() &&
|
|
|
(READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
|
|
|
- cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
|
|
|
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
|
|
|
|
|
|
/* We haven't checked in, so go dump stack. */
|
|
|
- print_cpu_stall(rsp);
|
|
|
+ print_cpu_stall();
|
|
|
|
|
|
- } else if (rcu_gp_in_progress(rsp) &&
|
|
|
+ } else if (rcu_gp_in_progress() &&
|
|
|
ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
|
|
|
- cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
|
|
|
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
|
|
|
|
|
|
/* They had a few time units to dump stack, so complain. */
|
|
|
- print_other_cpu_stall(rsp, gs2);
|
|
|
+ print_other_cpu_stall(gs2);
|
|
|
}
|
|
|
}
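The ordering argument above (sample ->gp_seq, then ->jiffies_stall, then ->gp_start, then ->gp_seq again, and discard the sample when the two ->gp_seq reads disagree) is the usual consistent-snapshot pattern. Below is a minimal user-space sketch of the same idea using C11 acquire loads; the struct and function names are invented for illustration and this is not kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct gp_snapshot {
	unsigned long gp_seq;
	unsigned long jiffies_stall;
	unsigned long gp_start;
};

/*
 * Readers sample in this order; a writer updates the fields in the
 * opposite order with release stores (not shown).  If a grace period
 * ended and another started mid-sample, the two gp_seq reads differ
 * and the caller simply tries again later.
 */
static bool gp_snapshot_read(const _Atomic unsigned long *gp_seq,
			     const _Atomic unsigned long *jiffies_stall,
			     const _Atomic unsigned long *gp_start,
			     struct gp_snapshot *out)
{
	unsigned long gs1, gs2;

	gs1 = atomic_load_explicit(gp_seq, memory_order_acquire);
	out->jiffies_stall = atomic_load_explicit(jiffies_stall,
						  memory_order_acquire);
	out->gp_start = atomic_load_explicit(gp_start, memory_order_acquire);
	gs2 = atomic_load_explicit(gp_seq, memory_order_acquire);
	if (gs1 != gs2)
		return false;	/* raced with a grace-period boundary */
	out->gp_seq = gs1;
	return true;
}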
|
|
|
|
|
@@ -1569,17 +1427,14 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
*/
|
|
|
void rcu_cpu_stall_reset(void)
|
|
|
{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
|
|
|
+ WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
|
|
|
}
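The jiffies + ULONG_MAX / 2 write works because the stall code compares times with wraparound-aware helpers such as ULONG_CMP_LT(), which treat a value half the counter space ahead as "in the future" even across a wrap. A stand-alone sketch follows; the comparison body is an assumed equivalent formulation, not copied from the kernel headers.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound-aware "a comes before b", in the spirit of ULONG_CMP_LT(). */
static bool ulong_before(unsigned long a, unsigned long b)
{
	return a - b > ULONG_MAX / 2;	/* assumption: modular-distance test */
}

int main(void)
{
	unsigned long j  = ULONG_MAX - 10;	/* "now", about to wrap */
	unsigned long js = j + ULONG_MAX / 2;	/* reset stall deadline */

	/* Still before the deadline, even once the clock wraps past zero. */
	printf("%d %d\n", ulong_before(j, js), ulong_before(j + 100, js));
	return 0;
}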
|
|
|
|
|
|
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
|
|
|
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
|
|
|
unsigned long gp_seq_req, const char *s)
|
|
|
{
|
|
|
- trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
|
|
|
+ trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
|
|
|
rnp->level, rnp->grplo, rnp->grphi, s);
|
|
|
}
|
|
|
|
|
@@ -1603,7 +1458,6 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
|
|
|
unsigned long gp_seq_req)
|
|
|
{
|
|
|
bool ret = false;
|
|
|
- struct rcu_state *rsp = rdp->rsp;
|
|
|
struct rcu_node *rnp;
|
|
|
|
|
|
/*
|
|
@@ -1647,18 +1501,18 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
|
|
|
}
|
|
|
|
|
|
/* If GP already in progress, just leave, otherwise start one. */
|
|
|
- if (rcu_gp_in_progress(rsp)) {
|
|
|
+ if (rcu_gp_in_progress()) {
|
|
|
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
|
|
|
goto unlock_out;
|
|
|
}
|
|
|
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
|
|
|
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
|
|
|
- rsp->gp_req_activity = jiffies;
|
|
|
- if (!rsp->gp_kthread) {
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
|
|
|
+ rcu_state.gp_req_activity = jiffies;
|
|
|
+ if (!rcu_state.gp_kthread) {
|
|
|
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
|
|
|
goto unlock_out;
|
|
|
}
|
|
|
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
|
|
|
+ trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
|
|
|
ret = true; /* Caller must wake GP kthread. */
|
|
|
unlock_out:
|
|
|
/* Push furthest requested GP to leaf node and rcu_data structure. */
|
|
@@ -1675,10 +1529,10 @@ unlock_out:
|
|
|
* Clean up any old requests for the just-ended grace period. Also return
|
|
|
* whether any additional grace periods have been requested.
|
|
|
*/
|
|
|
-static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
|
|
|
+static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
|
|
|
{
|
|
|
bool needmore;
|
|
|
- struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
|
|
|
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
|
|
|
|
|
|
needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
|
|
|
if (!needmore)
|
|
@@ -1689,19 +1543,18 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Awaken the grace-period kthread for the specified flavor of RCU.
|
|
|
- * Don't do a self-awaken, and don't bother awakening when there is
|
|
|
- * nothing for the grace-period kthread to do (as in several CPUs
|
|
|
- * raced to awaken, and we lost), and finally don't try to awaken
|
|
|
- * a kthread that has not yet been created.
|
|
|
+ * Awaken the grace-period kthread. Don't do a self-awaken, and don't
|
|
|
+ * bother awakening when there is nothing for the grace-period kthread
|
|
|
+ * to do (as in several CPUs raced to awaken, and we lost), and finally
|
|
|
+ * don't try to awaken a kthread that has not yet been created.
|
|
|
*/
|
|
|
-static void rcu_gp_kthread_wake(struct rcu_state *rsp)
|
|
|
+static void rcu_gp_kthread_wake(void)
|
|
|
{
|
|
|
- if (current == rsp->gp_kthread ||
|
|
|
- !READ_ONCE(rsp->gp_flags) ||
|
|
|
- !rsp->gp_kthread)
|
|
|
+ if (current == rcu_state.gp_kthread ||
|
|
|
+ !READ_ONCE(rcu_state.gp_flags) ||
|
|
|
+ !rcu_state.gp_kthread)
|
|
|
return;
|
|
|
- swake_up_one(&rsp->gp_wq);
|
|
|
+ swake_up_one(&rcu_state.gp_wq);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1716,8 +1569,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
|
|
|
*
|
|
|
* The caller must hold rnp->lock with interrupts disabled.
|
|
|
*/
|
|
|
-static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
- struct rcu_data *rdp)
|
|
|
+static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
|
|
|
{
|
|
|
unsigned long gp_seq_req;
|
|
|
bool ret = false;
|
|
@@ -1738,15 +1590,15 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
* accelerating callback invocation to an earlier grace-period
|
|
|
* number.
|
|
|
*/
|
|
|
- gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
|
|
|
+ gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
|
|
|
if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
|
|
|
ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
|
|
|
|
|
|
/* Trace depending on how much we were able to accelerate. */
|
|
|
if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
|
|
|
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
|
|
|
else
|
|
|
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
|
|
|
return ret;
|
|
|
}
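rcu_seq_snap() above hands back the ->gp_seq value at which a full grace period will have elapsed for callbacks queued now: the grace-period counter lives in the upper bits and a small phase field in the low bits. The stand-alone model below mirrors what those helpers are expected to compute; the two-bit phase width and the helper bodies are assumptions for illustration, not copies of rcu.h.

#include <stdio.h>

#define SEQ_CTR_SHIFT	2				/* assumed phase width */
#define SEQ_STATE_MASK	((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq_start(unsigned long s)		/* GP begins */
{
	return s + 1;
}

static unsigned long seq_end(unsigned long s)		/* GP completes */
{
	return (s | SEQ_STATE_MASK) + 1;
}

/* Earliest sequence value at which a full GP has elapsed for new callbacks. */
static unsigned long seq_snap(unsigned long s)
{
	return (s + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

int main(void)
{
	unsigned long idle = 2UL << SEQ_CTR_SHIFT;	/* two GPs done, idle */
	unsigned long busy = seq_start(idle);		/* third GP in progress */

	printf("snap(idle) = %lu\n", seq_snap(idle));	/* 12: wait for GP #3 */
	printf("snap(busy) = %lu\n", seq_snap(busy));	/* 16: GP #3 may miss us */
	printf("end(busy)  = %lu\n", seq_end(busy));	/* 12: GP #3 completed */
	return 0;
}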
|
|
|
|
|
@@ -1757,25 +1609,24 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
* that a new grace-period request be made, invokes rcu_accelerate_cbs()
|
|
|
* while holding the leaf rcu_node structure's ->lock.
|
|
|
*/
|
|
|
-static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
|
|
|
- struct rcu_node *rnp,
|
|
|
+static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
|
|
|
struct rcu_data *rdp)
|
|
|
{
|
|
|
unsigned long c;
|
|
|
bool needwake;
|
|
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
- c = rcu_seq_snap(&rsp->gp_seq);
|
|
|
+ c = rcu_seq_snap(&rcu_state.gp_seq);
|
|
|
if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
|
|
|
/* Old request still live, so mark recent callbacks. */
|
|
|
(void)rcu_segcblist_accelerate(&rdp->cblist, c);
|
|
|
return;
|
|
|
}
|
|
|
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
|
|
|
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
|
|
|
+ needwake = rcu_accelerate_cbs(rnp, rdp);
|
|
|
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
|
|
|
if (needwake)
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1788,8 +1639,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
|
|
|
*
|
|
|
* The caller must hold rnp->lock with interrupts disabled.
|
|
|
*/
|
|
|
-static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
- struct rcu_data *rdp)
|
|
|
+static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
|
|
|
{
|
|
|
raw_lockdep_assert_held_rcu_node(rnp);
|
|
|
|
|
@@ -1804,7 +1654,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
|
|
|
|
|
|
/* Classify any remaining callbacks. */
|
|
|
- return rcu_accelerate_cbs(rsp, rnp, rdp);
|
|
|
+ return rcu_accelerate_cbs(rnp, rdp);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1813,8 +1663,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
* structure corresponding to the current CPU, and must have irqs disabled.
|
|
|
* Returns true if the grace-period kthread needs to be awakened.
|
|
|
*/
|
|
|
-static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
- struct rcu_data *rdp)
|
|
|
+static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
|
|
|
{
|
|
|
bool ret;
|
|
|
bool need_gp;
|
|
@@ -1827,10 +1676,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
/* Handle the ends of any preceding grace periods first. */
|
|
|
if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
|
|
|
unlikely(READ_ONCE(rdp->gpwrap))) {
|
|
|
- ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
|
|
|
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
|
|
|
+ ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
|
|
|
} else {
|
|
|
- ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
|
|
|
+ ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
|
|
|
}
|
|
|
|
|
|
/* Now handle the beginnings of any new-to-this-CPU grace periods. */
|
|
@@ -1841,10 +1690,9 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
* set up to detect a quiescent state, otherwise don't
|
|
|
* go looking for one.
|
|
|
*/
|
|
|
- trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
|
|
|
need_gp = !!(rnp->qsmask & rdp->grpmask);
|
|
|
rdp->cpu_no_qs.b.norm = need_gp;
|
|
|
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
|
|
|
rdp->core_needs_qs = need_gp;
|
|
|
zero_cpu_stall_ticks(rdp);
|
|
|
}
|
|
@@ -1856,7 +1704,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+static void note_gp_changes(struct rcu_data *rdp)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
bool needwake;
|
|
@@ -1870,16 +1718,16 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
local_irq_restore(flags);
|
|
|
return;
|
|
|
}
|
|
|
- needwake = __note_gp_changes(rsp, rnp, rdp);
|
|
|
+ needwake = __note_gp_changes(rnp, rdp);
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
if (needwake)
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
}
|
|
|
|
|
|
-static void rcu_gp_slow(struct rcu_state *rsp, int delay)
|
|
|
+static void rcu_gp_slow(int delay)
|
|
|
{
|
|
|
if (delay > 0 &&
|
|
|
- !(rcu_seq_ctr(rsp->gp_seq) %
|
|
|
+ !(rcu_seq_ctr(rcu_state.gp_seq) %
|
|
|
(rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
|
|
|
schedule_timeout_uninterruptible(delay);
|
|
|
}
|
|
@@ -1887,24 +1735,24 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
|
|
|
/*
|
|
|
* Initialize a new grace period. Return false if no grace period required.
|
|
|
*/
|
|
|
-static bool rcu_gp_init(struct rcu_state *rsp)
|
|
|
+static bool rcu_gp_init(void)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
unsigned long oldmask;
|
|
|
unsigned long mask;
|
|
|
struct rcu_data *rdp;
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
raw_spin_lock_irq_rcu_node(rnp);
|
|
|
- if (!READ_ONCE(rsp->gp_flags)) {
|
|
|
+ if (!READ_ONCE(rcu_state.gp_flags)) {
|
|
|
/* Spurious wakeup, tell caller to go back to sleep. */
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
return false;
|
|
|
}
|
|
|
- WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
|
|
|
|
|
|
- if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
|
|
|
+ if (WARN_ON_ONCE(rcu_gp_in_progress())) {
|
|
|
/*
|
|
|
* Grace period already in progress, don't start another.
|
|
|
* Not supposed to be able to happen.
|
|
@@ -1914,10 +1762,10 @@ static bool rcu_gp_init(struct rcu_state *rsp)
|
|
|
}
|
|
|
|
|
|
/* Advance to a new grace period and initialize state. */
|
|
|
- record_gp_stall_check_time(rsp);
|
|
|
+ record_gp_stall_check_time();
|
|
|
/* Record GP times before starting GP, hence rcu_seq_start(). */
|
|
|
- rcu_seq_start(&rsp->gp_seq);
|
|
|
- trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
|
|
|
+ rcu_seq_start(&rcu_state.gp_seq);
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
|
|
|
/*
|
|
@@ -1926,15 +1774,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
|
|
|
* for subsequent online CPUs, and that quiescent-state forcing
|
|
|
* will handle subsequent offline CPUs.
|
|
|
*/
|
|
|
- rsp->gp_state = RCU_GP_ONOFF;
|
|
|
- rcu_for_each_leaf_node(rsp, rnp) {
|
|
|
- spin_lock(&rsp->ofl_lock);
|
|
|
+ rcu_state.gp_state = RCU_GP_ONOFF;
|
|
|
+ rcu_for_each_leaf_node(rnp) {
|
|
|
+ raw_spin_lock(&rcu_state.ofl_lock);
|
|
|
raw_spin_lock_irq_rcu_node(rnp);
|
|
|
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
|
|
|
!rnp->wait_blkd_tasks) {
|
|
|
/* Nothing to do on this leaf rcu_node structure. */
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
- spin_unlock(&rsp->ofl_lock);
|
|
|
+ raw_spin_unlock(&rcu_state.ofl_lock);
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -1970,45 +1818,45 @@ static bool rcu_gp_init(struct rcu_state *rsp)
|
|
|
}
|
|
|
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
- spin_unlock(&rsp->ofl_lock);
|
|
|
+ raw_spin_unlock(&rcu_state.ofl_lock);
|
|
|
}
|
|
|
- rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
|
|
|
+ rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
|
|
|
|
|
|
/*
|
|
|
* Set the quiescent-state-needed bits in all the rcu_node
|
|
|
- * structures for all currently online CPUs in breadth-first order,
|
|
|
- * starting from the root rcu_node structure, relying on the layout
|
|
|
- * of the tree within the rsp->node[] array. Note that other CPUs
|
|
|
- * will access only the leaves of the hierarchy, thus seeing that no
|
|
|
- * grace period is in progress, at least until the corresponding
|
|
|
- * leaf node has been initialized.
|
|
|
+ * structures for all currently online CPUs in breadth-first
|
|
|
+ * order, starting from the root rcu_node structure, relying on the
|
|
|
+ * layout of the tree within the rcu_state.node[] array. Note that
|
|
|
+ * other CPUs will access only the leaves of the hierarchy, thus
|
|
|
+ * seeing that no grace period is in progress, at least until the
|
|
|
+ * corresponding leaf node has been initialized.
|
|
|
*
|
|
|
* The grace period cannot complete until the initialization
|
|
|
* process finishes, because this kthread handles both.
|
|
|
*/
|
|
|
- rsp->gp_state = RCU_GP_INIT;
|
|
|
- rcu_for_each_node_breadth_first(rsp, rnp) {
|
|
|
- rcu_gp_slow(rsp, gp_init_delay);
|
|
|
+ rcu_state.gp_state = RCU_GP_INIT;
|
|
|
+ rcu_for_each_node_breadth_first(rnp) {
|
|
|
+ rcu_gp_slow(gp_init_delay);
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
- rdp = this_cpu_ptr(rsp->rda);
|
|
|
- rcu_preempt_check_blocked_tasks(rsp, rnp);
|
|
|
+ rdp = this_cpu_ptr(&rcu_data);
|
|
|
+ rcu_preempt_check_blocked_tasks(rnp);
|
|
|
rnp->qsmask = rnp->qsmaskinit;
|
|
|
- WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
|
|
|
+ WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
|
|
|
if (rnp == rdp->mynode)
|
|
|
- (void)__note_gp_changes(rsp, rnp, rdp);
|
|
|
+ (void)__note_gp_changes(rnp, rdp);
|
|
|
rcu_preempt_boost_start_gp(rnp);
|
|
|
- trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
|
|
|
+ trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
|
|
|
rnp->level, rnp->grplo,
|
|
|
rnp->grphi, rnp->qsmask);
|
|
|
/* Quiescent states for tasks on any now-offline CPUs. */
|
|
|
mask = rnp->qsmask & ~rnp->qsmaskinitnext;
|
|
|
rnp->rcu_gp_init_mask = mask;
|
|
|
if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
|
|
|
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
|
|
else
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
}
|
|
|
|
|
|
return true;
|
|
@@ -2018,12 +1866,12 @@ static bool rcu_gp_init(struct rcu_state *rsp)
|
|
|
* Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
|
|
|
* time.
|
|
|
*/
|
|
|
-static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
|
|
|
+static bool rcu_gp_fqs_check_wake(int *gfp)
|
|
|
{
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
|
|
|
/* Someone like call_rcu() requested a force-quiescent-state scan. */
|
|
|
- *gfp = READ_ONCE(rsp->gp_flags);
|
|
|
+ *gfp = READ_ONCE(rcu_state.gp_flags);
|
|
|
if (*gfp & RCU_GP_FLAG_FQS)
|
|
|
return true;
|
|
|
|
|
@@ -2037,45 +1885,110 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
|
|
|
/*
|
|
|
* Do one round of quiescent-state forcing.
|
|
|
*/
|
|
|
-static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
|
|
|
+static void rcu_gp_fqs(bool first_time)
|
|
|
{
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
- rsp->n_force_qs++;
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
+ rcu_state.n_force_qs++;
|
|
|
if (first_time) {
|
|
|
/* Collect dyntick-idle snapshots. */
|
|
|
- force_qs_rnp(rsp, dyntick_save_progress_counter);
|
|
|
+ force_qs_rnp(dyntick_save_progress_counter);
|
|
|
} else {
|
|
|
/* Handle dyntick-idle and offline CPUs. */
|
|
|
- force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
|
|
|
+ force_qs_rnp(rcu_implicit_dynticks_qs);
|
|
|
}
|
|
|
/* Clear flag to prevent immediate re-entry. */
|
|
|
- if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
|
|
|
+ if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
|
|
|
raw_spin_lock_irq_rcu_node(rnp);
|
|
|
- WRITE_ONCE(rsp->gp_flags,
|
|
|
- READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags,
|
|
|
+ READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Loop doing repeated quiescent-state forcing until the grace period ends.
|
|
|
+ */
|
|
|
+static void rcu_gp_fqs_loop(void)
|
|
|
+{
|
|
|
+ bool first_gp_fqs;
|
|
|
+ int gf;
|
|
|
+ unsigned long j;
|
|
|
+ int ret;
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
+
|
|
|
+ first_gp_fqs = true;
|
|
|
+ j = READ_ONCE(jiffies_till_first_fqs);
|
|
|
+ ret = 0;
|
|
|
+ for (;;) {
|
|
|
+ if (!ret) {
|
|
|
+ rcu_state.jiffies_force_qs = jiffies + j;
|
|
|
+ WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
|
|
|
+ jiffies + 3 * j);
|
|
|
+ }
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
+ TPS("fqswait"));
|
|
|
+ rcu_state.gp_state = RCU_GP_WAIT_FQS;
|
|
|
+ ret = swait_event_idle_timeout_exclusive(
|
|
|
+ rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
|
|
|
+ rcu_state.gp_state = RCU_GP_DOING_FQS;
|
|
|
+ /* Locking provides needed memory barriers. */
|
|
|
+ /* If grace period done, leave loop. */
|
|
|
+ if (!READ_ONCE(rnp->qsmask) &&
|
|
|
+ !rcu_preempt_blocked_readers_cgp(rnp))
|
|
|
+ break;
|
|
|
+ /* If time for quiescent-state forcing, do it. */
|
|
|
+ if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
|
|
|
+ (gf & RCU_GP_FLAG_FQS)) {
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
+ TPS("fqsstart"));
|
|
|
+ rcu_gp_fqs(first_gp_fqs);
|
|
|
+ first_gp_fqs = false;
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
+ TPS("fqsend"));
|
|
|
+ cond_resched_tasks_rcu_qs();
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
+ ret = 0; /* Force full wait till next FQS. */
|
|
|
+ j = READ_ONCE(jiffies_till_next_fqs);
|
|
|
+ } else {
|
|
|
+ /* Deal with stray signal. */
|
|
|
+ cond_resched_tasks_rcu_qs();
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
+ WARN_ON(signal_pending(current));
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
+ TPS("fqswaitsig"));
|
|
|
+ ret = 1; /* Keep old FQS timing. */
|
|
|
+ j = jiffies;
|
|
|
+ if (time_after(jiffies, rcu_state.jiffies_force_qs))
|
|
|
+ j = 1;
|
|
|
+ else
|
|
|
+ j = rcu_state.jiffies_force_qs - j;
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Clean up after the old grace period.
|
|
|
*/
|
|
|
-static void rcu_gp_cleanup(struct rcu_state *rsp)
|
|
|
+static void rcu_gp_cleanup(void)
|
|
|
{
|
|
|
unsigned long gp_duration;
|
|
|
bool needgp = false;
|
|
|
unsigned long new_gp_seq;
|
|
|
struct rcu_data *rdp;
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
struct swait_queue_head *sq;
|
|
|
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
raw_spin_lock_irq_rcu_node(rnp);
|
|
|
- gp_duration = jiffies - rsp->gp_start;
|
|
|
- if (gp_duration > rsp->gp_max)
|
|
|
- rsp->gp_max = gp_duration;
|
|
|
+ gp_duration = jiffies - rcu_state.gp_start;
|
|
|
+ if (gp_duration > rcu_state.gp_max)
|
|
|
+ rcu_state.gp_max = gp_duration;
|
|
|
|
|
|
/*
|
|
|
* We know the grace period is complete, but to everyone else
|
|
@@ -2096,48 +2009,50 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
|
|
|
* the rcu_node structures before the beginning of the next grace
|
|
|
* period is recorded in any of the rcu_node structures.
|
|
|
*/
|
|
|
- new_gp_seq = rsp->gp_seq;
|
|
|
+ new_gp_seq = rcu_state.gp_seq;
|
|
|
rcu_seq_end(&new_gp_seq);
|
|
|
- rcu_for_each_node_breadth_first(rsp, rnp) {
|
|
|
+ rcu_for_each_node_breadth_first(rnp) {
|
|
|
raw_spin_lock_irq_rcu_node(rnp);
|
|
|
if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
|
|
|
- dump_blkd_tasks(rsp, rnp, 10);
|
|
|
+ dump_blkd_tasks(rnp, 10);
|
|
|
WARN_ON_ONCE(rnp->qsmask);
|
|
|
WRITE_ONCE(rnp->gp_seq, new_gp_seq);
|
|
|
- rdp = this_cpu_ptr(rsp->rda);
|
|
|
+ rdp = this_cpu_ptr(&rcu_data);
|
|
|
if (rnp == rdp->mynode)
|
|
|
- needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
|
|
|
+ needgp = __note_gp_changes(rnp, rdp) || needgp;
|
|
|
/* smp_mb() provided by prior unlock-lock pair. */
|
|
|
- needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
|
|
|
+ needgp = rcu_future_gp_cleanup(rnp) || needgp;
|
|
|
sq = rcu_nocb_gp_get(rnp);
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
rcu_nocb_gp_cleanup(sq);
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
- rcu_gp_slow(rsp, gp_cleanup_delay);
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
+ rcu_gp_slow(gp_cleanup_delay);
|
|
|
}
|
|
|
- rnp = rcu_get_root(rsp);
|
|
|
- raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
|
|
|
+ rnp = rcu_get_root();
|
|
|
+ raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
|
|
|
|
|
|
/* Declare grace period done. */
|
|
|
- rcu_seq_end(&rsp->gp_seq);
|
|
|
- trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
|
|
|
- rsp->gp_state = RCU_GP_IDLE;
|
|
|
+ rcu_seq_end(&rcu_state.gp_seq);
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
|
|
|
+ rcu_state.gp_state = RCU_GP_IDLE;
|
|
|
/* Check for GP requests since above loop. */
|
|
|
- rdp = this_cpu_ptr(rsp->rda);
|
|
|
+ rdp = this_cpu_ptr(&rcu_data);
|
|
|
if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
|
|
|
trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
|
|
|
TPS("CleanupMore"));
|
|
|
needgp = true;
|
|
|
}
|
|
|
/* Advance CBs to reduce false positives below. */
|
|
|
- if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
|
|
|
- WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
|
|
|
- rsp->gp_req_activity = jiffies;
|
|
|
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
|
|
|
+ if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
|
|
|
+ rcu_state.gp_req_activity = jiffies;
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
TPS("newreq"));
|
|
|
} else {
|
|
|
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags,
|
|
|
+ rcu_state.gp_flags & RCU_GP_FLAG_INIT);
|
|
|
}
|
|
|
raw_spin_unlock_irq_rcu_node(rnp);
|
|
|
}
|
|
@@ -2145,116 +2060,60 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
|
|
|
/*
|
|
|
* Body of kthread that handles grace periods.
|
|
|
*/
|
|
|
-static int __noreturn rcu_gp_kthread(void *arg)
|
|
|
+static int __noreturn rcu_gp_kthread(void *unused)
|
|
|
{
|
|
|
- bool first_gp_fqs;
|
|
|
- int gf;
|
|
|
- unsigned long j;
|
|
|
- int ret;
|
|
|
- struct rcu_state *rsp = arg;
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
-
|
|
|
rcu_bind_gp_kthread();
|
|
|
for (;;) {
|
|
|
|
|
|
/* Handle grace-period start. */
|
|
|
for (;;) {
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
TPS("reqwait"));
|
|
|
- rsp->gp_state = RCU_GP_WAIT_GPS;
|
|
|
- swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
|
|
|
- RCU_GP_FLAG_INIT);
|
|
|
- rsp->gp_state = RCU_GP_DONE_GPS;
|
|
|
+ rcu_state.gp_state = RCU_GP_WAIT_GPS;
|
|
|
+ swait_event_idle_exclusive(rcu_state.gp_wq,
|
|
|
+ READ_ONCE(rcu_state.gp_flags) &
|
|
|
+ RCU_GP_FLAG_INIT);
|
|
|
+ rcu_state.gp_state = RCU_GP_DONE_GPS;
|
|
|
/* Locking provides needed memory barrier. */
|
|
|
- if (rcu_gp_init(rsp))
|
|
|
+ if (rcu_gp_init())
|
|
|
break;
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
|
|
|
WARN_ON(signal_pending(current));
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
+ trace_rcu_grace_period(rcu_state.name,
|
|
|
+ READ_ONCE(rcu_state.gp_seq),
|
|
|
TPS("reqwaitsig"));
|
|
|
}
|
|
|
|
|
|
/* Handle quiescent-state forcing. */
|
|
|
- first_gp_fqs = true;
|
|
|
- j = jiffies_till_first_fqs;
|
|
|
- ret = 0;
|
|
|
- for (;;) {
|
|
|
- if (!ret) {
|
|
|
- rsp->jiffies_force_qs = jiffies + j;
|
|
|
- WRITE_ONCE(rsp->jiffies_kick_kthreads,
|
|
|
- jiffies + 3 * j);
|
|
|
- }
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
- TPS("fqswait"));
|
|
|
- rsp->gp_state = RCU_GP_WAIT_FQS;
|
|
|
- ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
|
|
|
- rcu_gp_fqs_check_wake(rsp, &gf), j);
|
|
|
- rsp->gp_state = RCU_GP_DOING_FQS;
|
|
|
- /* Locking provides needed memory barriers. */
|
|
|
- /* If grace period done, leave loop. */
|
|
|
- if (!READ_ONCE(rnp->qsmask) &&
|
|
|
- !rcu_preempt_blocked_readers_cgp(rnp))
|
|
|
- break;
|
|
|
- /* If time for quiescent-state forcing, do it. */
|
|
|
- if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
|
|
|
- (gf & RCU_GP_FLAG_FQS)) {
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
- TPS("fqsstart"));
|
|
|
- rcu_gp_fqs(rsp, first_gp_fqs);
|
|
|
- first_gp_fqs = false;
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
- TPS("fqsend"));
|
|
|
- cond_resched_tasks_rcu_qs();
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
- ret = 0; /* Force full wait till next FQS. */
|
|
|
- j = jiffies_till_next_fqs;
|
|
|
- } else {
|
|
|
- /* Deal with stray signal. */
|
|
|
- cond_resched_tasks_rcu_qs();
|
|
|
- WRITE_ONCE(rsp->gp_activity, jiffies);
|
|
|
- WARN_ON(signal_pending(current));
|
|
|
- trace_rcu_grace_period(rsp->name,
|
|
|
- READ_ONCE(rsp->gp_seq),
|
|
|
- TPS("fqswaitsig"));
|
|
|
- ret = 1; /* Keep old FQS timing. */
|
|
|
- j = jiffies;
|
|
|
- if (time_after(jiffies, rsp->jiffies_force_qs))
|
|
|
- j = 1;
|
|
|
- else
|
|
|
- j = rsp->jiffies_force_qs - j;
|
|
|
- }
|
|
|
- }
|
|
|
+ rcu_gp_fqs_loop();
|
|
|
|
|
|
/* Handle grace-period end. */
|
|
|
- rsp->gp_state = RCU_GP_CLEANUP;
|
|
|
- rcu_gp_cleanup(rsp);
|
|
|
- rsp->gp_state = RCU_GP_CLEANED;
|
|
|
+ rcu_state.gp_state = RCU_GP_CLEANUP;
|
|
|
+ rcu_gp_cleanup();
|
|
|
+ rcu_state.gp_state = RCU_GP_CLEANED;
|
|
|
}
|
|
|
}
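This kthread is the sole consumer of rcu_state.gp_wq; it sits in swait_event_idle_exclusive() until rcu_gp_kthread_wake() prods it. For reference, a grace-period-style kthread is brought up through the generic kthread API roughly as follows; this is a hedged sketch loosely modelled on the spawn path, with locking and error handling trimmed.

static int __init example_spawn_gp_kthread(void)
{
	struct task_struct *t;

	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ON(IS_ERR(t)))
		return PTR_ERR(t);

	rcu_state.gp_kthread = t;	/* publish before the first wakeup */
	wake_up_process(t);		/* let it reach its wait loop */
	return 0;
}
early_initcall(example_spawn_gp_kthread);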
|
|
|
|
|
|
/*
|
|
|
- * Report a full set of quiescent states to the specified rcu_state data
|
|
|
- * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
|
|
|
- * kthread if another grace period is required. Whether we wake
|
|
|
- * the grace-period kthread or it awakens itself for the next round
|
|
|
- * of quiescent-state forcing, that kthread will clean up after the
|
|
|
- * just-completed grace period. Note that the caller must hold rnp->lock,
|
|
|
- * which is released before return.
|
|
|
+ * Report a full set of quiescent states to the rcu_state data structure.
|
|
|
+ * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
|
|
|
+ * another grace period is required. Whether we wake the grace-period
|
|
|
+ * kthread or it awakens itself for the next round of quiescent-state
|
|
|
+ * forcing, that kthread will clean up after the just-completed grace
|
|
|
+ * period. Note that the caller must hold rnp->lock, which is released
|
|
|
+ * before return.
|
|
|
*/
|
|
|
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
|
|
|
- __releases(rcu_get_root(rsp)->lock)
|
|
|
+static void rcu_report_qs_rsp(unsigned long flags)
|
|
|
+ __releases(rcu_get_root()->lock)
|
|
|
{
|
|
|
- raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
|
|
|
- WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
|
|
|
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
|
|
|
- raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ raw_lockdep_assert_held_rcu_node(rcu_get_root());
|
|
|
+ WARN_ON_ONCE(!rcu_gp_in_progress());
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags,
|
|
|
+ READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
|
|
|
+ raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2271,9 +2130,8 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
|
|
|
* disabled. This allows propagating quiescent state due to resumed tasks
|
|
|
* during grace-period initialization.
|
|
|
*/
|
|
|
-static void
|
|
|
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
|
|
|
- struct rcu_node *rnp, unsigned long gps, unsigned long flags)
|
|
|
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
|
|
|
+ unsigned long gps, unsigned long flags)
|
|
|
__releases(rnp->lock)
|
|
|
{
|
|
|
unsigned long oldmask = 0;
|
|
@@ -2296,7 +2154,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
|
|
|
WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
|
|
|
rcu_preempt_blocked_readers_cgp(rnp));
|
|
|
rnp->qsmask &= ~mask;
|
|
|
- trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
|
|
|
+ trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
|
|
|
mask, rnp->qsmask, rnp->level,
|
|
|
rnp->grplo, rnp->grphi,
|
|
|
!!rnp->gp_tasks);
|
|
@@ -2326,19 +2184,18 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
|
|
|
* state for this grace period. Invoke rcu_report_qs_rsp()
|
|
|
* to clean up and start the next grace period if one is needed.
|
|
|
*/
|
|
|
- rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
|
|
|
+ rcu_report_qs_rsp(flags); /* releases rnp->lock. */
|
|
|
}
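rcu_report_qs_rnp() clears this node's bit in ->qsmask and, when a level goes idle, climbs to the parent and repeats, stopping as soon as some level still has outstanding bits. A toy user-space model of that upward walk is sketched below; the types and names are invented and all locking is omitted.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long qsmask;	/* one bit per child still owing a QS */
	unsigned long grpmask;	/* this node's bit in its parent's qsmask */
};

/* Returns true when the last bit clears at the root: the GP may now end. */
static bool report_qs(struct node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask)
			return false;	/* siblings still pending at this level */
		if (!rnp->parent)
			return true;	/* root went idle */
		mask = rnp->grpmask;	/* propagate one level up */
		rnp = rnp->parent;
	}
}

int main(void)
{
	struct node root  = { .qsmask = 0x3 };
	struct node leaf0 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x1 };
	struct node leaf1 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x2 };

	printf("%d\n", report_qs(&leaf0, 0x1));	/* 0: leaf1 still pending */
	printf("%d\n", report_qs(&leaf1, 0x1));	/* 1: whole tree quiesced */
	return 0;
}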
|
|
|
|
|
|
/*
|
|
|
* Record a quiescent state for all tasks that were previously queued
|
|
|
* on the specified rcu_node structure and that were blocking the current
|
|
|
- * RCU grace period. The caller must hold the specified rnp->lock with
|
|
|
+ * RCU grace period. The caller must hold the corresponding rnp->lock with
|
|
|
* irqs disabled, and this lock is released upon return, but irqs remain
|
|
|
* disabled.
|
|
|
*/
|
|
|
static void __maybe_unused
|
|
|
-rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
|
|
|
- struct rcu_node *rnp, unsigned long flags)
|
|
|
+rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
|
|
|
__releases(rnp->lock)
|
|
|
{
|
|
|
unsigned long gps;
|
|
@@ -2346,8 +2203,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
|
|
|
struct rcu_node *rnp_p;
|
|
|
|
|
|
raw_lockdep_assert_held_rcu_node(rnp);
|
|
|
- if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
|
|
|
- WARN_ON_ONCE(rsp != rcu_state_p) ||
|
|
|
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
|
|
|
WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
|
|
|
rnp->qsmask != 0) {
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
@@ -2361,7 +2217,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
|
|
|
* Only one rcu_node structure in the tree, so don't
|
|
|
* try to report up to its nonexistent parent!
|
|
|
*/
|
|
|
- rcu_report_qs_rsp(rsp, flags);
|
|
|
+ rcu_report_qs_rsp(flags);
|
|
|
return;
|
|
|
}
|
|
|
|
|
@@ -2370,7 +2226,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
|
|
|
mask = rnp->grpmask;
|
|
|
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
|
|
|
raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
|
|
|
+ rcu_report_qs_rnp(mask, rnp_p, gps, flags);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2378,7 +2234,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
|
|
|
* structure. This must be called from the specified CPU.
|
|
|
*/
|
|
|
static void
|
|
|
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
unsigned long mask;
|
|
@@ -2397,7 +2253,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
* within the current grace period.
|
|
|
*/
|
|
|
rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
|
|
|
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
return;
|
|
|
}
|
|
@@ -2411,12 +2266,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
* This GP can't end until cpu checks in, so all of our
|
|
|
* callbacks can be processed during the next GP.
|
|
|
*/
|
|
|
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
|
|
|
+ needwake = rcu_accelerate_cbs(rnp, rdp);
|
|
|
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
|
|
|
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
|
|
/* ^^^ Released rnp->lock */
|
|
|
if (needwake)
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -2427,10 +2282,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
* quiescent state for this grace period, and record that fact if so.
|
|
|
*/
|
|
|
static void
|
|
|
-rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+rcu_check_quiescent_state(struct rcu_data *rdp)
|
|
|
{
|
|
|
/* Check for grace-period ends and beginnings. */
|
|
|
- note_gp_changes(rsp, rdp);
|
|
|
+ note_gp_changes(rdp);
|
|
|
|
|
|
/*
|
|
|
* Does this CPU still need to do its part for current grace period?
|
|
@@ -2450,24 +2305,26 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
* Tell RCU we are done (but rcu_report_qs_rdp() will be the
|
|
|
* judge of that).
|
|
|
*/
|
|
|
- rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
|
|
|
+ rcu_report_qs_rdp(rdp->cpu, rdp);
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Trace the fact that this CPU is going offline.
|
|
|
+ * Near the end of the offline process. Trace the fact that this CPU
|
|
|
+ * is going offline.
|
|
|
*/
|
|
|
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
|
|
|
+int rcutree_dying_cpu(unsigned int cpu)
|
|
|
{
|
|
|
RCU_TRACE(bool blkd;)
|
|
|
- RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
|
|
|
+ RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
|
|
|
RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
|
|
|
|
|
|
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
|
|
|
RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
|
|
|
- trace_rcu_grace_period(rsp->name, rnp->gp_seq,
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
|
|
|
blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2521,23 +2378,26 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
|
|
|
* There can only be one CPU hotplug operation at a time, so no need for
|
|
|
* explicit locking.
|
|
|
*/
|
|
|
-static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
|
|
|
+int rcutree_dead_cpu(unsigned int cpu)
|
|
|
{
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
|
|
|
|
|
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
|
|
|
/* Adjust any no-longer-needed kthreads. */
|
|
|
rcu_boost_kthread_setaffinity(rnp, -1);
|
|
|
+ /* Do any needed no-CB deferred wakeups from this CPU. */
|
|
|
+ do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
|
|
|
+ return 0;
|
|
|
}
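rcutree_dying_cpu() and rcutree_dead_cpu() now have the int (*)(unsigned int cpu) shape used by the CPU-hotplug state machine; RCU's hooks are wired into the hotplug core's built-in state table rather than registered at run time. For comparison, a dynamically registered client hooks the same kind of callbacks roughly like this (illustrative only, not part of this patch):

static int example_online(unsigned int cpu)
{
	pr_info("example: cpu %u is up\n", cpu);
	return 0;
}

static int example_offline(unsigned int cpu)
{
	pr_info("example: cpu %u is going away\n", cpu);
	return 0;
}

static int __init example_hotplug_init(void)
{
	/* Dynamic slot in the online range; callbacks run as CPUs come and go. */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				    example_online, example_offline);

	return ret < 0 ? ret : 0;
}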
|
|
|
|
|
|
/*
|
|
|
* Invoke any RCU callbacks that have made it to the end of their grace
|
|
|
 * period. Throttle as specified by rdp->blimit.
|
|
|
*/
|
|
|
-static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+static void rcu_do_batch(struct rcu_data *rdp)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct rcu_head *rhp;
|
|
@@ -2546,10 +2406,10 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
|
|
|
/* If no callbacks are ready, just return. */
|
|
|
if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
|
|
|
- trace_rcu_batch_start(rsp->name,
|
|
|
+ trace_rcu_batch_start(rcu_state.name,
|
|
|
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
|
|
|
rcu_segcblist_n_cbs(&rdp->cblist), 0);
|
|
|
- trace_rcu_batch_end(rsp->name, 0,
|
|
|
+ trace_rcu_batch_end(rcu_state.name, 0,
|
|
|
!rcu_segcblist_empty(&rdp->cblist),
|
|
|
need_resched(), is_idle_task(current),
|
|
|
rcu_is_callbacks_kthread());
|
|
@@ -2564,7 +2424,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
local_irq_save(flags);
|
|
|
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
|
|
|
bl = rdp->blimit;
|
|
|
- trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
|
|
|
+ trace_rcu_batch_start(rcu_state.name,
|
|
|
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist),
|
|
|
rcu_segcblist_n_cbs(&rdp->cblist), bl);
|
|
|
rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
|
|
|
local_irq_restore(flags);
|
|
@@ -2573,7 +2434,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
rhp = rcu_cblist_dequeue(&rcl);
|
|
|
for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
|
|
|
debug_rcu_head_unqueue(rhp);
|
|
|
- if (__rcu_reclaim(rsp->name, rhp))
|
|
|
+ if (__rcu_reclaim(rcu_state.name, rhp))
|
|
|
rcu_cblist_dequeued_lazy(&rcl);
|
|
|
/*
|
|
|
* Stop only if limit reached and CPU has something to do.
|
|
@@ -2587,7 +2448,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
count = -rcl.len;
|
|
|
- trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
|
|
|
+ trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
|
|
|
is_idle_task(current), rcu_is_callbacks_kthread());
|
|
|
|
|
|
/* Update counts and requeue any remaining callbacks. */
|
|
@@ -2603,7 +2464,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
|
|
|
if (count == 0 && rdp->qlen_last_fqs_check != 0) {
|
|
|
rdp->qlen_last_fqs_check = 0;
|
|
|
- rdp->n_force_qs_snap = rsp->n_force_qs;
|
|
|
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
|
|
|
} else if (count < rdp->qlen_last_fqs_check - qhimark)
|
|
|
rdp->qlen_last_fqs_check = count;
|
|
|
|
|
@@ -2631,37 +2492,17 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
void rcu_check_callbacks(int user)
|
|
|
{
|
|
|
trace_rcu_utilization(TPS("Start scheduler-tick"));
|
|
|
- increment_cpu_stall_ticks();
|
|
|
- if (user || rcu_is_cpu_rrupt_from_idle()) {
|
|
|
-
|
|
|
- /*
|
|
|
- * Get here if this CPU took its interrupt from user
|
|
|
- * mode or from the idle loop, and if this is not a
|
|
|
- * nested interrupt. In this case, the CPU is in
|
|
|
- * a quiescent state, so note it.
|
|
|
- *
|
|
|
- * No memory barrier is required here because both
|
|
|
- * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
|
|
|
- * variables that other CPUs neither access nor modify,
|
|
|
- * at least not while the corresponding CPU is online.
|
|
|
- */
|
|
|
-
|
|
|
- rcu_sched_qs();
|
|
|
- rcu_bh_qs();
|
|
|
- rcu_note_voluntary_context_switch(current);
|
|
|
-
|
|
|
- } else if (!in_softirq()) {
|
|
|
-
|
|
|
- /*
|
|
|
- * Get here if this CPU did not take its interrupt from
|
|
|
- * softirq, in other words, if it is not interrupting
|
|
|
- * a rcu_bh read-side critical section. This is an _bh
|
|
|
- * critical section, so note it.
|
|
|
- */
|
|
|
-
|
|
|
- rcu_bh_qs();
|
|
|
+ raw_cpu_inc(rcu_data.ticks_this_gp);
|
|
|
+ /* The load-acquire pairs with the store-release setting to true. */
|
|
|
+ if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
|
|
|
+ /* Idle and userspace execution already are quiescent states. */
|
|
|
+ if (!rcu_is_cpu_rrupt_from_idle() && !user) {
|
|
|
+ set_tsk_need_resched(current);
|
|
|
+ set_preempt_need_resched();
|
|
|
+ }
|
|
|
+ __this_cpu_write(rcu_data.rcu_urgent_qs, false);
|
|
|
}
|
|
|
- rcu_preempt_check_callbacks();
|
|
|
+ rcu_flavor_check_callbacks(user);
|
|
|
if (rcu_pending())
|
|
|
invoke_rcu_core();
|
|
|
|
|
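The load-acquire noted above pairs with a store-release by whoever set rcu_urgent_qs, so the reader also sees everything the setter wrote beforehand. A minimal user-space analogue of that pairing with C11 atomics follows; the names are invented and only the ordering contract is being illustrated.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned long payload;	/* written before the flag is set */
static _Atomic int urgent;	/* the hint itself */

static void *setter(void *arg)
{
	payload = 42;	/* plain store ... */
	atomic_store_explicit(&urgent, 1, memory_order_release);	/* ... published */
	return NULL;
}

static void *poller(void *arg)
{
	while (!atomic_load_explicit(&urgent, memory_order_acquire))
		;	/* spin until the flag is observed */
	printf("payload = %lu\n", payload);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, setter, NULL);
	pthread_create(&b, NULL, poller, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}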
@@ -2675,20 +2516,19 @@ void rcu_check_callbacks(int user)
|
|
|
*
|
|
|
* The caller must have suppressed start of new grace periods.
|
|
|
*/
|
|
|
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
|
|
|
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
|
|
|
{
|
|
|
int cpu;
|
|
|
unsigned long flags;
|
|
|
unsigned long mask;
|
|
|
struct rcu_node *rnp;
|
|
|
|
|
|
- rcu_for_each_leaf_node(rsp, rnp) {
|
|
|
+ rcu_for_each_leaf_node(rnp) {
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
mask = 0;
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
if (rnp->qsmask == 0) {
|
|
|
- if (rcu_state_p == &rcu_sched_state ||
|
|
|
- rsp != rcu_state_p ||
|
|
|
+ if (!IS_ENABLED(CONFIG_PREEMPT) ||
|
|
|
rcu_preempt_blocked_readers_cgp(rnp)) {
|
|
|
/*
|
|
|
* No point in scanning bits because they
|
|
@@ -2705,13 +2545,13 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
|
|
|
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
|
|
unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
|
|
|
if ((rnp->qsmask & bit) != 0) {
|
|
|
- if (f(per_cpu_ptr(rsp->rda, cpu)))
|
|
|
+ if (f(per_cpu_ptr(&rcu_data, cpu)))
|
|
|
mask |= bit;
|
|
|
}
|
|
|
}
|
|
|
if (mask != 0) {
|
|
|
/* Idle/offline CPUs, report (releases rnp->lock). */
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
|
|
|
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
|
|
} else {
|
|
|
/* Nothing to do here, so just drop the lock. */
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
@@ -2723,7 +2563,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
|
|
|
* Force quiescent states on reluctant CPUs, and also detect which
|
|
|
* CPUs are in dyntick-idle mode.
|
|
|
*/
|
|
|
-static void force_quiescent_state(struct rcu_state *rsp)
|
|
|
+static void force_quiescent_state(void)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
bool ret;
|
|
@@ -2731,9 +2571,9 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
|
|
struct rcu_node *rnp_old = NULL;
|
|
|
|
|
|
/* Funnel through hierarchy to reduce memory contention. */
|
|
|
- rnp = __this_cpu_read(rsp->rda->mynode);
|
|
|
+ rnp = __this_cpu_read(rcu_data.mynode);
|
|
|
for (; rnp != NULL; rnp = rnp->parent) {
|
|
|
- ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
|
|
|
+ ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
|
|
|
!raw_spin_trylock(&rnp->fqslock);
|
|
|
if (rnp_old != NULL)
|
|
|
raw_spin_unlock(&rnp_old->fqslock);
|
|
@@ -2741,18 +2581,19 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
|
|
return;
|
|
|
rnp_old = rnp;
|
|
|
}
|
|
|
- /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
|
|
|
+ /* rnp_old == rcu_get_root(), rnp == NULL. */
|
|
|
|
|
|
/* Reached the root of the rcu_node tree, acquire lock. */
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
|
|
|
raw_spin_unlock(&rnp_old->fqslock);
|
|
|
- if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
|
|
|
+ if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
|
|
|
return; /* Someone beat us to it. */
|
|
|
}
|
|
|
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
|
|
|
+ WRITE_ONCE(rcu_state.gp_flags,
|
|
|
+ READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
}
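force_quiescent_state() funnels through the rcu_node tree: each level's fqslock is taken with a trylock, the previous level's lock is dropped, and the climb is abandoned as soon as the FQS request is already pending, so at most one contender per subtree keeps going. A compact user-space model of that funnel follows; it is simplified (no interrupts, no separate root node lock, mutexes assumed initialized) and the names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fnode {
	struct fnode *parent;
	pthread_mutex_t fqslock;
};

static _Atomic int fqs_requested;

/* Returns true if this caller is the one that posted the request. */
static bool funnel_request_fqs(struct fnode *leaf)
{
	struct fnode *rnp, *rnp_old = NULL;

	for (rnp = leaf; rnp; rnp = rnp->parent) {
		bool lost = atomic_load(&fqs_requested) ||
			    pthread_mutex_trylock(&rnp->fqslock) != 0;

		if (rnp_old)
			pthread_mutex_unlock(&rnp_old->fqslock);
		if (lost)
			return false;	/* someone else will handle it */
		rnp_old = rnp;		/* we now hold this level's lock */
	}
	/* rnp_old is the root: post the request, letting later callers bail. */
	atomic_store(&fqs_requested, 1);
	pthread_mutex_unlock(&rnp_old->fqslock);
	return true;
}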
|
|
|
|
|
|
/*
|
|
@@ -2760,30 +2601,29 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
|
|
* RCU to come out of its idle mode.
|
|
|
*/
|
|
|
static void
|
|
|
-rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
- struct rcu_data *rdp)
|
|
|
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
|
|
|
{
|
|
|
const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
|
|
|
unsigned long flags;
|
|
|
unsigned long j;
|
|
|
- struct rcu_node *rnp_root = rcu_get_root(rsp);
|
|
|
+ struct rcu_node *rnp_root = rcu_get_root();
|
|
|
static atomic_t warned = ATOMIC_INIT(0);
|
|
|
|
|
|
- if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
|
|
|
+ if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
|
|
|
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
|
|
|
return;
|
|
|
j = jiffies; /* Expensive access, and in common case don't get here. */
|
|
|
- if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
|
|
|
- time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
|
|
|
+ if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
|
|
|
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
|
|
|
atomic_read(&warned))
|
|
|
return;
|
|
|
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
j = jiffies;
|
|
|
- if (rcu_gp_in_progress(rsp) ||
|
|
|
+ if (rcu_gp_in_progress() ||
|
|
|
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
|
|
|
- time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
|
|
|
- time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
|
|
|
+ time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
|
|
|
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
|
|
|
atomic_read(&warned)) {
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
return;
|
|
@@ -2793,21 +2633,21 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
if (rnp_root != rnp)
|
|
|
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
|
|
|
j = jiffies;
|
|
|
- if (rcu_gp_in_progress(rsp) ||
|
|
|
+ if (rcu_gp_in_progress() ||
|
|
|
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
|
|
|
- time_before(j, rsp->gp_req_activity + gpssdelay) ||
|
|
|
- time_before(j, rsp->gp_activity + gpssdelay) ||
|
|
|
+ time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
|
|
|
+ time_before(j, rcu_state.gp_activity + gpssdelay) ||
|
|
|
atomic_xchg(&warned, 1)) {
|
|
|
raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
return;
|
|
|
}
|
|
|
pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
|
|
|
- __func__, (long)READ_ONCE(rsp->gp_seq),
|
|
|
+ __func__, (long)READ_ONCE(rcu_state.gp_seq),
|
|
|
(long)READ_ONCE(rnp_root->gp_seq_needed),
|
|
|
- j - rsp->gp_req_activity, j - rsp->gp_activity,
|
|
|
- rsp->gp_flags, rsp->gp_state, rsp->name,
|
|
|
- rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
|
|
|
+ j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
|
|
|
+ rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
|
|
|
+ rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
|
|
|
WARN_ON(1);
|
|
|
if (rnp_root != rnp)
|
|
|
raw_spin_unlock_rcu_node(rnp_root);
|
|
@@ -2815,69 +2655,65 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * This does the RCU core processing work for the specified rcu_state
|
|
|
- * and rcu_data structures. This may be called only from the CPU to
|
|
|
- * whom the rdp belongs.
|
|
|
+ * This does the RCU core processing work for the specified rcu_data
|
|
|
+ * structures. This may be called only from the CPU to whom the rdp
|
|
|
+ * belongs.
|
|
|
*/
|
|
|
-static void
|
|
|
-__rcu_process_callbacks(struct rcu_state *rsp)
|
|
|
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
- struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
|
|
|
+ struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
|
|
|
struct rcu_node *rnp = rdp->mynode;
|
|
|
|
|
|
+ if (cpu_is_offline(smp_processor_id()))
|
|
|
+ return;
|
|
|
+ trace_rcu_utilization(TPS("Start RCU core"));
|
|
|
WARN_ON_ONCE(!rdp->beenonline);
|
|
|
|
|
|
+ /* Report any deferred quiescent states if preemption enabled. */
|
|
|
+ if (!(preempt_count() & PREEMPT_MASK)) {
|
|
|
+ rcu_preempt_deferred_qs(current);
|
|
|
+ } else if (rcu_preempt_need_deferred_qs(current)) {
|
|
|
+ set_tsk_need_resched(current);
|
|
|
+ set_preempt_need_resched();
|
|
|
+ }
|
|
|
+
|
|
|
/* Update RCU state based on any recent quiescent states. */
|
|
|
- rcu_check_quiescent_state(rsp, rdp);
|
|
|
+ rcu_check_quiescent_state(rdp);
|
|
|
|
|
|
/* No grace period and unregistered callbacks? */
|
|
|
- if (!rcu_gp_in_progress(rsp) &&
|
|
|
+ if (!rcu_gp_in_progress() &&
|
|
|
rcu_segcblist_is_enabled(&rdp->cblist)) {
|
|
|
local_irq_save(flags);
|
|
|
if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
|
|
|
- rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
|
|
|
+ rcu_accelerate_cbs_unlocked(rnp, rdp);
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
|
|
|
- rcu_check_gp_start_stall(rsp, rnp, rdp);
|
|
|
+ rcu_check_gp_start_stall(rnp, rdp);
|
|
|
|
|
|
/* If there are callbacks ready, invoke them. */
|
|
|
if (rcu_segcblist_ready_cbs(&rdp->cblist))
|
|
|
- invoke_rcu_callbacks(rsp, rdp);
|
|
|
+ invoke_rcu_callbacks(rdp);
|
|
|
|
|
|
/* Do any needed deferred wakeups of rcuo kthreads. */
|
|
|
do_nocb_deferred_wakeup(rdp);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Do RCU core processing for the current CPU.
|
|
|
- */
|
|
|
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- if (cpu_is_offline(smp_processor_id()))
|
|
|
- return;
|
|
|
- trace_rcu_utilization(TPS("Start RCU core"));
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- __rcu_process_callbacks(rsp);
|
|
|
trace_rcu_utilization(TPS("End RCU core"));
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Schedule RCU callback invocation. If the specified type of RCU
|
|
|
- * does not support RCU priority boosting, just do a direct call,
|
|
|
- * otherwise wake up the per-CPU kernel kthread. Note that because we
|
|
|
- * are running on the current CPU with softirqs disabled, the
|
|
|
- * rcu_cpu_kthread_task cannot disappear out from under us.
|
|
|
+ * Schedule RCU callback invocation. If the running implementation of RCU
|
|
|
+ * does not support RCU priority boosting, just do a direct call, otherwise
|
|
|
+ * wake up the per-CPU kernel kthread. Note that because we are running
|
|
|
+ * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
|
|
|
+ * cannot disappear out from under us.
|
|
|
*/
|
|
|
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+static void invoke_rcu_callbacks(struct rcu_data *rdp)
|
|
|
{
|
|
|
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
|
|
|
return;
|
|
|
- if (likely(!rsp->boost)) {
|
|
|
- rcu_do_batch(rsp, rdp);
|
|
|
+ if (likely(!rcu_state.boost)) {
|
|
|
+ rcu_do_batch(rdp);
|
|
|
return;
|
|
|
}
|
|
|
invoke_rcu_callbacks_kthread();
|
|
@@ -2892,8 +2728,8 @@ static void invoke_rcu_core(void)
|
|
|
/*
|
|
|
* Handle any core-RCU processing required by a call_rcu() invocation.
|
|
|
*/
|
|
|
-static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
|
|
|
- struct rcu_head *head, unsigned long flags)
|
|
|
+static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
|
|
|
+ unsigned long flags)
|
|
|
{
|
|
|
/*
|
|
|
* If called from an extended quiescent state, invoke the RCU
|
|
@@ -2917,18 +2753,18 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
|
|
|
rdp->qlen_last_fqs_check + qhimark)) {
|
|
|
|
|
|
/* Are we ignoring a completed grace period? */
|
|
|
- note_gp_changes(rsp, rdp);
|
|
|
+ note_gp_changes(rdp);
|
|
|
|
|
|
/* Start a new grace period if one not already started. */
|
|
|
- if (!rcu_gp_in_progress(rsp)) {
|
|
|
- rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
|
|
|
+ if (!rcu_gp_in_progress()) {
|
|
|
+ rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
|
|
|
} else {
|
|
|
/* Give the grace period a kick. */
|
|
|
rdp->blimit = LONG_MAX;
|
|
|
- if (rsp->n_force_qs == rdp->n_force_qs_snap &&
|
|
|
+ if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
|
|
|
rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
|
|
|
- force_quiescent_state(rsp);
|
|
|
- rdp->n_force_qs_snap = rsp->n_force_qs;
|
|
|
+ force_quiescent_state();
|
|
|
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
|
|
|
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
|
|
|
}
|
|
|
}
|
|
@@ -2944,12 +2780,11 @@ static void rcu_leak_callback(struct rcu_head *rhp)
|
|
|
/*
|
|
|
* Helper function for call_rcu() and friends. The cpu argument will
|
|
|
* normally be -1, indicating "currently running CPU". It may specify
|
|
|
- * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
|
|
|
+ * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier()
|
|
|
* is expected to specify a CPU.
|
|
|
*/
|
|
|
static void
|
|
|
-__call_rcu(struct rcu_head *head, rcu_callback_t func,
|
|
|
- struct rcu_state *rsp, int cpu, bool lazy)
|
|
|
+__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct rcu_data *rdp;
|
|
@@ -2971,14 +2806,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
|
|
|
head->func = func;
|
|
|
head->next = NULL;
|
|
|
local_irq_save(flags);
|
|
|
- rdp = this_cpu_ptr(rsp->rda);
|
|
|
+ rdp = this_cpu_ptr(&rcu_data);
|
|
|
|
|
|
/* Add the callback to our list. */
|
|
|
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
|
|
|
int offline;
|
|
|
|
|
|
if (cpu != -1)
|
|
|
- rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
+ rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
if (likely(rdp->mynode)) {
|
|
|
/* Post-boot, so this should be for a no-CBs CPU. */
|
|
|
offline = !__call_rcu_nocb(rdp, head, lazy, flags);
|
|
@@ -3001,72 +2836,60 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
|
|
|
rcu_idle_count_callbacks_posted();
|
|
|
|
|
|
if (__is_kfree_rcu_offset((unsigned long)func))
|
|
|
- trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
|
|
|
+ trace_rcu_kfree_callback(rcu_state.name, head,
|
|
|
+ (unsigned long)func,
|
|
|
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
|
|
|
rcu_segcblist_n_cbs(&rdp->cblist));
|
|
|
else
|
|
|
- trace_rcu_callback(rsp->name, head,
|
|
|
+ trace_rcu_callback(rcu_state.name, head,
|
|
|
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
|
|
|
rcu_segcblist_n_cbs(&rdp->cblist));
|
|
|
|
|
|
/* Go handle any RCU core processing required. */
|
|
|
- __call_rcu_core(rsp, rdp, head, flags);
|
|
|
+ __call_rcu_core(rdp, head, flags);
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
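The unified __call_rcu() above is what call_rcu() feeds into. A typical caller embeds an rcu_head in the protected object, unpublishes the object, and frees it from the callback once a grace period has elapsed, along these lines (the struct and helper names are illustrative, not from this file):

struct foo {
	int value;
	struct list_head list;	/* on an RCU-protected list */
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	/* Runs once all readers that could still see the element are done. */
	kfree(container_of(rhp, struct foo, rh));
}

static void foo_remove(struct foo *fp)
{
	list_del_rcu(&fp->list);	/* unpublish, under the writer's lock */
	call_rcu(&fp->rh, foo_reclaim);	/* defer the free past a grace period */
}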
|
|
|
/**
|
|
|
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
|
|
|
- * @head: structure to be used for queueing the RCU updates.
|
|
|
- * @func: actual callback function to be invoked after the grace period
|
|
|
- *
|
|
|
- * The callback function will be invoked some time after a full grace
|
|
|
- * period elapses, in other words after all currently executing RCU
|
|
|
- * read-side critical sections have completed. call_rcu_sched() assumes
|
|
|
- * that the read-side critical sections end on enabling of preemption
|
|
|
- * or on voluntary preemption.
|
|
|
- * RCU read-side critical sections are delimited by:
|
|
|
- *
|
|
|
- * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
|
|
|
- * - anything that disables preemption.
|
|
|
- *
|
|
|
- * These may be nested.
|
|
|
- *
|
|
|
- * See the description of call_rcu() for more detailed information on
|
|
|
- * memory ordering guarantees.
|
|
|
- */
|
|
|
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
|
|
|
-{
|
|
|
- __call_rcu(head, func, &rcu_sched_state, -1, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(call_rcu_sched);
|
|
|
-
|
|
|
-/**
|
|
|
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
|
|
|
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
|
|
|
* @head: structure to be used for queueing the RCU updates.
|
|
|
* @func: actual callback function to be invoked after the grace period
|
|
|
*
|
|
|
* The callback function will be invoked some time after a full grace
|
|
|
- * period elapses, in other words after all currently executing RCU
|
|
|
- * read-side critical sections have completed. call_rcu_bh() assumes
|
|
|
- * that the read-side critical sections end on completion of a softirq
|
|
|
- * handler. This means that read-side critical sections in process
|
|
|
- * context must not be interrupted by softirqs. This interface is to be
|
|
|
- * used when most of the read-side critical sections are in softirq context.
|
|
|
- * RCU read-side critical sections are delimited by:
|
|
|
- *
|
|
|
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
|
|
|
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
|
|
|
- *
|
|
|
- * These may be nested.
|
|
|
- *
|
|
|
- * See the description of call_rcu() for more detailed information on
|
|
|
- * memory ordering guarantees.
|
|
|
- */
|
|
|
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
|
|
|
-{
|
|
|
- __call_rcu(head, func, &rcu_bh_state, -1, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(call_rcu_bh);
|
|
|
+ * period elapses, in other words after all pre-existing RCU read-side
|
|
|
+ * critical sections have completed. However, the callback function
|
|
|
+ * might well execute concurrently with RCU read-side critical sections
|
|
|
+ * that started after call_rcu() was invoked. RCU read-side critical
|
|
|
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
|
|
|
+ * may be nested. In addition, regions of code across which interrupts,
|
|
|
+ * preemption, or softirqs have been disabled also serve as RCU read-side
|
|
|
+ * critical sections. This includes hardware interrupt handlers, softirq
|
|
|
+ * handlers, and NMI handlers.
|
|
|
+ *
|
|
|
+ * Note that all CPUs must agree that the grace period extended beyond
|
|
|
+ * all pre-existing RCU read-side critical sections. On systems with more
|
|
|
+ * than one CPU, this means that when "func()" is invoked, each CPU is
|
|
|
+ * guaranteed to have executed a full memory barrier since the end of its
|
|
|
+ * last RCU read-side critical section whose beginning preceded the call
|
|
|
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
|
|
|
+ * critical section that continues beyond the start of "func()" must have
|
|
|
+ * executed a memory barrier after the call_rcu() but before the beginning
|
|
|
+ * of that RCU read-side critical section. Note that these guarantees
|
|
|
+ * include CPUs that are offline, idle, or executing in user mode, as
|
|
|
+ * well as CPUs that are executing in the kernel.
|
|
|
+ *
|
|
|
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
|
|
|
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
|
|
|
+ * guaranteed to execute a full memory barrier during the time interval
|
|
|
+ * between the call to call_rcu() and the invocation of "func()" -- even
|
|
|
+ * if CPU A and CPU B are the same CPU (but again only if the system has
|
|
|
+ * more than one CPU).
|
|
|
+ */
|
|
|
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
|
|
|
+{
|
|
|
+ __call_rcu(head, func, -1, 0);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(call_rcu);
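For illustration only (not part of this patch): the canonical call_rcu() usage embeds an rcu_head in the protected structure and frees the enclosing object from the callback once the grace period has elapsed. The struct foo, foo_reclaim(), and foo_remove() names below are invented.

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rcu);

	kfree(fp);
}

static void foo_remove(struct foo *fp)
{
	/* fp has already been unpublished, so no new readers can find it. */
	call_rcu(&fp->rcu, foo_reclaim);
}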
|
|
|
|
|
|
/*
|
|
|
* Queue an RCU callback for lazy invocation after a grace period.
|
|
@@ -3075,110 +2898,12 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
|
|
|
* callbacks in the list of pending callbacks. Until then, this
|
|
|
* function may only be called from __kfree_rcu().
|
|
|
*/
|
|
|
-void kfree_call_rcu(struct rcu_head *head,
|
|
|
- rcu_callback_t func)
|
|
|
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
|
|
|
{
|
|
|
- __call_rcu(head, func, rcu_state_p, -1, 1);
|
|
|
+ __call_rcu(head, func, -1, 1);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(kfree_call_rcu);
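A minimal sketch of the intended caller (assuming the usual kfree_rcu() wrapper; struct foo and fp are invented): users do not invoke kfree_call_rcu() directly. The kfree_rcu() macro passes the offset of the rcu_head within the enclosing structure in place of a callback pointer, which is why __call_rcu() above can tell the two cases apart with __is_kfree_rcu_offset().

struct foo {
	int data;
	struct rcu_head rcu;
};

/* Frees the whole enclosing struct foo after a grace period. */
kfree_rcu(fp, rcu);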
|
|
|
|
|
|
-/*
|
|
|
- * Because a context switch is a grace period for RCU-sched and RCU-bh,
|
|
|
- * any blocking grace-period wait automatically implies a grace period
|
|
|
- * if there is only one CPU online at any point time during execution
|
|
|
- * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
|
|
|
- * occasionally incorrectly indicate that there are multiple CPUs online
|
|
|
- * when there was in fact only one the whole time, as this just adds
|
|
|
- * some overhead: RCU still operates correctly.
|
|
|
- */
|
|
|
-static int rcu_blocking_is_gp(void)
|
|
|
-{
|
|
|
- int ret;
|
|
|
-
|
|
|
- might_sleep(); /* Check for RCU read-side critical section. */
|
|
|
- preempt_disable();
|
|
|
- ret = num_online_cpus() <= 1;
|
|
|
- preempt_enable();
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
|
|
|
- *
|
|
|
- * Control will return to the caller some time after a full rcu-sched
|
|
|
- * grace period has elapsed, in other words after all currently executing
|
|
|
- * rcu-sched read-side critical sections have completed. These read-side
|
|
|
- * critical sections are delimited by rcu_read_lock_sched() and
|
|
|
- * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
|
|
|
- * local_irq_disable(), and so on may be used in place of
|
|
|
- * rcu_read_lock_sched().
|
|
|
- *
|
|
|
- * This means that all preempt_disable code sequences, including NMI and
|
|
|
- * non-threaded hardware-interrupt handlers, in progress on entry will
|
|
|
- * have completed before this primitive returns. However, this does not
|
|
|
- * guarantee that softirq handlers will have completed, since in some
|
|
|
- * kernels, these handlers can run in process context, and can block.
|
|
|
- *
|
|
|
- * Note that this guarantee implies further memory-ordering guarantees.
|
|
|
- * On systems with more than one CPU, when synchronize_sched() returns,
|
|
|
- * each CPU is guaranteed to have executed a full memory barrier since the
|
|
|
- * end of its last RCU-sched read-side critical section whose beginning
|
|
|
- * preceded the call to synchronize_sched(). In addition, each CPU having
|
|
|
- * an RCU read-side critical section that extends beyond the return from
|
|
|
- * synchronize_sched() is guaranteed to have executed a full memory barrier
|
|
|
- * after the beginning of synchronize_sched() and before the beginning of
|
|
|
- * that RCU read-side critical section. Note that these guarantees include
|
|
|
- * CPUs that are offline, idle, or executing in user mode, as well as CPUs
|
|
|
- * that are executing in the kernel.
|
|
|
- *
|
|
|
- * Furthermore, if CPU A invoked synchronize_sched(), which returned
|
|
|
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
|
|
|
- * to have executed a full memory barrier during the execution of
|
|
|
- * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
|
|
|
- * again only if the system has more than one CPU).
|
|
|
- */
|
|
|
-void synchronize_sched(void)
|
|
|
-{
|
|
|
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
|
|
|
- lock_is_held(&rcu_lock_map) ||
|
|
|
- lock_is_held(&rcu_sched_lock_map),
|
|
|
- "Illegal synchronize_sched() in RCU-sched read-side critical section");
|
|
|
- if (rcu_blocking_is_gp())
|
|
|
- return;
|
|
|
- if (rcu_gp_is_expedited())
|
|
|
- synchronize_sched_expedited();
|
|
|
- else
|
|
|
- wait_rcu_gp(call_rcu_sched);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(synchronize_sched);
|
|
|
-
|
|
|
-/**
|
|
|
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
|
|
|
- *
|
|
|
- * Control will return to the caller some time after a full rcu_bh grace
|
|
|
- * period has elapsed, in other words after all currently executing rcu_bh
|
|
|
- * read-side critical sections have completed. RCU read-side critical
|
|
|
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
|
|
|
- * and may be nested.
|
|
|
- *
|
|
|
- * See the description of synchronize_sched() for more detailed information
|
|
|
- * on memory ordering guarantees.
|
|
|
- */
|
|
|
-void synchronize_rcu_bh(void)
|
|
|
-{
|
|
|
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
|
|
|
- lock_is_held(&rcu_lock_map) ||
|
|
|
- lock_is_held(&rcu_sched_lock_map),
|
|
|
- "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
|
|
|
- if (rcu_blocking_is_gp())
|
|
|
- return;
|
|
|
- if (rcu_gp_is_expedited())
|
|
|
- synchronize_rcu_bh_expedited();
|
|
|
- else
|
|
|
- wait_rcu_gp(call_rcu_bh);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
|
|
|
-
|
|
|
/**
|
|
|
* get_state_synchronize_rcu - Snapshot current RCU state
|
|
|
*
|
|
@@ -3193,7 +2918,7 @@ unsigned long get_state_synchronize_rcu(void)
|
|
|
* before the load from ->gp_seq.
|
|
|
*/
|
|
|
smp_mb(); /* ^^^ */
|
|
|
- return rcu_seq_snap(&rcu_state_p->gp_seq);
|
|
|
+ return rcu_seq_snap(&rcu_state.gp_seq);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
|
|
|
|
|
@@ -3213,70 +2938,30 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
|
|
|
*/
|
|
|
void cond_synchronize_rcu(unsigned long oldstate)
|
|
|
{
|
|
|
- if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
|
|
|
+ if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
|
|
|
synchronize_rcu();
|
|
|
else
|
|
|
smp_mb(); /* Ensure GP ends before subsequent accesses. */
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
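An illustrative pairing of the two cookie helpers (do_something_lengthy() is a made-up placeholder): snapshot the grace-period sequence, do unrelated work, then wait only if no full grace period has elapsed in the meantime.

unsigned long cookie;

cookie = get_state_synchronize_rcu();
do_something_lengthy();		/* hypothetical work that may span a GP */
cond_synchronize_rcu(cookie);	/* returns immediately if a GP already elapsed */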
|
|
|
|
|
|
-/**
|
|
|
- * get_state_synchronize_sched - Snapshot current RCU-sched state
|
|
|
- *
|
|
|
- * Returns a cookie that is used by a later call to cond_synchronize_sched()
|
|
|
- * to determine whether or not a full grace period has elapsed in the
|
|
|
- * meantime.
|
|
|
- */
|
|
|
-unsigned long get_state_synchronize_sched(void)
|
|
|
-{
|
|
|
- /*
|
|
|
- * Any prior manipulation of RCU-protected data must happen
|
|
|
- * before the load from ->gp_seq.
|
|
|
- */
|
|
|
- smp_mb(); /* ^^^ */
|
|
|
- return rcu_seq_snap(&rcu_sched_state.gp_seq);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
|
|
|
-
|
|
|
-/**
|
|
|
- * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
|
|
|
- *
|
|
|
- * @oldstate: return value from earlier call to get_state_synchronize_sched()
|
|
|
- *
|
|
|
- * If a full RCU-sched grace period has elapsed since the earlier call to
|
|
|
- * get_state_synchronize_sched(), just return. Otherwise, invoke
|
|
|
- * synchronize_sched() to wait for a full grace period.
|
|
|
- *
|
|
|
- * Yes, this function does not take counter wrap into account. But
|
|
|
- * counter wrap is harmless. If the counter wraps, we have waited for
|
|
|
- * more than 2 billion grace periods (and way more on a 64-bit system!),
|
|
|
- * so waiting for one additional grace period should be just fine.
|
|
|
- */
|
|
|
-void cond_synchronize_sched(unsigned long oldstate)
|
|
|
-{
|
|
|
- if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
|
|
|
- synchronize_sched();
|
|
|
- else
|
|
|
- smp_mb(); /* Ensure GP ends before subsequent accesses. */
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(cond_synchronize_sched);
|
|
|
-
|
|
|
/*
|
|
|
- * Check to see if there is any immediate RCU-related work to be done
|
|
|
- * by the current CPU, for the specified type of RCU, returning 1 if so.
|
|
|
- * The checks are in order of increasing expense: checks that can be
|
|
|
- * carried out against CPU-local state are performed first. However,
|
|
|
- * we must check for CPU stalls first, else we might not get a chance.
|
|
|
+ * Check to see if there is any immediate RCU-related work to be done by
|
|
|
+ * the current CPU, returning 1 if so and zero otherwise. The checks are
|
|
|
+ * in order of increasing expense: checks that can be carried out against
|
|
|
+ * CPU-local state are performed first. However, we must check for CPU
|
|
|
+ * stalls first, else we might not get a chance.
|
|
|
*/
|
|
|
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
+static int rcu_pending(void)
|
|
|
{
|
|
|
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
|
|
|
struct rcu_node *rnp = rdp->mynode;
|
|
|
|
|
|
/* Check for CPU stalls, if enabled. */
|
|
|
- check_cpu_stall(rsp, rdp);
|
|
|
+ check_cpu_stall(rdp);
|
|
|
|
|
|
/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
|
|
|
- if (rcu_nohz_full_cpu(rsp))
|
|
|
+ if (rcu_nohz_full_cpu())
|
|
|
return 0;
|
|
|
|
|
|
/* Is the RCU core waiting for a quiescent state from this CPU? */
|
|
@@ -3288,7 +2973,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
return 1;
|
|
|
|
|
|
/* Has RCU gone idle with this CPU needing another grace period? */
|
|
|
- if (!rcu_gp_in_progress(rsp) &&
|
|
|
+ if (!rcu_gp_in_progress() &&
|
|
|
rcu_segcblist_is_enabled(&rdp->cblist) &&
|
|
|
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
|
|
|
return 1;
|
|
@@ -3306,21 +2991,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Check to see if there is any immediate RCU-related work to be done
|
|
|
- * by the current CPU, returning 1 if so. This function is part of the
|
|
|
- * RCU implementation; it is -not- an exported member of the RCU API.
|
|
|
- */
|
|
|
-static int rcu_pending(void)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
|
|
|
- return 1;
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Return true if the specified CPU has any callback. If all_lazy is
|
|
|
* non-NULL, store an indication of whether all callbacks are lazy.
|
|
@@ -3331,17 +3001,12 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
|
|
|
bool al = true;
|
|
|
bool hc = false;
|
|
|
struct rcu_data *rdp;
|
|
|
- struct rcu_state *rsp;
|
|
|
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- rdp = this_cpu_ptr(rsp->rda);
|
|
|
- if (rcu_segcblist_empty(&rdp->cblist))
|
|
|
- continue;
|
|
|
+ rdp = this_cpu_ptr(&rcu_data);
|
|
|
+ if (!rcu_segcblist_empty(&rdp->cblist)) {
|
|
|
hc = true;
|
|
|
- if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
|
|
|
+ if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
|
|
|
al = false;
|
|
|
- break;
|
|
|
- }
|
|
|
}
|
|
|
if (all_lazy)
|
|
|
*all_lazy = al;
|
|
@@ -3349,81 +3014,80 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Helper function for _rcu_barrier() tracing. If tracing is disabled,
|
|
|
+ * Helper function for rcu_barrier() tracing. If tracing is disabled,
|
|
|
* the compiler is expected to optimize this away.
|
|
|
*/
|
|
|
-static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
|
|
|
- int cpu, unsigned long done)
|
|
|
+static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
|
|
|
{
|
|
|
- trace_rcu_barrier(rsp->name, s, cpu,
|
|
|
- atomic_read(&rsp->barrier_cpu_count), done);
|
|
|
+ trace_rcu_barrier(rcu_state.name, s, cpu,
|
|
|
+ atomic_read(&rcu_state.barrier_cpu_count), done);
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * RCU callback function for _rcu_barrier(). If we are last, wake
|
|
|
- * up the task executing _rcu_barrier().
|
|
|
+ * RCU callback function for rcu_barrier(). If we are last, wake
|
|
|
+ * up the task executing rcu_barrier().
|
|
|
*/
|
|
|
static void rcu_barrier_callback(struct rcu_head *rhp)
|
|
|
{
|
|
|
- struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
|
|
|
- struct rcu_state *rsp = rdp->rsp;
|
|
|
-
|
|
|
- if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
|
|
|
- _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
|
|
|
- rsp->barrier_sequence);
|
|
|
- complete(&rsp->barrier_completion);
|
|
|
+ if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
|
|
|
+ rcu_barrier_trace(TPS("LastCB"), -1,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
+ complete(&rcu_state.barrier_completion);
|
|
|
} else {
|
|
|
- _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
* Called with preemption disabled, and from cross-cpu IRQ context.
|
|
|
*/
|
|
|
-static void rcu_barrier_func(void *type)
|
|
|
+static void rcu_barrier_func(void *unused)
|
|
|
{
|
|
|
- struct rcu_state *rsp = type;
|
|
|
- struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
|
|
|
+ struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
|
|
|
|
|
|
- _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
|
|
|
rdp->barrier_head.func = rcu_barrier_callback;
|
|
|
debug_rcu_head_queue(&rdp->barrier_head);
|
|
|
if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
|
|
|
- atomic_inc(&rsp->barrier_cpu_count);
|
|
|
+ atomic_inc(&rcu_state.barrier_cpu_count);
|
|
|
} else {
|
|
|
debug_rcu_head_unqueue(&rdp->barrier_head);
|
|
|
- _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
|
|
|
- rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("IRQNQ"), -1,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Orchestrate the specified type of RCU barrier, waiting for all
|
|
|
- * RCU callbacks of the specified type to complete.
|
|
|
+/**
|
|
|
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
|
|
|
+ *
|
|
|
+ * Note that this primitive does not necessarily wait for an RCU grace period
|
|
|
+ * to complete. For example, if there are no RCU callbacks queued anywhere
|
|
|
+ * in the system, then rcu_barrier() is within its rights to return
|
|
|
+ * immediately, without waiting for anything, much less an RCU grace period.
|
|
|
*/
|
|
|
-static void _rcu_barrier(struct rcu_state *rsp)
|
|
|
+void rcu_barrier(void)
|
|
|
{
|
|
|
int cpu;
|
|
|
struct rcu_data *rdp;
|
|
|
- unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
|
|
|
+ unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
|
|
|
|
|
|
- _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
|
|
|
+ rcu_barrier_trace(TPS("Begin"), -1, s);
|
|
|
|
|
|
/* Take mutex to serialize concurrent rcu_barrier() requests. */
|
|
|
- mutex_lock(&rsp->barrier_mutex);
|
|
|
+ mutex_lock(&rcu_state.barrier_mutex);
|
|
|
|
|
|
/* Did someone else do our work for us? */
|
|
|
- if (rcu_seq_done(&rsp->barrier_sequence, s)) {
|
|
|
- _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
|
|
|
- rsp->barrier_sequence);
|
|
|
+ if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
|
|
|
+ rcu_barrier_trace(TPS("EarlyExit"), -1,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
smp_mb(); /* caller's subsequent code after above check. */
|
|
|
- mutex_unlock(&rsp->barrier_mutex);
|
|
|
+ mutex_unlock(&rcu_state.barrier_mutex);
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
/* Mark the start of the barrier operation. */
|
|
|
- rcu_seq_start(&rsp->barrier_sequence);
|
|
|
- _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
|
|
|
+ rcu_seq_start(&rcu_state.barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
|
|
|
|
|
|
/*
|
|
|
* Initialize the count to one rather than to zero in order to
|
|
@@ -3431,8 +3095,8 @@ static void _rcu_barrier(struct rcu_state *rsp)
|
|
|
* (or preemption of this task). Exclude CPU-hotplug operations
|
|
|
* to ensure that no offline CPU has callbacks queued.
|
|
|
*/
|
|
|
- init_completion(&rsp->barrier_completion);
|
|
|
- atomic_set(&rsp->barrier_cpu_count, 1);
|
|
|
+ init_completion(&rcu_state.barrier_completion);
|
|
|
+ atomic_set(&rcu_state.barrier_cpu_count, 1);
|
|
|
get_online_cpus();
|
|
|
|
|
|
/*
|
|
@@ -3443,26 +3107,26 @@ static void _rcu_barrier(struct rcu_state *rsp)
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
|
|
|
continue;
|
|
|
- rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
+ rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
if (rcu_is_nocb_cpu(cpu)) {
|
|
|
- if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
|
|
|
- _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
|
|
|
- rsp->barrier_sequence);
|
|
|
+ if (!rcu_nocb_cpu_needs_barrier(cpu)) {
|
|
|
+ rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
} else {
|
|
|
- _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
|
|
|
- rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
smp_mb__before_atomic();
|
|
|
- atomic_inc(&rsp->barrier_cpu_count);
|
|
|
+ atomic_inc(&rcu_state.barrier_cpu_count);
|
|
|
__call_rcu(&rdp->barrier_head,
|
|
|
- rcu_barrier_callback, rsp, cpu, 0);
|
|
|
+ rcu_barrier_callback, cpu, 0);
|
|
|
}
|
|
|
} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
|
|
|
- _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
|
|
|
- rsp->barrier_sequence);
|
|
|
- smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
|
|
|
+ rcu_barrier_trace(TPS("OnlineQ"), cpu,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
+ smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
|
|
|
} else {
|
|
|
- _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
|
|
|
- rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("OnlineNQ"), cpu,
|
|
|
+ rcu_state.barrier_sequence);
|
|
|
}
|
|
|
}
|
|
|
put_online_cpus();
|
|
@@ -3471,37 +3135,20 @@ static void _rcu_barrier(struct rcu_state *rsp)
|
|
|
* Now that we have an rcu_barrier_callback() callback on each
|
|
|
* CPU, and thus each counted, remove the initial count.
|
|
|
*/
|
|
|
- if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
|
|
- complete(&rsp->barrier_completion);
|
|
|
+ if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
|
|
|
+ complete(&rcu_state.barrier_completion);
|
|
|
|
|
|
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
|
|
|
- wait_for_completion(&rsp->barrier_completion);
|
|
|
+ wait_for_completion(&rcu_state.barrier_completion);
|
|
|
|
|
|
/* Mark the end of the barrier operation. */
|
|
|
- _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
|
|
|
- rcu_seq_end(&rsp->barrier_sequence);
|
|
|
+ rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
|
|
|
+ rcu_seq_end(&rcu_state.barrier_sequence);
|
|
|
|
|
|
/* Other rcu_barrier() invocations can now safely proceed. */
|
|
|
- mutex_unlock(&rsp->barrier_mutex);
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
|
|
|
- */
|
|
|
-void rcu_barrier_bh(void)
|
|
|
-{
|
|
|
- _rcu_barrier(&rcu_bh_state);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
|
|
|
-
|
|
|
-/**
|
|
|
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
|
|
|
- */
|
|
|
-void rcu_barrier_sched(void)
|
|
|
-{
|
|
|
- _rcu_barrier(&rcu_sched_state);
|
|
|
+ mutex_unlock(&rcu_state.barrier_mutex);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
|
|
|
+EXPORT_SYMBOL_GPL(rcu_barrier);
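As a sketch of the classic consumer (not part of this patch; foo_exit(), unregister_foo_interfaces(), and foo_cache are invented names): a module that posts callbacks with call_rcu() must drain them with rcu_barrier() before its callback code and data go away.

static void __exit foo_exit(void)
{
	unregister_foo_interfaces();	/* no new call_rcu() invocations after this */
	rcu_barrier();			/* wait for all already-queued callbacks */
	kmem_cache_destroy(foo_cache);
}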
|
|
|
|
|
|
/*
|
|
|
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
|
|
@@ -3535,46 +3182,46 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
|
|
|
* Do boot-time initialization of a CPU's per-CPU RCU data.
|
|
|
*/
|
|
|
static void __init
|
|
|
-rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|
|
+rcu_boot_init_percpu_data(int cpu)
|
|
|
{
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
|
|
/* Set up local state, ensuring consistent view of global state. */
|
|
|
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
|
|
|
- rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
|
|
|
- WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
|
|
|
- WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
|
|
|
- rdp->rcu_ofl_gp_seq = rsp->gp_seq;
|
|
|
+ WARN_ON_ONCE(rdp->dynticks_nesting != 1);
|
|
|
+ WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
|
|
|
+ rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
|
|
|
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
|
|
|
- rdp->rcu_onl_gp_seq = rsp->gp_seq;
|
|
|
+ rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
|
|
|
rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
|
|
|
rdp->cpu = cpu;
|
|
|
- rdp->rsp = rsp;
|
|
|
rcu_boot_init_nocb_percpu_data(rdp);
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Initialize a CPU's per-CPU RCU data. Note that only one online or
|
|
|
+ * Invoked early in the CPU-online process, when pretty much all services
|
|
|
+ * are available. The incoming CPU is not present.
|
|
|
+ *
|
|
|
+ * Initializes a CPU's per-CPU RCU data. Note that only one online or
|
|
|
* offline event can be happening at a given time. Note also that we can
|
|
|
* accept some slop in the rsp->gp_seq access due to the fact that this
|
|
|
* CPU cannot possibly have any RCU callbacks in flight yet.
|
|
|
*/
|
|
|
-static void
|
|
|
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|
|
+int rcutree_prepare_cpu(unsigned int cpu)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
- struct rcu_node *rnp = rcu_get_root(rsp);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
+ struct rcu_node *rnp = rcu_get_root();
|
|
|
|
|
|
/* Set up local state, ensuring consistent view of global state. */
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
rdp->qlen_last_fqs_check = 0;
|
|
|
- rdp->n_force_qs_snap = rsp->n_force_qs;
|
|
|
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
|
|
|
rdp->blimit = blimit;
|
|
|
if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
|
|
|
!init_nocb_callback_list(rdp))
|
|
|
rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
|
|
|
- rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */
|
|
|
+ rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
|
|
|
rcu_dynticks_eqs_online();
|
|
|
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
|
|
|
|
|
@@ -3589,25 +3236,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|
|
rdp->gp_seq = rnp->gp_seq;
|
|
|
rdp->gp_seq_needed = rnp->gp_seq;
|
|
|
rdp->cpu_no_qs.b.norm = true;
|
|
|
- rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
|
|
|
rdp->core_needs_qs = false;
|
|
|
rdp->rcu_iw_pending = false;
|
|
|
rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
|
|
|
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
|
|
|
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Invoked early in the CPU-online process, when pretty much all
|
|
|
- * services are available. The incoming CPU is not present.
|
|
|
- */
|
|
|
-int rcutree_prepare_cpu(unsigned int cpu)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- rcu_init_percpu_data(cpu, rsp);
|
|
|
-
|
|
|
rcu_prepare_kthreads(cpu);
|
|
|
rcu_spawn_all_nocb_kthreads(cpu);
|
|
|
|
|
@@ -3619,7 +3252,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
|
|
|
*/
|
|
|
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
|
|
|
{
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
|
|
rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
|
|
|
}
|
|
@@ -3633,15 +3266,12 @@ int rcutree_online_cpu(unsigned int cpu)
|
|
|
unsigned long flags;
|
|
|
struct rcu_data *rdp;
|
|
|
struct rcu_node *rnp;
|
|
|
- struct rcu_state *rsp;
|
|
|
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
- rnp = rdp->mynode;
|
|
|
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
- rnp->ffmask |= rdp->grpmask;
|
|
|
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
- }
|
|
|
+ rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
+ rnp = rdp->mynode;
|
|
|
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
+ rnp->ffmask |= rdp->grpmask;
|
|
|
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
if (IS_ENABLED(CONFIG_TREE_SRCU))
|
|
|
srcu_online_cpu(cpu);
|
|
|
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
|
|
@@ -3660,15 +3290,12 @@ int rcutree_offline_cpu(unsigned int cpu)
|
|
|
unsigned long flags;
|
|
|
struct rcu_data *rdp;
|
|
|
struct rcu_node *rnp;
|
|
|
- struct rcu_state *rsp;
|
|
|
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
- rnp = rdp->mynode;
|
|
|
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
- rnp->ffmask &= ~rdp->grpmask;
|
|
|
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
- }
|
|
|
+ rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
+ rnp = rdp->mynode;
|
|
|
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
+ rnp->ffmask &= ~rdp->grpmask;
|
|
|
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
|
|
|
rcutree_affinity_setting(cpu, cpu);
|
|
|
if (IS_ENABLED(CONFIG_TREE_SRCU))
|
|
@@ -3676,32 +3303,6 @@ int rcutree_offline_cpu(unsigned int cpu)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Near the end of the offline process. We do only tracing here.
|
|
|
- */
|
|
|
-int rcutree_dying_cpu(unsigned int cpu)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- rcu_cleanup_dying_cpu(rsp);
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * The outgoing CPU is gone and we are running elsewhere.
|
|
|
- */
|
|
|
-int rcutree_dead_cpu(unsigned int cpu)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- rcu_cleanup_dead_cpu(cpu, rsp);
|
|
|
- do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
|
|
|
- }
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
static DEFINE_PER_CPU(int, rcu_cpu_started);
|
|
|
|
|
|
/*
|
|
@@ -3723,137 +3324,113 @@ void rcu_cpu_starting(unsigned int cpu)
|
|
|
unsigned long oldmask;
|
|
|
struct rcu_data *rdp;
|
|
|
struct rcu_node *rnp;
|
|
|
- struct rcu_state *rsp;
|
|
|
|
|
|
if (per_cpu(rcu_cpu_started, cpu))
|
|
|
return;
|
|
|
|
|
|
per_cpu(rcu_cpu_started, cpu) = 1;
|
|
|
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
- rnp = rdp->mynode;
|
|
|
- mask = rdp->grpmask;
|
|
|
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
- rnp->qsmaskinitnext |= mask;
|
|
|
- oldmask = rnp->expmaskinitnext;
|
|
|
- rnp->expmaskinitnext |= mask;
|
|
|
- oldmask ^= rnp->expmaskinitnext;
|
|
|
- nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
|
|
|
- /* Allow lockless access for expedited grace periods. */
|
|
|
- smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
|
|
|
- rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
|
|
|
- rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
|
|
|
- rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
|
|
|
- if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
|
|
|
- /* Report QS -after- changing ->qsmaskinitnext! */
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
|
|
|
- } else {
|
|
|
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
- }
|
|
|
+ rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
+ rnp = rdp->mynode;
|
|
|
+ mask = rdp->grpmask;
|
|
|
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
+ rnp->qsmaskinitnext |= mask;
|
|
|
+ oldmask = rnp->expmaskinitnext;
|
|
|
+ rnp->expmaskinitnext |= mask;
|
|
|
+ oldmask ^= rnp->expmaskinitnext;
|
|
|
+ nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
|
|
|
+ /* Allow lockless access for expedited grace periods. */
|
|
|
+ smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
|
|
|
+ rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
|
|
|
+ rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
|
|
|
+ rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
|
|
|
+ if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
|
|
|
+ /* Report QS -after- changing ->qsmaskinitnext! */
|
|
|
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
|
|
+ } else {
|
|
|
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
}
|
|
|
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
|
|
|
}
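A quick worked example of the ->expmaskinitnext arithmetic above (values invented): if the leaf's old mask was 0x5 and the incoming CPU's ->grpmask is 0x2, the OR yields 0x7, the XOR against the saved old value leaves 0x2, and bitmap_weight() reports 1, so the smp_store_release() advances rcu_state.ncpus by exactly the number of CPUs that just became known to RCU while keeping the lockless readers on the expedited path safe.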
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
/*
|
|
|
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
|
|
|
- * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
|
|
|
- * bit masks.
|
|
|
+ * The outgoing function has no further need of RCU, so remove it from
|
|
|
+ * the rcu_node tree's ->qsmaskinitnext bit masks.
|
|
|
+ *
|
|
|
+ * Note that this function is special in that it is invoked directly
|
|
|
+ * from the outgoing CPU rather than from the cpuhp_step mechanism.
|
|
|
+ * This is because this function must be invoked at a precise location.
|
|
|
*/
|
|
|
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
|
|
|
+void rcu_report_dead(unsigned int cpu)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
unsigned long mask;
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
|
|
|
|
|
+ /* QS for any half-done expedited grace period. */
|
|
|
+ preempt_disable();
|
|
|
+ rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
|
|
|
+ preempt_enable();
|
|
|
+ rcu_preempt_deferred_qs(current);
|
|
|
+
|
|
|
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
|
|
|
mask = rdp->grpmask;
|
|
|
- spin_lock(&rsp->ofl_lock);
|
|
|
+ raw_spin_lock(&rcu_state.ofl_lock);
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
|
|
|
- rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
|
|
|
- rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
|
|
|
+ rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
|
|
|
+ rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
|
|
|
if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
|
|
|
/* Report quiescent state -before- changing ->qsmaskinitnext! */
|
|
|
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
|
|
|
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
|
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
}
|
|
|
rnp->qsmaskinitnext &= ~mask;
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
- spin_unlock(&rsp->ofl_lock);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * The outgoing function has no further need of RCU, so remove it from
|
|
|
- * the list of CPUs that RCU must track.
|
|
|
- *
|
|
|
- * Note that this function is special in that it is invoked directly
|
|
|
- * from the outgoing CPU rather than from the cpuhp_step mechanism.
|
|
|
- * This is because this function must be invoked at a precise location.
|
|
|
- */
|
|
|
-void rcu_report_dead(unsigned int cpu)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- /* QS for any half-done expedited RCU-sched GP. */
|
|
|
- preempt_disable();
|
|
|
- rcu_report_exp_rdp(&rcu_sched_state,
|
|
|
- this_cpu_ptr(rcu_sched_state.rda), true);
|
|
|
- preempt_enable();
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- rcu_cleanup_dying_idle_cpu(cpu, rsp);
|
|
|
+ raw_spin_unlock(&rcu_state.ofl_lock);
|
|
|
|
|
|
per_cpu(rcu_cpu_started, cpu) = 0;
|
|
|
}
|
|
|
|
|
|
-/* Migrate the dead CPU's callbacks to the current CPU. */
|
|
|
-static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
|
|
|
+/*
|
|
|
+ * The outgoing CPU has just passed through the dying-idle state, and we
|
|
|
+ * are being invoked from the CPU that was IPIed to continue the offline
|
|
|
+ * operation. Migrate the outgoing CPU's callbacks to the current CPU.
|
|
|
+ */
|
|
|
+void rcutree_migrate_callbacks(int cpu)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct rcu_data *my_rdp;
|
|
|
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
- struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
|
|
|
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
+ struct rcu_node *rnp_root = rcu_get_root();
|
|
|
bool needwake;
|
|
|
|
|
|
if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
|
|
|
return; /* No callbacks to migrate. */
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
- my_rdp = this_cpu_ptr(rsp->rda);
|
|
|
+ my_rdp = this_cpu_ptr(&rcu_data);
|
|
|
if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
|
|
|
local_irq_restore(flags);
|
|
|
return;
|
|
|
}
|
|
|
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
|
|
|
/* Leverage recent GPs and set GP for new callbacks. */
|
|
|
- needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
|
|
|
- rcu_advance_cbs(rsp, rnp_root, my_rdp);
|
|
|
+ needwake = rcu_advance_cbs(rnp_root, rdp) ||
|
|
|
+ rcu_advance_cbs(rnp_root, my_rdp);
|
|
|
rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
|
|
|
WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
|
|
|
!rcu_segcblist_n_cbs(&my_rdp->cblist));
|
|
|
raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
|
|
|
if (needwake)
|
|
|
- rcu_gp_kthread_wake(rsp);
|
|
|
+ rcu_gp_kthread_wake();
|
|
|
WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
|
|
|
!rcu_segcblist_empty(&rdp->cblist),
|
|
|
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
|
|
|
cpu, rcu_segcblist_n_cbs(&rdp->cblist),
|
|
|
rcu_segcblist_first_cb(&rdp->cblist));
|
|
|
}
|
|
|
-
|
|
|
-/*
|
|
|
- * The outgoing CPU has just passed through the dying-idle state,
|
|
|
- * and we are being invoked from the CPU that was IPIed to continue the
|
|
|
- * offline operation. We need to migrate the outgoing CPU's callbacks.
|
|
|
- */
|
|
|
-void rcutree_migrate_callbacks(int cpu)
|
|
|
-{
|
|
|
- struct rcu_state *rsp;
|
|
|
-
|
|
|
- for_each_rcu_flavor(rsp)
|
|
|
- rcu_migrate_callbacks(cpu, rsp);
|
|
|
-}
|
|
|
#endif
|
|
|
|
|
|
/*
|
|
@@ -3881,14 +3458,13 @@ static int rcu_pm_notify(struct notifier_block *self,
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Spawn the kthreads that handle each RCU flavor's grace periods.
|
|
|
+ * Spawn the kthreads that handle RCU's grace periods.
|
|
|
*/
|
|
|
static int __init rcu_spawn_gp_kthread(void)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
int kthread_prio_in = kthread_prio;
|
|
|
struct rcu_node *rnp;
|
|
|
- struct rcu_state *rsp;
|
|
|
struct sched_param sp;
|
|
|
struct task_struct *t;
|
|
|
|
|
@@ -3908,19 +3484,17 @@ static int __init rcu_spawn_gp_kthread(void)
|
|
|
kthread_prio, kthread_prio_in);
|
|
|
|
|
|
rcu_scheduler_fully_active = 1;
|
|
|
- for_each_rcu_flavor(rsp) {
|
|
|
- t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
|
|
|
- BUG_ON(IS_ERR(t));
|
|
|
- rnp = rcu_get_root(rsp);
|
|
|
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
- rsp->gp_kthread = t;
|
|
|
- if (kthread_prio) {
|
|
|
- sp.sched_priority = kthread_prio;
|
|
|
- sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
|
|
|
- }
|
|
|
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
- wake_up_process(t);
|
|
|
+ t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
|
|
|
+ BUG_ON(IS_ERR(t));
|
|
|
+ rnp = rcu_get_root();
|
|
|
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
|
|
+ rcu_state.gp_kthread = t;
|
|
|
+ if (kthread_prio) {
|
|
|
+ sp.sched_priority = kthread_prio;
|
|
|
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
|
|
|
}
|
|
|
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
|
|
+ wake_up_process(t);
|
|
|
rcu_spawn_nocb_kthreads();
|
|
|
rcu_spawn_boost_kthreads();
|
|
|
return 0;
|
|
@@ -3947,9 +3521,9 @@ void rcu_scheduler_starting(void)
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Helper function for rcu_init() that initializes one rcu_state structure.
|
|
|
+ * Helper function for rcu_init() that initializes the rcu_state structure.
|
|
|
*/
|
|
|
-static void __init rcu_init_one(struct rcu_state *rsp)
|
|
|
+static void __init rcu_init_one(void)
|
|
|
{
|
|
|
static const char * const buf[] = RCU_NODE_NAME_INIT;
|
|
|
static const char * const fqs[] = RCU_FQS_NAME_INIT;
|
|
@@ -3971,14 +3545,15 @@ static void __init rcu_init_one(struct rcu_state *rsp)
|
|
|
/* Initialize the level-tracking arrays. */
|
|
|
|
|
|
for (i = 1; i < rcu_num_lvls; i++)
|
|
|
- rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
|
|
|
+ rcu_state.level[i] =
|
|
|
+ rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
|
|
|
rcu_init_levelspread(levelspread, num_rcu_lvl);
|
|
|
|
|
|
/* Initialize the elements themselves, starting from the leaves. */
|
|
|
|
|
|
for (i = rcu_num_lvls - 1; i >= 0; i--) {
|
|
|
cpustride *= levelspread[i];
|
|
|
- rnp = rsp->level[i];
|
|
|
+ rnp = rcu_state.level[i];
|
|
|
for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
|
|
|
raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
|
|
|
lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
|
|
@@ -3986,9 +3561,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
|
|
|
raw_spin_lock_init(&rnp->fqslock);
|
|
|
lockdep_set_class_and_name(&rnp->fqslock,
|
|
|
&rcu_fqs_class[i], fqs[i]);
|
|
|
- rnp->gp_seq = rsp->gp_seq;
|
|
|
- rnp->gp_seq_needed = rsp->gp_seq;
|
|
|
- rnp->completedqs = rsp->gp_seq;
|
|
|
+ rnp->gp_seq = rcu_state.gp_seq;
|
|
|
+ rnp->gp_seq_needed = rcu_state.gp_seq;
|
|
|
+ rnp->completedqs = rcu_state.gp_seq;
|
|
|
rnp->qsmask = 0;
|
|
|
rnp->qsmaskinit = 0;
|
|
|
rnp->grplo = j * cpustride;
|
|
@@ -4001,8 +3576,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
|
|
|
rnp->parent = NULL;
|
|
|
} else {
|
|
|
rnp->grpnum = j % levelspread[i - 1];
|
|
|
- rnp->grpmask = 1UL << rnp->grpnum;
|
|
|
- rnp->parent = rsp->level[i - 1] +
|
|
|
+ rnp->grpmask = BIT(rnp->grpnum);
|
|
|
+ rnp->parent = rcu_state.level[i - 1] +
|
|
|
j / levelspread[i - 1];
|
|
|
}
|
|
|
rnp->level = i;
|
|
@@ -4016,16 +3591,15 @@ static void __init rcu_init_one(struct rcu_state *rsp)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- init_swait_queue_head(&rsp->gp_wq);
|
|
|
- init_swait_queue_head(&rsp->expedited_wq);
|
|
|
- rnp = rcu_first_leaf_node(rsp);
|
|
|
+ init_swait_queue_head(&rcu_state.gp_wq);
|
|
|
+ init_swait_queue_head(&rcu_state.expedited_wq);
|
|
|
+ rnp = rcu_first_leaf_node();
|
|
|
for_each_possible_cpu(i) {
|
|
|
while (i > rnp->grphi)
|
|
|
rnp++;
|
|
|
- per_cpu_ptr(rsp->rda, i)->mynode = rnp;
|
|
|
- rcu_boot_init_percpu_data(i, rsp);
|
|
|
+ per_cpu_ptr(&rcu_data, i)->mynode = rnp;
|
|
|
+ rcu_boot_init_percpu_data(i);
|
|
|
}
|
|
|
- list_add(&rsp->flavors, &rcu_struct_flavors);
|
|
|
}
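For a sense of the scale being set up here (numbers assume the default RCU_FANOUT of 64 and RCU_FANOUT_LEAF of 16 on a 64-bit build): a 4096-CPU system gets 256 leaf rcu_node structures (4096 / 16), 4 interior nodes above them (256 / 64), and one root, so rcu_num_lvls is 3 and the loops above initialize 261 rcu_node structures in total, leaf level first.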
|
|
|
|
|
|
/*
|
|
@@ -4051,6 +3625,8 @@ static void __init rcu_init_geometry(void)
|
|
|
jiffies_till_first_fqs = d;
|
|
|
if (jiffies_till_next_fqs == ULONG_MAX)
|
|
|
jiffies_till_next_fqs = d;
|
|
|
+ if (jiffies_till_sched_qs == ULONG_MAX)
|
|
|
+ adjust_jiffies_till_sched_qs();
|
|
|
|
|
|
/* If the compile-time values are accurate, just leave. */
|
|
|
if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
|
|
@@ -4109,16 +3685,16 @@ static void __init rcu_init_geometry(void)
|
|
|
|
|
|
/*
|
|
|
* Dump out the structure of the rcu_node combining tree associated
|
|
|
- * with the rcu_state structure referenced by rsp.
|
|
|
+ * with the rcu_state structure.
|
|
|
*/
|
|
|
-static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
|
|
|
+static void __init rcu_dump_rcu_node_tree(void)
|
|
|
{
|
|
|
int level = 0;
|
|
|
struct rcu_node *rnp;
|
|
|
|
|
|
pr_info("rcu_node tree layout dump\n");
|
|
|
pr_info(" ");
|
|
|
- rcu_for_each_node_breadth_first(rsp, rnp) {
|
|
|
+ rcu_for_each_node_breadth_first(rnp) {
|
|
|
if (rnp->level != level) {
|
|
|
pr_cont("\n");
|
|
|
pr_info(" ");
|
|
@@ -4140,11 +3716,9 @@ void __init rcu_init(void)
|
|
|
|
|
|
rcu_bootup_announce();
|
|
|
rcu_init_geometry();
|
|
|
- rcu_init_one(&rcu_bh_state);
|
|
|
- rcu_init_one(&rcu_sched_state);
|
|
|
+ rcu_init_one();
|
|
|
if (dump_tree)
|
|
|
- rcu_dump_rcu_node_tree(&rcu_sched_state);
|
|
|
- __rcu_init_preempt();
|
|
|
+ rcu_dump_rcu_node_tree();
|
|
|
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
|
|
|
|
|
|
/*
|
|
@@ -4164,6 +3738,7 @@ void __init rcu_init(void)
|
|
|
WARN_ON(!rcu_gp_wq);
|
|
|
rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
|
|
|
WARN_ON(!rcu_par_gp_wq);
|
|
|
+ srcu_init();
|
|
|
}
|
|
|
|
|
|
#include "tree_exp.h"
|