@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-					    rcu_dynticks_nesting, newval));
-		rcu_dynticks_nesting = newval;
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-				    rcu_dynticks_nesting, newval));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-					    rcu_dynticks_nesting, newval));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-	rcu_sched_qs(); /* implies rcu_bh_inc() */
-	barrier();
-	rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-	    DYNTICK_TASK_NEST_VALUE)
-		newval = 0;
-	else
-		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	newval = rcu_dynticks_nesting - 1;
-	WARN_ON_ONCE(newval < 0);
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-					    oldval, rcu_dynticks_nesting));
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-					    oldval, rcu_dynticks_nesting));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_irq_enter(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	rcu_dynticks_nesting++;
-	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
@@ -179,7 +89,7 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
  */
 bool notrace __rcu_is_watching(void)
 {
-	return rcu_dynticks_nesting;
+	return true;
 }
 EXPORT_SYMBOL(__rcu_is_watching);
 
@@ -347,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
 	rcp->curtail = &head->next;
 	RCU_TRACE(rcp->qlen++);
 	local_irq_restore(flags);
+
+	if (unlikely(is_idle_task(current))) {
+		/* force scheduling for rcu_sched_qs() */
+		resched_cpu(0);
+	}
 }
 
 /*