@@ -510,11 +510,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
-				 bool user)
+static void rcu_eqs_enter_common(long long oldval, bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
@@ -565,7 +565,7 @@ static void rcu_eqs_enter(bool user)
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
 		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(rdtp, oldval, user);
+		rcu_eqs_enter_common(oldval, user);
 	} else {
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 	}
@@ -589,7 +589,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
+	rcu_sysidle_enter(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +639,8 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_enter_common(rdtp, oldval, true);
-	rcu_sysidle_enter(rdtp, 1);
+		rcu_eqs_enter_common(oldval, true);
+	rcu_sysidle_enter(1);
 	local_irq_restore(flags);
 }
 
@@ -651,9 +651,10 @@ void rcu_irq_exit(void)
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
-				int user)
+static void rcu_eqs_exit_common(long long oldval, int user)
 {
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
 	rcu_dynticks_task_exit();
 	smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
@@ -691,7 +692,7 @@ static void rcu_eqs_exit(bool user)
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-		rcu_eqs_exit_common(rdtp, oldval, user);
+		rcu_eqs_exit_common(oldval, user);
 	}
 }
 
@@ -712,7 +713,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
+	rcu_sysidle_exit(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +764,8 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_exit_common(rdtp, oldval, true);
-	rcu_sysidle_exit(rdtp, 1);
+		rcu_eqs_exit_common(oldval, true);
+	rcu_sysidle_exit(1);
 	local_irq_restore(flags);
 }
 
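The pattern in this patch is to stop threading a pointer to the current CPU's rcu_dynticks structure through the call chain and instead have the callee fetch it with this_cpu_ptr(&rcu_dynticks), since these helpers only ever operate on the current CPU's state. Below is a minimal userspace sketch of that refactoring, assuming a single simulated CPU; all names (dynticks_demo, demo_this_cpu_ptr, eqs_enter_common_demo) are illustrative stand-ins, not the kernel's.

/*
 * Sketch only: models moving the per-CPU lookup from the callers into
 * the callee, as the patch above does with this_cpu_ptr(&rcu_dynticks).
 */
#include <stdio.h>

struct dynticks_demo {
	long long dynticks_nesting;
};

/* Stand-in for a per-CPU variable; a real kernel keeps one copy per CPU. */
static struct dynticks_demo rcu_dynticks_demo;

static struct dynticks_demo *demo_this_cpu_ptr(void)
{
	return &rcu_dynticks_demo;	/* models this_cpu_ptr(&rcu_dynticks) */
}

/* After the change: no rdtp parameter, the callee looks the state up itself. */
static void eqs_enter_common_demo(long long oldval)
{
	struct dynticks_demo *rdtp = demo_this_cpu_ptr();

	printf("entering EQS: oldval=%lld, nesting now %lld\n",
	       oldval, rdtp->dynticks_nesting);
}

int main(void)
{
	long long oldval = rcu_dynticks_demo.dynticks_nesting = 1;

	rcu_dynticks_demo.dynticks_nesting = 0;	/* caller updates nesting */
	eqs_enter_common_demo(oldval);		/* callee fetches per-CPU state */
	return 0;
}

With the lookup centralized in the callee, callers such as rcu_idle_enter() and rcu_irq_exit() no longer need to compute the per-CPU pointer themselves, which is exactly what the removed this_cpu_ptr(&rcu_dynticks) arguments in the hunks above reflect.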