@@ -76,21 +76,49 @@ static int cpuidle_idle_call(void)
 	int next_state, entered_state, ret;
 	bool broadcast;
 
+	/*
+	 * Check if the idle task must be rescheduled. If so, exit
+	 * the function after re-enabling the local irq and setting
+	 * the polling flag again.
+	 */
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 		__current_set_polling();
 		return 0;
 	}
 
+	/*
+	 * During the idle period, stop measuring the latencies of
+	 * the irqs-disabled critical sections.
+	 */
 	stop_critical_timings();
+
+	/*
+	 * Tell the RCU framework we are entering an idle section,
+	 * so no more rcu read side critical sections may occur and
+	 * the grace period moves one step closer.
+	 */
 	rcu_idle_enter();
 
+	/*
+	 * Check if the cpuidle framework is ready, otherwise fall
+	 * back to the default arch-specific idle method.
+	 */
 	ret = cpuidle_enabled(drv, dev);
 
 	if (!ret) {
-		/* ask the governor for the next state */
+		/*
+		 * Ask the governor to choose an idle state it deems
+		 * suitable to enter. There is *always* at least one
+		 * suitable idle state to pick.
+		 */
 		next_state = cpuidle_select(drv, dev);
 
+		/*
+		 * The idle task must be rescheduled, so it is
+		 * pointless to go idle; just record a zero idle
+		 * residency and get out of this function.
+		 */
 		if (current_clr_polling_and_test()) {
 			dev->last_residency = 0;
 			entered_state = next_state;
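The current_clr_polling_and_test() calls above close a race between going idle and a remote wakeup: the polling flag is cleared first, and only then is the need-resched condition re-checked. Below is a minimal user-space sketch of that ordering using C11 atomics; the names polling, need_resched and clr_polling_and_test() are invented for illustration and are not the kernel's implementation.

	/* Hypothetical model of the clear-then-test idle handshake. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool polling = true; /* wakers may skip the IPI */
	static atomic_bool need_resched;   /* work became pending */

	static bool clr_polling_and_test(void)
	{
		atomic_store(&polling, false);
		/* Order the store before the re-check of pending work. */
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load(&need_resched);
	}

	int main(void)
	{
		if (clr_polling_and_test()) {
			/* Work arrived while preparing to idle: bail out. */
			atomic_store(&polling, true);
			puts("reschedule requested, not idling");
			return 0;
		}
		puts("no work pending, safe to enter idle");
		return 0;
	}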
@@ -100,6 +128,14 @@ static int cpuidle_idle_call(void)
 				       CPUIDLE_FLAG_TIMER_STOP);
 
 			if (broadcast)
+				/*
+				 * Tell the time framework to switch
+				 * to a broadcast timer because our
+				 * local timer will be shut down. If
+				 * another cpu's local timer serves
+				 * as the broadcast timer, this call
+				 * may fail when it is unavailable.
+				 */
 				ret = clockevents_notify(
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
 					&dev->cpu);
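The BROADCAST_ENTER notification just above can fail, which is why its return value feeds into ret before the state is entered. A hedged sketch of that enter/exit pairing follows; broadcast_enter() and broadcast_exit() are stand-ins invented here, not the clockevents API.

	/* Illustrative pairing of a fallible enter with a mandatory exit. */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool broadcast_timer_available = true;

	static int broadcast_enter(int cpu)
	{
		(void)cpu;
		/* Fails when no broadcast device can cover this cpu. */
		return broadcast_timer_available ? 0 : -EBUSY;
	}

	static void broadcast_exit(int cpu)
	{
		(void)cpu;
		/* Switch back to the local timer on wakeup. */
	}

	int main(void)
	{
		int cpu = 0;

		if (broadcast_enter(cpu)) {
			puts("no broadcast timer, pick a shallower state");
			return 1;
		}
		/* ... deep idle: the local timer is shut down here ... */
		broadcast_exit(cpu);
		return 0;
	}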
@@ -107,6 +143,14 @@ static int cpuidle_idle_call(void)
 			if (!ret) {
 				trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
+				/*
+				 * Enter the idle state previously
+				 * chosen by the governor. This
+				 * function will block until an
+				 * interrupt occurs and will take
+				 * care of re-enabling the local
+				 * interrupts.
+				 */
 				entered_state = cpuidle_enter(drv, dev,
 							      next_state);
 
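The comment above pins down the calling convention of cpuidle_enter(): invoked with local interrupts off, it blocks until a wakeup and returns with them on again. The toy model below captures just that contract; enter_state_sim() and the irq helpers are invented for the sketch.

	/* Hypothetical model of "enter with irqs off, return with irqs on". */
	#include <assert.h>
	#include <stdbool.h>

	static bool irqs_enabled;

	static void local_irq_disable_sim(void) { irqs_enabled = false; }
	static void local_irq_enable_sim(void)  { irqs_enabled = true; }

	static int enter_state_sim(int state)
	{
		/* ... hardware sleeps here until an interrupt fires ... */
		local_irq_enable_sim();	/* the idle routine's duty */
		return state;
	}

	int main(void)
	{
		local_irq_disable_sim();
		int entered = enter_state_sim(1);
		/* The contract: we come back with interrupts enabled. */
		assert(irqs_enabled && entered == 1);
		return 0;
	}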
@@ -118,17 +162,28 @@ static int cpuidle_idle_call(void)
 						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
 						&dev->cpu);
 
-				/* give the governor an opportunity to reflect on the outcome */
+				/*
+				 * Give the governor an opportunity to reflect
+				 * on the outcome.
+				 */
 				cpuidle_reflect(dev, entered_state);
 			}
 		}
 	}
 
+	/*
+	 * We can't use the cpuidle framework, let's fall back to
+	 * the default idle routine.
+	 */
 	if (ret)
 		arch_cpu_idle();
 
 	__current_set_polling();
 
+	/*
+	 * The idle functions are expected to re-enable the local
+	 * interrupts; warn if one of them did not.
+	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
 
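Seen as a whole, cpuidle_idle_call() is a select/enter/reflect loop: the governor proposes a state, the hardware enters it, and the measured residency is fed back through cpuidle_reflect() to sharpen the next decision. A toy feedback loop in that spirit; real governors such as menu are far more elaborate, and every name below is invented.

	/* Toy governor: pick deeper states only after long residencies. */
	#include <stdio.h>

	enum { STATE_SHALLOW, STATE_DEEP };

	static int last_residency_us;

	static int toy_select(void)
	{
		return last_residency_us >= 100 ? STATE_DEEP : STATE_SHALLOW;
	}

	/* "Hardware": pretend each state sleeps for a fixed time. */
	static int toy_enter(int state)
	{
		return state == STATE_DEEP ? 500 : 100;
	}

	/* Feed the measured residency back into the next decision. */
	static void toy_reflect(int residency_us)
	{
		last_residency_us = residency_us;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			int state = toy_select();
			int slept = toy_enter(state);

			toy_reflect(slept);
			printf("iteration %d: state %d, slept %d us\n",
			       i, state, slept);
		}
		return 0;
	}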