@@ -73,7 +73,7 @@ static int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-	int next_state, entered_state, ret;
+	int next_state, entered_state;
 	bool broadcast;
 
 	/*
@@ -102,90 +102,75 @@ static int cpuidle_idle_call(void)
 	 * Check if the cpuidle framework is ready, otherwise fallback
 	 * to the default arch specific idle method
 	 */
-	ret = cpuidle_enabled(drv, dev);
-
-	if (!ret) {
-		/*
-		 * Ask the governor to choose an idle state it thinks
-		 * it is convenient to go to. There is *always* a
-		 * convenient idle state
-		 */
-		next_state = cpuidle_select(drv, dev);
-
+	if (cpuidle_enabled(drv, dev)) {
+use_default:
 		/*
-		 * The idle task must be scheduled, it is pointless to
-		 * go to idle, just update no idle residency and get
-		 * out of this function
+		 * We can't use the cpuidle framework, let's use the default
+		 * idle routine.
 		 */
-		if (current_clr_polling_and_test()) {
-			dev->last_residency = 0;
-			entered_state = next_state;
+		if (current_clr_polling_and_test())
 			local_irq_enable();
-		} else {
-			broadcast = !!(drv->states[next_state].flags &
-				       CPUIDLE_FLAG_TIMER_STOP);
-
-			if (broadcast) {
-				/*
-				 * Tell the time framework to switch
-				 * to a broadcast timer because our
-				 * local timer will be shutdown. If a
-				 * local timer is used from another
-				 * cpu as a broadcast timer, this call
-				 * may fail if it is not available
-				 */
-				ret = clockevents_notify(
-					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-					&dev->cpu);
-			}
-
-			if (!ret) {
-				trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
-				/*
-				 * Enter the idle state previously
-				 * returned by the governor
-				 * decision. This function will block
-				 * until an interrupt occurs and will
-				 * take care of re-enabling the local
-				 * interrupts
-				 */
-				entered_state = cpuidle_enter(drv, dev,
-							      next_state);
-
-				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
-						       dev->cpu);
-
-				if (broadcast)
-					clockevents_notify(
-						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-						&dev->cpu);
-
-				/*
-				 * Give the governor an opportunity to reflect on the
-				 * outcome
-				 */
-				cpuidle_reflect(dev, entered_state);
-			}
-		}
+		else
+			arch_cpu_idle();
+
+		goto exit_idle;
 	}
 
 	/*
-	 * We can't use the cpuidle framework, let's use the default
-	 * idle routine
+	 * Ask the governor to choose an idle state it thinks
+	 * it is convenient to go to. There is *always* a
+	 * convenient idle state
 	 */
-	if (ret) {
-		if (!current_clr_polling_and_test())
-			arch_cpu_idle();
-		else
-			local_irq_enable();
+	next_state = cpuidle_select(drv, dev);
+
+	/*
+	 * The idle task must be scheduled, it is pointless to
+	 * go to idle, just update no idle residency and get
+	 * out of this function
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		entered_state = next_state;
+		local_irq_enable();
+		goto exit_idle;
 	}
 
+	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+	/*
+	 * Tell the time framework to switch to a broadcast timer
+	 * because our local timer will be shutdown. If a local timer
+	 * is used from another cpu as a broadcast timer, this call may
+	 * fail if it is not available
+	 */
+	if (broadcast &&
+	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+		goto use_default;
+
+	trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	entered_state = cpuidle_enter(drv, dev, next_state);
+
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
+	if (broadcast)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+	/*
+	 * Give the governor an opportunity to reflect on the outcome
+	 */
+	cpuidle_reflect(dev, entered_state);
+
+exit_idle:
 	__current_set_polling();
 
 	/*
-	 * It is up to the idle functions to enable back the local
-	 * interrupt
+	 * It is up to the idle functions to reenable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();