/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"
  22. #define MAX_CPUS 2
  23. /* Machine specific information */
  24. struct idle_statedata {
  25. u32 cpu_state;
  26. u32 mpu_logic_state;
  27. u32 mpu_state;
  28. };
  29. static struct idle_statedata omap4_idle_data[] = {
  30. {
  31. .cpu_state = PWRDM_POWER_ON,
  32. .mpu_state = PWRDM_POWER_ON,
  33. .mpu_logic_state = PWRDM_POWER_RET,
  34. },
  35. {
  36. .cpu_state = PWRDM_POWER_OFF,
  37. .mpu_state = PWRDM_POWER_RET,
  38. .mpu_logic_state = PWRDM_POWER_RET,
  39. },
  40. {
  41. .cpu_state = PWRDM_POWER_OFF,
  42. .mpu_state = PWRDM_POWER_RET,
  43. .mpu_logic_state = PWRDM_POWER_OFF,
  44. },
  45. };
  46. static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
  47. static struct clockdomain *cpu_clkdm[MAX_CPUS];
  48. static atomic_t abort_barrier;
  49. static bool cpu_done[MAX_CPUS];
  50. static struct idle_statedata *state_ptr = &omap4_idle_data[0];
  51. /* Private functions */
  52. /**
  53. * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
  54. * @dev: cpuidle device
  55. * @drv: cpuidle driver
  56. * @index: the index of state to be entered
  57. *
  58. * Called from the CPUidle framework to program the device to the
  59. * specified low power state selected by the governor.
  60. * Returns the amount of time spent in the low power state.
  61. */
  62. static int omap_enter_idle_simple(struct cpuidle_device *dev,
  63. struct cpuidle_driver *drv,
  64. int index)
  65. {
  66. omap_do_wfi();
  67. return index;
  68. }
  69. static int omap_enter_idle_coupled(struct cpuidle_device *dev,
  70. struct cpuidle_driver *drv,
  71. int index)
  72. {
  73. struct idle_statedata *cx = state_ptr + index;
  74. u32 mpuss_can_lose_context = 0;
  75. int cpu_id = smp_processor_id();
  76. /*
  77. * CPU0 has to wait and stay ON until CPU1 is OFF state.
  78. * This is necessary to honour hardware recommondation
  79. * of triggeing all the possible low power modes once CPU1 is
  80. * out of coherency and in OFF mode.
  81. */
  82. if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
  83. while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
  84. cpu_relax();
  85. /*
  86. * CPU1 could have already entered & exited idle
  87. * without hitting off because of a wakeup
  88. * or a failed attempt to hit off mode. Check for
  89. * that here, otherwise we could spin forever
  90. * waiting for CPU1 off.
  91. */
  92. if (cpu_done[1])
  93. goto fail;
  94. }
  95. }
  96. mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
  97. (cx->mpu_logic_state == PWRDM_POWER_OFF);
  98. clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
  99. /*
  100. * Call idle CPU PM enter notifier chain so that
  101. * VFP and per CPU interrupt context is saved.
  102. */
  103. cpu_pm_enter();
  104. if (dev->cpu == 0) {
  105. pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
  106. omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
  107. /*
  108. * Call idle CPU cluster PM enter notifier chain
  109. * to save GIC and wakeupgen context.
  110. */
  111. if (mpuss_can_lose_context)
  112. cpu_cluster_pm_enter();
  113. }
  114. omap4_enter_lowpower(dev->cpu, cx->cpu_state);
  115. cpu_done[dev->cpu] = true;
  116. /* Wakeup CPU1 only if it is not offlined */
  117. if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
  118. if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
  119. mpuss_can_lose_context)
  120. gic_dist_disable();
  121. clkdm_wakeup(cpu_clkdm[1]);
  122. omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
  123. clkdm_allow_idle(cpu_clkdm[1]);
  124. if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
  125. mpuss_can_lose_context) {
  126. while (gic_dist_disabled()) {
  127. udelay(1);
  128. cpu_relax();
  129. }
  130. gic_timer_retrigger();
  131. }
  132. }
  133. /*
  134. * Call idle CPU PM exit notifier chain to restore
  135. * VFP and per CPU IRQ context.
  136. */
  137. cpu_pm_exit();
  138. /*
  139. * Call idle CPU cluster PM exit notifier chain
  140. * to restore GIC and wakeupgen context.
  141. */
  142. if (dev->cpu == 0 && mpuss_can_lose_context)
  143. cpu_cluster_pm_exit();
  144. clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
  145. fail:
  146. cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
  147. cpu_done[dev->cpu] = false;
  148. return index;
  149. }
  150. /*
  151. * For each cpu, setup the broadcast timer because local timers
  152. * stops for the states above C1.
  153. */
  154. static void omap_setup_broadcast_timer(void *arg)
  155. {
  156. int cpu = smp_processor_id();
  157. clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
  158. }
  159. static struct cpuidle_driver omap4_idle_driver = {
  160. .name = "omap4_idle",
  161. .owner = THIS_MODULE,
  162. .states = {
  163. {
  164. /* C1 - CPU0 ON + CPU1 ON + MPU ON */
  165. .exit_latency = 2 + 2,
  166. .target_residency = 5,
  167. .enter = omap_enter_idle_simple,
  168. .name = "C1",
  169. .desc = "CPUx ON, MPUSS ON"
  170. },
  171. {
  172. /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
  173. .exit_latency = 328 + 440,
  174. .target_residency = 960,
  175. .flags = CPUIDLE_FLAG_COUPLED,
  176. .enter = omap_enter_idle_coupled,
  177. .name = "C2",
  178. .desc = "CPUx OFF, MPUSS CSWR",
  179. },
  180. {
  181. /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
  182. .exit_latency = 460 + 518,
  183. .target_residency = 1100,
  184. .flags = CPUIDLE_FLAG_COUPLED,
  185. .enter = omap_enter_idle_coupled,
  186. .name = "C3",
  187. .desc = "CPUx OFF, MPUSS OSWR",
  188. },
  189. },
  190. .state_count = ARRAY_SIZE(omap4_idle_data),
  191. .safe_state_index = 0,
  192. };
  193. /* Public functions */
  194. /**
  195. * omap4_idle_init - Init routine for OMAP4+ idle
  196. *
  197. * Registers the OMAP4+ specific cpuidle driver to the cpuidle
  198. * framework with the valid set of states.
  199. */
  200. int __init omap4_idle_init(void)
  201. {
  202. mpu_pd = pwrdm_lookup("mpu_pwrdm");
  203. cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
  204. cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
  205. if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
  206. return -ENODEV;
  207. cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
  208. cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
  209. if (!cpu_clkdm[0] || !cpu_clkdm[1])
  210. return -ENODEV;
  211. /* Configure the broadcast timer on each cpu */
  212. on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
  213. return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
  214. }