/*
 *  cpuidle-powernv - idle state cpuidle driver.
 *  Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/cpuidle.h>
/*
 * Expose only those Hardware idle states via the cpuidle framework
 * that have latency value below POWERNV_THRESHOLD_LATENCY_NS.
 */
#define POWERNV_THRESHOLD_LATENCY_NS 200000

static struct cpuidle_driver powernv_idle_driver = {
	.name  = "powernv_idle",
	.owner = THIS_MODULE,
};
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
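
/*
 * PSSCR value and mask used to request a given stop state; indexed by
 * cpuidle state number, filled in by add_powernv_state() and consumed
 * by stop_loop().
 */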
struct stop_psscr_table {
	u64 val;
	u64 mask;
};

static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX];

static u64 snooze_timeout;
static bool snooze_timeout_en;
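
/*
 * Snooze is a software polling loop: spin at low SMT priority until a
 * reschedule is needed.  When deeper states exist, give up after
 * snooze_timeout timebase ticks (set in powernv_idle_probe() from the
 * next state's target residency) and return to the cpuidle core.
 */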
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
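
/*
 * Nap: enter the hardware nap state via power7_idle() (POWER7/POWER8)
 * with the runlatch cleared while the thread is idle.
 */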
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}
/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;

	/* Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}
#endif
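
/*
 * Stop: request the stop state described by the PSSCR value/mask that
 * add_powernv_state() recorded for this state index (POWER9).
 */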
static int stop_loop(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv,
		     int index)
{
	ppc64_runlatch_off();
	power9_idle_stop(stop_psscr_table[index].val,
			 stop_psscr_table[index].mask);
	ppc64_runlatch_on();
	return index;
}
/*
 * States for dedicated partition case.
 */
static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = snooze_loop },
};
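
/*
 * CPU hotplug callbacks: enable the per-cpu cpuidle device when a CPU
 * comes online and disable it once the CPU is dead, both under the
 * cpuidle pause/resume lock.
 */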
static int powernv_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}
static int powernv_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}
/*
 * powernv_cpuidle_driver_init(): copy the enabled states from
 * cpuidle_state_table into the driver's state array and restrict the
 * driver's cpumask to present CPUs (see the comment below).
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	/*
	 * On the PowerNV platform cpu_present may be less than cpu_possible in
	 * cases when firmware detects the CPU, but it is not available to the
	 * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
	 * run time and hence cpu_devices are not created for those CPUs by the
	 * generic topology_init().
	 *
	 * drv->cpumask defaults to cpu_possible_mask in
	 * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
	 * cpu_devices are not created for CPUs in cpu_possible_mask that
	 * cannot be hot-added later at run time.
	 *
	 * Trying cpuidle_register_device() on a CPU without a cpu_device is
	 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
	 */
	drv->cpumask = (struct cpumask *)cpu_present_mask;

	return 0;
}
static inline void add_powernv_state(int index, const char *name,
				     unsigned int flags,
				     int (*idle_fn)(struct cpuidle_device *,
						    struct cpuidle_driver *,
						    int),
				     unsigned int target_residency,
				     unsigned int exit_latency,
				     u64 psscr_val, u64 psscr_mask)
{
	strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
	strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
	powernv_states[index].flags = flags;
	powernv_states[index].target_residency = target_residency;
	powernv_states[index].exit_latency = exit_latency;
	powernv_states[index].enter = idle_fn;
	stop_psscr_table[index].val = psscr_val;
	stop_psscr_table[index].mask = psscr_mask;
}
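
/*
 * Parse the idle state properties under /ibm,opal/power-mgt in the
 * device tree and append the platform states after the statically
 * defined snooze state.  Returns the total number of usable states.
 */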
static int powernv_add_idle_states(void)
{
	struct device_node *power_mgt;
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	u32 latency_ns[CPUIDLE_STATE_MAX];
	u32 residency_ns[CPUIDLE_STATE_MAX];
	u32 flags[CPUIDLE_STATE_MAX];
	u64 psscr_val[CPUIDLE_STATE_MAX];
	u64 psscr_mask[CPUIDLE_STATE_MAX];
	const char *names[CPUIDLE_STATE_MAX];
	u32 has_stop_states = 0;
	int i, rc;

	/* Currently we have snooze statically defined */

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}

	/* Read values of any property to determine the num of idle states */
	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	/*
	 * Since snooze is used as first idle state, max idle states allowed is
	 * CPUIDLE_STATE_MAX - 1
	 */
	if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
		pr_warn("cpuidle-powernv: discovered more idle states than allowed\n");
		dt_idle_states = CPUIDLE_STATE_MAX - 1;
	}

	if (of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out;
	}

	if (of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-latencies-ns", latency_ns,
			dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		goto out;
	}

	if (of_property_read_string_array(power_mgt,
			"ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
		goto out;
	}

	/*
	 * If the idle states use stop instruction, probe for psscr values
	 * and psscr mask which are necessary to specify required stop level.
	 */
	has_stop_states = (flags[0] &
			   (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
	if (has_stop_states) {
		if (of_property_read_u64_array(power_mgt,
				"ibm,cpu-idle-state-psscr", psscr_val,
				dt_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
			goto out;
		}

		if (of_property_read_u64_array(power_mgt,
				"ibm,cpu-idle-state-psscr-mask", psscr_mask,
				dt_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
			goto out;
		}
	}
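
	/*
	 * The residency property is optional: rc records whether it was
	 * found so that default residencies can be applied below when it
	 * is absent.
	 */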
	rc = of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-residency-ns", residency_ns,
			dt_idle_states);

	for (i = 0; i < dt_idle_states; i++) {
		unsigned int exit_latency, target_residency;
		/*
		 * If an idle state has exit latency beyond
		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
		 * in cpu-idle.
		 */
		if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
			continue;
		/*
		 * Firmware passes residency and latency values in ns.
		 * cpuidle expects it in us.
		 */
		exit_latency = latency_ns[i] / 1000;
		if (!rc)
			target_residency = residency_ns[i] / 1000;
		else
			target_residency = 0;

		if (has_stop_states) {
			int err = validate_psscr_val_mask(&psscr_val[i],
							  &psscr_mask[i],
							  flags[i]);
			if (err) {
				report_invalid_psscr_val(psscr_val[i], err);
				continue;
			}
		}

		/*
		 * For nap and fastsleep, use default target_residency
		 * values if f/w does not expose it.
		 */
		if (flags[i] & OPAL_PM_NAP_ENABLED) {
			if (!rc)
				target_residency = 100;
			/* Add NAP state */
			add_powernv_state(nr_idle_states, "Nap",
					  CPUIDLE_FLAG_NONE, nap_loop,
					  target_residency, exit_latency, 0, 0);
		} else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
				!(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
			add_powernv_state(nr_idle_states, names[i],
					  CPUIDLE_FLAG_NONE, stop_loop,
					  target_residency, exit_latency,
					  psscr_val[i], psscr_mask[i]);
		}

		/*
		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
		 * within this config dependency check.
		 */
#ifdef CONFIG_TICK_ONESHOT
		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
		    flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
			if (!rc)
				target_residency = 300000;
			/* Add FASTSLEEP state */
			add_powernv_state(nr_idle_states, "FastSleep",
					  CPUIDLE_FLAG_TIMER_STOP,
					  fastsleep_loop,
					  target_residency, exit_latency, 0, 0);
		} else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
				(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
			add_powernv_state(nr_idle_states, names[i],
					  CPUIDLE_FLAG_TIMER_STOP, stop_loop,
					  target_residency, exit_latency,
					  psscr_val[i], psscr_mask[i]);
		}
#endif
		nr_idle_states++;
	}
out:
	return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
		if (max_idle_state > 1) {
			snooze_timeout_en = true;
			snooze_timeout = powernv_states[1].target_residency *
					 tb_ticks_per_usec;
		}
	} else
		return -ENODEV;

	return 0;
}
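
/*
 * Module init: probe for OPAL idle states, register the cpuidle driver
 * and hook up the CPU hotplug callbacks that enable/disable the per-cpu
 * cpuidle devices.
 */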
static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/powernv:online",
					   powernv_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/powernv:dead", NULL,
					   powernv_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);