powernv-cpufreq.c

/*
 * POWERNV cpufreq driver for the IBM POWER processors
 *
 * (C) Copyright IBM 2014
 *
 * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)     "powernv-cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>

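/*
 * Register layout assumed by the pstate helpers below (bit numbers are
 * LSB-0): the local pstate id is read from PMSR bits 48..55, the global
 * and local pstate ids are written to PMCR bits 56..63 and 48..55,
 * PMSR_MAX() extracts the Pmax-capped pstate from bits 32..39, and the
 * Psafe (bit 30) and SPR_EM_DISABLE (bit 31) flags indicate
 * firmware-imposed limits on frequency control.
 */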
#define POWERNV_MAX_PSTATES     256
#define PMSR_PSAFE_ENABLE       (1UL << 30)
#define PMSR_SPR_EM_DISABLE     (1UL << 31)
#define PMSR_MAX(x)             ((x >> 32) & 0xFF)

static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;

static struct chip {
        unsigned int id;
        bool throttled;
        cpumask_t mask;
        struct work_struct throttle;
        bool restore;
} *chips;

static int nr_chips;

/*
 * Note: The set of pstates consists of contiguous integers, the
 * smallest of which is indicated by powernv_pstate_info.min, the
 * largest of which is indicated by powernv_pstate_info.max.
 *
 * The nominal pstate is the highest non-turbo pstate in this
 * platform. This is indicated by powernv_pstate_info.nominal.
 */
static struct powernv_pstate_info {
        int min;
        int max;
        int nominal;
        int nr_pstates;
} powernv_pstate_info;

/*
 * Initialize the freq table based on data obtained
 * from the firmware passed via device-tree
 */
static int init_powernv_pstates(void)
{
        struct device_node *power_mgt;
        int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
        const __be32 *pstate_ids, *pstate_freqs;
        u32 len_ids, len_freqs;

        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("power-mgt node not found\n");
                return -ENODEV;
        }

        if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
                pr_warn("ibm,pstate-min node not found\n");
                return -ENODEV;
        }

        if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
                pr_warn("ibm,pstate-max node not found\n");
                return -ENODEV;
        }

        if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
                                 &pstate_nominal)) {
                pr_warn("ibm,pstate-nominal not found\n");
                return -ENODEV;
        }
        pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
                pstate_nominal, pstate_max);

        pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
        if (!pstate_ids) {
                pr_warn("ibm,pstate-ids not found\n");
                return -ENODEV;
        }

        pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
                                       &len_freqs);
        if (!pstate_freqs) {
                pr_warn("ibm,pstate-frequencies-mhz not found\n");
                return -ENODEV;
        }

        if (len_ids != len_freqs) {
                pr_warn("Entries in ibm,pstate-ids and "
                        "ibm,pstate-frequencies-mhz do not match\n");
        }

        nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
        if (!nr_pstates) {
                pr_warn("No PStates found\n");
                return -ENODEV;
        }

        pr_debug("NR PStates %d\n", nr_pstates);
        for (i = 0; i < nr_pstates; i++) {
                u32 id = be32_to_cpu(pstate_ids[i]);
                u32 freq = be32_to_cpu(pstate_freqs[i]);

                pr_debug("PState id %d freq %d MHz\n", id, freq);
                powernv_freqs[i].frequency = freq * 1000; /* kHz */
                powernv_freqs[i].driver_data = id;
        }
        /* End of list marker entry */
        powernv_freqs[i].frequency = CPUFREQ_TABLE_END;

        powernv_pstate_info.min = pstate_min;
        powernv_pstate_info.max = pstate_max;
        powernv_pstate_info.nominal = pstate_nominal;
        powernv_pstate_info.nr_pstates = nr_pstates;

        return 0;
}

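/*
 * Note: powernv_freqs[] is filled in the device-tree order of
 * ibm,pstate-ids, which this driver assumes runs from the highest
 * (fastest) pstate downwards. Hence entry i corresponds to pstate id
 * (powernv_pstate_info.max - i); pstate_id_to_freq() and
 * get_nominal_index() rely on this mapping.
 */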
/* Returns the CPU frequency corresponding to the pstate_id. */
static unsigned int pstate_id_to_freq(int pstate_id)
{
        int i;

        i = powernv_pstate_info.max - pstate_id;
        if (i >= powernv_pstate_info.nr_pstates || i < 0) {
                pr_warn("PState id %d outside of PState table, "
                        "reporting nominal id %d instead\n",
                        pstate_id, powernv_pstate_info.nominal);
                i = powernv_pstate_info.max - powernv_pstate_info.nominal;
        }

        return powernv_freqs[i].frequency;
}

/*
 * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
 * the firmware
 */
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
                                         char *buf)
{
        return sprintf(buf, "%u\n",
                pstate_id_to_freq(powernv_pstate_info.nominal));
}

struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
        __ATTR_RO(cpuinfo_nominal_freq);

static struct freq_attr *powernv_cpu_freq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_cpuinfo_nominal_freq,
        NULL,
};

/* Helper routines */

/* Access helpers to power mgt SPR */

static inline unsigned long get_pmspr(unsigned long sprn)
{
        switch (sprn) {
        case SPRN_PMCR:
                return mfspr(SPRN_PMCR);

        case SPRN_PMICR:
                return mfspr(SPRN_PMICR);

        case SPRN_PMSR:
                return mfspr(SPRN_PMSR);
        }
        BUG();
}

static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
        switch (sprn) {
        case SPRN_PMCR:
                mtspr(SPRN_PMCR, val);
                return;

        case SPRN_PMICR:
                mtspr(SPRN_PMICR, val);
                return;
        }
        BUG();
}

/*
 * Use objects of this type to query/update
 * pstates on a remote CPU via smp_call_function.
 */
struct powernv_smp_call_data {
        unsigned int freq;
        int pstate_id;
};

/*
 * powernv_read_cpu_freq: Reads the current frequency on this CPU.
 *
 * Called via smp_call_function.
 *
 * Note: The caller of the smp_call_function should pass an argument of
 * the type 'struct powernv_smp_call_data *' along with this function.
 *
 * The current frequency on this CPU will be returned via
 * ((struct powernv_smp_call_data *)arg)->freq;
 */
static void powernv_read_cpu_freq(void *arg)
{
        unsigned long pmspr_val;
        s8 local_pstate_id;
        struct powernv_smp_call_data *freq_data = arg;

        pmspr_val = get_pmspr(SPRN_PMSR);

        /*
         * The local pstate id corresponds to bits 48..55 in the PMSR.
         * Note: Watch out for the sign!
         */
        local_pstate_id = (pmspr_val >> 48) & 0xFF;
        freq_data->pstate_id = local_pstate_id;
        freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);

        pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
                raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
                freq_data->freq);
}

/*
 * powernv_cpufreq_get: Returns the CPU frequency as reported by the
 * firmware for CPU 'cpu'. This value is reported through the sysfs
 * file cpuinfo_cur_freq.
 */
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
        struct powernv_smp_call_data freq_data;

        smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
                              &freq_data, 1);

        return freq_data.freq;
}

/*
 * set_pstate: Sets the pstate on this CPU.
 *
 * This is called via an smp_call_function.
 *
 * The caller must ensure that freq_data is of the type
 * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
 * on this CPU should be present in freq_data->pstate_id.
 */
static void set_pstate(void *freq_data)
{
        unsigned long val;
        unsigned long pstate_ul =
                ((struct powernv_smp_call_data *) freq_data)->pstate_id;

        val = get_pmspr(SPRN_PMCR);
        val = val & 0x0000FFFFFFFFFFFFULL;

        pstate_ul = pstate_ul & 0xFF;

        /* Set both global(bits 56..63) and local(bits 48..55) PStates */
        val = val | (pstate_ul << 56) | (pstate_ul << 48);

        pr_debug("Setting cpu %d pmcr to %016lX\n",
                 raw_smp_processor_id(), val);

        set_pmspr(SPRN_PMCR, val);
}

/*
 * get_nominal_index: Returns the index corresponding to the nominal
 * pstate in the cpufreq table
 */
static inline unsigned int get_nominal_index(void)
{
        return powernv_pstate_info.max - powernv_pstate_info.nominal;
}

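/*
 * powernv_cpufreq_throttle_check: Reads the PMSR on the calling CPU and
 * reports firmware-imposed limits: updates the per-chip throttled state
 * when Pmax is capped below the platform maximum, and sets the global
 * 'throttled' flag when Psafe mode is active or frequency control has
 * been disabled (SPR_EM_DISABLE).
 */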
static void powernv_cpufreq_throttle_check(void *data)
{
        unsigned int cpu = smp_processor_id();
        unsigned long pmsr;
        int pmsr_pmax, i;

        pmsr = get_pmspr(SPRN_PMSR);

        for (i = 0; i < nr_chips; i++)
                if (chips[i].id == cpu_to_chip_id(cpu))
                        break;

        /* Check for Pmax Capping */
        pmsr_pmax = (s8)PMSR_MAX(pmsr);
        if (pmsr_pmax != powernv_pstate_info.max) {
                if (chips[i].throttled)
                        goto next;
                chips[i].throttled = true;
                pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
                        chips[i].id, pmsr_pmax);
        } else if (chips[i].throttled) {
                chips[i].throttled = false;
                pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
                        chips[i].id, pmsr_pmax);
        }

        /* Check if Psafe_mode_active is set in PMSR. */
next:
        if (pmsr & PMSR_PSAFE_ENABLE) {
                throttled = true;
                pr_info("Pstate set to safe frequency\n");
        }

        /* Check if SPR_EM_DISABLE is set in PMSR */
        if (pmsr & PMSR_SPR_EM_DISABLE) {
                throttled = true;
                pr_info("Frequency Control disabled from OS\n");
        }

        if (throttled) {
                pr_info("PMSR = %16lx\n", pmsr);
                pr_crit("CPU Frequency could be throttled\n");
        }
}

/*
 * powernv_cpufreq_target_index: Sets the frequency corresponding to
 * the cpufreq table entry indexed by new_index on the cpus in the
 * mask policy->cpus
 */
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
                                        unsigned int new_index)
{
        struct powernv_smp_call_data freq_data;

        if (unlikely(rebooting) && new_index != get_nominal_index())
                return 0;

        if (!throttled)
                powernv_cpufreq_throttle_check(NULL);

        freq_data.pstate_id = powernv_freqs[new_index].driver_data;

        /*
         * Use smp_call_function to send IPI and execute the
         * mtspr on target CPU. We could do that without IPI
         * if current CPU is within policy->cpus (core)
         */
        smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);

        return 0;
}

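/*
 * powernv_cpufreq_cpu_init: Per-policy init. Pstates are managed at the
 * core level, so all sibling threads of policy->cpu are placed in one
 * policy before the frequency table is validated.
 */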
static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        int base, i;

        base = cpu_first_thread_sibling(policy->cpu);

        for (i = 0; i < threads_per_core; i++)
                cpumask_set_cpu(base + i, policy->cpus);

        return cpufreq_table_validate_and_show(policy, powernv_freqs);
}

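/*
 * powernv_cpufreq_reboot_notifier: On reboot, pin every online CPU to the
 * nominal pstate and block further frequency changes via the 'rebooting'
 * flag checked in the target_index callback.
 */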
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
{
        int cpu;
        struct cpufreq_policy cpu_policy;

        rebooting = true;
        for_each_online_cpu(cpu) {
                cpufreq_get_policy(&cpu_policy, cpu);
                powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
        }

        return NOTIFY_DONE;
}

static struct notifier_block powernv_cpufreq_reboot_nb = {
        .notifier_call = powernv_cpufreq_reboot_notifier,
};

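/*
 * powernv_cpufreq_work_fn: Per-chip work scheduled from the OCC message
 * handler. Re-runs the throttle check on one CPU of the chip and, when
 * throttling has been lifted (chip->restore), re-applies the current
 * policy frequency on every online core of that chip.
 */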
static void powernv_cpufreq_work_fn(struct work_struct *work)
{
        struct chip *chip = container_of(work, struct chip, throttle);
        unsigned int cpu;
        cpumask_var_t mask;

        smp_call_function_any(&chip->mask,
                              powernv_cpufreq_throttle_check, NULL, 0);

        if (!chip->restore)
                return;

        /*
         * Allocate the cpumask before use: with CONFIG_CPUMASK_OFFSTACK a
         * bare cpumask_var_t is only a pointer and must not be dereferenced
         * uninitialized.
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;

        chip->restore = false;
        cpumask_copy(mask, &chip->mask);
        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                int index, tcpu;
                struct cpufreq_policy policy;

                cpufreq_get_policy(&policy, cpu);
                cpufreq_frequency_table_target(&policy, policy.freq_table,
                                               policy.cur,
                                               CPUFREQ_RELATION_C, &index);
                powernv_cpufreq_target_index(&policy, index);
                for_each_cpu(tcpu, policy.cpus)
                        cpumask_clear_cpu(tcpu, mask);
        }
        free_cpumask_var(mask);
}

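/* Throttle reason strings, indexed by the OCC throttle_status value */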
static char throttle_reason[][30] = {
        "No throttling",
        "Power Cap",
        "Processor Over Temperature",
        "Power Supply Failure",
        "Over Current",
        "OCC Reset"
};

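/*
 * powernv_cpufreq_occ_msg: OPAL notifier callback for OCC (On Chip
 * Controller) messages. Tracks OCC reset/load/throttle events, updates
 * the global throttle state and schedules the per-chip throttle work.
 */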
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                                   unsigned long msg_type, void *_msg)
{
        struct opal_msg *msg = _msg;
        struct opal_occ_msg omsg;
        int i;

        if (msg_type != OPAL_MSG_OCC)
                return 0;

        omsg.type = be64_to_cpu(msg->params[0]);

        switch (omsg.type) {
        case OCC_RESET:
                occ_reset = true;
                pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
                /*
                 * powernv_cpufreq_throttle_check() is called in
                 * target() callback which can detect the throttle state
                 * for governors like ondemand.
                 * But static governors will not call target() often thus
                 * report throttling here.
                 */
                if (!throttled) {
                        throttled = true;
                        pr_crit("CPU frequency is throttled for duration\n");
                }
                break;
        case OCC_LOAD:
                pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
                break;
        case OCC_THROTTLE:
                omsg.chip = be64_to_cpu(msg->params[1]);
                omsg.throttle_status = be64_to_cpu(msg->params[2]);

                if (occ_reset) {
                        occ_reset = false;
                        throttled = false;
                        pr_info("OCC Active, CPU frequency is no longer throttled\n");

                        for (i = 0; i < nr_chips; i++) {
                                chips[i].restore = true;
                                schedule_work(&chips[i].throttle);
                        }

                        return 0;
                }

                if (omsg.throttle_status &&
                    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
                        pr_info("OCC: Chip %u Pmax reduced due to %s\n",
                                (unsigned int)omsg.chip,
                                throttle_reason[omsg.throttle_status]);
                else if (!omsg.throttle_status)
                        pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
                                throttle_reason[omsg.throttle_status]);
                else
                        return 0;

                for (i = 0; i < nr_chips; i++)
                        if (chips[i].id == omsg.chip) {
                                if (!omsg.throttle_status)
                                        chips[i].restore = true;
                                schedule_work(&chips[i].throttle);
                        }
        }
        return 0;
}

static struct notifier_block powernv_cpufreq_opal_nb = {
        .notifier_call  = powernv_cpufreq_occ_msg,
        .next           = NULL,
        .priority       = 0,
};

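/*
 * powernv_cpufreq_stop_cpu: Driver stop_cpu callback; parks the CPU at
 * the minimum pstate when it is taken out of service.
 */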
static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        struct powernv_smp_call_data freq_data;

        freq_data.pstate_id = powernv_pstate_info.min;
        smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
}

static struct cpufreq_driver powernv_cpufreq_driver = {
        .name           = "powernv-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = powernv_cpufreq_cpu_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = powernv_cpufreq_target_index,
        .get            = powernv_cpufreq_get,
        .stop_cpu       = powernv_cpufreq_stop_cpu,
        .attr           = powernv_cpu_freq_attr,
};

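/*
 * init_chip_info: Build the per-chip table (id, CPU mask, throttle work)
 * used to track Pmax capping per chip. Assumes CPUs belonging to the same
 * chip are numbered contiguously.
 */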
static int init_chip_info(void)
{
        unsigned int chip[256];
        unsigned int cpu, i;
        unsigned int prev_chip_id = UINT_MAX;

        for_each_possible_cpu(cpu) {
                unsigned int id = cpu_to_chip_id(cpu);

                if (prev_chip_id != id) {
                        prev_chip_id = id;
                        chip[nr_chips++] = id;
                }
        }

        chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
        if (!chips)
                return -ENOMEM;

        for (i = 0; i < nr_chips; i++) {
                chips[i].id = chip[i];
                chips[i].throttled = false;
                cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
                INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
                chips[i].restore = false;
        }

        return 0;
}

static int __init powernv_cpufreq_init(void)
{
        int rc = 0;

        /* Don't probe on pseries (guest) platforms */
        if (!firmware_has_feature(FW_FEATURE_OPALv3))
                return -ENODEV;

        /* Discover pstates from device tree and init */
        rc = init_powernv_pstates();
        if (rc) {
                pr_info("powernv-cpufreq disabled. System does not support PState control\n");
                return rc;
        }

        /* Populate chip info */
        rc = init_chip_info();
        if (rc)
                return rc;

        register_reboot_notifier(&powernv_cpufreq_reboot_nb);
        opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
        return cpufreq_register_driver(&powernv_cpufreq_driver);
}
module_init(powernv_cpufreq_init);

static void __exit powernv_cpufreq_exit(void)
{
        unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
        opal_message_notifier_unregister(OPAL_MSG_OCC,
                                         &powernv_cpufreq_opal_nb);
        cpufreq_unregister_driver(&powernv_cpufreq_driver);
}
module_exit(powernv_cpufreq_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");