of.c

/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
{
        struct opp_table *opp_table;

        list_for_each_entry_rcu(opp_table, &opp_tables, node) {
                if (opp_table->np == np) {
                        /*
                         * Multiple devices can point to the same OPP table and
                         * so will have same node-pointer, np.
                         *
                         * But the OPPs will be considered as shared only if the
                         * OPP table contains a "opp-shared" property.
                         */
                        if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
                                return opp_table;

                        return NULL;
                }
        }

        return NULL;
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
        struct device_node *np;

        /*
         * Only required for backward compatibility with v1 bindings, but isn't
         * harmful for other cases. And so we do it unconditionally.
         */
        np = of_node_get(dev->of_node);
        if (np) {
                u32 val;

                if (!of_property_read_u32(np, "clock-latency", &val))
                        opp_table->clock_latency_ns_max = val;
                of_property_read_u32(np, "voltage-tolerance",
                                     &opp_table->voltage_tolerance_v1);
                of_node_put(np);
        }
}

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
                              struct device_node *np)
{
        unsigned int count = opp_table->supported_hw_count;
        u32 version;
        int ret;

        if (!opp_table->supported_hw)
                return true;

        while (count--) {
                ret = of_property_read_u32_index(np, "opp-supported-hw", count,
                                                 &version);
                if (ret) {
                        dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
                                 __func__, count, ret);
                        return false;
                }

                /* Both of these are bitwise masks of the versions */
                if (!(version & opp_table->supported_hw[count]))
                        return false;
        }

        return true;
}
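
/*
 * Illustrative sketch, not part of the original file: how a SoC driver might
 * feed the "opp-supported-hw" matching implemented above. It assumes the
 * dev_pm_opp_set_supported_hw() helper from the OPP core (its exact signature
 * can differ between kernel versions), and the speed-grade value is a made-up
 * example.
 */
#if 0   /* example only, never built */
static int example_register_opps(struct device *dev)
{
        /* One u32 per level of the hierarchy, each a bitmask of versions */
        u32 versions[] = { BIT(2) };    /* e.g. speed grade read from an eFuse */
        int ret;

        ret = dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
        if (ret)
                return ret;

        /* OPP nodes whose "opp-supported-hw" mask misses BIT(2) are skipped */
        return dev_pm_opp_of_add_table(dev);
}
#endif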

/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
                              struct opp_table *opp_table)
{
        u32 microvolt[3] = {0};
        u32 val;
        int count, ret;
        struct property *prop = NULL;
        char name[NAME_MAX];

        /* Search for "opp-microvolt-<name>" */
        if (opp_table->prop_name) {
                snprintf(name, sizeof(name), "opp-microvolt-%s",
                         opp_table->prop_name);
                prop = of_find_property(opp->np, name, NULL);
        }

        if (!prop) {
                /* Search for "opp-microvolt" */
                sprintf(name, "opp-microvolt");
                prop = of_find_property(opp->np, name, NULL);

                /* Missing property isn't a problem, but an invalid entry is */
                if (!prop)
                        return 0;
        }

        count = of_property_count_u32_elems(opp->np, name);
        if (count < 0) {
                dev_err(dev, "%s: Invalid %s property (%d)\n",
                        __func__, name, count);
                return count;
        }

        /* There can be one or three elements here */
        if (count != 1 && count != 3) {
                dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
                        __func__, name, count);
                return -EINVAL;
        }

        ret = of_property_read_u32_array(opp->np, name, microvolt, count);
        if (ret) {
                dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
                return -EINVAL;
        }

        opp->u_volt = microvolt[0];

        if (count == 1) {
                opp->u_volt_min = opp->u_volt;
                opp->u_volt_max = opp->u_volt;
        } else {
                opp->u_volt_min = microvolt[1];
                opp->u_volt_max = microvolt[2];
        }

        /* Search for "opp-microamp-<name>" */
        prop = NULL;
        if (opp_table->prop_name) {
                snprintf(name, sizeof(name), "opp-microamp-%s",
                         opp_table->prop_name);
                prop = of_find_property(opp->np, name, NULL);
        }

        if (!prop) {
                /* Search for "opp-microamp" */
                sprintf(name, "opp-microamp");
                prop = of_find_property(opp->np, name, NULL);
        }

        if (prop && !of_property_read_u32(opp->np, name, &val))
                opp->u_amp = val;

        return 0;
}
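
/*
 * Illustrative sketch, not part of the original file: how the named variants
 * parsed above ("opp-microvolt-<name>", "opp-microamp-<name>") get selected.
 * It assumes the dev_pm_opp_set_prop_name() helper from the OPP core (its
 * exact signature can differ between kernel versions); the "speed0" string is
 * a made-up example.
 */
#if 0   /* example only, never built */
static int example_pick_named_supplies(struct device *dev)
{
        int ret;

        /* Makes opp_parse_supplies() look for "opp-microvolt-speed0" first */
        ret = dev_pm_opp_set_prop_name(dev, "speed0");
        if (ret)
                return ret;

        return dev_pm_opp_of_add_table(dev);
}
#endif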

/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *                                entries
 * @dev: device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
        _dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
        /*
         * TODO: Support for multiple OPP tables.
         *
         * There should be only ONE phandle present in "operating-points-v2"
         * property.
         */

        return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev: device for which we do this operation
 * @np:  device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0         On success OR
 *           Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST   Freq are same and volt are different OR
 *           Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM   Memory allocation failure
 * -EINVAL   Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
        struct opp_table *opp_table;
        struct dev_pm_opp *new_opp;
        u64 rate;
        u32 val;
        int ret;

        /* Hold our table modification lock here */
        mutex_lock(&opp_table_lock);

        new_opp = _allocate_opp(dev, &opp_table);
        if (!new_opp) {
                ret = -ENOMEM;
                goto unlock;
        }

        ret = of_property_read_u64(np, "opp-hz", &rate);
        if (ret < 0) {
                dev_err(dev, "%s: opp-hz not found\n", __func__);
                goto free_opp;
        }

        /* Check if the OPP supports hardware's hierarchy of versions or not */
        if (!_opp_is_supported(dev, opp_table, np)) {
                dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
                goto free_opp;
        }

        /*
         * Rate is defined as an unsigned long in clk API, and so casting
         * explicitly to its type. Must be fixed once rate is 64 bit
         * guaranteed in clk API.
         */
        new_opp->rate = (unsigned long)rate;
        new_opp->turbo = of_property_read_bool(np, "turbo-mode");
        new_opp->np = np;
        new_opp->dynamic = false;
        new_opp->available = true;

        if (!of_property_read_u32(np, "clock-latency-ns", &val))
                new_opp->clock_latency_ns = val;

        ret = opp_parse_supplies(new_opp, dev, opp_table);
        if (ret)
                goto free_opp;

        ret = _opp_add(dev, new_opp, opp_table);
        if (ret)
                goto free_opp;

        /* OPP to select on device suspend */
        if (of_property_read_bool(np, "opp-suspend")) {
                if (opp_table->suspend_opp) {
                        dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
                                 __func__, opp_table->suspend_opp->rate,
                                 new_opp->rate);
                } else {
                        new_opp->suspend = true;
                        opp_table->suspend_opp = new_opp;
                }
        }

        if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
                opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

        mutex_unlock(&opp_table_lock);

        pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
                 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
                 new_opp->u_volt_min, new_opp->u_volt_max,
                 new_opp->clock_latency_ns);

        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
        srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
        return 0;

free_opp:
        _opp_remove(opp_table, new_opp, false);
unlock:
        mutex_unlock(&opp_table_lock);
        return ret;
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
        struct device_node *np;
        struct opp_table *opp_table;
        int ret = 0, count = 0;

        mutex_lock(&opp_table_lock);

        opp_table = _managed_opp(opp_np);
        if (opp_table) {
                /* OPPs are already managed */
                if (!_add_opp_dev(dev, opp_table))
                        ret = -ENOMEM;
                mutex_unlock(&opp_table_lock);
                return ret;
        }
        mutex_unlock(&opp_table_lock);

        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_np, np) {
                count++;

                ret = _opp_add_static_v2(dev, np);
                if (ret) {
                        dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
                                ret);
                        /* Drop the reference taken by the child iterator */
                        of_node_put(np);
                        goto free_table;
                }
        }

        /* There should be one or more OPPs defined */
        if (WARN_ON(!count))
                return -ENOENT;

        mutex_lock(&opp_table_lock);

        opp_table = _find_opp_table(dev);
        if (WARN_ON(IS_ERR(opp_table))) {
                ret = PTR_ERR(opp_table);
                mutex_unlock(&opp_table_lock);
                goto free_table;
        }

        opp_table->np = opp_np;
        if (of_property_read_bool(opp_np, "opp-shared"))
                opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
        else
                opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

        mutex_unlock(&opp_table_lock);

        return 0;

free_table:
        dev_pm_opp_of_remove_table(dev);

        return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
        const struct property *prop;
        const __be32 *val;
        int nr;

        prop = of_find_property(dev->of_node, "operating-points", NULL);
        if (!prop)
                return -ENODEV;
        if (!prop->value)
                return -ENODATA;

        /*
         * Each OPP is a set of tuples consisting of frequency and
         * voltage like <freq-kHz vol-uV>.
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2) {
                dev_err(dev, "%s: Invalid OPP table\n", __func__);
                return -EINVAL;
        }

        val = prop->value;
        while (nr) {
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);

                if (_opp_add_v1(dev, freq, volt, false))
                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
                                 __func__, freq);
                nr -= 2;
        }

        return 0;
}
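
/*
 * Illustrative sketch, not part of the original file: the loop above turns a
 * v1 table such as "operating-points = <998400 1075000>, <1190400 1200000>;"
 * (kHz/uV pairs) into individual OPPs. A driver without DT data could build
 * an equivalent table at runtime with dev_pm_opp_add(); the frequencies and
 * voltages below are made-up example values.
 */
#if 0   /* example only, never built */
static int example_add_opps_dynamically(struct device *dev)
{
        int ret;

        /* dev_pm_opp_add() takes the rate in Hz and the voltage in uV */
        ret = dev_pm_opp_add(dev, 998400000, 1075000);
        if (!ret)
                ret = dev_pm_opp_add(dev, 1190400000, 1200000);

        return ret;
}
#endif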

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for the given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0         On success OR
 *           Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST   Freq are same and volt are different OR
 *           Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM   Memory allocation failure
 * -ENODEV   when the 'operating-points' property is not found or contains
 *           invalid data in the device node.
 * -ENODATA  when an empty 'operating-points' property is found
 * -EINVAL   when invalid entries are found in the opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
        struct device_node *opp_np;
        int ret;

        /*
         * OPPs have two versions of bindings now. The older one is deprecated,
         * so try the new binding first.
         */
        opp_np = _of_get_opp_desc_node(dev);
        if (!opp_np) {
                /*
                 * Try old-deprecated bindings for backward compatibility with
                 * older dtbs.
                 */
                return _of_add_opp_table_v1(dev);
        }

        ret = _of_add_opp_table_v2(dev, opp_np);
        of_node_put(opp_np);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
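
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * dev_pm_opp_of_add_table()/dev_pm_opp_of_remove_table() in a platform
 * driver's probe path. The driver itself and the 800 MHz target are made-up
 * examples; the OPP lookup follows the RCU rules documented above.
 */
#if 0   /* example only, never built */
static int example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        unsigned long freq = 800000000;
        struct dev_pm_opp *opp;
        int ret;

        /* Parses either the v1 or the v2 binding, whichever the DT provides */
        ret = dev_pm_opp_of_add_table(dev);
        if (ret)
                return ret;

        /* Example query: lowest OPP at or above 800 MHz */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        rcu_read_unlock();
        if (IS_ERR(opp)) {
                dev_pm_opp_of_remove_table(dev);        /* undo on failure */
                return PTR_ERR(opp);
        }

        return 0;
}
#endif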

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask: cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
        _dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask: cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
        struct device *cpu_dev;
        int cpu, ret = 0;

        WARN_ON(cpumask_empty(cpumask));

        for_each_cpu(cpu, cpumask) {
                cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_err("%s: failed to get cpu%d device\n", __func__,
                               cpu);
                        continue;
                }

                ret = dev_pm_opp_of_add_table(cpu_dev);
                if (ret) {
                        pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
                               __func__, cpu, ret);

                        /* Free all other OPPs */
                        dev_pm_opp_of_cpumask_remove_table(cpumask);
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
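
/*
 * Illustrative sketch, not part of the original file: a cpufreq driver adding
 * OPP tables for every CPU in a policy. The policy handling is simplified and
 * policy->related_cpus is assumed to be populated already.
 */
#if 0   /* example only, never built */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret;

        ret = dev_pm_opp_of_cpumask_add_table(policy->related_cpus);
        if (ret) {
                pr_err("OPP table(s) missing for policy%u: %d\n",
                       policy->cpu, ret);
                return ret;
        }

        /* ... build the cpufreq table here; on driver exit, undo with: */
        /* dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); */

        return 0;
}
#endif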

/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *                                    @cpu_dev using operating-points-v2
 *                                    bindings.
 *
 * @cpu_dev: CPU device for which we do this operation
 * @cpumask: cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
                                   struct cpumask *cpumask)
{
        struct device_node *np, *tmp_np;
        struct device *tcpu_dev;
        int cpu, ret = 0;

        /* Get OPP descriptor node */
        np = _of_get_opp_desc_node(cpu_dev);
        if (!np) {
                dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
                return -ENOENT;
        }

        cpumask_set_cpu(cpu_dev->id, cpumask);

        /* OPPs are shared ? */
        if (!of_property_read_bool(np, "opp-shared"))
                goto put_cpu_node;

        for_each_possible_cpu(cpu) {
                if (cpu == cpu_dev->id)
                        continue;

                tcpu_dev = get_cpu_device(cpu);
                if (!tcpu_dev) {
                        dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
                                __func__, cpu);
                        ret = -ENODEV;
                        goto put_cpu_node;
                }

                /* Get OPP descriptor node */
                tmp_np = _of_get_opp_desc_node(tcpu_dev);
                if (!tmp_np) {
                        dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
                                __func__);
                        ret = -ENOENT;
                        goto put_cpu_node;
                }

                /* CPUs are sharing opp node */
                if (np == tmp_np)
                        cpumask_set_cpu(cpu, cpumask);

                of_node_put(tmp_np);
        }

put_cpu_node:
        of_node_put(np);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
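
/*
 * Illustrative sketch, not part of the original file: discovering which CPUs
 * share a v2 OPP table with CPU0 (via "opp-shared") and then registering the
 * table once for the whole group. Error handling is kept minimal.
 */
#if 0   /* example only, never built */
static int example_init_cpu0_group(void)
{
        struct device *cpu_dev = get_cpu_device(0);
        cpumask_var_t shared_cpus;
        int ret;

        if (!cpu_dev)
                return -ENODEV;
        if (!zalloc_cpumask_var(&shared_cpus, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Fills shared_cpus with CPU0 plus every CPU whose DT node points at
         * the same operating-points-v2 table marked "opp-shared".
         */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared_cpus);
        if (!ret)
                ret = dev_pm_opp_of_cpumask_add_table(shared_cpus);

        free_cpumask_var(shared_cpus);
        return ret;
}
#endif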