core.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/of.h>
  20. #include <linux/export.h>
  21. #include <linux/regulator/consumer.h>
  22. #include "opp.h"
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/*
 * Assert (via lockdep) that the caller is in a legitimate read or write
 * section: either inside rcu_read_lock() (readers) or holding
 * opp_table_lock (writers). Used by the lookup helpers below, which may
 * be called from either path.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
  38. static struct opp_device *_find_opp_dev(const struct device *dev,
  39. struct opp_table *opp_table)
  40. {
  41. struct opp_device *opp_dev;
  42. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  43. if (opp_dev->dev == dev)
  44. return opp_dev;
  45. return NULL;
  46. }
  47. static struct opp_table *_managed_opp(const struct device_node *np)
  48. {
  49. struct opp_table *opp_table;
  50. list_for_each_entry_rcu(opp_table, &opp_tables, node) {
  51. if (opp_table->np == np) {
  52. /*
  53. * Multiple devices can point to the same OPP table and
  54. * so will have same node-pointer, np.
  55. *
  56. * But the OPPs will be considered as shared only if the
  57. * OPP table contains a "opp-shared" property.
  58. */
  59. return opp_table->shared_opp ? opp_table : NULL;
  60. }
  61. }
  62. return NULL;
  63. }
  64. /**
  65. * _find_opp_table() - find opp_table struct using device pointer
  66. * @dev: device pointer used to lookup OPP table
  67. *
  68. * Search OPP table for one containing matching device. Does a RCU reader
  69. * operation to grab the pointer needed.
  70. *
  71. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  72. * -EINVAL based on type of error.
  73. *
  74. * Locking: For readers, this function must be called under rcu_read_lock().
  75. * opp_table is a RCU protected pointer, which means that opp_table is valid
  76. * as long as we are under RCU lock.
  77. *
  78. * For Writers, this function must be called with opp_table_lock held.
  79. */
  80. struct opp_table *_find_opp_table(struct device *dev)
  81. {
  82. struct opp_table *opp_table;
  83. opp_rcu_lockdep_assert();
  84. if (IS_ERR_OR_NULL(dev)) {
  85. pr_err("%s: Invalid parameters\n", __func__);
  86. return ERR_PTR(-EINVAL);
  87. }
  88. list_for_each_entry_rcu(opp_table, &opp_tables, node)
  89. if (_find_opp_dev(dev, opp_table))
  90. return opp_table;
  91. return ERR_PTR(-ENODEV);
  92. }
  93. /**
  94. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  95. * @opp: opp for which voltage has to be returned for
  96. *
  97. * Return: voltage in micro volt corresponding to the opp, else
  98. * return 0
  99. *
  100. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  101. * protected pointer. This means that opp which could have been fetched by
  102. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  103. * under RCU lock. The pointer returned by the opp_find_freq family must be
  104. * used in the same section as the usage of this function with the pointer
  105. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  106. * pointer.
  107. */
  108. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  109. {
  110. struct dev_pm_opp *tmp_opp;
  111. unsigned long v = 0;
  112. opp_rcu_lockdep_assert();
  113. tmp_opp = rcu_dereference(opp);
  114. if (IS_ERR_OR_NULL(tmp_opp))
  115. pr_err("%s: Invalid parameters\n", __func__);
  116. else
  117. v = tmp_opp->u_volt;
  118. return v;
  119. }
  120. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  121. /**
  122. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  123. * @opp: opp for which frequency has to be returned for
  124. *
  125. * Return: frequency in hertz corresponding to the opp, else
  126. * return 0
  127. *
  128. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  129. * protected pointer. This means that opp which could have been fetched by
  130. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  131. * under RCU lock. The pointer returned by the opp_find_freq family must be
  132. * used in the same section as the usage of this function with the pointer
  133. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  134. * pointer.
  135. */
  136. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  137. {
  138. struct dev_pm_opp *tmp_opp;
  139. unsigned long f = 0;
  140. opp_rcu_lockdep_assert();
  141. tmp_opp = rcu_dereference(opp);
  142. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  143. pr_err("%s: Invalid parameters\n", __func__);
  144. else
  145. f = tmp_opp->rate;
  146. return f;
  147. }
  148. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  149. /**
  150. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  151. * @opp: opp for which turbo mode is being verified
  152. *
  153. * Turbo OPPs are not for normal use, and can be enabled (under certain
  154. * conditions) for short duration of times to finish high throughput work
  155. * quickly. Running on them for longer times may overheat the chip.
  156. *
  157. * Return: true if opp is turbo opp, else false.
  158. *
  159. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  160. * protected pointer. This means that opp which could have been fetched by
  161. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  162. * under RCU lock. The pointer returned by the opp_find_freq family must be
  163. * used in the same section as the usage of this function with the pointer
  164. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  165. * pointer.
  166. */
  167. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  168. {
  169. struct dev_pm_opp *tmp_opp;
  170. opp_rcu_lockdep_assert();
  171. tmp_opp = rcu_dereference(opp);
  172. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  173. pr_err("%s: Invalid parameters\n", __func__);
  174. return false;
  175. }
  176. return tmp_opp->turbo;
  177. }
  178. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  179. /**
  180. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  181. * @dev: device for which we do this operation
  182. *
  183. * Return: This function returns the max clock latency in nanoseconds.
  184. *
  185. * Locking: This function takes rcu_read_lock().
  186. */
  187. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  188. {
  189. struct opp_table *opp_table;
  190. unsigned long clock_latency_ns;
  191. rcu_read_lock();
  192. opp_table = _find_opp_table(dev);
  193. if (IS_ERR(opp_table))
  194. clock_latency_ns = 0;
  195. else
  196. clock_latency_ns = opp_table->clock_latency_ns_max;
  197. rcu_read_unlock();
  198. return clock_latency_ns;
  199. }
  200. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  201. /**
  202. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  203. * @dev: device for which we do this operation
  204. *
  205. * Return: This function returns the max voltage latency in nanoseconds.
  206. *
  207. * Locking: This function takes rcu_read_lock().
  208. */
  209. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  210. {
  211. struct opp_table *opp_table;
  212. struct dev_pm_opp *opp;
  213. struct regulator *reg;
  214. unsigned long latency_ns = 0;
  215. unsigned long min_uV = ~0, max_uV = 0;
  216. int ret;
  217. rcu_read_lock();
  218. opp_table = _find_opp_table(dev);
  219. if (IS_ERR(opp_table)) {
  220. rcu_read_unlock();
  221. return 0;
  222. }
  223. reg = opp_table->regulator;
  224. if (IS_ERR(reg)) {
  225. /* Regulator may not be required for device */
  226. if (reg)
  227. dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
  228. PTR_ERR(reg));
  229. rcu_read_unlock();
  230. return 0;
  231. }
  232. list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
  233. if (!opp->available)
  234. continue;
  235. if (opp->u_volt_min < min_uV)
  236. min_uV = opp->u_volt_min;
  237. if (opp->u_volt_max > max_uV)
  238. max_uV = opp->u_volt_max;
  239. }
  240. rcu_read_unlock();
  241. /*
  242. * The caller needs to ensure that opp_table (and hence the regulator)
  243. * isn't freed, while we are executing this routine.
  244. */
  245. ret = regulator_set_voltage_time(reg, min_uV, max_uV);
  246. if (ret > 0)
  247. latency_ns = ret * 1000;
  248. return latency_ns;
  249. }
  250. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  251. /**
  252. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  253. * nanoseconds
  254. * @dev: device for which we do this operation
  255. *
  256. * Return: This function returns the max transition latency, in nanoseconds, to
  257. * switch from one OPP to other.
  258. *
  259. * Locking: This function takes rcu_read_lock().
  260. */
  261. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  262. {
  263. return dev_pm_opp_get_max_volt_latency(dev) +
  264. dev_pm_opp_get_max_clock_latency(dev);
  265. }
  266. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  267. /**
  268. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  269. * @dev: device for which we do this operation
  270. *
  271. * Return: This function returns pointer to the suspend opp if it is
  272. * defined and available, otherwise it returns NULL.
  273. *
  274. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  275. * protected pointer. The reason for the same is that the opp pointer which is
  276. * returned will remain valid for use with opp_get_{voltage, freq} only while
  277. * under the locked area. The pointer returned must be used prior to unlocking
  278. * with rcu_read_unlock() to maintain the integrity of the pointer.
  279. */
  280. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  281. {
  282. struct opp_table *opp_table;
  283. opp_rcu_lockdep_assert();
  284. opp_table = _find_opp_table(dev);
  285. if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
  286. !opp_table->suspend_opp->available)
  287. return NULL;
  288. return opp_table->suspend_opp;
  289. }
  290. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  291. /**
  292. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  293. * @dev: device for which we do this operation
  294. *
  295. * Return: This function returns the number of available opps if there are any,
  296. * else returns 0 if none or the corresponding error value.
  297. *
  298. * Locking: This function takes rcu_read_lock().
  299. */
  300. int dev_pm_opp_get_opp_count(struct device *dev)
  301. {
  302. struct opp_table *opp_table;
  303. struct dev_pm_opp *temp_opp;
  304. int count = 0;
  305. rcu_read_lock();
  306. opp_table = _find_opp_table(dev);
  307. if (IS_ERR(opp_table)) {
  308. count = PTR_ERR(opp_table);
  309. dev_err(dev, "%s: OPP table not found (%d)\n",
  310. __func__, count);
  311. goto out_unlock;
  312. }
  313. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  314. if (temp_opp->available)
  315. count++;
  316. }
  317. out_unlock:
  318. rcu_read_unlock();
  319. return count;
  320. }
  321. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  322. /**
  323. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  324. * @dev: device for which we do this operation
  325. * @freq: frequency to search for
  326. * @available: true/false - match for available opp
  327. *
  328. * Return: Searches for exact match in the opp table and returns pointer to the
  329. * matching opp if found, else returns ERR_PTR in case of error and should
  330. * be handled using IS_ERR. Error return values can be:
  331. * EINVAL: for bad pointer
  332. * ERANGE: no match found for search
  333. * ENODEV: if device not found in list of registered devices
  334. *
  335. * Note: available is a modifier for the search. if available=true, then the
  336. * match is for exact matching frequency and is available in the stored OPP
  337. * table. if false, the match is for exact frequency which is not available.
  338. *
  339. * This provides a mechanism to enable an opp which is not available currently
  340. * or the opposite as well.
  341. *
  342. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  343. * protected pointer. The reason for the same is that the opp pointer which is
  344. * returned will remain valid for use with opp_get_{voltage, freq} only while
  345. * under the locked area. The pointer returned must be used prior to unlocking
  346. * with rcu_read_unlock() to maintain the integrity of the pointer.
  347. */
  348. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  349. unsigned long freq,
  350. bool available)
  351. {
  352. struct opp_table *opp_table;
  353. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  354. opp_rcu_lockdep_assert();
  355. opp_table = _find_opp_table(dev);
  356. if (IS_ERR(opp_table)) {
  357. int r = PTR_ERR(opp_table);
  358. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  359. return ERR_PTR(r);
  360. }
  361. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  362. if (temp_opp->available == available &&
  363. temp_opp->rate == freq) {
  364. opp = temp_opp;
  365. break;
  366. }
  367. }
  368. return opp;
  369. }
  370. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  371. /**
  372. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  373. * @dev: device for which we do this operation
  374. * @freq: Start frequency
  375. *
  376. * Search for the matching ceil *available* OPP from a starting freq
  377. * for a device.
  378. *
  379. * Return: matching *opp and refreshes *freq accordingly, else returns
  380. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  381. * values can be:
  382. * EINVAL: for bad pointer
  383. * ERANGE: no match found for search
  384. * ENODEV: if device not found in list of registered devices
  385. *
  386. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  387. * protected pointer. The reason for the same is that the opp pointer which is
  388. * returned will remain valid for use with opp_get_{voltage, freq} only while
  389. * under the locked area. The pointer returned must be used prior to unlocking
  390. * with rcu_read_unlock() to maintain the integrity of the pointer.
  391. */
  392. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  393. unsigned long *freq)
  394. {
  395. struct opp_table *opp_table;
  396. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  397. opp_rcu_lockdep_assert();
  398. if (!dev || !freq) {
  399. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  400. return ERR_PTR(-EINVAL);
  401. }
  402. opp_table = _find_opp_table(dev);
  403. if (IS_ERR(opp_table))
  404. return ERR_CAST(opp_table);
  405. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  406. if (temp_opp->available && temp_opp->rate >= *freq) {
  407. opp = temp_opp;
  408. *freq = opp->rate;
  409. break;
  410. }
  411. }
  412. return opp;
  413. }
  414. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  415. /**
  416. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  417. * @dev: device for which we do this operation
  418. * @freq: Start frequency
  419. *
  420. * Search for the matching floor *available* OPP from a starting freq
  421. * for a device.
  422. *
  423. * Return: matching *opp and refreshes *freq accordingly, else returns
  424. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  425. * values can be:
  426. * EINVAL: for bad pointer
  427. * ERANGE: no match found for search
  428. * ENODEV: if device not found in list of registered devices
  429. *
  430. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  431. * protected pointer. The reason for the same is that the opp pointer which is
  432. * returned will remain valid for use with opp_get_{voltage, freq} only while
  433. * under the locked area. The pointer returned must be used prior to unlocking
  434. * with rcu_read_unlock() to maintain the integrity of the pointer.
  435. */
  436. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  437. unsigned long *freq)
  438. {
  439. struct opp_table *opp_table;
  440. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  441. opp_rcu_lockdep_assert();
  442. if (!dev || !freq) {
  443. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  444. return ERR_PTR(-EINVAL);
  445. }
  446. opp_table = _find_opp_table(dev);
  447. if (IS_ERR(opp_table))
  448. return ERR_CAST(opp_table);
  449. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  450. if (temp_opp->available) {
  451. /* go to the next node, before choosing prev */
  452. if (temp_opp->rate > *freq)
  453. break;
  454. else
  455. opp = temp_opp;
  456. }
  457. }
  458. if (!IS_ERR(opp))
  459. *freq = opp->rate;
  460. return opp;
  461. }
  462. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  463. /*
  464. * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
  465. * while clk returned here is used.
  466. */
  467. static struct clk *_get_opp_clk(struct device *dev)
  468. {
  469. struct opp_table *opp_table;
  470. struct clk *clk;
  471. rcu_read_lock();
  472. opp_table = _find_opp_table(dev);
  473. if (IS_ERR(opp_table)) {
  474. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  475. clk = ERR_CAST(opp_table);
  476. goto unlock;
  477. }
  478. clk = opp_table->clk;
  479. if (IS_ERR(clk))
  480. dev_err(dev, "%s: No clock available for the device\n",
  481. __func__);
  482. unlock:
  483. rcu_read_unlock();
  484. return clk;
  485. }
  486. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  487. unsigned long u_volt, unsigned long u_volt_min,
  488. unsigned long u_volt_max)
  489. {
  490. int ret;
  491. /* Regulator not available for device */
  492. if (IS_ERR(reg)) {
  493. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  494. PTR_ERR(reg));
  495. return 0;
  496. }
  497. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
  498. u_volt, u_volt_max);
  499. ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
  500. u_volt_max);
  501. if (ret)
  502. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  503. __func__, u_volt_min, u_volt, u_volt_max, ret);
  504. return ret;
  505. }
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Return: 0 on success, negative errno on failure (bad frequency, missing
 * clk/OPP table, or a failed voltage/clock transition).
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	/* Old-OPP voltages, saved so a failed transition can be rolled back */
	unsigned long ou_volt, ou_volt_min, ou_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Let the clk framework pick the rate it can actually provide */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Snapshot the current OPP's voltages for rollback. Failure to find
	 * it is not fatal — we just can't restore voltages on error below.
	 */
	old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
	if (!IS_ERR(old_opp)) {
		ou_volt = old_opp->u_volt;
		ou_volt_min = old_opp->u_volt_min;
		ou_volt_max = old_opp->u_volt_max;
	} else {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	/* Copy everything we need out of the RCU section before unlocking */
	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (!IS_ERR(old_opp))
		_set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */

/*
 * SRCU callback that frees an opp_device once its grace period has elapsed.
 *
 * NOTE(review): this invokes kfree_rcu() from inside the callback, queueing
 * the object for yet another grace period rather than freeing it directly
 * with kfree() — confirm whether the extra deferral is intentional.
 */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}
/*
 * Unlink @opp_dev from @opp_table and schedule its memory for release.
 * The debugfs entry goes first, then the list removal; the actual kfree
 * is deferred through SRCU so concurrent readers still traversing
 * dev_list see valid memory until their grace period ends.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
  623. struct opp_device *_add_opp_dev(const struct device *dev,
  624. struct opp_table *opp_table)
  625. {
  626. struct opp_device *opp_dev;
  627. int ret;
  628. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  629. if (!opp_dev)
  630. return NULL;
  631. /* Initialize opp-dev */
  632. opp_dev->dev = dev;
  633. list_add_rcu(&opp_dev->node, &opp_table->dev_list);
  634. /* Create debugfs entries for the opp_table */
  635. ret = opp_debug_register(opp_dev, opp_table);
  636. if (ret)
  637. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  638. __func__, ret);
  639. return opp_dev;
  640. }
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	struct device_node *np;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		/* Optional property; read failure leaves the field untouched */
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		/* -EPROBE_DEFER is expected during boot; stay quiet then */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
  702. /**
  703. * _kfree_device_rcu() - Free opp_table RCU handler
  704. * @head: RCU head
  705. */
  706. static void _kfree_device_rcu(struct rcu_head *head)
  707. {
  708. struct opp_table *opp_table = container_of(head, struct opp_table,
  709. rcu_head);
  710. kfree_rcu(opp_table, rcu_head);
  711. }
  712. /**
  713. * _remove_opp_table() - Removes a OPP table
  714. * @opp_table: OPP table to be removed.
  715. *
  716. * Removes/frees OPP table if it doesn't contain any OPPs.
  717. */
  718. static void _remove_opp_table(struct opp_table *opp_table)
  719. {
  720. struct opp_device *opp_dev;
  721. if (!list_empty(&opp_table->opp_list))
  722. return;
  723. if (opp_table->supported_hw)
  724. return;
  725. if (opp_table->prop_name)
  726. return;
  727. if (!IS_ERR(opp_table->regulator))
  728. return;
  729. /* Release clk */
  730. if (!IS_ERR(opp_table->clk))
  731. clk_put(opp_table->clk);
  732. opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
  733. node);
  734. _remove_opp_dev(opp_dev, opp_table);
  735. /* dev_list must be empty now */
  736. WARN_ON(!list_empty(&opp_table->dev_list));
  737. list_del_rcu(&opp_table->node);
  738. call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
  739. _kfree_device_rcu);
  740. }
  741. /**
  742. * _kfree_opp_rcu() - Free OPP RCU handler
  743. * @head: RCU head
  744. */
  745. static void _kfree_opp_rcu(struct rcu_head *head)
  746. {
  747. struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
  748. kfree_rcu(opp, rcu_head);
  749. }
  750. /**
  751. * _opp_remove() - Remove an OPP from a table definition
  752. * @opp_table: points back to the opp_table struct this opp belongs to
  753. * @opp: pointer to the OPP to remove
  754. * @notify: OPP_EVENT_REMOVE notification should be sent or not
  755. *
  756. * This function removes an opp definition from the opp table.
  757. *
  758. * Locking: The internal opp_table and opp structures are RCU protected.
  759. * It is assumed that the caller holds required mutex for an RCU updater
  760. * strategy.
  761. */
  762. static void _opp_remove(struct opp_table *opp_table,
  763. struct dev_pm_opp *opp, bool notify)
  764. {
  765. /*
  766. * Notify the changes in the availability of the operable
  767. * frequency/voltage list.
  768. */
  769. if (notify)
  770. srcu_notifier_call_chain(&opp_table->srcu_head,
  771. OPP_EVENT_REMOVE, opp);
  772. opp_debug_remove_one(opp);
  773. list_del_rcu(&opp->node);
  774. call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
  775. _remove_opp_table(opp_table);
  776. }
  777. /**
  778. * dev_pm_opp_remove() - Remove an OPP from OPP table
  779. * @dev: device for which we do this operation
  780. * @freq: OPP to remove with matching 'freq'
  781. *
  782. * This function removes an opp from the opp table.
  783. *
  784. * Locking: The internal opp_table and opp structures are RCU protected.
  785. * Hence this function internally uses RCU updater strategy with mutex locks
  786. * to keep the integrity of the internal data structures. Callers should ensure
  787. * that this function is *NOT* called under RCU protection or in contexts where
  788. * mutex cannot be locked.
  789. */
  790. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  791. {
  792. struct dev_pm_opp *opp;
  793. struct opp_table *opp_table;
  794. bool found = false;
  795. /* Hold our table modification lock here */
  796. mutex_lock(&opp_table_lock);
  797. opp_table = _find_opp_table(dev);
  798. if (IS_ERR(opp_table))
  799. goto unlock;
  800. list_for_each_entry(opp, &opp_table->opp_list, node) {
  801. if (opp->rate == freq) {
  802. found = true;
  803. break;
  804. }
  805. }
  806. if (!found) {
  807. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  808. __func__, freq);
  809. goto unlock;
  810. }
  811. _opp_remove(opp_table, opp, true);
  812. unlock:
  813. mutex_unlock(&opp_table_lock);
  814. }
  815. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  816. static struct dev_pm_opp *_allocate_opp(struct device *dev,
  817. struct opp_table **opp_table)
  818. {
  819. struct dev_pm_opp *opp;
  820. /* allocate new OPP node */
  821. opp = kzalloc(sizeof(*opp), GFP_KERNEL);
  822. if (!opp)
  823. return NULL;
  824. INIT_LIST_HEAD(&opp->node);
  825. *opp_table = _add_opp_table(dev);
  826. if (!*opp_table) {
  827. kfree(opp);
  828. return NULL;
  829. }
  830. return opp;
  831. }
  832. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  833. struct opp_table *opp_table)
  834. {
  835. struct regulator *reg = opp_table->regulator;
  836. if (!IS_ERR(reg) &&
  837. !regulator_is_supported_voltage(reg, opp->u_volt_min,
  838. opp->u_volt_max)) {
  839. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  840. __func__, opp->u_volt_min, opp->u_volt_max);
  841. return false;
  842. }
  843. return true;
  844. }
/*
 * _opp_add() - Link @new_opp into @opp_table, sorted by ascending frequency.
 * @dev: device, used for diagnostics only.
 * @new_opp: fully populated OPP to insert.
 * @opp_table: table to insert into.
 *
 * Return: 0 on success. A duplicate frequency returns 0 when the existing
 * OPP is available with the same voltage (benign re-add), -EEXIST otherwise.
 * Presumably called with opp_table_lock held -- all callers in this file
 * hold it.
 */
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node we should insert after */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);
		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	/* Publish the new OPP to RCU readers */
	list_add_rcu(&new_opp->node, head);

	/* Debugfs failure is logged but not treated as fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Disable the OPP up front if the regulator cannot supply it */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
  886. /**
  887. * _opp_add_v1() - Allocate a OPP based on v1 bindings.
  888. * @dev: device for which we do this operation
  889. * @freq: Frequency in Hz for this OPP
  890. * @u_volt: Voltage in uVolts for this OPP
  891. * @dynamic: Dynamically added OPPs.
  892. *
  893. * This function adds an opp definition to the opp table and returns status.
  894. * The opp is made available by default and it can be controlled using
  895. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  896. *
  897. * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  898. * and freed by dev_pm_opp_of_remove_table.
  899. *
  900. * Locking: The internal opp_table and opp structures are RCU protected.
  901. * Hence this function internally uses RCU updater strategy with mutex locks
  902. * to keep the integrity of the internal data structures. Callers should ensure
  903. * that this function is *NOT* called under RCU protection or in contexts where
  904. * mutex cannot be locked.
  905. *
  906. * Return:
  907. * 0 On success OR
  908. * Duplicate OPPs (both freq and volt are same) and opp->available
  909. * -EEXIST Freq are same and volt are different OR
  910. * Duplicate OPPs (both freq and volt are same) and !opp->available
  911. * -ENOMEM Memory allocation failure
  912. */
  913. static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
  914. bool dynamic)
  915. {
  916. struct opp_table *opp_table;
  917. struct dev_pm_opp *new_opp;
  918. unsigned long tol;
  919. int ret;
  920. /* Hold our table modification lock here */
  921. mutex_lock(&opp_table_lock);
  922. new_opp = _allocate_opp(dev, &opp_table);
  923. if (!new_opp) {
  924. ret = -ENOMEM;
  925. goto unlock;
  926. }
  927. /* populate the opp table */
  928. new_opp->rate = freq;
  929. tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
  930. new_opp->u_volt = u_volt;
  931. new_opp->u_volt_min = u_volt - tol;
  932. new_opp->u_volt_max = u_volt + tol;
  933. new_opp->available = true;
  934. new_opp->dynamic = dynamic;
  935. ret = _opp_add(dev, new_opp, opp_table);
  936. if (ret)
  937. goto free_opp;
  938. mutex_unlock(&opp_table_lock);
  939. /*
  940. * Notify the changes in the availability of the operable
  941. * frequency/voltage list.
  942. */
  943. srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
  944. return 0;
  945. free_opp:
  946. _opp_remove(opp_table, new_opp, false);
  947. unlock:
  948. mutex_unlock(&opp_table_lock);
  949. return ret;
  950. }
  951. /* TODO: Support multiple regulators */
  952. static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
  953. struct opp_table *opp_table)
  954. {
  955. u32 microvolt[3] = {0};
  956. u32 val;
  957. int count, ret;
  958. struct property *prop = NULL;
  959. char name[NAME_MAX];
  960. /* Search for "opp-microvolt-<name>" */
  961. if (opp_table->prop_name) {
  962. snprintf(name, sizeof(name), "opp-microvolt-%s",
  963. opp_table->prop_name);
  964. prop = of_find_property(opp->np, name, NULL);
  965. }
  966. if (!prop) {
  967. /* Search for "opp-microvolt" */
  968. sprintf(name, "opp-microvolt");
  969. prop = of_find_property(opp->np, name, NULL);
  970. /* Missing property isn't a problem, but an invalid entry is */
  971. if (!prop)
  972. return 0;
  973. }
  974. count = of_property_count_u32_elems(opp->np, name);
  975. if (count < 0) {
  976. dev_err(dev, "%s: Invalid %s property (%d)\n",
  977. __func__, name, count);
  978. return count;
  979. }
  980. /* There can be one or three elements here */
  981. if (count != 1 && count != 3) {
  982. dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
  983. __func__, name, count);
  984. return -EINVAL;
  985. }
  986. ret = of_property_read_u32_array(opp->np, name, microvolt, count);
  987. if (ret) {
  988. dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
  989. return -EINVAL;
  990. }
  991. opp->u_volt = microvolt[0];
  992. if (count == 1) {
  993. opp->u_volt_min = opp->u_volt;
  994. opp->u_volt_max = opp->u_volt;
  995. } else {
  996. opp->u_volt_min = microvolt[1];
  997. opp->u_volt_max = microvolt[2];
  998. }
  999. /* Search for "opp-microamp-<name>" */
  1000. prop = NULL;
  1001. if (opp_table->prop_name) {
  1002. snprintf(name, sizeof(name), "opp-microamp-%s",
  1003. opp_table->prop_name);
  1004. prop = of_find_property(opp->np, name, NULL);
  1005. }
  1006. if (!prop) {
  1007. /* Search for "opp-microamp" */
  1008. sprintf(name, "opp-microamp");
  1009. prop = of_find_property(opp->np, name, NULL);
  1010. }
  1011. if (prop && !of_property_read_u32(opp->np, name, &val))
  1012. opp->u_amp = val;
  1013. return 0;
  1014. }
  1015. /**
  1016. * dev_pm_opp_set_supported_hw() - Set supported platforms
  1017. * @dev: Device for which supported-hw has to be set.
  1018. * @versions: Array of hierarchy of versions to match.
  1019. * @count: Number of elements in the array.
  1020. *
  1021. * This is required only for the V2 bindings, and it enables a platform to
  1022. * specify the hierarchy of versions it supports. OPP layer will then enable
  1023. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  1024. * property.
  1025. *
  1026. * Locking: The internal opp_table and opp structures are RCU protected.
  1027. * Hence this function internally uses RCU updater strategy with mutex locks
  1028. * to keep the integrity of the internal data structures. Callers should ensure
  1029. * that this function is *NOT* called under RCU protection or in contexts where
  1030. * mutex cannot be locked.
  1031. */
  1032. int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
  1033. unsigned int count)
  1034. {
  1035. struct opp_table *opp_table;
  1036. int ret = 0;
  1037. /* Hold our table modification lock here */
  1038. mutex_lock(&opp_table_lock);
  1039. opp_table = _add_opp_table(dev);
  1040. if (!opp_table) {
  1041. ret = -ENOMEM;
  1042. goto unlock;
  1043. }
  1044. /* Make sure there are no concurrent readers while updating opp_table */
  1045. WARN_ON(!list_empty(&opp_table->opp_list));
  1046. /* Do we already have a version hierarchy associated with opp_table? */
  1047. if (opp_table->supported_hw) {
  1048. dev_err(dev, "%s: Already have supported hardware list\n",
  1049. __func__);
  1050. ret = -EBUSY;
  1051. goto err;
  1052. }
  1053. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  1054. GFP_KERNEL);
  1055. if (!opp_table->supported_hw) {
  1056. ret = -ENOMEM;
  1057. goto err;
  1058. }
  1059. opp_table->supported_hw_count = count;
  1060. mutex_unlock(&opp_table_lock);
  1061. return 0;
  1062. err:
  1063. _remove_opp_table(opp_table);
  1064. unlock:
  1065. mutex_unlock(&opp_table_lock);
  1066. return ret;
  1067. }
  1068. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  1069. /**
  1070. * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
  1071. * @dev: Device for which supported-hw has to be put.
  1072. *
  1073. * This is required only for the V2 bindings, and is called for a matching
  1074. * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
  1075. * will not be freed.
  1076. *
  1077. * Locking: The internal opp_table and opp structures are RCU protected.
  1078. * Hence this function internally uses RCU updater strategy with mutex locks
  1079. * to keep the integrity of the internal data structures. Callers should ensure
  1080. * that this function is *NOT* called under RCU protection or in contexts where
  1081. * mutex cannot be locked.
  1082. */
  1083. void dev_pm_opp_put_supported_hw(struct device *dev)
  1084. {
  1085. struct opp_table *opp_table;
  1086. /* Hold our table modification lock here */
  1087. mutex_lock(&opp_table_lock);
  1088. /* Check for existing table for 'dev' first */
  1089. opp_table = _find_opp_table(dev);
  1090. if (IS_ERR(opp_table)) {
  1091. dev_err(dev, "Failed to find opp_table: %ld\n",
  1092. PTR_ERR(opp_table));
  1093. goto unlock;
  1094. }
  1095. /* Make sure there are no concurrent readers while updating opp_table */
  1096. WARN_ON(!list_empty(&opp_table->opp_list));
  1097. if (!opp_table->supported_hw) {
  1098. dev_err(dev, "%s: Doesn't have supported hardware list\n",
  1099. __func__);
  1100. goto unlock;
  1101. }
  1102. kfree(opp_table->supported_hw);
  1103. opp_table->supported_hw = NULL;
  1104. opp_table->supported_hw_count = 0;
  1105. /* Try freeing opp_table if this was the last blocking resource */
  1106. _remove_opp_table(opp_table);
  1107. unlock:
  1108. mutex_unlock(&opp_table_lock);
  1109. }
  1110. EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1111. /**
  1112. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1113. * @dev: Device for which the prop-name has to be set.
  1114. * @name: name to postfix to properties.
  1115. *
  1116. * This is required only for the V2 bindings, and it enables a platform to
  1117. * specify the extn to be used for certain property names. The properties to
  1118. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1119. * should postfix the property name with -<name> while looking for them.
  1120. *
  1121. * Locking: The internal opp_table and opp structures are RCU protected.
  1122. * Hence this function internally uses RCU updater strategy with mutex locks
  1123. * to keep the integrity of the internal data structures. Callers should ensure
  1124. * that this function is *NOT* called under RCU protection or in contexts where
  1125. * mutex cannot be locked.
  1126. */
  1127. int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1128. {
  1129. struct opp_table *opp_table;
  1130. int ret = 0;
  1131. /* Hold our table modification lock here */
  1132. mutex_lock(&opp_table_lock);
  1133. opp_table = _add_opp_table(dev);
  1134. if (!opp_table) {
  1135. ret = -ENOMEM;
  1136. goto unlock;
  1137. }
  1138. /* Make sure there are no concurrent readers while updating opp_table */
  1139. WARN_ON(!list_empty(&opp_table->opp_list));
  1140. /* Do we already have a prop-name associated with opp_table? */
  1141. if (opp_table->prop_name) {
  1142. dev_err(dev, "%s: Already have prop-name %s\n", __func__,
  1143. opp_table->prop_name);
  1144. ret = -EBUSY;
  1145. goto err;
  1146. }
  1147. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1148. if (!opp_table->prop_name) {
  1149. ret = -ENOMEM;
  1150. goto err;
  1151. }
  1152. mutex_unlock(&opp_table_lock);
  1153. return 0;
  1154. err:
  1155. _remove_opp_table(opp_table);
  1156. unlock:
  1157. mutex_unlock(&opp_table_lock);
  1158. return ret;
  1159. }
  1160. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  1161. /**
  1162. * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
  1163. * @dev: Device for which the prop-name has to be put.
  1164. *
  1165. * This is required only for the V2 bindings, and is called for a matching
  1166. * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
  1167. * will not be freed.
  1168. *
  1169. * Locking: The internal opp_table and opp structures are RCU protected.
  1170. * Hence this function internally uses RCU updater strategy with mutex locks
  1171. * to keep the integrity of the internal data structures. Callers should ensure
  1172. * that this function is *NOT* called under RCU protection or in contexts where
  1173. * mutex cannot be locked.
  1174. */
  1175. void dev_pm_opp_put_prop_name(struct device *dev)
  1176. {
  1177. struct opp_table *opp_table;
  1178. /* Hold our table modification lock here */
  1179. mutex_lock(&opp_table_lock);
  1180. /* Check for existing table for 'dev' first */
  1181. opp_table = _find_opp_table(dev);
  1182. if (IS_ERR(opp_table)) {
  1183. dev_err(dev, "Failed to find opp_table: %ld\n",
  1184. PTR_ERR(opp_table));
  1185. goto unlock;
  1186. }
  1187. /* Make sure there are no concurrent readers while updating opp_table */
  1188. WARN_ON(!list_empty(&opp_table->opp_list));
  1189. if (!opp_table->prop_name) {
  1190. dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
  1191. goto unlock;
  1192. }
  1193. kfree(opp_table->prop_name);
  1194. opp_table->prop_name = NULL;
  1195. /* Try freeing opp_table if this was the last blocking resource */
  1196. _remove_opp_table(opp_table);
  1197. unlock:
  1198. mutex_unlock(&opp_table_lock);
  1199. }
  1200. EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
  1201. /**
  1202. * dev_pm_opp_set_regulator() - Set regulator name for the device
  1203. * @dev: Device for which regulator name is being set.
  1204. * @name: Name of the regulator.
  1205. *
  1206. * In order to support OPP switching, OPP layer needs to know the name of the
  1207. * device's regulator, as the core would be required to switch voltages as well.
  1208. *
  1209. * This must be called before any OPPs are initialized for the device.
  1210. *
  1211. * Locking: The internal opp_table and opp structures are RCU protected.
  1212. * Hence this function internally uses RCU updater strategy with mutex locks
  1213. * to keep the integrity of the internal data structures. Callers should ensure
  1214. * that this function is *NOT* called under RCU protection or in contexts where
  1215. * mutex cannot be locked.
  1216. */
  1217. int dev_pm_opp_set_regulator(struct device *dev, const char *name)
  1218. {
  1219. struct opp_table *opp_table;
  1220. struct regulator *reg;
  1221. int ret;
  1222. mutex_lock(&opp_table_lock);
  1223. opp_table = _add_opp_table(dev);
  1224. if (!opp_table) {
  1225. ret = -ENOMEM;
  1226. goto unlock;
  1227. }
  1228. /* This should be called before OPPs are initialized */
  1229. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1230. ret = -EBUSY;
  1231. goto err;
  1232. }
  1233. /* Already have a regulator set */
  1234. if (WARN_ON(!IS_ERR(opp_table->regulator))) {
  1235. ret = -EBUSY;
  1236. goto err;
  1237. }
  1238. /* Allocate the regulator */
  1239. reg = regulator_get_optional(dev, name);
  1240. if (IS_ERR(reg)) {
  1241. ret = PTR_ERR(reg);
  1242. if (ret != -EPROBE_DEFER)
  1243. dev_err(dev, "%s: no regulator (%s) found: %d\n",
  1244. __func__, name, ret);
  1245. goto err;
  1246. }
  1247. opp_table->regulator = reg;
  1248. mutex_unlock(&opp_table_lock);
  1249. return 0;
  1250. err:
  1251. _remove_opp_table(opp_table);
  1252. unlock:
  1253. mutex_unlock(&opp_table_lock);
  1254. return ret;
  1255. }
  1256. EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
  1257. /**
  1258. * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
  1259. * @dev: Device for which regulator was set.
  1260. *
  1261. * Locking: The internal opp_table and opp structures are RCU protected.
  1262. * Hence this function internally uses RCU updater strategy with mutex locks
  1263. * to keep the integrity of the internal data structures. Callers should ensure
  1264. * that this function is *NOT* called under RCU protection or in contexts where
  1265. * mutex cannot be locked.
  1266. */
  1267. void dev_pm_opp_put_regulator(struct device *dev)
  1268. {
  1269. struct opp_table *opp_table;
  1270. mutex_lock(&opp_table_lock);
  1271. /* Check for existing table for 'dev' first */
  1272. opp_table = _find_opp_table(dev);
  1273. if (IS_ERR(opp_table)) {
  1274. dev_err(dev, "Failed to find opp_table: %ld\n",
  1275. PTR_ERR(opp_table));
  1276. goto unlock;
  1277. }
  1278. if (IS_ERR(opp_table->regulator)) {
  1279. dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
  1280. goto unlock;
  1281. }
  1282. /* Make sure there are no concurrent readers while updating opp_table */
  1283. WARN_ON(!list_empty(&opp_table->opp_list));
  1284. regulator_put(opp_table->regulator);
  1285. opp_table->regulator = ERR_PTR(-ENXIO);
  1286. /* Try freeing opp_table if this was the last blocking resource */
  1287. _remove_opp_table(opp_table);
  1288. unlock:
  1289. mutex_unlock(&opp_table_lock);
  1290. }
  1291. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
  1292. static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
  1293. struct device_node *np)
  1294. {
  1295. unsigned int count = opp_table->supported_hw_count;
  1296. u32 version;
  1297. int ret;
  1298. if (!opp_table->supported_hw)
  1299. return true;
  1300. while (count--) {
  1301. ret = of_property_read_u32_index(np, "opp-supported-hw", count,
  1302. &version);
  1303. if (ret) {
  1304. dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
  1305. __func__, count, ret);
  1306. return false;
  1307. }
  1308. /* Both of these are bitwise masks of the versions */
  1309. if (!(version & opp_table->supported_hw[count]))
  1310. return false;
  1311. }
  1312. return true;
  1313. }
  1314. /**
  1315. * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  1316. * @dev: device for which we do this operation
  1317. * @np: device node
  1318. *
  1319. * This function adds an opp definition to the opp table and returns status. The
  1320. * opp can be controlled using dev_pm_opp_enable/disable functions and may be
  1321. * removed by dev_pm_opp_remove.
  1322. *
  1323. * Locking: The internal opp_table and opp structures are RCU protected.
  1324. * Hence this function internally uses RCU updater strategy with mutex locks
  1325. * to keep the integrity of the internal data structures. Callers should ensure
  1326. * that this function is *NOT* called under RCU protection or in contexts where
  1327. * mutex cannot be locked.
  1328. *
  1329. * Return:
  1330. * 0 On success OR
  1331. * Duplicate OPPs (both freq and volt are same) and opp->available
  1332. * -EEXIST Freq are same and volt are different OR
  1333. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1334. * -ENOMEM Memory allocation failure
  1335. * -EINVAL Failed parsing the OPP node
  1336. */
  1337. static int _opp_add_static_v2(struct device *dev, struct device_node *np)
  1338. {
  1339. struct opp_table *opp_table;
  1340. struct dev_pm_opp *new_opp;
  1341. u64 rate;
  1342. u32 val;
  1343. int ret;
  1344. /* Hold our table modification lock here */
  1345. mutex_lock(&opp_table_lock);
  1346. new_opp = _allocate_opp(dev, &opp_table);
  1347. if (!new_opp) {
  1348. ret = -ENOMEM;
  1349. goto unlock;
  1350. }
  1351. ret = of_property_read_u64(np, "opp-hz", &rate);
  1352. if (ret < 0) {
  1353. dev_err(dev, "%s: opp-hz not found\n", __func__);
  1354. goto free_opp;
  1355. }
  1356. /* Check if the OPP supports hardware's hierarchy of versions or not */
  1357. if (!_opp_is_supported(dev, opp_table, np)) {
  1358. dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
  1359. goto free_opp;
  1360. }
  1361. /*
  1362. * Rate is defined as an unsigned long in clk API, and so casting
  1363. * explicitly to its type. Must be fixed once rate is 64 bit
  1364. * guaranteed in clk API.
  1365. */
  1366. new_opp->rate = (unsigned long)rate;
  1367. new_opp->turbo = of_property_read_bool(np, "turbo-mode");
  1368. new_opp->np = np;
  1369. new_opp->dynamic = false;
  1370. new_opp->available = true;
  1371. if (!of_property_read_u32(np, "clock-latency-ns", &val))
  1372. new_opp->clock_latency_ns = val;
  1373. ret = opp_parse_supplies(new_opp, dev, opp_table);
  1374. if (ret)
  1375. goto free_opp;
  1376. ret = _opp_add(dev, new_opp, opp_table);
  1377. if (ret)
  1378. goto free_opp;
  1379. /* OPP to select on device suspend */
  1380. if (of_property_read_bool(np, "opp-suspend")) {
  1381. if (opp_table->suspend_opp) {
  1382. dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
  1383. __func__, opp_table->suspend_opp->rate,
  1384. new_opp->rate);
  1385. } else {
  1386. new_opp->suspend = true;
  1387. opp_table->suspend_opp = new_opp;
  1388. }
  1389. }
  1390. if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
  1391. opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
  1392. mutex_unlock(&opp_table_lock);
  1393. pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
  1394. __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
  1395. new_opp->u_volt_min, new_opp->u_volt_max,
  1396. new_opp->clock_latency_ns);
  1397. /*
  1398. * Notify the changes in the availability of the operable
  1399. * frequency/voltage list.
  1400. */
  1401. srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
  1402. return 0;
  1403. free_opp:
  1404. _opp_remove(opp_table, new_opp, false);
  1405. unlock:
  1406. mutex_unlock(&opp_table_lock);
  1407. return ret;
  1408. }
  1409. /**
  1410. * dev_pm_opp_add() - Add an OPP table from a table definitions
  1411. * @dev: device for which we do this operation
  1412. * @freq: Frequency in Hz for this OPP
  1413. * @u_volt: Voltage in uVolts for this OPP
  1414. *
  1415. * This function adds an opp definition to the opp table and returns status.
  1416. * The opp is made available by default and it can be controlled using
  1417. * dev_pm_opp_enable/disable functions.
  1418. *
  1419. * Locking: The internal opp_table and opp structures are RCU protected.
  1420. * Hence this function internally uses RCU updater strategy with mutex locks
  1421. * to keep the integrity of the internal data structures. Callers should ensure
  1422. * that this function is *NOT* called under RCU protection or in contexts where
  1423. * mutex cannot be locked.
  1424. *
  1425. * Return:
  1426. * 0 On success OR
  1427. * Duplicate OPPs (both freq and volt are same) and opp->available
  1428. * -EEXIST Freq are same and volt are different OR
  1429. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1430. * -ENOMEM Memory allocation failure
  1431. */
  1432. int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  1433. {
  1434. return _opp_add_v1(dev, freq, u_volt, true);
  1435. }
  1436. EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  1437. /**
  1438. * _opp_set_availability() - helper to set the availability of an opp
  1439. * @dev: device for which we do this operation
  1440. * @freq: OPP frequency to modify availability
  1441. * @availability_req: availability status requested for this opp
  1442. *
  1443. * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
  1444. * share a common logic which is isolated here.
  1445. *
  1446. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1447. * copy operation, returns 0 if no modification was done OR modification was
  1448. * successful.
  1449. *
  1450. * Locking: The internal opp_table and opp structures are RCU protected.
  1451. * Hence this function internally uses RCU updater strategy with mutex locks to
  1452. * keep the integrity of the internal data structures. Callers should ensure
  1453. * that this function is *NOT* called under RCU protection or in contexts where
  1454. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1455. */
  1456. static int _opp_set_availability(struct device *dev, unsigned long freq,
  1457. bool availability_req)
  1458. {
  1459. struct opp_table *opp_table;
  1460. struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
  1461. int r = 0;
  1462. /* keep the node allocated */
  1463. new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
  1464. if (!new_opp)
  1465. return -ENOMEM;
  1466. mutex_lock(&opp_table_lock);
  1467. /* Find the opp_table */
  1468. opp_table = _find_opp_table(dev);
  1469. if (IS_ERR(opp_table)) {
  1470. r = PTR_ERR(opp_table);
  1471. dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
  1472. goto unlock;
  1473. }
  1474. /* Do we have the frequency? */
  1475. list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
  1476. if (tmp_opp->rate == freq) {
  1477. opp = tmp_opp;
  1478. break;
  1479. }
  1480. }
  1481. if (IS_ERR(opp)) {
  1482. r = PTR_ERR(opp);
  1483. goto unlock;
  1484. }
  1485. /* Is update really needed? */
  1486. if (opp->available == availability_req)
  1487. goto unlock;
  1488. /* copy the old data over */
  1489. *new_opp = *opp;
  1490. /* plug in new node */
  1491. new_opp->available = availability_req;
  1492. list_replace_rcu(&opp->node, &new_opp->node);
  1493. mutex_unlock(&opp_table_lock);
  1494. call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
  1495. /* Notify the change of the OPP availability */
  1496. if (availability_req)
  1497. srcu_notifier_call_chain(&opp_table->srcu_head,
  1498. OPP_EVENT_ENABLE, new_opp);
  1499. else
  1500. srcu_notifier_call_chain(&opp_table->srcu_head,
  1501. OPP_EVENT_DISABLE, new_opp);
  1502. return 0;
  1503. unlock:
  1504. mutex_unlock(&opp_table_lock);
  1505. kfree(new_opp);
  1506. return r;
  1507. }
  1508. /**
  1509. * dev_pm_opp_enable() - Enable a specific OPP
  1510. * @dev: device for which we do this operation
  1511. * @freq: OPP frequency to enable
  1512. *
  1513. * Enables a provided opp. If the operation is valid, this returns 0, else the
  1514. * corresponding error value. It is meant to be used for users an OPP available
  1515. * after being temporarily made unavailable with dev_pm_opp_disable.
  1516. *
  1517. * Locking: The internal opp_table and opp structures are RCU protected.
  1518. * Hence this function indirectly uses RCU and mutex locks to keep the
  1519. * integrity of the internal data structures. Callers should ensure that
  1520. * this function is *NOT* called under RCU protection or in contexts where
  1521. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1522. *
  1523. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1524. * copy operation, returns 0 if no modification was done OR modification was
  1525. * successful.
  1526. */
  1527. int dev_pm_opp_enable(struct device *dev, unsigned long freq)
  1528. {
  1529. return _opp_set_availability(dev, freq, true);
  1530. }
  1531. EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  1532. /**
  1533. * dev_pm_opp_disable() - Disable a specific OPP
  1534. * @dev: device for which we do this operation
  1535. * @freq: OPP frequency to disable
  1536. *
  1537. * Disables a provided opp. If the operation is valid, this returns
  1538. * 0, else the corresponding error value. It is meant to be a temporary
  1539. * control by users to make this OPP not available until the circumstances are
  1540. * right to make it available again (with a call to dev_pm_opp_enable).
  1541. *
  1542. * Locking: The internal opp_table and opp structures are RCU protected.
  1543. * Hence this function indirectly uses RCU and mutex locks to keep the
  1544. * integrity of the internal data structures. Callers should ensure that
  1545. * this function is *NOT* called under RCU protection or in contexts where
  1546. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1547. *
  1548. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1549. * copy operation, returns 0 if no modification was done OR modification was
  1550. * successful.
  1551. */
  1552. int dev_pm_opp_disable(struct device *dev, unsigned long freq)
  1553. {
  1554. return _opp_set_availability(dev, freq, false);
  1555. }
  1556. EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1557. /**
  1558. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  1559. * @dev: device pointer used to lookup OPP table.
  1560. *
  1561. * Return: pointer to notifier head if found, otherwise -ENODEV or
  1562. * -EINVAL based on type of error casted as pointer. value must be checked
  1563. * with IS_ERR to determine valid pointer or error result.
  1564. *
  1565. * Locking: This function must be called under rcu_read_lock(). opp_table is a
  1566. * RCU protected pointer. The reason for the same is that the opp pointer which
  1567. * is returned will remain valid for use with opp_get_{voltage, freq} only while
  1568. * under the locked area. The pointer returned must be used prior to unlocking
  1569. * with rcu_read_unlock() to maintain the integrity of the pointer.
  1570. */
  1571. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  1572. {
  1573. struct opp_table *opp_table = _find_opp_table(dev);
  1574. if (IS_ERR(opp_table))
  1575. return ERR_CAST(opp_table); /* matching type */
  1576. return &opp_table->srcu_head;
  1577. }
  1578. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  1579. #ifdef CONFIG_OF
  1580. /**
  1581. * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
  1582. * entries
  1583. * @dev: device pointer used to lookup OPP table.
  1584. *
  1585. * Free OPPs created using static entries present in DT.
  1586. *
  1587. * Locking: The internal opp_table and opp structures are RCU protected.
  1588. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1589. * to keep the integrity of the internal data structures. Callers should ensure
  1590. * that this function is *NOT* called under RCU protection or in contexts where
  1591. * mutex cannot be locked.
  1592. */
  1593. void dev_pm_opp_of_remove_table(struct device *dev)
  1594. {
  1595. struct opp_table *opp_table;
  1596. struct dev_pm_opp *opp, *tmp;
  1597. /* Hold our table modification lock here */
  1598. mutex_lock(&opp_table_lock);
  1599. /* Check for existing table for 'dev' */
  1600. opp_table = _find_opp_table(dev);
  1601. if (IS_ERR(opp_table)) {
  1602. int error = PTR_ERR(opp_table);
  1603. if (error != -ENODEV)
  1604. WARN(1, "%s: opp_table: %d\n",
  1605. IS_ERR_OR_NULL(dev) ?
  1606. "Invalid device" : dev_name(dev),
  1607. error);
  1608. goto unlock;
  1609. }
  1610. /* Find if opp_table manages a single device */
  1611. if (list_is_singular(&opp_table->dev_list)) {
  1612. /* Free static OPPs */
  1613. list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
  1614. if (!opp->dynamic)
  1615. _opp_remove(opp_table, opp, true);
  1616. }
  1617. } else {
  1618. _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
  1619. }
  1620. unlock:
  1621. mutex_unlock(&opp_table_lock);
  1622. }
  1623. EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
  1624. /* Returns opp descriptor node for a device, caller must do of_node_put() */
  1625. struct device_node *_of_get_opp_desc_node(struct device *dev)
  1626. {
  1627. /*
  1628. * TODO: Support for multiple OPP tables.
  1629. *
  1630. * There should be only ONE phandle present in "operating-points-v2"
  1631. * property.
  1632. */
  1633. return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
  1634. }
  1635. /* Initializes OPP tables based on new bindings */
  1636. static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
  1637. {
  1638. struct device_node *np;
  1639. struct opp_table *opp_table;
  1640. int ret = 0, count = 0;
  1641. mutex_lock(&opp_table_lock);
  1642. opp_table = _managed_opp(opp_np);
  1643. if (opp_table) {
  1644. /* OPPs are already managed */
  1645. if (!_add_opp_dev(dev, opp_table))
  1646. ret = -ENOMEM;
  1647. mutex_unlock(&opp_table_lock);
  1648. return ret;
  1649. }
  1650. mutex_unlock(&opp_table_lock);
  1651. /* We have opp-table node now, iterate over it and add OPPs */
  1652. for_each_available_child_of_node(opp_np, np) {
  1653. count++;
  1654. ret = _opp_add_static_v2(dev, np);
  1655. if (ret) {
  1656. dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
  1657. ret);
  1658. goto free_table;
  1659. }
  1660. }
  1661. /* There should be one of more OPP defined */
  1662. if (WARN_ON(!count))
  1663. return -ENOENT;
  1664. mutex_lock(&opp_table_lock);
  1665. opp_table = _find_opp_table(dev);
  1666. if (WARN_ON(IS_ERR(opp_table))) {
  1667. ret = PTR_ERR(opp_table);
  1668. mutex_unlock(&opp_table_lock);
  1669. goto free_table;
  1670. }
  1671. opp_table->np = opp_np;
  1672. opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
  1673. mutex_unlock(&opp_table_lock);
  1674. return 0;
  1675. free_table:
  1676. dev_pm_opp_of_remove_table(dev);
  1677. return ret;
  1678. }
  1679. /* Initializes OPP tables based on old-deprecated bindings */
  1680. static int _of_add_opp_table_v1(struct device *dev)
  1681. {
  1682. const struct property *prop;
  1683. const __be32 *val;
  1684. int nr;
  1685. prop = of_find_property(dev->of_node, "operating-points", NULL);
  1686. if (!prop)
  1687. return -ENODEV;
  1688. if (!prop->value)
  1689. return -ENODATA;
  1690. /*
  1691. * Each OPP is a set of tuples consisting of frequency and
  1692. * voltage like <freq-kHz vol-uV>.
  1693. */
  1694. nr = prop->length / sizeof(u32);
  1695. if (nr % 2) {
  1696. dev_err(dev, "%s: Invalid OPP table\n", __func__);
  1697. return -EINVAL;
  1698. }
  1699. val = prop->value;
  1700. while (nr) {
  1701. unsigned long freq = be32_to_cpup(val++) * 1000;
  1702. unsigned long volt = be32_to_cpup(val++);
  1703. if (_opp_add_v1(dev, freq, volt, false))
  1704. dev_warn(dev, "%s: Failed to add OPP %ld\n",
  1705. __func__, freq);
  1706. nr -= 2;
  1707. }
  1708. return 0;
  1709. }
  1710. /**
  1711. * dev_pm_opp_of_add_table() - Initialize opp table from device tree
  1712. * @dev: device pointer used to lookup OPP table.
  1713. *
  1714. * Register the initial OPP table with the OPP library for given device.
  1715. *
  1716. * Locking: The internal opp_table and opp structures are RCU protected.
  1717. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1718. * to keep the integrity of the internal data structures. Callers should ensure
  1719. * that this function is *NOT* called under RCU protection or in contexts where
  1720. * mutex cannot be locked.
  1721. *
  1722. * Return:
  1723. * 0 On success OR
  1724. * Duplicate OPPs (both freq and volt are same) and opp->available
  1725. * -EEXIST Freq are same and volt are different OR
  1726. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1727. * -ENOMEM Memory allocation failure
  1728. * -ENODEV when 'operating-points' property is not found or is invalid data
  1729. * in device node.
  1730. * -ENODATA when empty 'operating-points' property is found
  1731. * -EINVAL when invalid entries are found in opp-v2 table
  1732. */
  1733. int dev_pm_opp_of_add_table(struct device *dev)
  1734. {
  1735. struct device_node *opp_np;
  1736. int ret;
  1737. /*
  1738. * OPPs have two version of bindings now. The older one is deprecated,
  1739. * try for the new binding first.
  1740. */
  1741. opp_np = _of_get_opp_desc_node(dev);
  1742. if (!opp_np) {
  1743. /*
  1744. * Try old-deprecated bindings for backward compatibility with
  1745. * older dtbs.
  1746. */
  1747. return _of_add_opp_table_v1(dev);
  1748. }
  1749. ret = _of_add_opp_table_v2(dev, opp_np);
  1750. of_node_put(opp_np);
  1751. return ret;
  1752. }
  1753. EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
  1754. #endif